Diffstat (limited to 'lib/kernel')
-rw-r--r--  lib/kernel/Makefile | 2
-rw-r--r--  lib/kernel/doc/src/.gitignore | 1
-rw-r--r--  lib/kernel/doc/src/Makefile | 57
-rw-r--r--  lib/kernel/doc/src/app.xml | 170
-rw-r--r--  lib/kernel/doc/src/application.xml | 464
-rw-r--r--  lib/kernel/doc/src/auth.xml | 58
-rw-r--r--  lib/kernel/doc/src/book.xml | 5
-rw-r--r--  lib/kernel/doc/src/code.xml | 810
-rw-r--r--  lib/kernel/doc/src/config.xml | 100
-rw-r--r--  lib/kernel/doc/src/disk_log.xml | 1000
-rw-r--r--  lib/kernel/doc/src/erl_boot_server.xml | 69
-rw-r--r--  lib/kernel/doc/src/erl_ddll.xml | 1344
-rw-r--r--  lib/kernel/doc/src/erl_epmd.xml | 104
-rw-r--r--  lib/kernel/doc/src/erl_prim_loader_stub.xml | 4
-rw-r--r--  lib/kernel/doc/src/erlang_stub.xml | 4
-rw-r--r--  lib/kernel/doc/src/error_handler.xml | 80
-rw-r--r--  lib/kernel/doc/src/error_logger.xml | 569
-rw-r--r--  lib/kernel/doc/src/fascicules.xml | 15
-rw-r--r--  lib/kernel/doc/src/file.xml | 1607
-rw-r--r--  lib/kernel/doc/src/gen_sctp.xml | 1535
-rw-r--r--  lib/kernel/doc/src/gen_tcp.xml | 549
-rw-r--r--  lib/kernel/doc/src/gen_udp.xml | 268
-rw-r--r--  lib/kernel/doc/src/global.xml | 296
-rw-r--r--  lib/kernel/doc/src/global_group.xml | 175
-rw-r--r--  lib/kernel/doc/src/heart.xml | 175
-rw-r--r--  lib/kernel/doc/src/inet.xml | 2078
-rw-r--r--  lib/kernel/doc/src/inet_res.xml | 302
-rw-r--r--  lib/kernel/doc/src/init_stub.xml | 16
-rw-r--r--  lib/kernel/doc/src/introduction_chapter.xml | 63
-rw-r--r--  lib/kernel/doc/src/kernel_app.xml | 579
-rw-r--r--  lib/kernel/doc/src/logger.xml | 1268
-rw-r--r--  lib/kernel/doc/src/logger_arch.dia | bin 0 -> 2527 bytes
-rw-r--r--  lib/kernel/doc/src/logger_arch.png | bin 0 -> 117205 bytes
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml | 1345
-rw-r--r--  lib/kernel/doc/src/logger_disk_log_h.xml | 168
-rw-r--r--  lib/kernel/doc/src/logger_filters.xml | 254
-rw-r--r--  lib/kernel/doc/src/logger_formatter.xml | 354
-rw-r--r--  lib/kernel/doc/src/logger_std_h.xml | 141
-rw-r--r--  lib/kernel/doc/src/net_adm.xml | 87
-rw-r--r--  lib/kernel/doc/src/net_kernel.xml | 337
-rw-r--r--  lib/kernel/doc/src/notes.xml | 1107
-rw-r--r--  lib/kernel/doc/src/notes_history.xml | 2
-rw-r--r--  lib/kernel/doc/src/os.xml | 414
-rw-r--r--  lib/kernel/doc/src/part.xml (renamed from lib/kernel/doc/src/part_notes.xml) | 19
-rw-r--r--  lib/kernel/doc/src/part_notes_history.xml | 40
-rw-r--r--  lib/kernel/doc/src/pg2.xml | 123
-rw-r--r--  lib/kernel/doc/src/ref_man.xml | 18
-rw-r--r--  lib/kernel/doc/src/ref_man.xml.src | 68
-rw-r--r--  lib/kernel/doc/src/rpc.xml | 455
-rw-r--r--  lib/kernel/doc/src/seq_trace.xml | 243
-rw-r--r--  lib/kernel/doc/src/specs.xml | 6
-rw-r--r--  lib/kernel/doc/src/user.xml | 8
-rw-r--r--  lib/kernel/doc/src/user_guide.gif | bin 1581 -> 0 bytes
-rw-r--r--  lib/kernel/doc/src/wrap_log_reader.xml | 154
-rw-r--r--  lib/kernel/doc/src/zlib_stub.xml | 16
-rw-r--r--  lib/kernel/examples/Makefile | 2
-rw-r--r--  lib/kernel/examples/gen_tcp_dist/Makefile | 20
-rw-r--r--  lib/kernel/examples/gen_tcp_dist/ebin/.gitignore | 0
-rw-r--r--  lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl | 781
-rw-r--r--  lib/kernel/include/dist.hrl | 8
-rw-r--r--  lib/kernel/include/dist_util.hrl | 20
-rw-r--r--  lib/kernel/include/file.hrl | 31
-rw-r--r--  lib/kernel/include/inet.hrl | 4
-rw-r--r--  lib/kernel/include/inet_sctp.hrl | 2
-rw-r--r--  lib/kernel/include/logger.hrl | 53
-rw-r--r--  lib/kernel/include/net_address.hrl | 2
-rw-r--r--  lib/kernel/src/Makefile | 58
-rw-r--r--  lib/kernel/src/application.erl | 2
-rw-r--r--  lib/kernel/src/application_controller.erl | 83
-rw-r--r--  lib/kernel/src/application_master.erl | 6
-rw-r--r--  lib/kernel/src/application_master.hrl | 2
-rw-r--r--  lib/kernel/src/application_starter.erl | 2
-rw-r--r--  lib/kernel/src/auth.erl | 8
-rw-r--r--  lib/kernel/src/code.erl | 609
-rw-r--r--  lib/kernel/src/code_server.erl | 1171
-rw-r--r--  lib/kernel/src/disk_log.erl | 467
-rw-r--r--  lib/kernel/src/disk_log.hrl | 14
-rw-r--r--  lib/kernel/src/disk_log_1.erl | 64
-rw-r--r--  lib/kernel/src/disk_log_server.erl | 2
-rw-r--r--  lib/kernel/src/disk_log_sup.erl | 2
-rw-r--r--  lib/kernel/src/dist_ac.erl | 12
-rw-r--r--  lib/kernel/src/dist_util.erl | 568
-rw-r--r--  lib/kernel/src/erl_boot_server.erl | 41
-rw-r--r--  lib/kernel/src/erl_ddll.erl | 2
-rw-r--r--  lib/kernel/src/erl_distribution.erl | 92
-rw-r--r--  lib/kernel/src/erl_epmd.erl | 80
-rw-r--r--  lib/kernel/src/erl_epmd.hrl | 2
-rw-r--r--  lib/kernel/src/erl_reply.erl | 4
-rw-r--r--  lib/kernel/src/erl_signal_handler.erl | 66
-rw-r--r--  lib/kernel/src/error_handler.erl | 6
-rw-r--r--  lib/kernel/src/error_logger.erl | 544
-rw-r--r--  lib/kernel/src/erts_debug.erl | 197
-rw-r--r--  lib/kernel/src/file.erl | 150
-rw-r--r--  lib/kernel/src/file_int.hrl | 33
-rw-r--r--  lib/kernel/src/file_io_server.erl | 73
-rw-r--r--  lib/kernel/src/file_server.erl | 149
-rw-r--r--  lib/kernel/src/gen_sctp.erl | 57
-rw-r--r--  lib/kernel/src/gen_tcp.erl | 74
-rw-r--r--  lib/kernel/src/gen_udp.erl | 71
-rw-r--r--  lib/kernel/src/global.erl | 75
-rw-r--r--  lib/kernel/src/global_group.erl | 10
-rw-r--r--  lib/kernel/src/global_search.erl | 2
-rw-r--r--  lib/kernel/src/group.erl | 304
-rw-r--r--  lib/kernel/src/group_history.erl | 341
-rw-r--r--  lib/kernel/src/heart.erl | 15
-rw-r--r--  lib/kernel/src/hipe_ext_format.hrl | 12
-rw-r--r--  lib/kernel/src/hipe_unified_loader.erl | 395
-rw-r--r--  lib/kernel/src/inet.erl | 397
-rw-r--r--  lib/kernel/src/inet6_sctp.erl | 28
-rw-r--r--  lib/kernel/src/inet6_tcp.erl | 94
-rw-r--r--  lib/kernel/src/inet6_tcp_dist.erl | 9
-rw-r--r--  lib/kernel/src/inet6_udp.erl | 44
-rw-r--r--  lib/kernel/src/inet_boot.hrl | 2
-rw-r--r--  lib/kernel/src/inet_config.erl | 7
-rw-r--r--  lib/kernel/src/inet_config.hrl | 2
-rw-r--r--  lib/kernel/src/inet_db.erl | 8
-rw-r--r--  lib/kernel/src/inet_dns.erl | 6
-rw-r--r--  lib/kernel/src/inet_dns.hrl | 2
-rw-r--r--  lib/kernel/src/inet_dns_record_adts.pl | 2
-rw-r--r--  lib/kernel/src/inet_gethost_native.erl | 2
-rw-r--r--  lib/kernel/src/inet_hosts.erl | 5
-rw-r--r--  lib/kernel/src/inet_int.hrl | 23
-rw-r--r--  lib/kernel/src/inet_parse.erl | 108
-rw-r--r--  lib/kernel/src/inet_res.erl | 40
-rw-r--r--  lib/kernel/src/inet_res.hrl | 2
-rw-r--r--  lib/kernel/src/inet_sctp.erl | 21
-rw-r--r--  lib/kernel/src/inet_tcp.erl | 92
-rw-r--r--  lib/kernel/src/inet_tcp_dist.erl | 175
-rw-r--r--  lib/kernel/src/inet_udp.erl | 60
-rw-r--r--  lib/kernel/src/kernel.app.src | 36
-rw-r--r--  lib/kernel/src/kernel.appup.src | 14
-rw-r--r--  lib/kernel/src/kernel.erl | 292
-rw-r--r--  lib/kernel/src/kernel_config.erl | 9
-rw-r--r--  lib/kernel/src/kernel_refc.erl | 139
-rw-r--r--  lib/kernel/src/local_tcp.erl | 178
-rw-r--r--  lib/kernel/src/local_udp.erl | 106
-rw-r--r--  lib/kernel/src/logger.erl | 948
-rw-r--r--  lib/kernel/src/logger_backend.erl | 133
-rw-r--r--  lib/kernel/src/logger_config.erl | 150
-rw-r--r--  lib/kernel/src/logger_disk_log_h.erl | 740
-rw-r--r--  lib/kernel/src/logger_filters.erl | 127
-rw-r--r--  lib/kernel/src/logger_formatter.erl | 514
-rw-r--r--  lib/kernel/src/logger_h_common.erl | 339
-rw-r--r--  lib/kernel/src/logger_h_common.hrl | 263
-rw-r--r--  lib/kernel/src/logger_handler_watcher.erl | 113
-rw-r--r--  lib/kernel/src/logger_internal.hrl | 101
-rw-r--r--  lib/kernel/src/logger_server.erl | 595
-rw-r--r--  lib/kernel/src/logger_simple_h.erl | 212
-rw-r--r--  lib/kernel/src/logger_std_h.erl | 843
-rw-r--r--  lib/kernel/src/logger_sup.erl | 57
-rw-r--r--  lib/kernel/src/net.erl | 2
-rw-r--r--  lib/kernel/src/net_adm.erl | 5
-rw-r--r--  lib/kernel/src/net_kernel.erl | 654
-rw-r--r--  lib/kernel/src/os.erl | 412
-rw-r--r--  lib/kernel/src/pg2.erl | 4
-rw-r--r--  lib/kernel/src/ram_file.erl | 2
-rw-r--r--  lib/kernel/src/raw_file_io.erl | 75
-rw-r--r--  lib/kernel/src/raw_file_io_compressed.erl | 134
-rw-r--r--  lib/kernel/src/raw_file_io_deflate.erl | 159
-rw-r--r--  lib/kernel/src/raw_file_io_delayed.erl | 320
-rw-r--r--  lib/kernel/src/raw_file_io_inflate.erl | 261
-rw-r--r--  lib/kernel/src/raw_file_io_list.erl | 128
-rw-r--r--  lib/kernel/src/raw_file_io_raw.erl | 25
-rw-r--r--  lib/kernel/src/rpc.erl | 106
-rw-r--r--  lib/kernel/src/seq_trace.erl | 24
-rw-r--r--  lib/kernel/src/standard_error.erl | 2
-rw-r--r--  lib/kernel/src/user.erl | 6
-rw-r--r--  lib/kernel/src/user_drv.erl | 33
-rw-r--r--  lib/kernel/src/user_sup.erl | 2
-rw-r--r--  lib/kernel/src/wrap_log_reader.erl | 2
-rw-r--r--  lib/kernel/test/Makefile | 24
-rw-r--r--  lib/kernel/test/appinc1.erl | 2
-rw-r--r--  lib/kernel/test/appinc1x.erl | 2
-rw-r--r--  lib/kernel/test/appinc2.erl | 2
-rw-r--r--  lib/kernel/test/appinc2A.erl | 2
-rw-r--r--  lib/kernel/test/appinc2B.erl | 2
-rw-r--r--  lib/kernel/test/appinc2top.erl | 2
-rw-r--r--  lib/kernel/test/application_SUITE.erl | 602
-rw-r--r--  lib/kernel/test/application_SUITE_data/app_start_error.erl | 2
-rw-r--r--  lib/kernel/test/application_SUITE_data/group_leader.erl | 2
-rw-r--r--  lib/kernel/test/application_SUITE_data/group_leader_sup.erl | 2
-rw-r--r--  lib/kernel/test/application_SUITE_data/trans_abnormal_sup.erl | 2
-rw-r--r--  lib/kernel/test/application_SUITE_data/trans_normal_sup.erl | 2
-rw-r--r--  lib/kernel/test/application_SUITE_data/transient.erl | 2
-rw-r--r--  lib/kernel/test/bif_SUITE.erl | 811
-rw-r--r--  lib/kernel/test/ch.erl | 2
-rw-r--r--  lib/kernel/test/ch_sup.erl | 2
-rw-r--r--  lib/kernel/test/cleanup.erl | 25
-rw-r--r--  lib/kernel/test/code_SUITE.erl | 1159
-rw-r--r--  lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict.erl | 2
-rw-r--r--  lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict_app.erl | 2
-rw-r--r--  lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict_sup.erl | 2
-rw-r--r--  lib/kernel/test/code_SUITE_data/mult_lib_roots/first_root/my_dummy_app-b/ebin/lists.erl | 2
-rw-r--r--  lib/kernel/test/code_SUITE_data/mult_lib_roots/first_root/my_dummy_app-c/ebin/code_SUITE_mult_root_module.erl | 2
-rw-r--r--  lib/kernel/test/code_SUITE_data/upgrade_client.erl | 94
-rw-r--r--  lib/kernel/test/code_SUITE_data/upgradee.erl | 12
-rw-r--r--  lib/kernel/test/code_a_test.erl | 2
-rw-r--r--  lib/kernel/test/code_b_test.erl | 2
-rw-r--r--  lib/kernel/test/disk_log_SUITE.erl | 6065
-rw-r--r--  lib/kernel/test/disk_log_SUITE_data/wrap_log_test.erl | 2
-rw-r--r--  lib/kernel/test/erl_boot_server_SUITE.erl | 320
-rw-r--r--  lib/kernel/test/erl_distribution_SUITE.erl | 1591
-rw-r--r--  lib/kernel/test/erl_distribution_wb_SUITE.erl | 477
-rw-r--r--  lib/kernel/test/erl_prim_loader_SUITE.erl | 547
-rw-r--r--  lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict.erl | 2
-rw-r--r--  lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict_app.erl | 2
-rw-r--r--  lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict_sup.erl | 2
-rw-r--r--  lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy.erl | 2
-rw-r--r--  lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy_app.erl | 2
-rw-r--r--  lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy_sup.erl | 2
-rw-r--r--  lib/kernel/test/error_handler_SUITE.erl | 6
-rw-r--r--  lib/kernel/test/error_logger_SUITE.erl | 233
-rw-r--r--  lib/kernel/test/error_logger_warn_SUITE.erl | 88
-rw-r--r--  lib/kernel/test/file_SUITE.erl | 4207
-rw-r--r--  lib/kernel/test/file_SUITE_data/realmen.html | 4
-rw-r--r--  lib/kernel/test/file_name_SUITE.erl | 776
-rw-r--r--  lib/kernel/test/gen_sctp_SUITE.erl | 1886
-rw-r--r--  lib/kernel/test/gen_tcp_api_SUITE.erl | 617
-rw-r--r--  lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c | 4
-rw-r--r--  lib/kernel/test/gen_tcp_echo_SUITE.erl | 489
-rw-r--r--  lib/kernel/test/gen_tcp_misc_SUITE.erl | 1171
-rw-r--r--  lib/kernel/test/gen_udp_SUITE.erl | 866
-rw-r--r--  lib/kernel/test/global_SUITE.erl | 3345
-rw-r--r--  lib/kernel/test/global_SUITE_data/global_trace.erl | 2
-rw-r--r--  lib/kernel/test/global_group_SUITE.erl | 1467
-rw-r--r--  lib/kernel/test/heart_SUITE.erl | 200
-rw-r--r--  lib/kernel/test/ignore_cores.erl | 16
-rw-r--r--  lib/kernel/test/inet_SUITE.erl | 1014
-rw-r--r--  lib/kernel/test/inet_res_SUITE.erl | 67
-rwxr-xr-x  lib/kernel/test/inet_res_SUITE_data/run-named | 2
-rw-r--r--  lib/kernel/test/inet_sockopt_SUITE.erl | 813
-rw-r--r--  lib/kernel/test/init_SUITE.erl | 657
-rw-r--r--  lib/kernel/test/interactive_shell_SUITE.erl | 464
-rw-r--r--  lib/kernel/test/kernel_SUITE.erl | 97
-rw-r--r--  lib/kernel/test/kernel_bench.spec | 2
-rw-r--r--  lib/kernel/test/kernel_config_SUITE.erl | 48
-rw-r--r--  lib/kernel/test/logger.cover | 14
-rw-r--r--  lib/kernel/test/logger.spec | 11
-rw-r--r--  lib/kernel/test/logger_SUITE.erl | 1330
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl | 1679
-rw-r--r--  lib/kernel/test/logger_env_var_SUITE.erl | 683
-rw-r--r--  lib/kernel/test/logger_filters_SUITE.erl | 227
-rw-r--r--  lib/kernel/test/logger_formatter_SUITE.erl | 886
-rw-r--r--  lib/kernel/test/logger_legacy_SUITE.erl | 288
-rw-r--r--  lib/kernel/test/logger_simple_h_SUITE.erl | 209
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl | 1718
-rw-r--r--  lib/kernel/test/logger_test_lib.erl | 82
-rw-r--r--  lib/kernel/test/loose_node.erl | 13
-rw-r--r--  lib/kernel/test/multi_load_SUITE.erl | 419
-rw-r--r--  lib/kernel/test/myApp.erl | 2
-rw-r--r--  lib/kernel/test/os_SUITE.erl | 390
-rw-r--r--  lib/kernel/test/os_SUITE_data/Makefile.src | 8
-rw-r--r--  lib/kernel/test/os_SUITE_data/my_fds.c | 9
-rw-r--r--  lib/kernel/test/pdict_SUITE.erl | 189
-rw-r--r--  lib/kernel/test/pg2_SUITE.erl | 243
-rw-r--r--  lib/kernel/test/prim_file_SUITE.erl | 2393
-rw-r--r--  lib/kernel/test/ram_file_SUITE.erl | 760
-rw-r--r--  lib/kernel/test/rpc_SUITE.erl | 589
-rw-r--r--  lib/kernel/test/sendfile_SUITE.erl | 142
-rw-r--r--  lib/kernel/test/seq_trace_SUITE.erl | 578
-rw-r--r--  lib/kernel/test/standard_error_SUITE.erl | 2
-rw-r--r--  lib/kernel/test/topApp.erl | 2
-rw-r--r--  lib/kernel/test/topApp2.erl | 2
-rw-r--r--  lib/kernel/test/topApp3.erl | 2
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE.erl | 434
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl | 26
-rw-r--r--  lib/kernel/test/zlib_SUITE.erl | 1340
-rw-r--r--  lib/kernel/test/zzz_SUITE.erl | 37
-rw-r--r--  lib/kernel/vsn.mk | 2
269 files changed, 52351 insertions, 27990 deletions
diff --git a/lib/kernel/Makefile b/lib/kernel/Makefile
index 74f942b027..b956f5eaf5 100644
--- a/lib/kernel/Makefile
+++ b/lib/kernel/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2009. All Rights Reserved.
+# Copyright Ericsson AB 1996-2016. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/lib/kernel/doc/src/.gitignore b/lib/kernel/doc/src/.gitignore
new file mode 100644
index 0000000000..c2813ac866
--- /dev/null
+++ b/lib/kernel/doc/src/.gitignore
@@ -0,0 +1 @@
+*.eps
\ No newline at end of file
diff --git a/lib/kernel/doc/src/Makefile b/lib/kernel/doc/src/Makefile
index 3c6414a620..f8867ccf25 100644
--- a/lib/kernel/doc/src/Makefile
+++ b/lib/kernel/doc/src/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2012. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -42,6 +42,7 @@ XML_REF3_FILES = application.xml \
disk_log.xml \
erl_boot_server.xml \
erl_ddll.xml \
+ erl_epmd.xml \
erl_prim_loader_stub.xml \
erlang_stub.xml \
error_handler.xml \
@@ -56,6 +57,11 @@ XML_REF3_FILES = application.xml \
inet.xml \
inet_res.xml \
init_stub.xml \
+ logger.xml \
+ logger_std_h.xml \
+ logger_disk_log_h.xml \
+ logger_filters.xml \
+ logger_formatter.xml \
net_adm.xml \
net_kernel.xml \
os.xml \
@@ -70,11 +76,18 @@ XML_REF4_FILES = app.xml config.xml
XML_REF6_FILES = kernel_app.xml
-XML_PART_FILES = part_notes.xml part_notes_history.xml
-XML_CHAPTER_FILES = notes.xml notes_history.xml
+XML_PART_FILES = part.xml
+XML_CHAPTER_FILES = \
+ notes.xml \
+ introduction_chapter.xml \
+ logger_chapter.xml
BOOK_FILES = book.xml
+# The .png file is generated from a .dia file with target 'update_png'
+IMAGE_FILES = \
+ logger_arch.png
+
XML_FILES = \
$(BOOK_FILES) $(XML_CHAPTER_FILES) \
$(XML_PART_FILES) $(XML_REF3_FILES) $(XML_REF4_FILES)\
@@ -100,9 +113,20 @@ SPECS_FILES = $(XML_REF3_FILES:%.xml=$(SPECDIR)/specs_%.xml)
TOP_SPECS_FILE = specs.xml
# ----------------------------------------------------
-# FLAGS
+# FIGURES
+# ----------------------------------------------------
+# In order to update the figures you have to have both dia
+# and imagemagick installed.
+# The generated .png file must be committed.
+
+update_png:
+ dia --export=logger_arch.eps logger_arch.dia
+ convert logger_arch.eps -resize 65% logger_arch.png
+
+# ----------------------------------------------------
+# FLAGS
# ----------------------------------------------------
-XML_FLAGS +=
+XML_FLAGS +=
SPECS_ESRC = ../../src
@@ -111,7 +135,7 @@ SPECS_FLAGS = -I../../include
# ----------------------------------------------------
# Targets
# ----------------------------------------------------
-$(HTMLDIR)/%.gif: %.gif
+$(HTMLDIR)/%: %
$(INSTALL_DATA) $< $@
docs: man pdf html
@@ -120,38 +144,40 @@ $(TOP_PDF_FILE): $(XML_FILES)
pdf: $(TOP_PDF_FILE)
-html: gifs $(HTML_REF_MAN_FILE)
+html: images $(HTML_REF_MAN_FILE)
man: $(MAN3_FILES) $(MAN4_FILES) $(MAN6_FILES)
-gifs: $(GIF_FILES:%=$(HTMLDIR)/%)
-debug opt:
+images: $(IMAGE_FILES:%=$(HTMLDIR)/%)
+
+debug opt:
clean clean_docs:
rm -rf $(HTMLDIR)/*
+ rm -rf $(XMLDIR)
rm -f $(MAN3DIR)/*
rm -f $(MAN4DIR)/*
rm -f $(MAN6DIR)/*
rm -f $(TOP_PDF_FILE) $(TOP_PDF_FILE:%.pdf=%.fo)
rm -f $(SPECDIR)/*
- rm -f errs core *~
+ rm -f errs core *~ *.eps
$(SPECDIR)/specs_erl_prim_loader_stub.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
-o$(dir $@) -module erl_prim_loader_stub
$(SPECDIR)/specs_erlang_stub.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
-o$(dir $@) -module erlang_stub
$(SPECDIR)/specs_init_stub.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
-o$(dir $@) -module init_stub
$(SPECDIR)/specs_zlib_stub.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
-o$(dir $@) -module zlib_stub
# ----------------------------------------------------
# Release Target
-# ----------------------------------------------------
+# ----------------------------------------------------
include $(ERL_TOP)/make/otp_release_targets.mk
release_docs_spec: docs
@@ -169,4 +195,3 @@ release_docs_spec: docs
$(INSTALL_DATA) $(MAN6_FILES) "$(RELEASE_PATH)/man/man6"
release_spec:
-
diff --git a/lib/kernel/doc/src/app.xml b/lib/kernel/doc/src/app.xml
index 35feec144e..d2e9390d7e 100644
--- a/lib/kernel/doc/src/app.xml
+++ b/lib/kernel/doc/src/app.xml
@@ -4,7 +4,7 @@
<fileref>
<header>
<copyright>
- <year>1997</year><year>2013</year>
+ <year>1997</year><year>2016</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -42,12 +42,12 @@
</description>
<section>
- <title>FILE SYNTAX</title>
- <p>The application resource file should be called
- <c>Application.app</c> where <c>Application</c> is the name of
- the application. The file should be located in the <c>ebin</c>
- directory for the application.</p>
- <p>It must contain one single Erlang term, which is called an
+ <title>File Syntax</title>
+ <p>The application resource file is to be called
+ <c>Application.app</c>, where <c>Application</c> is the
+ application name. The file is to be located in directory <c>ebin</c>
+ for the application.</p>
+ <p>The file must contain a single Erlang term, which is called an
<em>application specification</em>:</p>
<code type="none">
{application, Application,
@@ -80,19 +80,26 @@ Env [{Par,Val}] []
Start {Module,StartArgs} []
Phases [{Phase,PhaseArgs}] undefined
RTDeps [ApplicationVersion] []
- Module = Name = App = Par = Phase = atom()
- Val = StartArgs = PhaseArgs = term()
- ApplicationVersion = string()</code>
- <p><c>Application</c> is the name of the application.</p>
+
+Module = Name = App = Par = Phase = atom()
+Val = StartArgs = PhaseArgs = term()
+ApplicationVersion = string()</code>
+ <taglist>
+ <tag><c>Application</c></tag>
+ <item>Application name.</item>
+ </taglist>
<p>For the application controller, all keys are optional.
The respective default values are used for any omitted keys.</p>
<p>The functions in <c>systools</c> require more information. If
- they are used, the following keys are mandatory:
- <c>description</c>, <c>vsn</c>, <c>modules</c>, <c>registered</c>
- and <c>applications</c>. The other keys are ignored by
- <c>systools</c>.</p>
- <warning><p>The <c>RTDeps</c> type was introduced in OTP 17.0 and
- might be subject to changes during the OTP 17 release.</p></warning>
+ they are used, the following keys are mandatory:</p>
+ <list type="bulleted">
+ <item><c>description</c></item>
+ <item><c>vsn</c></item>
+ <item><c>modules</c></item>
+ <item><c>registered</c></item>
+ <item><c>applications</c></item>
+ </list>
+ <p>The other keys are ignored by <c>systools</c>.</p>
<taglist>
<tag><c>description</c></tag>
<item>
@@ -104,7 +111,7 @@ RTDeps [ApplicationVersion] []
</item>
<tag><c>vsn</c></tag>
<item>
- <p>The version of the application.</p>
+ <p>Version of the application.</p>
</item>
<tag><c>modules</c></tag>
<item>
@@ -114,15 +121,14 @@ RTDeps [ApplicationVersion] []
</item>
<tag><c>maxP</c></tag>
<item>
- <p><em>Deprecated - will be ignored</em> <br></br>
-
- The maximum number of processes allowed in the application.</p>
+ <p><em>Deprecated - is ignored</em></p>
+ <p>Maximum number of processes allowed in the application.</p>
</item>
<tag><c>maxT</c></tag>
<item>
- <p>The maximum time in milliseconds that the application is
- allowed to run. After the specified time the application will
- automatically terminate.</p>
+ <p>Maximum time, in milliseconds, that the application is
+ allowed to run. After the specified time, the application
+ terminates automatically.</p>
</item>
<tag><c>registered</c></tag>
<item>
@@ -132,20 +138,20 @@ RTDeps [ApplicationVersion] []
</item>
<tag><c>included_applications</c></tag>
<item>
- <p>All applications which are included by this application.
- When this application is started, all included application
- will automatically be loaded, but not started, by
- the application controller. It is assumed that the topmost
+ <p>All applications included by this application.
+ When this application is started, all included applications
+ are loaded automatically, but not started, by
+ the application controller. It is assumed that the top-most
supervisor of the included application is started by a
supervisor of this application.</p>
</item>
<tag><c>applications</c></tag>
<item>
- <p>All applications which must be started before this
+ <p>All applications that must be started before this
application is allowed to be started. <c>systools</c> uses
this list to generate correct start scripts. Defaults to
- the empty list, but note that all applications have
- dependencies to (at least) <c>kernel</c> and <c>stdlib</c>.</p>
+ the empty list, but notice that all applications have
+ dependencies to (at least) Kernel and STDLIB.</p>
</item>
<tag><c>env</c></tag>
<item>
@@ -153,78 +159,84 @@ RTDeps [ApplicationVersion] []
of a configuration parameter is retrieved by calling
<c>application:get_env/1,2</c>. The values in the application
resource file can be overridden by values in a configuration
- file (see <c>config(4)</c>) or by command line flags (see
- <c>erl(1)</c>).</p>
+ file (see <seealso marker="config"><c>config(4)</c></seealso>)
+ or by command-line flags (see
+ <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>).</p>
</item>
<tag><c>mod</c></tag>
<item>
- <p>Specifies the application callback module and a start
- argument, see <c>application(3)</c>.</p>
- <p>The <c>mod</c> key is necessary for an application
- implemented as a supervision tree, or the application
- controller will not know how to start it. The <c>mod</c> key
+ <p>Specifies the application callback module and a start argument, see
+ <seealso marker="application"><c>application(3)</c></seealso>.</p>
+ <p>Key <c>mod</c> is necessary for an application
+ implemented as a supervision tree, otherwise the application
+ controller does not know how to start it. <c>mod</c>
can be omitted for applications without processes, typically
- code libraries such as the application STDLIB.</p>
+ code libraries, for example, STDLIB.</p>
</item>
<tag><c>start_phases</c></tag>
<item>
<p>A list of start phases and corresponding start arguments for
the application. If this key is present, the application
- master will - in addition to the usual call to
- <c>Module:start/2</c> - also call
+ master, in addition to the usual call to
+ <c>Module:start/2</c>, also calls
<c>Module:start_phase(Phase,Type,PhaseArgs)</c> for each
- start phase defined by the <c>start_phases</c> key, and only
- after this extended start procedure will
- <c>application:start(Application)</c> return.</p>
- <p>Start phases may be used to synchronize startup of an
+ start phase defined by key <c>start_phases</c>. Only
+ after this extended start procedure,
+ <c>application:start(Application)</c> returns.</p>
+ <p>Start phases can be used to synchronize startup of an
application and its included applications. In this case,
- the <c>mod</c> key must be specified as:</p>
+ key <c>mod</c> must be specified as follows:</p>
<code type="none">
{mod, {application_starter,[Module,StartArgs]}}</code>
- <p>The application master will then call <c>Module:start/2</c>
+ <p>The application master then calls <c>Module:start/2</c>
for the primary application, followed by calls to
<c>Module:start_phase/3</c> for each start phase (as defined
- for the primary application) both for the primary application
- and for each of its included application, for which the start
+ for the primary application), both for the primary application
+ and for each of its included applications, for which the start
phase is defined.</p>
<p>This implies that for an included application, the set of
start phases must be a subset of the set of phases defined
- for the primary application. Refer to <em>OTP Design Principles</em> for more information.</p>
+ for the primary application. For more information, see
+ <seealso marker="doc/design_principles:applications">OTP Design Principles</seealso>.
+ </p>
</item>
- <tag><marker id="runtime_dependencies"></marker><c>runtime_dependencies</c></tag>
- <item><p>A list of application versions that the application
- depends on. An example of such an application version is
- <c>"kernel-3.0"</c>. Application versions specified as runtime
- dependencies are minimum requirements. That is, a larger
- application version than the one specified in the
- dependency satisfies the requirement. For information on
- how to compare application versions see
- <seealso marker="doc/system_principles:versions">the
- documentation of versions in the system principles
- guide</seealso>. Note that the application version
- specifies a source code version. An additional indirect
- requirement is that installed binary application of
- the specified version has been built so that it is
- compatible with the rest of the system.</p>
- <p>Some dependencies might only be required in specific runtime
- scenarios. In the case such optional dependencies exist, these are
- specified and documented in the corresponding "App" documentation
- of the specific application.</p>
- <warning><p>The <c>runtime_dependencies</c> key was introduced in
- OTP 17.0. The type of its value might be subject to changes during
- the OTP 17 release.</p></warning>
- <warning><p>All runtime dependencies specified in OTP applications
- during the OTP 17 release may not be completely correct. This
- is actively being worked on. Declared runtime dependencies in OTP
- applications are expected to be correct in OTP 18.</p></warning>
+ <tag>
+ <marker id="runtime_dependencies"></marker>
+ <c>runtime_dependencies</c></tag>
+ <item>
+ <p>A list of application versions that the application
+ depends on. An example of such an application version is
+ <c>"kernel-3.0"</c>. Application versions specified as runtime
+ dependencies are minimum requirements. That is, a larger
+ application version than the one specified in the
+ dependency satisfies the requirement. For information about
+ how to compare application versions, see section
+ <seealso marker="doc/system_principles:versions">Versions</seealso>
+ in the System Principles User's Guide.</p>
+ <p>Notice that the application version
+ specifies a source code version. One more, indirect,
+ requirement is that the installed binary application of
+ the specified version is built so that it is
+ compatible with the rest of the system.</p>
+ <p>Some dependencies can only be required in specific runtime
+ scenarios. When such optional dependencies exist, these are
+ specified and documented in the corresponding "App" documentation
+ of the specific application.</p>
+ <warning><p>The <c>runtime_dependencies</c> key was introduced in
+ OTP 17.0. The type of its value might be subject to changes during
+ the OTP 17 release.</p></warning>
+ <warning><p>All runtime dependencies specified in OTP applications
+ during the OTP 17 release may not be completely correct. This
+ is actively being worked on. Declared runtime dependencies in OTP
+ applications are expected to be correct in OTP 18.</p></warning>
</item>
</taglist>
</section>
<section>
- <title>SEE ALSO</title>
- <p><seealso marker="application">application(3)</seealso>,
- systools(3)</p>
+ <title>See Also</title>
+ <p><seealso marker="application"><c>application(3)</c></seealso>,
+ <seealso marker="sasl:systools"><c>systools(3)</c></seealso></p>
</section>
</fileref>
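As context for the app.xml documentation above (not part of the patch), a minimal application resource file using the keys it describes might look as follows; the application name my_app, its version, module names, the poll_interval parameter, and the dependency versions are all invented for illustration:

%% my_app.app -- hypothetical example, not part of this patch.
{application, my_app,
 [{description, "Example application"},
  {vsn, "1.0.0"},
  {modules, [my_app_app, my_app_sup, my_app_server]},
  {registered, [my_app_sup]},
  {applications, [kernel, stdlib]},
  {env, [{poll_interval, 5000}]},
  %% Callback module started by the application controller.
  {mod, {my_app_app, []}},
  %% Minimum source-code versions of the applications depended on.
  {runtime_dependencies, ["kernel-6.0", "stdlib-3.5"]}]}.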
diff --git a/lib/kernel/doc/src/application.xml b/lib/kernel/doc/src/application.xml
index 4d8e6ce94b..38c7b5acf1 100644
--- a/lib/kernel/doc/src/application.xml
+++ b/lib/kernel/doc/src/application.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2014</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -33,23 +33,25 @@
<description>
<p>In OTP, <em>application</em> denotes a component implementing
some specific functionality, that can be started and stopped as a
- unit, and which can be re-used in other systems as well. This
- module interfaces the <em>application controller</em>, a process
- started at every Erlang runtime system, and contains functions
- for controlling applications (for example starting and stopping
+ unit, and that can be reused in other systems. This
+ module interacts with <em>application controller</em>, a process
+ started at every Erlang runtime system. This module contains functions
+ for controlling applications (for example, starting and stopping
applications), and functions to access information about
- applications (for example configuration parameters).</p>
- <p>An application is defined by an <em>application specification</em>. The specification is normally located in an
- <em>application resource file</em> called <c>Application.app</c>,
- where <c>Application</c> is the name of the application. Refer to
- <seealso marker="app">app(4)</seealso> for more information about
- the application specification.</p>
+ applications (for example, configuration parameters).</p>
+ <p>An application is defined by an <em>application specification</em>.
+ The specification is normally located in an
+ <em>application resource file</em> named <c>Application.app</c>,
+ where <c>Application</c> is the application name. For details
+ about the application specification, see
+ <seealso marker="app"><c>app(4)</c></seealso>.</p>
<p>This module can also be viewed as a behaviour for an application
implemented according to the OTP design principles as a
supervision tree. The definition of how to start and stop
- the tree should be located in an <em>application callback module</em> exporting a pre-defined set of functions.</p>
- <p>Refer to <seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso> for more information about
- applications and behaviours.</p>
+ the tree is to be located in an <em>application callback module</em>,
+ exporting a predefined set of functions.</p>
+ <p>For details about applications and behaviours, see
+ <seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso>.</p>
</description>
<datatypes>
<datatype>
@@ -59,17 +61,43 @@
<name name="restart_type"/>
</datatype>
<datatype>
- <!-- Parameterized opaque types are NYI: -->
<name>tuple_of(T)</name>
- <desc><p><marker id="type-tuple_of"/>
- A tuple where the elements are of type <c>T</c>.</p></desc>
+ <desc><p>A tuple where the elements are of type <c>T</c>.</p></desc>
</datatype>
</datatypes>
<funcs>
<func>
+ <name name="ensure_all_started" arity="1"/>
+ <name name="ensure_all_started" arity="2"/>
+ <fsummary>Load and start an application and its dependencies, recursively.</fsummary>
+ <desc>
+ <p>Equivalent to calling
+ <seealso marker="#start/1"><c>start/1,2</c></seealso>
+ repeatedly on all dependencies that are not yet started for an application.</p>
+ <p>Returns <c>{ok, AppNames}</c> for a successful start or for an already started
+ application (which is, however, omitted from the <c>AppNames</c> list).</p>
+ <p>The function reports <c>{error, {AppName,Reason}}</c> for errors, where
+ <c>Reason</c> is any possible reason returned by
+ <seealso marker="#start/1"><c>start/1,2</c></seealso>
+ when starting a specific dependency.</p>
+ <p>If an error occurs, the applications started by the function are stopped
+ to bring the set of running applications back to its initial state.</p>
+ </desc>
+ </func>
+ <func>
+ <name name="ensure_started" arity="1"/>
+ <name name="ensure_started" arity="2"/>
+ <fsummary>Load and start an application.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#start/1"><c>start/1,2</c></seealso>
+ except it returns <c>ok</c> for already started applications.</p>
+ </desc>
+ </func>
+ <func>
<name name="get_all_env" arity="0"/>
<name name="get_all_env" arity="1"/>
- <fsummary>Get the configuration parameters for an application</fsummary>
+ <fsummary>Get the configuration parameters for an application.</fsummary>
<desc>
<p>Returns the configuration parameters and their values for
<c><anno>Application</anno></c>. If the argument is omitted, it defaults to
@@ -82,7 +110,7 @@
<func>
<name name="get_all_key" arity="0"/>
<name name="get_all_key" arity="1"/>
- <fsummary>Get the application specification keys</fsummary>
+ <fsummary>Get the application specification keys.</fsummary>
<desc>
<p>Returns the application specification keys and their values
for <c><anno>Application</anno></c>. If the argument is omitted, it
@@ -96,7 +124,7 @@
<func>
<name name="get_application" arity="0"/>
<name name="get_application" arity="1"/>
- <fsummary>Get the name of an application containing a certain process or module</fsummary>
+ <fsummary>Get the name of an application containing a certain process or module.</fsummary>
<desc>
<p>Returns the name of the application to which the process
<c><anno>Pid</anno></c> or the module <c><anno>Module</anno></c> belongs. Providing no
@@ -110,222 +138,217 @@
<func>
<name name="get_env" arity="1"/>
<name name="get_env" arity="2"/>
- <fsummary>Get the value of a configuration parameter</fsummary>
+ <fsummary>Get the value of a configuration parameter.</fsummary>
<desc>
- <p>Returns the value of the configuration parameter <c><anno>Par</anno></c>
+ <p>Returns the value of configuration parameter <c><anno>Par</anno></c>
for <c><anno>Application</anno></c>. If the application argument is
omitted, it defaults to the application of the calling
process.</p>
- <p>If the specified application is not loaded, or
- the configuration parameter does not exist, or if the process
- executing the call does not belong to any application,
- the function returns <c>undefined</c>.</p>
+ <p>Returns <c>undefined</c> if any of the following applies:</p>
+ <list type="bulleted">
+ <item>The specified application is not loaded.</item>
+ <item>The configuration parameter does not exist.</item>
+ <item>The process executing the call does not belong to any application.</item>
+ </list>
</desc>
</func>
<func>
<name name="get_env" arity="3"/>
- <fsummary>Get the value of a configuration parameter using a default</fsummary>
+ <fsummary>Get the value of a configuration parameter using a default.</fsummary>
<desc>
- <p>Works like <seealso marker="#get_env/2">get_env/2</seealso> but returns
- <c><anno>Def</anno></c> value when configuration parameter
+ <p>Works like <seealso marker="#get_env/2"><c>get_env/2</c></seealso> but returns
+ value <c><anno>Def</anno></c> when configuration parameter
<c><anno>Par</anno></c> does not exist.</p>
</desc>
</func>
<func>
<name name="get_key" arity="1"/>
<name name="get_key" arity="2"/>
- <fsummary>Get the value of an application specification key</fsummary>
+ <fsummary>Get the value of an application specification key.</fsummary>
<desc>
<p>Returns the value of the application specification key
<c><anno>Key</anno></c> for <c><anno>Application</anno></c>. If the application
argument is omitted, it defaults to the application of
the calling process.</p>
- <p>If the specified application is not loaded, or
- the specification key does not exist, or if the process
- executing the call does not belong to any application,
- the function returns <c>undefined</c>.</p>
+ <p>Returns <c>undefined</c> if any of the following applies:</p>
+ <list type="bulleted">
+ <item>The specified application is not loaded.</item>
+ <item>The specification key does not exist.</item>
+ <item>The process executing the call does not belong to any application.</item>
+ </list>
+
</desc>
</func>
<func>
<name name="load" arity="1"/>
<name name="load" arity="2"/>
- <fsummary>Load an application</fsummary>
+ <fsummary>Load an application.</fsummary>
<type name="application_spec"/>
<type name="application_opt"/>
<desc>
<p>Loads the application specification for an application into
- the application controller. It will also load the application
- specifications for any included applications. Note that
- the function does not load the actual Erlang object code.</p>
- <p>The application can be given by its name <c><anno>Application</anno></c>.
- In this case the application controller will search the code
+ the application controller. It also loads the application
+ specifications for any included applications. Notice that
+ the function does not load the Erlang object code.</p>
+ <p>The application can be specified by its name <c><anno>Application</anno></c>.
+ In this case, the application controller searches the code
path for the application resource file <c><anno>Application</anno>.app</c>
- and load the specification it contains.</p>
- <p>The application specification can also be given directly as a
- tuple <c><anno>AppSpec</anno></c>. This tuple should have the format and
- contents as described in <c>app(4)</c>.</p>
+ and loads the specification it contains.</p>
+ <p>The application specification can also be specified directly as a
+ tuple <c><anno>AppSpec</anno></c>, having the format and
+ contents as described in
+ <seealso marker="app"><c>app(4)</c></seealso>.</p>
<p>If <c><anno>Distributed</anno> == {<anno>Application</anno>,[<anno>Time</anno>,]<anno>Nodes</anno>}</c>,
- the application will be distributed. The argument overrides
+ the application becomes distributed. The argument overrides
the value for the application in the Kernel configuration
parameter <c>distributed</c>. <c><anno>Application</anno></c> must be
- the name of the application (same as in the first argument).
- If a node crashes and <c><anno>Time</anno></c> has been specified, then
- the application controller will wait for <c><anno>Time</anno></c>
+ the application name (same as in the first argument).
+ If a node crashes and <c><anno>Time</anno></c> is specified,
+ the application controller waits for <c><anno>Time</anno></c>
milliseconds before attempting to restart the application on
- another node. If <c><anno>Time</anno></c> is not specified, it will
- default to 0 and the application will be restarted
+ another node. If <c><anno>Time</anno></c> is not specified, it
+ defaults to <c>0</c> and the application is restarted
immediately.</p>
<p><c><anno>Nodes</anno></c> is a list of node names where the application
- may run, in priority from left to right. Node names can be
+ can run, in priority from left to right. Node names can be
grouped using tuples to indicate that they have the same
- priority. Example:</p>
+ priority.</p>
+ <p><em>Example:</em></p>
<code type="none">
Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
- <p>This means that the application should preferably be started
+ <p>This means that the application is preferably to be started
at <c>cp1@cave</c>. If <c>cp1@cave</c> is down,
- the application should be started at either <c>cp2@cave</c>
+ the application is to be started at <c>cp2@cave</c>
or <c>cp3@cave</c>.</p>
<p>If <c>Distributed == default</c>, the value for
the application in the Kernel configuration parameter
- <c>distributed</c> will be used.</p>
+ <c>distributed</c> is used.</p>
</desc>
</func>
<func>
<name name="loaded_applications" arity="0"/>
- <fsummary>Get the currently loaded applications</fsummary>
+ <fsummary>Get the currently loaded applications.</fsummary>
<desc>
- <p>Returns a list with information about the applications which
- have been loaded using <c>load/1,2</c>, also included
- applications. <c><anno>Application</anno></c> is the application name.
- <c><anno>Description</anno></c> and <c><anno>Vsn</anno></c> are the values of its
- <c>description</c> and <c>vsn</c> application specification
+ <p>Returns a list with information about the applications, and included
+ applications, which are loaded using <c>load/1,2</c>.
+ <c><anno>Application</anno></c> is the application name.
+ <c><anno>Description</anno></c> and <c><anno>Vsn</anno></c> are the values
+ of their <c>description</c> and <c>vsn</c> application specification
keys, respectively.</p>
</desc>
</func>
<func>
<name name="permit" arity="2"/>
- <fsummary>Change an application's permission to run on a node.</fsummary>
+ <fsummary>Change the permission for an application to run at a node.</fsummary>
<desc>
<p>Changes the permission for <c><anno>Application</anno></c> to run at
- the current node. The application must have been loaded using
+ the current node. The application must be loaded using
<c>load/1,2</c> for the function to have effect.</p>
<p>If the permission of a loaded, but not started, application
- is set to <c>false</c>, <c>start</c> will return <c>ok</c> but
- the application will not be started until the permission is
+ is set to <c>false</c>, <c>start</c> returns <c>ok</c> but
+ the application is not started until the permission is
set to <c>true</c>.</p>
<p>If the permission of a running application is set to
- <c>false</c>, the application will be stopped. If
- the permission later is set to <c>true</c>, it will be
+ <c>false</c>, the application is stopped. If
+ the permission later is set to <c>true</c>, it is
restarted.</p>
<p>If the application is distributed, setting the permission to
<c>false</c> means that the application will be started at, or
moved to, another node according to how its distribution is
- configured (see <c>load/2</c> above).</p>
+ configured
+ (see <seealso marker="#load/2"><c>load/2</c></seealso>).</p>
<p>The function does not return until the application is
- started, stopped or successfully moved to another node.
- However, in some cases where permission is set to <c>true</c>
- the function may return <c>ok</c> even though the application
- itself has not started. This is true when an application
- cannot start because it has dependencies to other
- applications which have not yet been started. When they have
- been started, <c>Application</c> will be started as well.</p>
+ started, stopped, or successfully moved to another node.
+ However, in some cases where permission is set to <c>true</c>,
+ the function returns <c>ok</c> even though the application
+ is not started. This is true when an application
+ cannot start because of dependencies to other
+ applications that are not yet started. When they are
+ started, <c>Application</c> is started as well.</p>
<p>By default, all applications are loaded with permission
- <c>true</c> on all nodes. The permission is configurable by
+ <c>true</c> on all nodes. The permission can be configured
using the Kernel configuration parameter <c>permissions</c>.</p>
</desc>
</func>
<func>
<name name="set_env" arity="3"/>
<name name="set_env" arity="4"/>
- <fsummary>Set the value of a configuration parameter</fsummary>
+ <fsummary>Set the value of a configuration parameter.</fsummary>
<desc>
- <p>Sets the value of the configuration parameter <c><anno>Par</anno></c> for
+ <p>Sets the value of configuration parameter <c><anno>Par</anno></c> for
<c><anno>Application</anno></c>.</p>
- <p><c>set_env/4</c> uses the standard <c>gen_server</c> timeout
- value (5000 ms). The <c>timeout</c> option can be provided
- if another timeout value is useful, for example, in situations
+ <p><c>set_env/4</c> uses the standard <c>gen_server</c> time-out
+ value (5000 ms). Option <c>timeout</c> can be specified
+ if another time-out value is useful, for example, in situations
where the application controller is heavily loaded.</p>
<p>If <c>set_env/4</c> is called before the application is loaded,
- the application environment values specified in the <c>Application.app</c>
- file will override the ones previously set. This is also true for application
+ the application environment values specified in file <c>Application.app</c>
+ override the ones previously set. This is also true for application
reloads.</p>
- <p>The <c>persistent</c> option can be set to <c>true</c>
- when there is a need to guarantee parameters set with <c>set_env/4</c>
- will not be overridden by the ones defined in the application resource
- file on load. This means persistent values will stick after the application
+ <p>Option <c>persistent</c> can be set to <c>true</c>
+ to guarantee that parameters set with <c>set_env/4</c>
+ are not overridden by those defined in the application resource
+ file on load. This means that persistent values will stick after the application
is loaded and also on application reload.</p>
<warning>
- <p>Use this function only if you know what you are doing,
- that is, on your own applications. It is very application
- and configuration parameter dependent when and how often
- the value is read by the application, and careless use
- of this function may put the application in a
- weird, inconsistent, and malfunctioning state. </p>
+ <p>Use this function only if you know what you are doing,
+ that is, on your own applications. It is very
+ application-dependent and
+ configuration parameter-dependent when and how often
+ the value is read by the application. Careless use
+ of this function can put the application in a
+ weird, inconsistent, and malfunctioning state.</p>
</warning>
</desc>
</func>
<func>
- <name name="ensure_started" arity="1"/>
- <name name="ensure_started" arity="2"/>
- <fsummary>Load and start an application</fsummary>
- <desc>
- <p>Equivalent to <seealso marker="#start/2"><c>application:start/1,2</c></seealso> except
- it returns <c>ok</c> for already started applications.</p>
- </desc>
- </func>
- <func>
- <name name="ensure_all_started" arity="1"/>
- <name name="ensure_all_started" arity="2"/>
- <fsummary>Load and start an application and its dependencies, recursively</fsummary>
- <desc>
- <p>Equivalent to calling <seealso marker="#start/2"><c>application:start/1,2</c></seealso>
- repeatedly on all dependencies that have not yet been started for an application.
- The function returns <c>{ok, AppNames}</c> for a successful start or for an already started
- application (which are however omitted from the <c>AppNames</c> list), and reports
- <c>{error, {AppName,Reason}}</c> for errors, where <c>Reason</c> is any possible reason
- returned by <seealso marker="#start/2"><c>application:start/1,2</c></seealso> when starting a
- specific dependency. In case of an error, the applications that were started by the
- function are stopped to bring the set of running applications back to its initial state.</p>
- </desc>
- </func>
- <func>
<name name="start" arity="1"/>
<name name="start" arity="2"/>
- <fsummary>Load and start an application</fsummary>
- <desc>
+ <fsummary>Load and start an application.</fsummary>
+ <desc>
<p>Starts <c><anno>Application</anno></c>. If it is not loaded,
- the application controller will first load it using
- <c>load/1</c>. It will make sure any included applications
- are loaded, but will not start them. That is assumed to be
+ the application controller first loads it using
+ <c>load/1</c>. It ensures that any included applications
+ are loaded, but does not start them. That is assumed to be
taken care of in the code for <c><anno>Application</anno></c>.</p>
<p>The application controller checks the value of
the application specification key <c>applications</c>, to
- ensure that all applications that should be started before
- this application are running. If not,
+ ensure that all applications needed to be started before
+ this application are running. Otherwise,
<c>{error,{not_started,App}}</c> is returned, where <c>App</c>
is the name of the missing application.</p>
- <p>The application controller then creates an <em>application master</em> for the application. The application master is
- the group leader of all the processes in the application.
+ <p>The application controller then creates an <em>application master</em>
+ for the application. The application master becomes the
+ group leader of all the processes in the application. I/O is
+ forwarded to the previous group leader, though, this is just
+ a way to identify processes that belong to the application.
+ Used for example to find itself from any process, or,
+ reciprocally, to kill them all when it terminates.</p>
+ <p>
The application master starts the application by calling
the application callback function <c>Module:start/2</c> as
defined by the application specification key <c>mod</c>.</p>
- <p>The <c><anno>Type</anno></c> argument specifies the type of
+ <p>Argument <c><anno>Type</anno></c> specifies the type of
the application. If omitted, it defaults to <c>temporary</c>.</p>
<list type="bulleted">
<item>If a permanent application terminates, all other
applications and the entire Erlang node are also terminated.</item>
- <item>If a transient application terminates with <c>Reason == normal</c>, this is reported but no other applications are
- terminated. If a transient application terminates
- abnormally, all other applications and the entire Erlang
- node are also terminated.</item>
+ <item>
+ <list type="bulleted">
+ <item>If a transient application terminates with <c>Reason == normal</c>,
+ this is reported but no other applications are terminated.</item>
+ <item>If a transient application terminates abnormally, all other
+ applications and the entire Erlang node are also terminated.</item>
+ </list>
+ </item>
<item>If a temporary application terminates, this is reported
but no other applications are terminated.</item>
</list>
- <p>Note that it is always possible to stop an application
+ <p>Notice that an application can always be stopped
explicitly by calling <c>stop/1</c>. Regardless of the type of
- the application, no other applications will be affected.</p>
- <p>Note also that the transient type is of little practical use,
- since when a supervision tree terminates, the reason is set to
+ the application, no other applications are affected.</p>
+ <p>Notice also that the transient type is of little practical use,
+ because when a supervision tree terminates, the reason is set to
<c>shutdown</c>, not <c>normal</c>.</p>
</desc>
</func>
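The hunk above rewrites the descriptions of ensure_all_started/1,2, ensure_started/1,2, get_env, set_env, load, and start. A short usage sketch of those calls (not part of the patch; my_app and poll_interval are hypothetical names, and the fragment is assumed to sit inside some module):

%% Hypothetical fragment; my_app and poll_interval are made-up names.
start_and_configure() ->
    %% Start my_app together with all applications it depends on.
    {ok, _Started} = application:ensure_all_started(my_app),
    %% Read a configuration parameter, falling back to a default of 1000.
    Interval = application:get_env(my_app, poll_interval, 1000),
    %% Override it; the persistent option keeps the value across reloads.
    ok = application:set_env(my_app, poll_interval, Interval * 2,
                             [{persistent, true}]),
    ok.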
@@ -334,13 +357,13 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
<fsummary>Get the start type of an ongoing application startup.</fsummary>
<desc>
<p>This function is intended to be called by a process belonging
- to an application, when the application is being started, to
- determine the start type which is either <c><anno>StartType</anno></c> or
+ to an application, when the application is started, to
+ determine the start type, which is <c><anno>StartType</anno></c> or
<c>local</c>.</p>
- <p>See <seealso marker="#start_type"><c>Module:start/2</c></seealso> for a description of
- <c><anno>StartType</anno></c>.</p>
- <p><c>local</c> is returned if only parts of the application is
- being restarted (by a supervisor), or if the function is
+ <p>For a description of <c><anno>StartType</anno></c>, see
+ <seealso marker="#start_type"><c>Module:start/2</c></seealso>.</p>
+ <p><c>local</c> is returned if only parts of the application are
+ restarted (by a supervisor), or if the function is
called outside a startup.</p>
<p>If the process executing the call does not belong to any
application, the function returns <c>undefined</c>.</p>
@@ -348,103 +371,106 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</func>
<func>
<name name="stop" arity="1"/>
- <fsummary>Stop an application</fsummary>
+ <fsummary>Stop an application.</fsummary>
<desc>
<p>Stops <c><anno>Application</anno></c>. The application master calls
<c>Module:prep_stop/1</c>, if such a function is defined, and
- then tells the top supervisor of the application to shutdown
- (see <c>supervisor(3)</c>). This means that the entire
+ then tells the top supervisor of the application to shut down
+ (see <seealso marker="stdlib:supervisor"><c>supervisor(3)</c></seealso>).
+ This means that the entire
supervision tree, including included applications, is
terminated in reversed start order. After the shutdown,
the application master calls <c>Module:stop/1</c>.
<c>Module</c> is the callback module as defined by
the application specification key <c>mod</c>.</p>
- <p>Last, the application master itself terminates. Note that all
- processes with the application master as group leader, i.e.
+ <p>Last, the application master terminates. Notice that all
+ processes with the application master as group leader, that is,
processes spawned from a process belonging to the application,
- thus are terminated as well.</p>
+ are also terminated.</p>
<p>When stopped, the application is still loaded.</p>
- <p>In order to stop a distributed application, <c>stop/1</c>
- has to be called on all nodes where it can execute (that is,
+ <p>To stop a distributed application, <c>stop/1</c>
+ must be called on all nodes where it can execute (that is,
on all nodes where it has been started). The call to
<c>stop/1</c> on the node where the application currently
- executes will stop its execution. The application will not be
- moved between nodes due to <c>stop/1</c> being called on
+ executes stops its execution. The application is not
+ moved between nodes, as <c>stop/1</c> is called on
the node where the application currently executes before
<c>stop/1</c> is called on the other nodes.</p>
</desc>
</func>
<func>
<name name="takeover" arity="2"/>
- <fsummary>Take over a distributed application</fsummary>
+ <fsummary>Take over a distributed application.</fsummary>
<desc>
- <p>Performs a takeover of the distributed application
+ <p>Takes over the distributed application
<c><anno>Application</anno></c>, which executes at another node
<c>Node</c>. At the current node, the application is
restarted by calling
<c>Module:start({takeover,Node},StartArgs)</c>. <c>Module</c>
and <c>StartArgs</c> are retrieved from the loaded application
specification. The application at the other node is not
- stopped until the startup is completed, i.e. when
+ stopped until the startup is completed, that is, when
<c>Module:start/2</c> and any calls to
<c>Module:start_phase/3</c> have returned.</p>
- <p>Thus two instances of the application will run simultaneously
- during the takeover, which makes it possible to transfer data
- from the old to the new instance. If this is not acceptable
- behavior, parts of the old instance may be shut down when
- the new instance is started. Note that the application may
- not be stopped entirely however, at least the top supervisor
+ <p>Thus, two instances of the application run simultaneously
+ during the takeover, so that data can be transferred
+ from the old to the new instance. If this is not an acceptable
+ behavior, parts of the old instance can be shut down when
+ the new instance is started. However, the application cannot
+ be stopped entirely, at least the top supervisor
must remain alive.</p>
- <p>See <c>start/1,2</c> for a description of <c>Type</c>.</p>
+ <p>For a description of <c>Type</c>, see
+ <seealso marker="#start/1"><c>start/1,2</c></seealso>.</p>
</desc>
</func>
<func>
<name name="unload" arity="1"/>
- <fsummary>Unload an application</fsummary>
+ <fsummary>Unload an application.</fsummary>
<desc>
<p>Unloads the application specification for <c><anno>Application</anno></c>
- from the application controller. It will also unload
+ from the application controller. It also unloads
the application specifications for any included applications.
- Note that the function does not purge the actual Erlang
+ Notice that the function does not purge the Erlang
object code.</p>
</desc>
</func>
<func>
<name name="unset_env" arity="2"/>
<name name="unset_env" arity="3"/>
- <fsummary>Unset the value of a configuration parameter</fsummary>
+ <fsummary>Unset the value of a configuration parameter.</fsummary>
<desc>
<p>Removes the configuration parameter <c><anno>Par</anno></c> and its value
for <c><anno>Application</anno></c>.</p>
<p><c>unset_env/2</c> uses the standard <c>gen_server</c>
- timeout value (5000 ms). The <c>timeout</c> option can be
- provided if another timeout value is useful, for example, in
+ time-out value (5000 ms). Option <c>timeout</c> can be
+ specified if another time-out value is useful, for example, in
situations where the application controller is heavily loaded.</p>
<p><c>unset_env/3</c> also allows the persistent option to be passed
- (see <c>set_env/4</c> above).</p>
- <warning>
- <p>Use this function only if you know what you are doing,
- that is, on your own applications. It is very application
- and configuration parameter dependent when and how often
- the value is read by the application, and careless use
- of this function may put the application in a
- weird, inconsistent, and malfunctioning state. </p>
+ (see <seealso marker="#set_env/4"><c>set_env/4</c></seealso>).</p>
+ <warning>
+ <p>Use this function only if you know what you are doing,
+ that is, on your own applications. It is very
+ application-dependent and configuration
+ parameter-dependent when and how often
+ the value is read by the application. Careless use
+ of this function can put the application in a
+ weird, inconsistent, and malfunctioning state.</p>
</warning>
</desc>
</func>
<func>
<name name="which_applications" arity="0"/>
<name name="which_applications" arity="1"/>
- <fsummary>Get the currently running applications</fsummary>
+ <fsummary>Get the currently running applications.</fsummary>
<desc>
- <p>Returns a list with information about the applications which
+ <p>Returns a list with information about the applications that
are currently running. <c><anno>Application</anno></c> is the application
- name. <c><anno>Description</anno></c> and <c><anno>Vsn</anno></c> are the values of its
- <c>description</c> and <c>vsn</c> application specification
+ name. <c><anno>Description</anno></c> and <c><anno>Vsn</anno></c> are the
+ values of their <c>description</c> and <c>vsn</c> application specification
keys, respectively.</p>
<p><c>which_applications/0</c> uses the standard
- <c>gen_server</c> timeout value (5000 ms). A <c><anno>Timeout</anno></c>
- argument can be provided if another timeout value is useful,
+ <c>gen_server</c> time-out value (5000 ms). A <c><anno>Timeout</anno></c>
+ argument can be specified if another time-out value is useful,
for example, in situations where the application controller
is heavily loaded.</p>
</desc>
@@ -452,81 +478,81 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</funcs>
<section>
- <title>CALLBACK MODULE</title>
- <p>The following functions should be exported from an
+ <title>Callback Module</title>
+ <p>The following functions are to be exported from an
<c>application</c> callback module.</p>
</section>
<funcs>
<func>
<name>Module:start(StartType, StartArgs) -> {ok, Pid} | {ok, Pid, State} | {error, Reason}</name>
- <fsummary>Start an application</fsummary>
+ <fsummary>Start an application.</fsummary>
<type>
- <v>StartType = <seealso marker="#type-start_type">start_type()</seealso></v>
+ <v>StartType = <seealso marker="#type-start_type"><c>start_type()</c></seealso></v>
<v>StartArgs = term()</v>
<v>Pid = pid()</v>
<v>State = term()</v>
</type>
<desc>
<p>This function is called whenever an application is started
- using <c>application:start/1,2</c>, and should start
+ using <c>start/1,2</c>, and is to start
the processes of the application. If the application is
structured according to the OTP design principles as a
supervision tree, this means starting the top supervisor of
the tree.</p>
<p><marker id="start_type"/><c>StartType</c> defines the type of start:</p>
<list type="bulleted">
- <item><c>normal</c> if it's a normal startup.</item>
+ <item><c>normal</c> if it is a normal startup.</item>
<item><c>normal</c> also if the application is distributed and
- started at the current node due to a failover from another
+ started at the current node because of a failover from another
node, and the application specification key <c>start_phases == undefined</c>.</item>
<item><c>{takeover,Node}</c> if the application is
- distributed and started at the current node due to a
+ distributed and started at the current node because of a
takeover from <c>Node</c>, either because
- <c>application:takeover/2</c> has been called or because
+ <c>takeover/2</c> has been called or because
the current node has higher priority than <c>Node</c>.</item>
<item><c>{failover,Node}</c> if the application is
- distributed and started at the current node due to a
+ distributed and started at the current node because of a
failover from <c>Node</c>, and the application
specification key <c>start_phases /= undefined</c>.</item>
</list>
<p><c>StartArgs</c> is the <c>StartArgs</c> argument defined by
the application specification key <c>mod</c>.</p>
- <p>The function should return <c>{ok,Pid}</c> or
- <c>{ok,Pid,State}</c> where <c>Pid</c> is the pid of the top
+ <p>The function is to return <c>{ok,Pid}</c> or
+ <c>{ok,Pid,State}</c>, where <c>Pid</c> is the pid of the top
supervisor and <c>State</c> is any term. If omitted,
- <c>State</c> defaults to <c>[]</c>. If later the application
- is stopped, <c>State</c> is passed to
+ <c>State</c> defaults to <c>[]</c>. If the application
+ is stopped later, <c>State</c> is passed to
<c>Module:prep_stop/1</c>.</p>
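+        <p>A minimal sketch of such a callback module (the names
+          <c>my_app</c> and <c>my_app_sup</c> are hypothetical) can look
+          like this:</p>
+<pre>
+-module(my_app).
+-behaviour(application).
+-export([start/2, stop/1]).
+
+start(_StartType, _StartArgs) ->
+    %% Start the top supervisor of the supervision tree.
+    my_app_sup:start_link().
+
+stop(_State) ->
+    ok.</pre>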
</desc>
</func>
<func>
<name>Module:start_phase(Phase, StartType, PhaseArgs) -> ok | {error, Reason}</name>
- <fsummary>Extended start of an application</fsummary>
+ <fsummary>Extended start of an application.</fsummary>
<type>
<v>Phase = atom()</v>
- <v>StartType = <seealso marker="#type-start_type">start_type()</seealso></v>
+ <v>StartType = <seealso marker="#type-start_type"><c>start_type()</c></seealso></v>
<v>PhaseArgs = term()</v>
<v>Pid = pid()</v>
<v>State = state()</v>
</type>
<desc>
- <p>This function is used to start an application with included
- applications, when there is a need for synchronization between
+ <p>Starts an application with included
+ applications, when synchronization is needed between
processes in the different applications during startup.</p>
- <p>The start phases is defined by the application specification
+ <p>The start phases are defined by the application specification
key <c>start_phases == [{Phase,PhaseArgs}]</c>. For included
applications, the set of phases must be a subset of the set of
phases defined for the including application.</p>
<p>The function is called for each start phase (as defined for
the primary application) for the primary application and all
included applications, for which the start phase is defined.</p>
- <p>See <c>Module:start/2</c> for a description of
- <c>StartType</c>.</p>
+ <p>For a description of <c>StartType</c>, see
+ <seealso marker="Module:start/2"><c>Module:start/2</c></seealso>.</p>
</desc>
</func>
<func>
<name>Module:prep_stop(State) -> NewState</name>
- <fsummary>Prepare an application for termination</fsummary>
+ <fsummary>Prepare an application for termination.</fsummary>
<type>
<v>State = NewState = term()</v>
</type>
@@ -536,28 +562,26 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
the application.</p>
<p><c>State</c> is the state returned from
<c>Module:start/2</c>, or <c>[]</c> if no state was returned.
- <c>NewState</c> is any term and will be passed to
+ <c>NewState</c> is any term and is passed to
<c>Module:stop/1</c>.</p>
<p>The function is optional. If it is not defined, the processes
- will be terminated and then <c>Module:stop(State)</c> is
- called.</p>
+ are terminated and then <c>Module:stop(State)</c> is called.</p>
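+        <p>A sketch of such a callback (the module <c>my_app_server</c> is
+          hypothetical) can look like this:</p>
+<pre>
+prep_stop(State) ->
+    %% The processes of the application are still running here, so an
+    %% orderly shutdown can be prepared, for example by telling a
+    %% server to stop accepting new requests.
+    ok = my_app_server:drain(),
+    State.</pre>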
</desc>
</func>
<func>
<name>Module:stop(State)</name>
- <fsummary>Clean up after termination of an application</fsummary>
+ <fsummary>Clean up after termination of an application.</fsummary>
<type>
<v>State = term()</v>
</type>
<desc>
<p>This function is called whenever an application has stopped.
It is intended to be the opposite of <c>Module:start/2</c>
- and should do any necessary cleaning up. The return value is
+ and is to do any necessary cleaning up. The return value is
ignored.</p>
- <p><c>State</c> is the return value of
- <c>Module:prep_stop/1</c>, if such a function exists.
- Otherwise <c>State</c> is taken from the return value of
- <c>Module:start/2</c>.</p>
+ <p><c>State</c> is the return value of <c>Module:prep_stop/1</c>,
+ if such a function exists. Otherwise <c>State</c> is taken from
+ the return value of <c>Module:start/2</c>.</p>
</desc>
</func>
<func>
@@ -572,22 +596,20 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</type>
<desc>
<p>This function is called by an application after a code
- replacement, if there are any changes to the configuration
- parameters.</p>
- <p><c>Changed</c> is a list of parameter-value tuples with all
- configuration parameters with changed values, <c>New</c> is
- a list of parameter-value tuples with all configuration
- parameters that have been added, and <c>Removed</c> is a list
- of all parameters that have been removed.</p>
+ replacement, if the configuration parameters have changed.</p>
+ <p><c>Changed</c> is a list of parameter-value tuples including all
+ configuration parameters with changed values.</p>
+ <p><c>New</c> is a list of parameter-value tuples including all
+ added configuration parameters.</p>
+ <p><c>Removed</c> is a list of all removed parameters.</p>
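+      <p>A sketch of such a callback (the parameter <c>log_level</c> and
+        the module <c>my_app_server</c> are hypothetical) can look like
+        this:</p>
+<pre>
+config_change(Changed, _New, _Removed) ->
+    %% React only to the parameter log_level if its value has changed;
+    %% all other parameters are ignored in this sketch.
+    case lists:keyfind(log_level, 1, Changed) of
+        {log_level, Level} -> my_app_server:set_level(Level);
+        false -> ok
+    end,
+    ok.</pre>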
</desc>
</func>
</funcs>
<section>
- <title>SEE ALSO</title>
+ <title>See Also</title>
<p><seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso>,
<seealso marker="kernel_app">kernel(6)</seealso>,
<seealso marker="app">app(4)</seealso></p>
</section>
</erlref>
-
diff --git a/lib/kernel/doc/src/auth.xml b/lib/kernel/doc/src/auth.xml
index 71b1863e96..5901446960 100644
--- a/lib/kernel/doc/src/auth.xml
+++ b/lib/kernel/doc/src/auth.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2013</year>
+ <year>1996</year><year>2016</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -29,7 +29,7 @@
<rev></rev>
</header>
<module>auth</module>
- <modulesummary>Erlang Network Authentication Server</modulesummary>
+ <modulesummary>Erlang network authentication server.</modulesummary>
<description>
<p>This module is deprecated. For a description of the Magic
Cookie system, refer to
@@ -42,60 +42,60 @@
</datatypes>
<funcs>
<func>
- <name name="is_auth" arity="1"/>
- <fsummary>Status of communication authorization (deprecated)</fsummary>
- <desc>
- <p>Returns <c>yes</c> if communication with <c><anno>Node</anno></c> is
- authorized. Note that a connection to <c><anno>Node</anno></c> will
- be established in this case. Returns <c>no</c> if <c><anno>Node</anno></c>
- does not exist or communication is not authorized (it has
- another cookie than <c>auth</c> thinks it has).</p>
- <p>Use <seealso marker="net_adm#ping/1">net_adm:ping(<c><anno>Node</anno></c>)</seealso>
- instead.</p>
- </desc>
- </func>
- <func>
<name name="cookie" arity="0"/>
- <fsummary>Magic cookie for local node (deprecated)</fsummary>
+ <fsummary>Magic cookie for local node (deprecated).</fsummary>
<desc>
<p>Use
- <seealso marker="erts:erlang#erlang:get_cookie/0">erlang:get_cookie()</seealso>
- instead.</p>
+ <seealso marker="erts:erlang#erlang:get_cookie/0"><c>erlang:get_cookie()</c></seealso>
+ in ERTS instead.</p>
</desc>
</func>
<func>
<name name="cookie" arity="1"/>
- <fsummary>Set the magic for the local node (deprecated)</fsummary>
+    <fsummary>Set the magic cookie for the local node (deprecated).</fsummary>
<type_desc variable="TheCookie">
- The cookie may also be given as a list with a single atom element.
+ The cookie can also be specified as a list with a single atom element.
</type_desc>
<desc>
<p>Use
- <seealso marker="erts:erlang#erlang:set_cookie/2">erlang:set_cookie(node(), <c><anno>Cookie</anno></c>)</seealso>
+ <seealso marker="erts:erlang#erlang:set_cookie/2"><c>erlang:set_cookie(node(), <anno>Cookie</anno>)</c>
+ in ERTS</seealso> instead.</p>
+ </desc>
+ </func>
+ <func>
+ <name name="is_auth" arity="1"/>
+ <fsummary>Status of communication authorization (deprecated).</fsummary>
+ <desc>
+ <p>Returns <c>yes</c> if communication with <c><anno>Node</anno></c> is
+ authorized. Notice that a connection to <c><anno>Node</anno></c>
+ is established in this case. Returns <c>no</c> if <c><anno>Node</anno></c>
+ does not exist or communication is not authorized (it has
+ another cookie than <c>auth</c> thinks it has).</p>
+ <p>Use <seealso marker="net_adm#ping/1"><c>net_adm:ping(<anno>Node</anno>)</c></seealso>
instead.</p>
</desc>
</func>
<func>
<name>node_cookie([Node, Cookie]) -> yes | no</name>
- <fsummary>Set the magic cookie for a node and verify authorization (deprecated)</fsummary>
+ <fsummary>Set the magic cookie for a node and verify authorization (deprecated).</fsummary>
<type>
<v>Node = node()</v>
- <v>Cookie = <seealso marker="#type-cookie">cookie()</seealso></v>
+ <v>Cookie = <seealso marker="#type-cookie"><c>cookie()</c></seealso></v>
</type>
<desc>
<p>Equivalent to
- <seealso marker="#node_cookie/2">node_cookie(Node, Cookie)</seealso>.</p>
+ <seealso marker="#node_cookie/2"><c>node_cookie(Node, Cookie)</c></seealso>.</p>
</desc>
</func>
<func>
<name name="node_cookie" arity="2"/>
- <fsummary>Set the magic cookie for a node and verify authorization (deprecated)</fsummary>
+ <fsummary>Set the magic cookie for a node and verify authorization (deprecated).</fsummary>
<desc>
- <p>Sets the magic cookie of <c><anno>Node</anno></c> to <c><anno>Cookie</anno></c>, and
- verifies the status of the authorization.
+ <p>Sets the magic cookie of <c><anno>Node</anno></c> to
+ <c><anno>Cookie</anno></c> and verifies the status of the authorization.
Equivalent to calling
- <seealso marker="erts:erlang#erlang:set_cookie/2">erlang:set_cookie(<c><anno>Node</anno></c>, <c><anno>Cookie</anno>)</c></seealso>, followed by
- <seealso marker="#is_auth/1">auth:is_auth(<c><anno>Node</anno></c>)</seealso>.</p>
+ <seealso marker="erts:erlang#erlang:set_cookie/2"><c>erlang:set_cookie(<anno>Node</anno>, <anno>Cookie</anno>)</c></seealso>, followed by
+ <seealso marker="#is_auth/1"><c>auth:is_auth(<anno>Node</anno>)</c></seealso>.</p>
</desc>
</func>
</funcs>
diff --git a/lib/kernel/doc/src/book.xml b/lib/kernel/doc/src/book.xml
index 8eaff39dc5..4b3573b9fe 100644
--- a/lib/kernel/doc/src/book.xml
+++ b/lib/kernel/doc/src/book.xml
@@ -4,7 +4,7 @@
<book xmlns:xi="http://www.w3.org/2001/XInclude">
<header titlestyle="normal">
<copyright>
- <year>1997</year><year>2013</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -34,6 +34,9 @@
<preamble>
<contents level="2"></contents>
</preamble>
+ <parts lift="yes">
+ <xi:include href="part.xml"/>
+ </parts>
<applications>
<xi:include href="ref_man.xml"/>
</applications>
diff --git a/lib/kernel/doc/src/code.xml b/lib/kernel/doc/src/code.xml
index 1bd52040a0..aff3e8133c 100644
--- a/lib/kernel/doc/src/code.xml
+++ b/lib/kernel/doc/src/code.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2013</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -29,262 +29,242 @@
<rev></rev>
</header>
<module>code</module>
- <modulesummary>Erlang Code Server</modulesummary>
+ <modulesummary>Erlang code server.</modulesummary>
<description>
<p>This module contains the interface to the Erlang
<em>code server</em>, which deals with the loading of compiled
code into a running Erlang runtime system.</p>
- <p>The runtime system can be started in either <em>embedded</em> or
- <em>interactive</em> mode. Which one is decided by the command
- line flag <c>-mode</c>.</p>
+ <p>The runtime system can be started in <em>interactive</em> or
+    <em>embedded</em> mode. Which one is used is determined by the
+    command-line flag <c>-mode</c>:</p>
<pre>
% <input>erl -mode interactive</input></pre>
- <p>Default mode is <c>interactive</c>.</p>
+ <p>The modes are as follows:</p>
<list type="bulleted">
<item>
- <p>In embedded mode, all code is loaded during system start-up
- according to the boot script. (Code can also be loaded later
- by explicitly ordering the code server to do so).</p>
- </item>
- <item>
- <p>In interactive mode, only some code is loaded during system
- startup-up, basically the modules needed by the runtime
- system itself. Other code is dynamically loaded when first
+ <p>In interactive mode, which is default, only some code is loaded
+ during system startup, basically the modules needed by the runtime
+ system. Other code is dynamically loaded when first
referenced. When a call to a function in a certain module is
made, and the module is not loaded, the code server searches
for and tries to load the module.</p>
</item>
+ <item>
+      <p>In embedded mode, modules are not auto-loaded. Trying to use
+      a module that has not been loaded results in an error. This mode is
+      recommended when the boot script loads all modules, as is
+      typically done in OTP releases. (Code can still be loaded later
+      by explicitly ordering the code server to do so.)</p>
+ </item>
</list>
- <p>To prevent accidentally reloading modules affecting the Erlang
- runtime system itself, the <c>kernel</c>, <c>stdlib</c> and
- <c>compiler</c> directories are considered <em>sticky</em>. This
+    <p>To prevent accidental reloading of modules affecting the Erlang
+ runtime system, directories <c>kernel</c>, <c>stdlib</c>,
+ and <c>compiler</c> are considered <em>sticky</em>. This
means that the system issues a warning and rejects the request if
a user tries to reload a module residing in any of them.
- The feature can be disabled by using the command line flag
+ The feature can be disabled by using command-line flag
<c>-nostick</c>.</p>
</description>
<section>
<title>Code Path</title>
- <p>In interactive mode, the code server maintains a search path --
- usually called the <em>code path</em> -- consisting of a list of
+ <p>In interactive mode, the code server maintains a search path,
+ usually called the <em>code path</em>, consisting of a list of
directories, which it searches sequentially when trying to load a
module.</p>
<p>Initially, the code path consists of the current working
- directory and all Erlang object code directories under the library
+ directory and all Erlang object code directories under library
directory <c>$OTPROOT/lib</c>, where <c>$OTPROOT</c> is
the installation directory of Erlang/OTP, <c>code:root_dir()</c>.
Directories can be named <c>Name[-Vsn]</c> and the code server,
by default, chooses the directory with the highest version number
- among those which have the same <c>Name</c>. The <c>-Vsn</c>
- suffix is optional. If an <c>ebin</c> directory exists under
- <c>Name[-Vsn]</c>, it is this directory which is added to
- the code path.</p>
- <p>The environment variable <c>ERL_LIBS</c> (defined in the operating
- system) can be used to define additional library directories that
- will be handled in the same way as the standard OTP library
- directory described above, except that directories that do not
- have an <c>ebin</c> directory will be ignored.</p>
+ among those having the same <c>Name</c>. Suffix <c>-Vsn</c>
+ is optional. If an <c>ebin</c> directory exists under
+ <c>Name[-Vsn]</c>, this directory is added to the code path.</p>
+ <p>Environment variable <c>ERL_LIBS</c> (defined in the operating
+ system) can be used to define more library directories to
+ be handled in the same way as the standard OTP library
+ directory described above, except that directories without
+ an <c>ebin</c> directory are ignored.</p>
<p>All application directories found in the additional directories
- will appear before the standard OTP applications, except for the
- Kernel and STDLIB applications, which will be placed before any
- additional applications. In other words, modules found in any
- of the additional library directories will override modules with
+      appear before the standard OTP applications, except for the
+ Kernel and STDLIB applications, which are placed before
+ any additional applications. In other words, modules found in any
+ of the additional library directories override modules with
the same name in OTP, except for modules in Kernel and
STDLIB.</p>
- <p>The environment variable <c>ERL_LIBS</c> (if defined) should contain
+ <p>Environment variable <c>ERL_LIBS</c> (if defined) is to contain
a colon-separated (for Unix-like systems) or semicolon-separated
(for Windows) list of additional libraries.</p>
- <p>Example: On an Unix-like system, <c>ERL_LIBS</c> could be set to
- <c>/usr/local/jungerl:/home/some_user/my_erlang_lib</c>. (On Windows,
- use semi-colon as separator.)</p>
- </section>
-
- <section>
- <title>Code Path Cache</title>
- <p>The code server incorporates a code path cache. The cache
- functionality is disabled by default. To activate it, start
- the emulator with the command line flag <c>-code_path_cache</c>
- or call <c>code:rehash()</c>. When the cache is created (or
- updated), the code server searches for modules in the code path
- directories. This may take some time if the the code path is long.
- After the cache creation, the time for loading modules in a large
- system (one with a large directory structure) is significantly
- reduced compared to having the cache disabled. The code server
- is able to look up the location of a module from the cache in
- constant time instead of having to search through the code path
- directories.</p>
- <p>Application resource files (<c>.app</c> files) are also stored
- in the code path cache. This feature is used by the application
- controller (see
- <seealso marker="application">application(3)</seealso>) to load
- applications efficiently in large systems.</p>
- <p>Note that when the code path cache is created (or updated), any
- relative directory names in the code path are converted to
- absolute.</p>
+ <p><em>Example:</em></p>
+  <p>On a Unix-like system, <c>ERL_LIBS</c> can be set to the following:</p>
+ <code>
+/usr/local/jungerl:/home/some_user/my_erlang_lib</code>
+  <p>On Windows, use a semicolon as separator.</p>
</section>
<section>
<title>Loading of Code From Archive Files</title>
- <warning><p>The support for loading of code from archive files is
- experimental. The sole purpose of releasing it before it is ready
+ <warning><p>The support for loading code from archive files is
+ experimental. The purpose of releasing it before it is ready
is to obtain early feedback. The file format, semantics,
- interfaces etc. may be changed in a future release. The function
- <c>lib_dir/2</c> and the flag <c>-code_path_choice</c> are also
+ interfaces, and so on, can be changed in a future release. The function
+ <seealso marker="#lib_dir/2"><c>lib_dir/2</c></seealso>
+ and flag <c>-code_path_choice</c> are also
experimental.</p></warning>
- <p>In the current implementation, Erlang archives are <c>ZIP</c>
- files with <c>.ez</c> extension. Erlang archives may also be
+ <p>The Erlang archives are <c>ZIP</c>
+ files with extension <c>.ez</c>. Erlang archives can also be
enclosed in <c>escript</c> files whose file extension is arbitrary.</p>
- <p>Erlang archive files may contain entire Erlang applications or
+ <p>Erlang archive files can contain entire Erlang applications or
parts of applications. The structure in an archive file is the
- same as the directory structure for an application. If you for
- example would create an archive of <c>mnesia-4.4.7</c>, the
+ same as the directory structure for an application. If you, for
+ example, create an archive of <c>mnesia-4.4.7</c>, the
archive file must be named <c>mnesia-4.4.7.ez</c> and it must
- contain a top directory with the name <c>mnesia-4.4.7</c>. If the
+ contain a top directory named <c>mnesia-4.4.7</c>. If the
version part of the name is omitted, it must also be omitted in
the archive. That is, a <c>mnesia.ez</c> archive must contain a
<c>mnesia</c> top directory.</p>
- <p>An archive file for an application may for example be
+ <p>An archive file for an application can, for example, be
created like this:</p>
<pre>
- zip:create("mnesia-4.4.7.ez",
- ["mnesia-4.4.7"],
- [{cwd, code:lib_dir()},
- {compress, all},
- {uncompress,[".beam",".app"]}]).</pre>
-
- <p>Any file in the archive may be compressed, but in order to
- speed up the access of frequently read files, it may be a good
+zip:create("mnesia-4.4.7.ez",
+ ["mnesia-4.4.7"],
+ [{cwd, code:lib_dir()},
+ {compress, all},
+ {uncompress,[".beam",".app"]}]).</pre>
+
+ <p>Any file in the archive can be compressed, but to
+ speed up the access of frequently read files, it can be a good
idea to store <c>beam</c> and <c>app</c> files uncompressed in
the archive.</p>
- <p>Normally the top directory of an application is located either
- in the library directory <c>$OTPROOT/lib</c> or in a directory
- referred to by the environment variable <c>ERL_LIBS</c>. At
- startup when the initial code path is computed, the code server
- will also look for archive files in these directories and
- possibly add <c>ebin</c> directories in archives to the code path. The
- code path will then contain paths to directories that looks like
+ <p>Normally the top directory of an application is located
+ in library directory <c>$OTPROOT/lib</c> or in a directory
+ referred to by environment variable <c>ERL_LIBS</c>. At
+ startup, when the initial code path is computed, the code server
+ also looks for archive files in these directories and
+ possibly adds <c>ebin</c> directories in archives to the code path. The
+ code path then contains paths to directories that look like
<c>$OTPROOT/lib/mnesia.ez/mnesia/ebin</c> or
<c>$OTPROOT/lib/mnesia-4.4.7.ez/mnesia-4.4.7/ebin</c>.</p>
- <p>The code server uses the module <c>erl_prim_loader</c>
- (possibly via the <c>erl_boot_server</c>) to read code files from
- archives. But the functions in <c>erl_prim_loader</c> may also be
+ <p>The code server uses module <c>erl_prim_loader</c> in ERTS
+ (possibly through <c>erl_boot_server</c>) to read code files from
+ archives. However, the functions in <c>erl_prim_loader</c> can also be
used by other applications to read files from archives. For
example, the call
<c>erl_prim_loader:list_dir("/otp/root/lib/mnesia-4.4.7.ez/mnesia-4.4.7/examples/bench")</c>
would list the contents of a directory inside an archive.
- See <seealso marker="erts:erl_prim_loader">erl_prim_loader(3)</seealso>.</p>
+ See <seealso marker="erts:erl_prim_loader"><c>erl_prim_loader(3)</c></seealso>.</p>
<p>An application archive file and a regular application directory
- may coexist. This may be useful when there is a need of having
+      can coexist. This can be useful when it is necessary to keep some
parts of the application as regular files. A typical case is the
- <c>priv</c> directory which must reside as a regular directory in
- order to be able to dynamically link in drivers and start port
- programs. For other applications that do not have this need, the
- <c>priv</c> directory may reside in the archive and the files
- under the <c>priv</c> directory may be read via the
+ <c>priv</c> directory, which must reside as a regular directory
+ to link in drivers dynamically and start port programs.
+ For other applications that do not need this, directory
+ <c>priv</c> can reside in the archive and the files
+ under the directory <c>priv</c> can be read through
<c>erl_prim_loader</c>.</p>
- <p>At the time point when a directory is added to the code path as
- well as when the entire code path is (re)set, the code server
- will decide which subdirectories in an application that shall be
- read from the archive and which that shall be read as regular
+ <p>When a directory is added to the code path and
+ when the entire code path is (re)set, the code server
+      decides which subdirectories in an application are to be
+      read from the archive and which are to be read as regular
files. If directories are added or removed afterwards, the file
- access may fail if the code path is not updated (possibly to the
- same path as before in order to trigger the directory resolution
- update). For each directory on the second level (ebin, priv, src
- etc.) in the application archive, the code server will firstly
- choose the regular directory if it exists and secondly from the
- archive. The function
- <c>code:lib_dir/2</c> returns the path to the subdirectory. For
- example <c>code:lib_dir(megaco,ebin)</c> may return
+ access can fail if the code path is not updated (possibly to the
+ same path as before, to trigger the directory resolution
+ update).</p>
+
+ <p>For each directory on the second level in the application archive
+      (<c>ebin</c>, <c>priv</c>, <c>src</c>, and so on), the code server
+      first chooses the regular directory if it exists, and otherwise the
+      directory in the archive. Function <c>code:lib_dir/2</c> returns the path to the
+ subdirectory. For example, <c>code:lib_dir(megaco,ebin)</c> can return
<c>/otp/root/lib/megaco-3.9.1.1.ez/megaco-3.9.1.1/ebin</c> while
- <c>code:lib_dir(megaco,priv)</c> may return
+ <c>code:lib_dir(megaco,priv)</c> can return
<c>/otp/root/lib/megaco-3.9.1.1/priv</c>.</p>
<p>When an <c>escript</c> file contains an archive, there are
- neither restrictions on the name of the <c>escript</c> nor on how
- many applications that may be stored in the embedded
- archive. Single <c>beam</c> files may also reside on the top
- level in the archive. At startup, both the top directory in the
- embedded archive as well as all (second level) <c>ebin</c>
+ no restrictions on the name of the <c>escript</c> and no restrictions
+      on how many applications can be stored in the embedded
+ archive. Single Beam files can also reside on the top
+ level in the archive. At startup, the top directory in the
+ embedded archive and all (second level) <c>ebin</c>
directories in the embedded archive are added to the code path.
- See <seealso marker="erts:escript">escript(1)</seealso></p>
+ See <seealso marker="erts:escript"><c>erts:escript(1)</c></seealso>.</p>
<p>When the choice of directories in the code path is
- <c>strict</c>, the directory that ends up in the code path will
- be exactly the stated one. This means that if for example the
+ <c>strict</c>, the directory that ends up in the code path is
+ exactly the stated one. This means that if, for example, the
directory <c>$OTPROOT/lib/mnesia-4.4.7/ebin</c> is explicitly
- added to the code path, the code server will not load files from
- <c>$OTPROOT/lib/mnesia-4.4.7.ez/mnesia-4.4.7/ebin</c> and vice
- versa. </p>
+ added to the code path, the code server does not load files from
+ <c>$OTPROOT/lib/mnesia-4.4.7.ez/mnesia-4.4.7/ebin</c>.</p>
- <p>This behavior can be controlled via the command line flag
+ <p>This behavior can be controlled through command-line flag
<c>-code_path_choice Choice</c>. If the flag is set to <c>relaxed</c>,
- the code server will instead choose a suitable directory
- depending on the actual file structure. If there exists a regular
- application ebin directory, it will be chosen. But if it does
- not exist, the ebin directory in the archive is chosen if it
- exists. If neither of them exists the original directory will be
+ the code server instead chooses a suitable directory
+ depending on the actual file structure. If a regular
+ application <c>ebin</c> directory exists, it is chosen. Otherwise,
+ the directory <c>ebin</c> in the archive is chosen if it
+ exists. If neither of them exists, the original directory is
chosen.</p>
- <p>The command line flag <c>-code_path_choice Choice</c> does also
- affect how <c>init</c> interprets the <c>boot script</c>. The
- interpretation of the explicit code paths in the <c>boot
- script</c> may be <c>strict</c> or <c>relaxed</c>. It is
- particular useful to set the flag to <c>relaxed</c> when you want
- to elaborate with code loading from archives without editing the
+ <p>Command-line flag <c>-code_path_choice Choice</c> also
+ affects how module <c>init</c> interprets the <c>boot script</c>.
+ The interpretation of the explicit code paths in the <c>boot
+ script</c> can be <c>strict</c> or <c>relaxed</c>. It is
+ particularly useful to set the flag to <c>relaxed</c> when
+      experimenting with code loading from archives without editing the
<c>boot script</c>. The default is <c>relaxed</c>. See <seealso
- marker="erts:init">init(3)</seealso></p></section>
+ marker="erts:init"><c>erts:init(3)</c></seealso>.</p></section>
<section>
<title>Current and Old Code</title>
- <p>The code of a module can exists in two variants in a system:
+ <p>The code for a module can exist in two variants in a system:
<em>current code</em> and <em>old code</em>. When a module is
- loaded into the system for the first time, the code of the module
+ loaded into the system for the first time, the module code
becomes 'current' and the global <em>export table</em> is updated
with references to all functions exported from the module.</p>
- <p>If then a new instance of the module is loaded (perhaps because
- of the correction of an error), then the code of the previous
+ <p>If then a new instance of the module is loaded (for example, because of
+ error correction), the code of the previous
instance becomes 'old', and all export entries referring to
- the previous instance are removed. After that the new instance is
- loaded as if it was loaded for the first time, as described above,
- and becomes 'current'.</p>
- <p>Both old and current code for a module are valid, and may even be
+ the previous instance are removed. After that, the new instance is
+ loaded as for the first time, and becomes 'current'.</p>
+ <p>Both old and current code for a module are valid, and can even be
evaluated concurrently. The difference is that exported functions
- in old code are unavailable. Hence there is no way to make a
- global call to an exported function in old code, but old code may
+ in old code are unavailable. Hence, a global call cannot be made
+ to an exported function in old code, but old code can
still be evaluated because of processes lingering in it.</p>
- <p>If a third instance of the module is loaded, the code server will
- remove (purge) the old code and any processes lingering in it will
- be terminated. Then the third instance becomes 'current' and
+ <p>If a third instance of the module is loaded, the code server
+ removes (purges) the old code and any processes lingering in it
+ are terminated. Then the third instance becomes 'current' and
the previously current code becomes 'old'.</p>
<p>For more information about old and current code, and how to
- make a process switch from old to current code, refer to
+ make a process switch from old to current code, see section
+ Compilation and Code Loading in the
<seealso marker="doc/reference_manual:code_loading">Erlang Reference Manual</seealso>.</p>
</section>
<section>
<title>Argument Types and Invalid Arguments</title>
- <p>Generally, module and application names are atoms, while file and directory
+ <p>Module and application names are atoms, while file and directory
names are strings. For backward compatibility reasons, some functions accept
both strings and atoms, but a future release will probably only allow
the arguments that are documented.</p>
- <p>From the R12B release, functions in this module will generally fail with an
- exception if they are passed an incorrect type (for instance, an integer or a tuple
- where an atom was expected). An error tuple will be returned if the type of the argument
- was correct, but there was some other error (for instance, a non-existing directory
- was given to <c>set_path/1</c>).</p>
+ <p>Functions in this module generally fail with an
+ exception if they are passed an incorrect type (for example, an integer or a tuple
+ where an atom is expected). An error tuple is returned if the argument type
+      is correct, but there is some other error (for example, a non-existing directory
+ is specified to <c>set_path/1</c>).</p>
</section>
<section>
@@ -334,31 +314,38 @@
<datatype>
<name name="load_error_rsn"/>
</datatype>
+ <datatype>
+ <name name="prepared_code"/>
+ <desc><p>An opaque term holding prepared code.</p></desc>
+ </datatype>
</datatypes>
<funcs>
<func>
<name name="set_path" arity="1"/>
- <fsummary>Set the code server search path</fsummary>
+ <fsummary>Set the code server search path.</fsummary>
<desc>
<p>Sets the code path to the list of directories <c><anno>Path</anno></c>.</p>
- <p>Returns <c>true</c> if successful, or
- <c>{error, bad_directory}</c> if any <c><anno>Dir</anno></c> is not
- the name of a directory, or <c>{error, bad_path}</c> if
- the argument is invalid.</p>
+ <p>Returns:</p>
+ <taglist>
+ <tag><c>true</c></tag>
+ <item><p>If successful</p></item>
+ <tag><c>{error, bad_directory}</c></tag>
+ <item><p>If any <c><anno>Dir</anno></c> is not a directory name</p></item>
+ </taglist>
</desc>
</func>
<func>
<name name="get_path" arity="0"/>
- <fsummary>Return the code server search path</fsummary>
+ <fsummary>Return the code server search path.</fsummary>
<desc>
- <p>Returns the code path</p>
+ <p>Returns the code path.</p>
</desc>
</func>
<func>
<name name="add_path" arity="1"/>
<name name="add_pathz" arity="1"/>
- <fsummary>Add a directory to the end of the code path</fsummary>
+ <fsummary>Add a directory to the end of the code path.</fsummary>
<type name="add_path_ret"/>
<desc>
<p>Adds <c><anno>Dir</anno></c> to the code path. The directory is added as
@@ -371,11 +358,11 @@
</func>
<func>
<name name="add_patha" arity="1"/>
- <fsummary>Add a directory to the beginning of the code path</fsummary>
+ <fsummary>Add a directory to the beginning of the code path.</fsummary>
<type name="add_path_ret"/>
<desc>
<p>Adds <c><anno>Dir</anno></c> to the beginning of the code path. If
- <c><anno>Dir</anno></c> already exists, it is removed from the old
+ <c><anno>Dir</anno></c> exists, it is removed from the old
position in the code path.</p>
<p>Returns <c>true</c> if successful, or
<c>{error, bad_directory}</c> if <c><anno>Dir</anno></c> is not the name
@@ -385,69 +372,86 @@
<func>
<name name="add_paths" arity="1"/>
<name name="add_pathsz" arity="1"/>
- <fsummary>Add directories to the end of the code path</fsummary>
+ <fsummary>Add directories to the end of the code path.</fsummary>
<desc>
<p>Adds the directories in <c><anno>Dirs</anno></c> to the end of the code
- path. If a <c><anno>Dir</anno></c> already exists, it is not added. This
- function always returns <c>ok</c>, regardless of the validity
+        path. If a <c><anno>Dir</anno></c> is already in the code path, it is not added.</p>
+ <p>Always returns <c>ok</c>, regardless of the validity
of each individual <c><anno>Dir</anno></c>.</p>
</desc>
</func>
<func>
<name name="add_pathsa" arity="1"/>
- <fsummary>Add directories to the beginning of the code path</fsummary>
- <desc>
- <p>Adds the directories in <c><anno>Dirs</anno></c> to the beginning of
- the code path. If a <c><anno>Dir</anno></c> already exists, it is removed
- from the old position in the code path. This function always
- returns <c>ok</c>, regardless of the validity of each
+ <fsummary>Add directories to the beginning of the code path.</fsummary>
+ <desc>
+ <p>Traverses <c><anno>Dirs</anno></c> and adds
+ each <c><anno>Dir</anno></c> to the beginning of the code
+ path. This means that the order of <c><anno>Dirs</anno></c>
+ is reversed in the resulting code path. For example, if you
+        add <c>[Dir1,Dir2]</c>, the resulting path is
+        <c>[Dir2,Dir1|OldCodePath]</c>.</p>
+ <p>If a <c><anno>Dir</anno></c> already exists in the code
+ path, it is removed from the old position.</p>
+ <p>Always returns <c>ok</c>, regardless of the validity of each
individual <c><anno>Dir</anno></c>.</p>
</desc>
</func>
<func>
<name name="del_path" arity="1"/>
- <fsummary>Delete a directory from the code path</fsummary>
+ <fsummary>Delete a directory from the code path.</fsummary>
<desc>
<p>Deletes a directory from the code path. The argument can be
an atom <c><anno>Name</anno></c>, in which case the directory with
the name <c>.../<anno>Name</anno>[-Vsn][/ebin]</c> is deleted from the code
- path. It is also possible to give the complete directory name
- <c><anno>Dir</anno></c> as argument.</p>
- <p>Returns <c>true</c> if successful, or <c>false</c> if
- the directory is not found, or <c>{error, bad_name}</c> if
- the argument is invalid.</p>
+ path. Also, the complete directory name <c><anno>Dir</anno></c> can be
+ specified as argument.</p>
+ <p>Returns:</p>
+ <taglist>
+ <tag><c>true</c></tag>
+ <item><p>If successful</p></item>
+ <tag><c>false</c></tag>
+ <item><p>If the directory is not found</p></item>
+ <tag><c>{error, bad_name}</c></tag>
+ <item><p>If the argument is invalid</p></item>
+ </taglist>
</desc>
</func>
<func>
<name name="replace_path" arity="2"/>
- <fsummary>Replace a directory with another in the code path</fsummary>
- <desc>
- <p>This function replaces an old occurrence of a directory
- named <c>.../<anno>Name</anno>[-Vsn][/ebin]</c>, in the code path, with
- <c><anno>Dir</anno></c>. If <c><anno>Name</anno></c> does not exist, it adds the new
- directory <c><anno>Dir</anno></c> last in the code path. The new directory
- must also be named <c>.../<anno>Name</anno>[-Vsn][/ebin]</c>. This function
- should be used if a new version of the directory (library) is
+ <fsummary>Replace a directory with another in the code path.</fsummary>
+ <desc>
+ <p>Replaces an old occurrence of a directory
+ named <c>.../<anno>Name</anno>[-Vsn][/ebin]</c> in the code path, with
+ <c><anno>Dir</anno></c>. If <c><anno>Name</anno></c> does not exist, it adds
+ the new directory <c><anno>Dir</anno></c> last in the code path. The new
+ directory must also be named <c>.../<anno>Name</anno>[-Vsn][/ebin]</c>.
+ This function is to be used if a new version of the directory (library) is
added to a running system.</p>
- <p>Returns <c>true</c> if successful, or
- <c>{error, bad_name}</c> if <c><anno>Name</anno></c> is not found, or
- <c>{error, bad_directory}</c> if <c><anno>Dir</anno></c> does not exist, or
- <c>{error, {badarg, [<anno>Name</anno>, <anno>Dir</anno>]}}</c> if <c><anno>Name</anno></c> or
- <c><anno>Dir</anno></c> is invalid.</p>
+ <p>Returns:</p>
+ <taglist>
+ <tag><c>true</c></tag>
+ <item><p>If successful</p></item>
+ <tag><c>{error, bad_name}</c></tag>
+ <item><p>If <c><anno>Name</anno></c> is not found</p></item>
+ <tag><c>{error, bad_directory}</c></tag>
+ <item><p>If <c><anno>Dir</anno></c> does not exist</p></item>
+ <tag><c>{error, {badarg, [<anno>Name</anno>, <anno>Dir</anno>]}}</c></tag>
+ <item><p>If <c><anno>Name</anno></c> or <c><anno>Dir</anno></c> is invalid</p></item>
+ </taglist>
</desc>
</func>
<func>
<name name="load_file" arity="1"/>
- <fsummary>Load a module</fsummary>
+ <fsummary>Load a module.</fsummary>
<type name="load_ret"/>
<desc>
<p>Tries to load the Erlang module <c><anno>Module</anno></c>, using
the code path. It looks for the object code file with an
- extension that corresponds to the Erlang machine used, for
- example <c><anno>Module</anno>.beam</c>. The loading fails if the module
+ extension corresponding to the Erlang machine used, for
+ example, <c><anno>Module</anno>.beam</c>. The loading fails if the module
name found in the object code differs from the name
<c><anno>Module</anno></c>.
- <seealso marker="#load_binary/3">load_binary/3</seealso> must
+ <seealso marker="#load_binary/3"><c>load_binary/3</c></seealso> must
be used to load object code with a module name that is
different from the file name.</p>
<p>Returns <c>{module, <anno>Module</anno>}</c> if successful, or
@@ -457,45 +461,45 @@
</func>
<func>
<name name="load_abs" arity="1"/>
- <fsummary>Load a module, residing in a given file</fsummary>
+ <fsummary>Load a module, residing in a specified file.</fsummary>
<type name="load_ret"/>
<type name="loaded_filename"/>
<type name="loaded_ret_atoms"/>
<desc>
- <p>Does the same as <c>load_file(<anno>Module</anno>)</c>, but
- <c><anno>Filename</anno></c> is either an absolute file name, or a
- relative file name. The code path is not searched. It returns
+ <p>Same as <c>load_file(<anno>Module</anno>)</c>, but
+ <c><anno>Filename</anno></c> is an absolute or
+ relative filename. The code path is not searched. It returns
a value in the same way as
- <seealso marker="#load_file/1">load_file/1</seealso>. Note
- that <c><anno>Filename</anno></c> should not contain the extension (for
- example <c>".beam"</c>); <c>load_abs/1</c> adds the correct
- extension itself.</p>
+ <seealso marker="#load_file/1"><c>load_file/1</c></seealso>. Notice
+ that <c><anno>Filename</anno></c> must not contain the extension (for
+ example, <c>.beam</c>) because <c>load_abs/1</c> adds the correct
+ extension.</p>
</desc>
</func>
<func>
<name name="ensure_loaded" arity="1"/>
- <fsummary>Ensure that a module is loaded</fsummary>
+ <fsummary>Ensure that a module is loaded.</fsummary>
<desc>
- <p>Tries to to load a module in the same way as
- <seealso marker="#load_file/1">load_file/1</seealso>,
+ <p>Tries to load a module in the same way as
+ <seealso marker="#load_file/1"><c>load_file/1</c></seealso>,
unless the module is already loaded.
- In embedded mode, however, it does not load a module which is not
+ However, in embedded mode it does not load a module that is not
already loaded, but returns <c>{error, embedded}</c> instead.
See <seealso marker="#error_reasons">Error Reasons for Code-Loading Functions</seealso> for a description of other possible error reasons.</p>
</desc>
</func>
<func>
<name name="load_binary" arity="3"/>
- <fsummary>Load object code for a module</fsummary>
+ <fsummary>Load object code for a module.</fsummary>
<type name="loaded_filename"/>
<type name="loaded_ret_atoms"/>
<desc>
<p>This function can be used to load object code on remote
- Erlang nodes. The argument <c><anno>Binary</anno></c> must contain
+ Erlang nodes. Argument <c><anno>Binary</anno></c> must contain
object code for <c><anno>Module</anno></c>.
<c><anno>Filename</anno></c> is only used by the code server to keep a
record of from which file the object code for <c><anno>Module</anno></c>
- comes. Accordingly, <c><anno>Filename</anno></c> is not opened and read by
+ comes. Thus, <c><anno>Filename</anno></c> is not opened and read by
the code server.</p>
<p>Returns <c>{module, <anno>Module</anno>}</c> if successful, or
<c>{error, Reason}</c> if loading fails.
@@ -503,93 +507,237 @@
</desc>
</func>
<func>
+ <name name="atomic_load" arity="1"/>
+      <fsummary>Load a list of modules atomically.</fsummary>
+ <desc>
+ <p>Tries to load all of the modules in the list
+ <c><anno>Modules</anno></c> atomically. That means that
+ either all modules are loaded at the same time, or
+ none of the modules are loaded if there is a problem with any
+ of the modules.</p>
+      <p>Loading can fail for one of the following reasons:</p>
+ <taglist>
+ <tag><c>badfile</c></tag>
+ <item>
+ <p>The object code has an incorrect format or the module
+ name in the object code is not the expected module name.</p>
+ </item>
+ <tag><c>nofile</c></tag>
+ <item>
+ <p>No file with object code exists.</p>
+ </item>
+ <tag><c>on_load_not_allowed</c></tag>
+ <item>
+ <p>A module contains an
+ <seealso marker="doc/reference_manual:code_loading#on_load">-on_load function</seealso>.</p>
+ </item>
+ <tag><c>duplicated</c></tag>
+ <item>
+ <p>A module is included more than once in
+ <c><anno>Modules</anno></c>.</p>
+ </item>
+ <tag><c>not_purged</c></tag>
+ <item>
+          <p>The object code cannot be loaded because an old version
+ of the code already exists.</p>
+ </item>
+ <tag><c>sticky_directory</c></tag>
+ <item>
+ <p>The object code resides in a sticky directory.</p>
+ </item>
+ <tag><c>pending_on_load</c></tag>
+ <item>
+ <p>A previously loaded module contains an
+ <c>-on_load</c> function that never finished.</p>
+ </item>
+ </taglist>
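+      <p>A plain call (the module names are illustrative only) can look
+      like this:</p>
+<pre>
+ok = code:atomic_load([m1, m2, m3]).</pre>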
+ <p>If it is important to minimize the time that an application
+ is inactive while changing code, use
+ <seealso marker="#prepare_loading/1">prepare_loading/1</seealso>
+ and
+ <seealso marker="#finish_loading/1">finish_loading/1</seealso>
+ instead of <c>atomic_load/1</c>. Here is an example:</p>
+<pre>
+{ok,Prepared} = code:prepare_loading(Modules),
+%% Put the application into an inactive state or do any
+%% other preparation needed before changing the code.
+ok = code:finish_loading(Prepared),
+%% Resume the application.</pre>
+ </desc>
+ </func>
+ <func>
+ <name name="prepare_loading" arity="1"/>
+      <fsummary>Prepare a list of modules atomically.</fsummary>
+ <desc>
+ <p>Prepares to load the modules in the list
+ <c><anno>Modules</anno></c>.
+ Finish the loading by calling
+ <seealso marker="#finish_loading/1">finish_loading(Prepared)</seealso>.</p>
+ <p>This function can fail with one of the following error reasons:</p>
+ <taglist>
+ <tag><c>badfile</c></tag>
+ <item>
+ <p>The object code has an incorrect format or the module
+ name in the object code is not the expected module name.</p>
+ </item>
+ <tag><c>nofile</c></tag>
+ <item>
+ <p>No file with object code exists.</p>
+ </item>
+ <tag><c>on_load_not_allowed</c></tag>
+ <item>
+ <p>A module contains an
+ <seealso marker="doc/reference_manual:code_loading#on_load">-on_load function</seealso>.</p>
+ </item>
+ <tag><c>duplicated</c></tag>
+ <item>
+ <p>A module is included more than once in
+ <c><anno>Modules</anno></c>.</p>
+ </item>
+ </taglist>
+ </desc>
+ </func>
+ <func>
+ <name name="finish_loading" arity="1"/>
+      <fsummary>Finish loading a list of prepared modules atomically.</fsummary>
+ <desc>
+ <p>Tries to load code for all modules that have been previously
+ prepared by
+ <seealso marker="#prepare_loading/1">prepare_loading/1</seealso>.
+ The loading occurs atomically, meaning that
+ either all modules are loaded at the same time, or
+ none of the modules are loaded.</p>
+ <p>This function can fail with one of the following error reasons:</p>
+ <taglist>
+ <tag><c>not_purged</c></tag>
+ <item>
+          <p>The object code cannot be loaded because an old version
+ of the code already exists.</p>
+ </item>
+ <tag><c>sticky_directory</c></tag>
+ <item>
+ <p>The object code resides in a sticky directory.</p>
+ </item>
+ <tag><c>pending_on_load</c></tag>
+ <item>
+ <p>A previously loaded module contains an
+ <c>-on_load</c> function that never finished.</p>
+ </item>
+ </taglist>
+ </desc>
+ </func>
+ <func>
+ <name name="ensure_modules_loaded" arity="1"/>
+      <fsummary>Ensure that a list of modules is loaded.</fsummary>
+ <desc>
+      <p>Tries to load any modules in the list
+      <c><anno>Modules</anno></c> that are not already loaded, in the same way as
+ <seealso marker="#load_file/1">load_file/1</seealso>.</p>
+ <p>Returns <c>ok</c> if successful, or
+ <c>{error,[{Module,Reason}]}</c> if loading of some modules fails.
+ See <seealso marker="#error_reasons">Error Reasons for Code-Loading Functions</seealso> for a description of other possible error reasons.</p>
+ </desc>
+ </func>
+ <func>
<name name="delete" arity="1"/>
- <fsummary>Removes current code for a module</fsummary>
+ <fsummary>Remove current code for a module.</fsummary>
<desc>
<p>Removes the current code for <c><anno>Module</anno></c>, that is,
the current code for <c><anno>Module</anno></c> is made old. This means
that processes can continue to execute the code in the module,
- but that no external function calls can be made to it.</p>
+ but no external function calls can be made to it.</p>
<p>Returns <c>true</c> if successful, or <c>false</c> if there
- is old code for <c><anno>Module</anno></c> which must be purged first, or
+ is old code for <c><anno>Module</anno></c> that must be purged first, or
if <c><anno>Module</anno></c> is not a (loaded) module.</p>
</desc>
</func>
<func>
<name name="purge" arity="1"/>
- <fsummary>Removes old code for a module</fsummary>
+ <fsummary>Remove old code for a module.</fsummary>
<desc>
<p>Purges the code for <c><anno>Module</anno></c>, that is, removes code
marked as old. If some processes still linger in the old code,
these processes are killed before the code is removed.</p>
- <p>Returns <c>true</c> if successful and any process needed to
+ <note><p>As of ERTS version 9.0, a process is only considered
+ to be lingering in the code if it has direct references to the code.
+      For more information, see the documentation of
+      <seealso marker="erts:erlang#check_process_code/3"><c>erlang:check_process_code/3</c></seealso>,
+      which is used to determine this.</p></note>
+      <p>Returns <c>true</c> if successful and at least one process needed to
be killed, otherwise <c>false</c>.</p>
</desc>
</func>
<func>
<name name="soft_purge" arity="1"/>
- <fsummary>Removes old code for a module, unless no process uses it</fsummary>
+      <fsummary>Remove old code for a module, but only if no process uses it.</fsummary>
<desc>
<p>Purges the code for <c><anno>Module</anno></c>, that is, removes code
marked as old, but only if no processes linger in it.</p>
- <p>Returns <c>false</c> if the module could not be purged due
- to processes lingering in old code, otherwise <c>true</c>.</p>
+ <note><p>As of ERTS version 9.0, a process is only considered
+ to be lingering in the code if it has direct references to the code.
+      For more information, see the documentation of
+      <seealso marker="erts:erlang#check_process_code/3"><c>erlang:check_process_code/3</c></seealso>,
+      which is used to determine this.</p></note>
+ <p>Returns <c>false</c> if the module cannot be purged because
+ of processes lingering in old code, otherwise <c>true</c>.</p>
</desc>
</func>
<func>
<name name="is_loaded" arity="1"/>
- <fsummary>Check if a module is loaded</fsummary>
+ <fsummary>Check if a module is loaded.</fsummary>
<type name="loaded_filename"/>
<type name="loaded_ret_atoms"/>
- <type_desc name="loaded_filename"><c><anno>Filename</anno></c> is an absolute filename</type_desc>
+ <type_desc name="loaded_filename"><c><anno>Filename</anno></c> is an absolute
+ filename.</type_desc>
<desc>
<p>Checks if <c><anno>Module</anno></c> is loaded. If it is,
<c>{file, <anno>Loaded</anno>}</c> is returned, otherwise <c>false</c>.</p>
- <p>Normally, <c><anno>Loaded</anno></c> is the absolute file name
- <c>Filename</c> from which the code was obtained. If the module
+ <p>Normally, <c><anno>Loaded</anno></c> is the absolute filename
+ <c>Filename</c> from which the code is obtained. If the module
is preloaded (see
- <seealso marker="sasl:script">script(4)</seealso>),
- <c>Loaded==preloaded</c>. If the module is Cover compiled (see
- <seealso marker="tools:cover">cover(3)</seealso>),
+ <seealso marker="sasl:script"><c>script(4)</c></seealso>),
+ <c>Loaded==preloaded</c>. If the module is Cover-compiled (see
+ <seealso marker="tools:cover"><c>cover(3)</c></seealso>),
<c>Loaded==cover_compiled</c>.</p>
</desc>
</func>
<func>
<name name="all_loaded" arity="0"/>
- <fsummary>Get all loaded modules</fsummary>
+ <fsummary>Get all loaded modules.</fsummary>
<type name="loaded_filename"/>
<type name="loaded_ret_atoms"/>
- <type_desc name="loaded_filename"><c><anno>Filename</anno></c> is an absolute filename</type_desc>
+ <type_desc name="loaded_filename"><c><anno>Filename</anno></c> is an absolute
+ filename.</type_desc>
<desc>
<p>Returns a list of tuples <c>{<anno>Module</anno>, <anno>Loaded</anno>}</c> for all
- loaded modules. <c><anno>Loaded</anno></c> is normally the absolute file
- name, as described for
- <seealso marker="#is_loaded/1">is_loaded/1</seealso>.</p>
+ loaded modules. <c><anno>Loaded</anno></c> is normally the absolute filename,
+ as described for
+ <seealso marker="#is_loaded/1"><c>is_loaded/1</c></seealso>.</p>
</desc>
</func>
<func>
<name name="which" arity="1"/>
- <fsummary>The object code file of a module</fsummary>
+ <fsummary>The object code file of a module.</fsummary>
<type name="loaded_ret_atoms"/>
<desc>
<p>If the module is not loaded, this function searches the code
- path for the first file which contains object code for
- <c><anno>Module</anno></c> and returns the absolute file name. If
- the module is loaded, it returns the name of the file which
- contained the loaded object code. If the module is pre-loaded,
- <c>preloaded</c> is returned. If the module is Cover compiled,
- <c>cover_compiled</c> is returned. <c>non_existing</c> is
- returned if the module cannot be found.</p>
+ path for the first file containing object code for
+ <c><anno>Module</anno></c> and returns the absolute filename.</p>
+ <p>If the module is loaded, it returns the name of the file
+ containing the loaded object code.</p>
+ <p>If the module is preloaded, <c>preloaded</c> is returned.</p>
+ <p>If the module is Cover-compiled, <c>cover_compiled</c> is returned.</p>
+ <p>If the module cannot be found, <c>non_existing</c> is returned.</p>
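+ <p>For example, assuming a standard installation under
+ <c>/usr/local/otp</c> (the exact path and version differ between
+ installations):</p>
+ <pre>
+ > <input>code:which(gen_server).</input>
+ "/usr/local/otp/lib/stdlib-3.4/ebin/gen_server.beam"
+ > <input>code:which(nonexisting_module).</input>
+ non_existing</pre>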
</desc>
</func>
<func>
<name name="get_object_code" arity="1"/>
- <fsummary>Get the object code for a module</fsummary>
+ <fsummary>Gets the object code for a module.</fsummary>
<desc>
- <p>Searches the code path for the object code of the module
- <c><anno>Module</anno></c>. It returns <c>{<anno>Module</anno>, <anno>Binary</anno>, <anno>Filename</anno>}</c>
- if successful, and <c>error</c> if not. <c><anno>Binary</anno></c> is a
- binary data object which contains the object code for
+ <p>Searches the code path for the object code of module
+ <c><anno>Module</anno></c>. Returns <c>{<anno>Module</anno>, <anno>Binary</anno>, <anno>Filename</anno>}</c>
+ if successful, otherwise <c>error</c>. <c><anno>Binary</anno></c> is a
+ binary data object, which contains the object code for
the module. This can be useful if code is to be loaded on a
remote node in a distributed system. For example, loading
module <c><anno>Module</anno></c> on a node <c>Node</c> is done as
@@ -603,10 +751,11 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</func>
<func>
<name name="root_dir" arity="0"/>
- <fsummary>Root directory of Erlang/OTP</fsummary>
+ <fsummary>Root directory of Erlang/OTP.</fsummary>
<desc>
<p>Returns the root directory of Erlang/OTP, which is
the directory where it is installed.</p>
+ <p><em>Example:</em></p>
<pre>
> <input>code:root_dir().</input>
"/usr/local/otp"</pre>
@@ -614,10 +763,11 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</func>
<func>
<name name="lib_dir" arity="0"/>
- <fsummary>Library directory of Erlang/OTP</fsummary>
+ <fsummary>Library directory of Erlang/OTP.</fsummary>
<desc>
<p>Returns the library directory, <c>$OTPROOT/lib</c>, where
<c>$OTPROOT</c> is the root directory of Erlang/OTP.</p>
+ <p><em>Example:</em></p>
<pre>
> <input>code:lib_dir().</input>
"/usr/local/otp/lib"</pre>
@@ -625,50 +775,49 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</func>
<func>
<name name="lib_dir" arity="1"/>
- <fsummary>Library directory for an application</fsummary>
+ <fsummary>Library directory for an application.</fsummary>
<desc>
- <p>This function is mainly intended for finding out the path
+ <p>Returns the path
for the "library directory", the top directory, for an
application <c><anno>Name</anno></c> located under <c>$OTPROOT/lib</c> or
- on a directory referred to via the <c>ERL_LIBS</c>
- environment variable.</p>
- <p>If there is a regular directory called <c><anno>Name</anno></c> or
- <c><anno>Name</anno>-Vsn</c> in the code path with an <c>ebin</c>
+ on a directory referred to with environment variable <c>ERL_LIBS</c>.</p>
+ <p>If a regular directory called <c><anno>Name</anno></c> or
+ <c><anno>Name</anno>-Vsn</c> exists in the code path with an <c>ebin</c>
subdirectory, the path to this directory is returned (not
- the <c>ebin</c> directory). If the directory refers to a
- directory in an archive, the archive name is stripped away
- before the path is returned. For example, if the directory
+ the <c>ebin</c> directory).</p>
+ <p>If the directory refers to a directory in an archive, the
+ archive name is stripped away before the path is returned.
+ For example, if directory
<c>/usr/local/otp/lib/mnesia-4.2.2.ez/mnesia-4.2.2/ebin</c>
is in the path, <c>/usr/local/otp/lib/mnesia-4.2.2/ebin</c>
- will be returned. This means that the library directory for
- an application is the same, regardless of whether the
+ is returned. This means that the library directory for
+ an application is the same, regardless of whether the
application resides in an archive or not.</p>
-
+ <p><em>Example:</em></p>
<pre>
> <input>code:lib_dir(mnesia).</input>
"/usr/local/otp/lib/mnesia-4.2.2"</pre>
<p>Returns <c>{error, bad_name}</c> if <c><anno>Name</anno></c>
is not the name of an application under <c>$OTPROOT/lib</c> or
- on a directory referred to via the <c>ERL_LIBS</c>
- environment variable. Fails with an exception if <c>Name</c>
- has the wrong type.</p>
+ on a directory referred to through environment variable <c>ERL_LIBS</c>.
+ Fails with an exception if <c>Name</c> has the wrong type.</p>
- <warning><p>For backward compatibility, <c><anno>Name</anno></c> is also allowed to
- be a string. That will probably change in a future release.</p></warning>
+ <warning><p>For backward compatibility, <c><anno>Name</anno></c> is also
+ allowed to be a string. That will probably change in a future release.</p></warning>
</desc>
</func>
<func>
<name name="lib_dir" arity="2"/>
- <fsummary>subdirectory for an application</fsummary>
+ <fsummary>Subdirectory for an application.</fsummary>
<desc>
<p>Returns the path to a subdirectory directly under the top
directory of an application. Normally the subdirectories
- resides under the top directory for the application, but when
- applications at least partly resides in an archive the
- situation is different. Some of the subdirectories may reside
- as regular directories while other resides in an archive
- file. It is not checked if this directory really exists.</p>
-
+ reside under the top directory for the application, but when
+ applications at least partly reside in an archive, the
+ situation is different. Some of the subdirectories can reside
+ as regular directories while others reside in an archive
+ file. It is not checked whether this directory exists.</p>
+ <p><em>Example:</em></p>
<pre>
> <input>code:lib_dir(megaco, priv).</input>
"/usr/local/otp/lib/megaco-3.9.1.1/priv"</pre>
@@ -679,7 +828,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</func>
<func>
<name name="compiler_dir" arity="0"/>
- <fsummary>Library directory for the compiler</fsummary>
+ <fsummary>Library directory for the compiler.</fsummary>
<desc>
<p>Returns the compiler library directory. Equivalent to
<c>code:lib_dir(compiler)</c>.</p>
@@ -687,10 +836,10 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</func>
<func>
<name name="priv_dir" arity="1"/>
- <fsummary>Priv directory for an application</fsummary>
+ <fsummary>Priv directory for an application.</fsummary>
<desc>
<p>Returns the path to the <c>priv</c> directory in an
- application. Equivalent to <c>code:lib_dir(<anno>Name</anno>, priv).</c>.</p>
+ application. Equivalent to <c>code:lib_dir(<anno>Name</anno>, priv)</c>.</p>
<warning><p>For backward compatibility, <c><anno>Name</anno></c> is also allowed to
be a string. That will probably change in a future release.</p></warning>
@@ -698,91 +847,130 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</func>
<func>
<name name="objfile_extension" arity="0"/>
- <fsummary>Object code file extension</fsummary>
+ <fsummary>Object code file extension.</fsummary>
<desc>
- <p>Returns the object code file extension that corresponds to
- the Erlang machine used, namely <c>".beam"</c>.</p>
+ <p>Returns the object code file extension corresponding to
+ the Erlang machine used, namely <c>.beam</c>.</p>
</desc>
</func>
<func>
<name name="stick_dir" arity="1"/>
- <fsummary>Mark a directory as sticky</fsummary>
+ <fsummary>Mark a directory as sticky.</fsummary>
<desc>
- <p>This function marks <c><anno>Dir</anno></c> as sticky.</p>
- <p>Returns <c>ok</c> if successful or <c>error</c> if not.</p>
+ <p>Marks <c><anno>Dir</anno></c> as sticky.</p>
+ <p>Returns <c>ok</c> if successful, otherwise <c>error</c>.</p>
</desc>
</func>
<func>
<name name="unstick_dir" arity="1"/>
- <fsummary>Remove a sticky directory mark</fsummary>
+ <fsummary>Remove a sticky directory mark.</fsummary>
<desc>
- <p>This function unsticks a directory which has been marked as
+ <p>Unsticks a directory that is marked as
sticky.</p>
- <p>Returns <c>ok</c> if successful or <c>error</c> if not.</p>
+ <p>Returns <c>ok</c> if successful, otherwise <c>error</c>.</p>
</desc>
</func>
<func>
<name name="is_sticky" arity="1"/>
- <fsummary>Test whether a module is sticky</fsummary>
+ <fsummary>Test if a module is sticky.</fsummary>
<desc>
- <p>This function returns <c>true</c> if <c><anno>Module</anno></c> is the
+ <p>Returns <c>true</c> if <c><anno>Module</anno></c> is the
name of a module that has been loaded from a sticky directory
- (or in other words: an attempt to reload the module will fail),
+ (in other words: an attempt to reload the module will fail),
or <c>false</c> if <c><anno>Module</anno></c> is not a loaded module or is
not sticky.</p>
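+ <p>For example, the directories of the <c>kernel</c>, <c>stdlib</c>,
+ and <c>compiler</c> applications are sticky by default
+ (<c>my_module</c> below is a hypothetical application module):</p>
+ <pre>
+ > <input>code:is_sticky(lists).</input>
+ true
+ > <input>code:is_sticky(my_module).</input>
+ false</pre>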
</desc>
</func>
<func>
- <name name="rehash" arity="0"/>
- <fsummary>Rehash or create code path cache</fsummary>
- <desc>
- <p>This function creates or rehashes the code path cache.</p>
- </desc>
- </func>
- <func>
<name name="where_is_file" arity="1"/>
- <fsummary>Full name of a file located in the code path</fsummary>
+ <fsummary>Full name of a file located in the code path.</fsummary>
<desc>
<p>Searches the code path for <c><anno>Filename</anno></c>, a file of
arbitrary type. If found, the full name is returned.
<c>non_existing</c> is returned if the file cannot be found.
The function can be useful, for example, to locate
- application resource files. If the code path cache is used,
- the code server will efficiently read the full name from
- the cache, provided that <c><anno>Filename</anno></c> is an object code
- file or an <c>.app</c> file.</p>
+ application resource files.</p>
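+ <p>For example, to locate the application resource file of Kernel
+ (the path in the result is installation-specific):</p>
+ <pre>
+ > <input>code:where_is_file("kernel.app").</input>
+ "/usr/local/otp/lib/kernel-5.3/ebin/kernel.app"</pre>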
</desc>
</func>
<func>
<name name="clash" arity="0"/>
<fsummary>Search for modules with identical names.</fsummary>
<desc>
- <p>Searches the entire code space for module names with
+ <p>Searches all directories in the code path for modules with
identical names and writes a report to <c>stdout</c>.</p>
</desc>
</func>
<func>
+ <name name="module_status" arity="1"/>
+ <fsummary>Return the status of the module in relation to object file on disk.</fsummary>
+ <desc>
+ <p>Returns:</p>
+ <taglist>
+ <tag><c>not_loaded</c></tag>
+ <item><p>If <c><anno>Module</anno></c> is not currently loaded.</p></item>
+ <tag><c>loaded</c></tag>
+ <item><p>If <c><anno>Module</anno></c> is loaded and the object file
+ exists and contains the same code.</p></item>
+ <tag><c>removed</c></tag>
+ <item><p>If <c><anno>Module</anno></c> is loaded but no
+ corresponding object file can be found in the code path.</p></item>
+ <tag><c>modified</c></tag>
+ <item><p>If <c><anno>Module</anno></c> is loaded but the object file
+ contains code with a different MD5 checksum.</p></item>
+ </taglist>
+ <p>Preloaded modules are always reported as <c>loaded</c>, without
+ inspecting the contents on disk. Cover-compiled modules are always
+ reported as <c>modified</c> if an object file exists, or as
+ <c>removed</c> otherwise. Modules whose load path is an empty string
+ (which is the convention for auto-generated code) are only
+ reported as <c>loaded</c> or <c>not_loaded</c>.</p>
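+ <p>For example, for a hypothetical module <c>m</c> that is loaded and
+ then recompiled on disk with changed source code:</p>
+ <pre>
+ > <input>code:module_status(m).</input>
+ loaded
+ %% m.erl is edited and recompiled to m.beam on disk
+ > <input>code:module_status(m).</input>
+ modified
+ > <input>c:l(m), code:module_status(m).</input>
+ loaded</pre>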
+ <p>For modules that have native code loaded (see
+ <seealso marker="#is_module_native/1"><c>is_module_native/1</c></seealso>),
+ the MD5 sum of the native code in the object file is used for the
+ comparison, if it exists; the Beam code in the file is ignored.
+ Conversely, for modules that do not currently have native code
+ loaded, any native code in the file is ignored.</p>
+ <p>See also <seealso marker="#modified_modules/0"><c>modified_modules/0</c></seealso>.</p>
+ </desc>
+ </func>
+ <func>
+ <name name="modified_modules" arity="0"/>
+ <fsummary>Return a list of all modules modified on disk.</fsummary>
+ <desc>
+ <p>Returns the list of all currently loaded modules for which
+ <seealso marker="#module_status/1"><c>module_status/1</c></seealso>
+ returns <c>modified</c>. See also <seealso marker="#all_loaded/0"><c>all_loaded/0</c></seealso>.</p>
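+ <p>For example, a node can reload everything that has been recompiled
+ on disk (a sketch; <c>c:l/1</c> purges and reloads a module, and
+ <c>m1</c> and <c>m2</c> are hypothetical modules):</p>
+ <pre>
+ > <input>[c:l(M) || M &lt;- code:modified_modules()].</input>
+ [{module,m1},{module,m2}]</pre>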
+ </desc>
+ </func>
+ <func>
<name name="is_module_native" arity="1"/>
- <fsummary>Test whether a module has native code</fsummary>
+ <fsummary>Test if a module has native code.</fsummary>
<desc>
- <p>This function returns <c>true</c> if <c><anno>Module</anno></c> is
- name of a loaded module that has native code loaded, and
- <c>false</c> if <c><anno>Module</anno></c> is loaded but does not have
- native. If <c><anno>Module</anno></c> is not loaded, this function returns
- <c>undefined</c>.</p>
+ <p>Returns:</p>
+ <taglist>
+ <tag><c>true</c></tag>
+ <item><p>If <c><anno>Module</anno></c> is the
+ name of a loaded module that has native code loaded.</p></item>
+ <tag><c>false</c></tag>
+ <item><p>If <c><anno>Module</anno></c> is loaded but does not have
+ native code.</p></item>
+ <tag><c>undefined</c></tag>
+ <item><p>If <c><anno>Module</anno></c> is not loaded.</p></item>
+ </taglist>
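+ <p>For example (whether a module has native code depends on how it was
+ compiled, so the results below are only illustrative):</p>
+ <pre>
+ > <input>code:is_module_native(lists).</input>
+ false
+ > <input>code:is_module_native(nonexisting_module).</input>
+ undefined</pre>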
</desc>
</func>
<func>
<name name="get_mode" arity="0"/>
- <fsummary>The code_server's mode.</fsummary>
+ <fsummary>The mode of the code server.</fsummary>
<desc>
- <p>This function returns an atom describing the code_server's mode:
- <c>interactive</c> or <c>embedded</c>. </p>
+ <p>Returns an atom describing the mode of the code server:
+ <c>interactive</c> or <c>embedded</c>.</p>
<p>This information is useful when an external entity (for example,
- an IDE) provides additional code for a running node. If in interactive
- mode, it only needs to add to the code path. If in embedded mode,
- the code has to be loaded with <c>load_binary/3</c></p>
+ an IDE) provides additional code for a running node. If the code server is
+ in interactive mode, it only has to add the path to the code. If the code server
+ is in embedded mode, the code must be loaded with
+ <seealso marker="#load_binary/3"><c>load_binary/3</c></seealso>.</p>
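+ <p>A sketch of how such an entity could act on the mode
+ (<c>Dir</c>, <c>Mod</c>, and <c>BeamFile</c> are hypothetical
+ arguments provided by the entity):</p>
+ <code type="none">
+ provide_code(Dir, Mod, BeamFile) ->
+     case code:get_mode() of
+         interactive ->
+             true = code:add_patha(Dir);
+         embedded ->
+             {ok, Bin} = file:read_file(BeamFile),
+             {module, Mod} = code:load_binary(Mod, BeamFile, Bin)
+     end.</code>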
</desc>
</func>
</funcs>
diff --git a/lib/kernel/doc/src/config.xml b/lib/kernel/doc/src/config.xml
index 0e34549482..714af93f4d 100644
--- a/lib/kernel/doc/src/config.xml
+++ b/lib/kernel/doc/src/config.xml
@@ -4,14 +4,14 @@
<fileref>
<header>
<copyright>
- <year>1997</year><year>2013</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-
+
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
@@ -19,7 +19,7 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
+
</legalnotice>
<title>config</title>
@@ -33,94 +33,98 @@
<description>
<p>A <em>configuration file</em> contains values for configuration
parameters for the applications in the system. The <c>erl</c>
- command line argument <c>-config Name</c> tells the system to use
+ command-line argument <c>-config Name</c> tells the system to use
data in the system configuration file <c>Name.config</c>.</p>
- <p>Configuration parameter values in the configuration file will
+ <p>Configuration parameter values in the configuration file
override the values in the application resource files (see
- <c>app(4)</c>). The values in the configuration file can be
- overridden by command line flags (see <c>erl(1)</c>).</p>
+ <seealso marker="app"><c>app(4)</c></seealso>).
+ The values in the configuration file can be
+ overridden by command-line flags (see
+ <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>).</p>
<p>The value of a configuration parameter is retrieved by calling
<c>application:get_env/1,2</c>.</p>
</description>
<section>
- <title>FILE SYNTAX</title>
- <p>The configuration file should be called <c>Name.config</c> where
- <c>Name</c> is an arbitrary name.</p>
- <p>The <c>.config</c> file contains one single Erlang term.
- The file has the following syntax:</p>
+ <title>File Syntax</title>
+ <p>The configuration file is to be called <c>Name.config</c>, where
+ <c>Name</c> is any name.</p>
+ <p>File <c>.config</c> contains a single Erlang term and
+ has the following syntax:</p>
<code type="none">
-[{Application1, [{Par11, Val11}, ..]},
- ..
- {ApplicationN, [{ParN1, ValN1}, ..]}].</code>
- <list type="bulleted">
- <item>
- <p><c>Application = atom()</c> is the name of the application.</p>
- </item>
- <item>
- <p><c>Par = atom()</c> is the name of a configuration parameter.</p>
- </item>
- <item>
- <p><c>Val = term()</c> is the value of a configuration
- parameter.</p>
- </item>
- </list>
+[{Application1, [{Par11, Val11}, ...]},
+ ...
+ {ApplicationN, [{ParN1, ValN1}, ...]}].</code>
+ <taglist>
+ <tag><c>Application = atom()</c></tag>
+ <item><p>Application name.</p></item>
+ <tag><c>Par = atom()</c></tag>
+ <item><p>Name of a configuration parameter.</p></item>
+ <tag><c>Val = term()</c></tag>
+ <item><p>Value of a configuration parameter.</p></item>
+ </taglist>
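+ <p>For example (the application and parameter names below are only
+ illustrative), a file <c>test.config</c> can look as follows:</p>
+ <code type="none">
+ [{myapp1, [{par1, val1}, {par2, val2}]},
+  {myapp2, [{par3, val3}]}].</code>
+ <p>Starting the node with <c>erl -config test</c> then makes, for example,
+ <c>application:get_env(myapp1, par1)</c> return <c>{ok,val1}</c>.</p>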
</section>
<section>
<title>sys.config</title>
<p>When starting Erlang in embedded mode, it is assumed that
exactly one system configuration file is used, named
- <c>sys.config</c>. This file should be located in
+ <c>sys.config</c>. This file is to be located in
<c>$ROOT/releases/Vsn</c>, where <c>$ROOT</c> is the Erlang/OTP
root installation directory and <c>Vsn</c> is the release version.</p>
<p>Release handling relies on this assumption. When installing a
new release version, the new <c>sys.config</c> is read and used
to update the application configurations.</p>
- <p>This means that specifying another, or additional, <c>.config</c>
- files would lead to inconsistent update of application
- configurations. Therefore, in Erlang 5.4/OTP R10B, the syntax of
- <c>sys.config</c> was extended to allow pointing out other
+ <p>This means that specifying another <c>.config</c> file, or more
+ <c>.config</c> files, leads to inconsistent update of application
+ configurations. There is, however, a syntax for
+ <c>sys.config</c> that allows pointing out other
<c>.config</c> files:</p>
<code type="none">
[{Application, [{Par, Val}]} | File].</code>
- <list type="bulleted">
- <item>
- <p><c>File = string()</c> is the name of another <c>.config</c>
- file. The extension <c>.config</c> may be omitted. It is
- recommended to use absolute paths. A relative path is
- relative the current working directory of the emulator.</p>
- </item>
- </list>
+ <taglist>
+ <tag><c>File = string()</c></tag>
+ <item><p>Name of another <c>.config</c> file.
+ Extension <c>.config</c> can be omitted. It is
+ recommended to use absolute paths. If a relative path is used,
+ <c>File</c> is searched for, first relative to the directory of
+ <c>sys.config</c>, and then relative to the current working directory
+ of the emulator, for backward compatibility. This allows a
+ <c>sys.config</c> file pointing out other <c>.config</c> files to be
+ used both in a release and in a node started manually using
+ <c>-config ...</c>, with the same result regardless of the current
+ working directory.</p>
+ </item>
+ </taglist>
<p>When traversing the contents of <c>sys.config</c> and a filename
is encountered, its contents are read and merged with the result
so far. When an application configuration tuple
<c>{Application, Env}</c> is found, it is merged with the result
so far. Merging means that new parameters are added and existing
- parameter values overwritten. Example:</p>
+ parameter values overwritten.</p>
+ <p><em>Example:</em></p>
<code type="none">
sys.config:
[{myapp,[{par1,val1},{par2,val2}]},
"/home/user/myconfig"].
-
myconfig.config:
[{myapp,[{par2,val3},{par3,val4}]}].</code>
- <p>This will yield the following environment for <c>myapp</c>:</p>
+ <p>This yields the following environment for <c>myapp</c>:</p>
<code type="none">
[{par1,val1},{par2,val3},{par3,val4}]</code>
- <p>The behaviour if a file specified in <c>sys.config</c> does not
- exist or is erroneous in some other way, is backwards compatible.
+ <p>The behavior if a file specified in <c>sys.config</c> does not
+ exist, or is erroneous, is backwards compatible.
Starting the runtime system will fail. Installing a new release
- version will not fail, but an error message is given and
+ version will not fail, but an error message is returned and
the erroneous file is ignored.</p>
</section>
<section>
- <title>SEE ALSO</title>
- <p><c>app(4)</c>, <c>erl(1)</c>, <em>OTP Design Principles</em></p>
+ <title>See Also</title>
+ <p><seealso marker="app"><c>app(4)</c></seealso>,
+ <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>,
+ <seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso></p>
</section>
</fileref>
diff --git a/lib/kernel/doc/src/disk_log.xml b/lib/kernel/doc/src/disk_log.xml
index 7d4a9687ea..884cb32c0c 100644
--- a/lib/kernel/doc/src/disk_log.xml
+++ b/lib/kernel/doc/src/disk_log.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>1997</year>
- <year>2013</year>
+ <year>2017</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -35,149 +35,173 @@
<file>disk_log.sgml</file>
</header>
<module>disk_log</module>
- <modulesummary>A disk based term logging facility</modulesummary>
+ <modulesummary>A disk-based term logging facility.</modulesummary>
<description>
- <p><c>disk_log</c> is a disk based term logger which makes
- it possible to efficiently log items on files.
- Two types of logs are supported,
- <em>halt logs</em> and <em>wrap logs</em>. A halt log
- appends items to a single file, the size of which may or may
- not be limited by the disk log module, whereas a wrap log utilizes
- a sequence of wrap log files of limited size. As a wrap log file
- has been filled up, further items are logged onto to the next
- file in the sequence, starting all over with the first file when
- the last file has been filled up. For the sake of efficiency,
- items are always written to files as binaries.
- </p>
- <p>Two formats of the log files are supported, the <em>internal format</em> and the <em>external format</em>. The internal
- format supports automatic repair of log files that have not been
- properly closed, and makes it possible to efficiently read
- logged items in <em>chunks</em> using a set of functions defined
- in this module. In fact, this is the only way to read internally
- formatted logs. The external format leaves it up to the user to
- read the logged deep byte lists. The disk log module cannot
- repair externally formatted logs. An item logged to an
- internally formatted log must not occupy more than 4 GB of disk
- space (the size must fit in 4 bytes).
- </p>
- <p>For each open disk log there is one process that handles requests
- made to the disk log; the disk log process is created when <c>open/1</c>
+ <p><c>disk_log</c> is a disk-based term logger that enables
+ efficient logging of items on files.</p>
+ <p>Two types of logs are supported:</p>
+ <taglist>
+ <tag>halt logs</tag>
+ <item><p>Appends items to a single file, whose size can
+ be limited by the <c>disk_log</c> module.</p></item>
+ <tag>wrap logs</tag>
+ <item><p>Uses a sequence of wrap log files of limited size. As a
+ wrap log file is filled up, further items are logged on to the next
+ file in the sequence, starting all over with the first file when
+ the last file is filled up.</p></item>
+ </taglist>
+ <p>For efficiency reasons, items are always written to files as binaries.</p>
+
+ <p>Two formats of the log files are supported:</p>
+ <taglist>
+ <tag>internal format</tag>
+ <item><p>Supports automatic repair of log files that are not
+ properly closed and enables efficient reading of logged items in
+ <em>chunks</em> using a set of functions defined in this module.
+ This is the only way to read internally formatted logs.
+ An item logged to an internally formatted log must not occupy more
+ than 4 GB of disk space (the size must fit in 4 bytes).</p></item>
+ <tag>external format</tag>
+ <item><p>Leaves it up to the user to read and interpret the logged data.
+ The <c>disk_log</c> module cannot repair externally formatted logs.</p></item>
+ </taglist>
+
+ <p>For each open disk log, one process handles requests
+ made to the disk log. This process is created when
+ <seealso marker="#open/1"><c>open/1</c></seealso>
is called, provided there exists no process handling the disk log.
- A process that opens a disk log can either be an <em>owner</em>
+ A process that opens a disk log can be an <em>owner</em>
or an anonymous <em>user</em> of the disk log. Each owner is
- linked to the disk log
- process, and the disk log is closed by the owner should the
- owner terminate. Owners can subscribe to <em>notifications</em>,
- messages of the form <c>{disk_log, Node, Log, Info}</c> that are sent
+ linked to the disk log process, and an owner can close the disk log
+ either explicitly (by calling <c>close/1</c> or <c>lclose/1,2</c>)
+ or by terminating.</p>
+ <p>Owners can subscribe to <em>notifications</em>,
+ messages of the form <c>{disk_log, Node, Log, Info}</c>, which are sent
from the disk log process when certain events occur, see
- the commands below and in particular the <c>open/1</c> option
- <seealso marker="#notify">notify</seealso>.
- There can be several owners of a log, but a process cannot own a
- log more than once. One and the same process may, however,
- open the log
- as a user more than once. For a disk log process to properly close
- its file and terminate, it must be closed by its owners and once by
- some non-owner process for each time the log was used anonymously;
- the users are counted, and there must not be any users left when the
- disk log process terminates.
+ the functions and in particular the <c>open/1</c> option
+ <seealso marker="#notify"><c>notify</c></seealso>.
+ A log can have many owners, but a process cannot own a
+ log more than once. However, the same process can open the log
+ as a user more than once.</p>
+ <p>For a disk log process to close its file properly and terminate,
+ it must be closed by its owners and once by some non-owner process
+ for each time the log was used anonymously. The users are counted
+ and there must not be any users left when the disk log process terminates.
</p>
- <p>Items can be logged <em>synchronously</em> by using the functions
- <c>log/2</c>, <c>blog/2</c>, <c>log_terms/2</c> and
- <c>blog_terms/2</c>. For each of these functions, the caller is put
- on hold until the items have been logged (but not necessarily
+ <p>Items can be logged <em>synchronously</em> by using functions
+ <seealso marker="#log/2"><c>log/2</c></seealso>,
+ <seealso marker="#blog/2"><c>blog/2</c></seealso>,
+ <seealso marker="#log_terms/2"><c>log_terms/2</c></seealso>, and
+ <seealso marker="#blog_terms/2"><c>blog_terms/2</c></seealso>.
+ For each of these functions, the caller is put
+ on hold until the items are logged (but not necessarily
written, use <c>sync/1</c> to ensure that). By adding an <c>a</c>
- to each of the mentioned function names we get functions that log
+ to each of the mentioned function names, we get functions that log
items <em>asynchronously</em>. Asynchronous functions do not wait for
- the disk log process to actually write the items to the file, but
+ the disk log process to write the items to the file, but
return the control to the caller more or less immediately.
</p>
- <p>When using the internal format for logs, the functions
- <c>log/2</c>, <c>log_terms/2</c>, <c>alog/2</c>, and
- <c>alog_terms/2</c> should be used. These functions log one or
- more Erlang terms. By prefixing each of the functions with
- a <c>b</c> (for "binary") we get the corresponding <c>blog</c>
- functions for the external format. These functions log one or
- more deep lists of bytes or, alternatively, binaries of deep lists
- of bytes.
- For example, to log the string <c>"hello"</c> in ASCII format, we
+ <p>When using the internal format for logs, use functions
+ <seealso marker="#log/2"><c>log/2</c></seealso>,
+ <seealso marker="#log_terms/2"><c>log_terms/2</c></seealso>,
+ <seealso marker="#alog/2"><c>alog/2</c></seealso>, and
+ <seealso marker="#alog_terms/2"><c>alog_terms/2</c></seealso>.
+ These functions log one or more Erlang terms.
+ By prefixing each of the functions with a <c>b</c> (for "binary"),
+ we get the corresponding <c>blog()</c> functions for the external format.
+ These functions log one or more chunks of bytes.
+ For example, to log the string <c>"hello"</c> in ASCII format, you
can use <c>disk_log:blog(Log, "hello")</c>, or
<c>disk_log:blog(Log, list_to_binary("hello"))</c>. The two
- alternatives are equally efficient. The <c>blog</c> functions
- can be used for internally formatted logs as well, but in
- this case they must be called with binaries constructed with
- calls to <c>term_to_binary/1</c>. There is no check to ensure
+ alternatives are equally efficient.</p>
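+ <p>As a minimal sketch, the <c>"hello"</c> example above can be run
+ against an externally formatted halt log as follows (the log name and
+ filename are chosen arbitrarily):</p>
+ <code type="none">
+ {ok, Log} = disk_log:open([{name, hello_log},
+                            {file, "hello.LOG"},
+                            {format, external}]),
+ ok = disk_log:blog(Log, "hello\n"),
+ ok = disk_log:close(Log).</code>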
+ <p>The <c>blog()</c> functions can also be used for internally formatted
+ logs, but in this case they must be called with binaries constructed
+ with calls to
+ <seealso marker="erts:erlang#term_to_binary/1"><c>term_to_binary/1</c></seealso>.
+ There is no check to ensure
this, it is entirely the responsibility of the caller. If these
functions are called with binaries that do not correspond to
- Erlang terms, the <c>chunk/2,3</c> and automatic repair
- functions will fail. The corresponding terms (not the binaries)
- will be returned when <c>chunk/2,3</c> is called.
+ Erlang terms, the
+ <seealso marker="#chunk/2"><c>chunk/2,3</c></seealso>
+ and automatic repair
+ functions fail. The corresponding terms (not the binaries)
+ are returned when <c>chunk/2,3</c> is called.
</p>
<p>A collection of open disk logs with the same name running on
- different nodes is said to be a <em>a distributed disk log</em>
- if requests made to any one of the logs are automatically made to
- the other logs as well. The members of such a collection will be
+ different nodes is said to be a <em>distributed disk log</em>
+ if requests made to any of the logs are automatically made to
+ the other logs as well. The members of such a collection are
called individual distributed disk logs, or just distributed
disk logs if there is no risk of confusion. There is no order
- between the members of such a collection. For instance, logged
- terms are not necessarily written onto the node where the
- request was made before written onto the other nodes. One could
- note here that there are a few functions that do not make
- requests to all members of distributed disk logs, namely
- <c>info</c>, <c>chunk</c>, <c>bchunk</c>, <c>chunk_step</c> and
- <c>lclose</c>. An open disk log that is not a distributed disk
+ between the members of such a collection. For example, logged
+ terms are not necessarily written to the node where the
+ request was made before being written to the other nodes. However,
+ a few functions do not make requests to all
+ members of distributed disk logs, namely
+ <seealso marker="#info/1"><c>info/1</c></seealso>,
+ <seealso marker="#chunk/2"><c>chunk/2,3</c></seealso>,
+ <seealso marker="#bchunk/2"><c>bchunk/2,3</c></seealso>,
+ <seealso marker="#chunk_step/3"><c>chunk_step/3</c></seealso>, and
+ <seealso marker="#lclose/1"><c>lclose/1,2</c></seealso>.</p>
+ <p>An open disk log that is not a distributed disk
log is said to be a <em>local disk log</em>. A local disk log is
- accessible only from the node where the disk log process runs,
+ only accessible from the node where the disk log process runs,
whereas a distributed disk log is accessible from all nodes in
- the Erlang system, with exception for those nodes where a local
+ the Erlang system, except for those nodes where a local
disk log with the same name as the distributed disk log exists.
All processes on nodes that have access to a local or
- distributed disk log can log items or otherwise change, inspect
+ distributed disk log can log items or otherwise change, inspect,
or close the log.
</p>
<p>It is not guaranteed that all log files of a distributed disk log
- contain the same log items; there is no attempt made to synchronize
+ contain the same log items. No attempt is made to synchronize
the contents of the files. However, as long as at least one of
- the involved nodes is alive at each time, all items will be logged.
+ the involved nodes is alive at each time, all items are logged.
When logging items to a distributed log, or otherwise trying to
change the log, the replies from individual logs are
ignored. If all nodes are down, the disk log functions
reply with a <c>nonode</c> error.
</p>
<note>
- <p>In some applications it may not be acceptable that
+ <p>In some applications, it can be unacceptable that
replies from individual logs are ignored. An alternative in such
- situations is to use several local disk logs instead of one
+ situations is to use many local disk logs instead of one
distributed disk log, and implement the distribution without use
- of the disk log module.</p>
+ of the <c>disk_log</c> module.</p>
</note>
<p>Errors are reported differently for asynchronous log attempts
- and other uses of the disk log module. When used synchronously
- the disk log module replies with an error message, but when called
- asynchronously, the disk log module does not know where to send
- the error message. Instead owners subscribing to notifications will
+ and other uses of the <c>disk_log</c> module. When used synchronously,
+ this module replies with an error message, but when called
+ asynchronously, this module does not know where to send
+ the error message. Instead, owners subscribing to notifications
receive an <c>error_status</c> message.
</p>
- <p>The disk log module itself does not report errors to the
- <c>error_logger</c> module; it is up to the caller to decide
- whether the error logger should be employed or not. The function
- <c>format_error/1</c> can be used to produce readable messages
- from error replies. Information events are however sent to the
- error logger in two situations, namely when a log is repaired,
- or when a file is missing while reading chunks.
+ <p>The <c>disk_log</c> module does not report errors to the
+ <seealso marker="error_logger"><c>error_logger</c></seealso>
+ module. It is up to the caller to decide
+ whether to employ the error logger. Function
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>
+ can be used to produce readable messages from error replies.
+ However, information events are sent to the error logger in two
+ situations, namely when a log is repaired, or when a file is missing
+ while reading chunks.
</p>
- <p>The error message <c>no_such_log</c> means that the given
- disk log is not currently open. Nothing is said about
- whether the disk log files exist or not.
+ <p>Error message <c>no_such_log</c> means that the specified
+ disk log is not open. Nothing is said about whether the disk log
+ files exist or not.
</p>
<note>
<p>If an attempt to reopen or truncate a log fails (see
- <c>reopen</c> and <c>truncate</c>) the disk log process
- immediately terminates. Before the process terminates links to
- to owners and blocking processes (see <c>block</c>) are removed.
- The effect is that the links work in one direction only; any
- process using a disk log has to check for the error message
- <c>no_such_log</c> if some other process might truncate or
- reopen the log simultaneously.</p>
+ <seealso marker="#reopen/2"><c>reopen/2,3</c></seealso>
+ and
+ <seealso marker ="#truncate/1"><c>truncate/1,2</c></seealso>)
+ the disk log process terminates immediately. Before the process
+ terminates, links to owners and blocking processes (see
+ <seealso marker="#block/1"><c>block/1,2</c></seealso>) are removed.
+ The effect is that the links work in one direction only. Any
+ process using a disk log must check for error message
+ <c>no_such_log</c> if some other process truncates or
+ reopens the log simultaneously.</p>
</note>
</description>
<datatypes>
@@ -194,9 +218,6 @@
<name name="dlog_head_opt"/>
</datatype>
<datatype>
- <name name="dlog_byte"/>
- </datatype>
- <datatype>
<name name="dlog_mode"/>
</datatype>
<datatype>
@@ -209,9 +230,6 @@
</desc>
</datatype>
<datatype>
- <name name="bytes"/>
- </datatype>
- <datatype>
<name name="invalid_header"/>
</datatype>
<datatype>
@@ -221,11 +239,10 @@
<funcs>
<func>
<name name="accessible_logs" arity="0"/>
- <fsummary>Return the accessible disk logs on the current node.</fsummary>
+ <fsummary>Return the accessible disk logs on the current node.</fsummary>
<desc>
- <p>The <c>accessible_logs/0</c> function returns
- the names of the disk logs accessible on the current node.
- The first list contains local disk logs, and the
+ <p>Returns the names of the disk logs accessible on the current node.
+ The first list contains local disk logs and the
second list contains distributed disk logs.
</p>
</desc>
@@ -233,55 +250,55 @@
<func>
<name name="alog" arity="2"/>
<name name="balog" arity="2"/>
- <fsummary>Asynchronously log an item onto a disk log.</fsummary>
+ <fsummary>Asynchronously log an item on to a disk log.</fsummary>
<type variable="Log"/>
<type variable="Term" name_i="1"/>
<type variable="Bytes"/>
<type name="notify_ret"/>
<desc>
- <p>The <c>alog/2</c> and <c>balog/2</c> functions asynchronously
- append an item to a disk log. The function <c>alog/2</c> is
- used for internally formatted logs, and the function <c>balog/2</c>
- for externally formatted logs. <c>balog/2</c> can be used
- for internally formatted logs as well provided the binary was
- constructed with a call to <c>term_to_binary/1</c>.
+ <p>Asynchronously append an item to a disk log. <c>alog/2</c> is
+ used for internally formatted logs and <c>balog/2</c>
+ for externally formatted logs. <c>balog/2</c> can also be used
+ for internally formatted logs if the binary is
+ constructed with a call to
+ <seealso marker="erts:erlang#term_to_binary/1"><c>term_to_binary/1</c></seealso>.
</p>
- <p>The owners that subscribe to notifications will receive the
- message <c>read_only</c>, <c>blocked_log</c>
- or <c>format_external</c> in case the item cannot be written
+ <p>Owners subscribing to notifications receive
+ message <c>read_only</c>, <c>blocked_log</c>,
+ or <c>format_external</c> if the item cannot be written
on the log, and possibly one of the messages <c>wrap</c>,
- <c>full</c> and <c>error_status</c> if an item was written
- on the log. The message <c>error_status</c> is sent if there
- is something wrong with the header function or a file error
- occurred.
+ <c>full</c>, or <c>error_status</c> if an item is written
+ on the log. Message <c>error_status</c> is sent if
+ something is wrong with the header function or if a file error
+ occurs.
</p>
</desc>
</func>
<func>
<name name="alog_terms" arity="2"/>
<name name="balog_terms" arity="2"/>
- <fsummary>Asynchronously log several items onto a disk log.</fsummary>
+ <fsummary>Asynchronously log many items on to a disk log.</fsummary>
<type variable="Log"/>
<type variable="TermList" name_i="1"/>
<type variable="ByteList"/>
<type name="notify_ret"/>
<desc>
- <p>The <c>alog_terms/2</c> and <c>balog_terms/2</c> functions
- asynchronously append a list of items to a disk log.
- The function <c>alog_terms/2</c> is used for internally
- formatted logs, and the function <c>balog_terms/2</c>
- for externally formatted logs. <c>balog_terms/2</c> can be used
- for internally formatted logs as well provided the binaries were
- constructed with calls to <c>term_to_binary/1</c>.
+ <p>Asynchronously append a list of items to a disk log.
+ <c>alog_terms/2</c> is used for internally
+ formatted logs and <c>balog_terms/2</c>
+ for externally formatted logs. <c>balog_terms/2</c> can also be used
+ for internally formatted logs if the binaries are
+ constructed with calls to
+ <seealso marker="erts:erlang#term_to_binary/1"><c>term_to_binary/1</c></seealso>.
</p>
- <p>The owners that subscribe to notifications will receive the
- message <c>read_only</c>, <c>blocked_log</c>
- or <c>format_external</c> in case the items cannot be written
+ <p>Owners subscribing to notifications receive
+ message <c>read_only</c>, <c>blocked_log</c>,
+ or <c>format_external</c> if the items cannot be written
on the log, and possibly one or more of the messages <c>wrap</c>,
- <c>full</c> and <c>error_status</c> if items were written
- on the log. The message <c>error_status</c> is sent if there
- is something wrong with the header function or a file error
- occurred.
+ <c>full</c>, and <c>error_status</c> if items are written
+ on the log. Message <c>error_status</c> is sent if
+ something is wrong with the header function or if a file error
+ occurs.
</p>
</desc>
</func>
@@ -294,76 +311,75 @@
<p>With a call to <c>block/1,2</c> a process can block a log.
If the blocking process is not an owner of the log, a temporary
link is created between the disk log process and the blocking
- process. The link is used to ensure that the disk log is
- unblocked should the blocking process terminate without
+ process. The link ensures that the disk log is
+ unblocked if the blocking process terminates without
first closing or unblocking the log.
</p>
<p>Any process can probe a blocked log with <c>info/1</c> or
close it with <c>close/1</c>. The blocking process can also
- use the functions <c>chunk/2,3</c>, <c>bchunk/2,3</c>,
+ use functions <c>chunk/2,3</c>, <c>bchunk/2,3</c>,
<c>chunk_step/3</c>, and <c>unblock/1</c> without being
- affected by the block. Any other attempt than those hitherto
- mentioned to update or read a blocked log suspends the
- calling process until the log is unblocked or returns an
+ affected by the block. Any other attempt than those
+ mentioned so far to update or read a blocked log suspends the
+ calling process until the log is unblocked or returns
error message <c>{blocked_log, <anno>Log</anno>}</c>, depending on
whether the value of <c><anno>QueueLogRecords</anno></c> is <c>true</c>
- or <c>false</c>. The default value of <c><anno>QueueLogRecords</anno></c>
- is <c>true</c>, which is used by <c>block/1</c>.
+ or <c>false</c>. <c><anno>QueueLogRecords</anno></c> defaults to
+ <c>true</c>, which is used by <c>block/1</c>.
</p>
</desc>
</func>
<func>
<name name="change_header" arity="2"/>
- <fsummary>Change the head or head_func option for an owner of a disk log.</fsummary>
+ <fsummary>Change option head or head_func for an owner of a disk log.</fsummary>
<desc>
- <p>The <c>change_header/2</c> function changes the value of
- the <c>head</c> or <c>head_func</c> option of a disk log.</p>
+ <p>Changes the value of option <c>head</c> or <c>head_func</c> for an owner of a disk log.</p>
</desc>
</func>
<func>
<name name="change_notify" arity="3"/>
- <fsummary>Change the notify option for an owner of a disk log.</fsummary>
+ <fsummary>Change option notify for an owner of a disk log.</fsummary>
<desc>
- <p>The <c>change_notify/3</c> function changes the value of the
- <c>notify</c> option for an owner of a disk log. </p>
+ <p>Changes the value of option <c>notify</c> for an owner of a disk log. </p>
</desc>
</func>
<func>
<name name="change_size" arity="2"/>
<fsummary>Change the size of an open disk log.</fsummary>
<desc>
- <p>The <c>change_size/2</c> function changes the size of an open log.
- For a halt log it is always possible to increase the size,
- but it is not possible to decrease the size to something less than
- the current size of the file.
+ <p>Changes the size of an open log.
+ For a halt log, the size can always be increased,
+ but it cannot be decreased to something less than
+ the current file size.
</p>
- <p>For a wrap log it is always possible to increase both the
- size and number of files, as long as the number of files does not
+ <p>For a wrap log, both the size and the number of files can always
+ be increased, as long as the number of files does not
exceed 65000. If the maximum number of files is decreased, the
- change will not be valid until the current file is full and the
+ change is not valid until the current file is full and the
log wraps to the next file.
- The redundant files will be removed next time the log wraps around,
- i.e. starts to log to file number 1.
+ The redundant files are removed the next time the log wraps around,
+ that is, starts to log to file number 1.
</p>
<p>As an example, assume that the old maximum number of files
is 10 and that the new maximum number of files is 6. If
the current file number is not greater than the new maximum number
- of files, the files 7 to 10 will be removed when file number 6
+ of files, files 7-10 are removed when file 6
is full and the log starts to write to file number 1 again.
- Otherwise the files greater than the current
- file will be removed when the current file is full (e.g. if
- the current file is 8, the files 9 and 10); the files between
- new maximum number of files and the current
- file (i.e. files 7 and 8) will be removed next time file number 6
+ Otherwise, the files greater than the current
+ file are removed when the current file is full (for example, if
+ the current file is 8, files 9 and 10 are removed). The files between
+ the new maximum number of files and the current
+ file (that is, files 7 and 8) are removed the next time file 6
is full.
</p>
- <p>If the size of the files is decreased the change will immediately
- affect the current log. It will not of course change the
- size of log files already full until next time they are used.
+ <p>If the size of the files is decreased, the change immediately
+ affects the current log. It does not change the
+ size of log files already full until the next time they are used.
</p>
- <p>If the log size is decreased for instance to save space,
- the function <c>inc_wrap_file/1</c> can be used to force the log
- to wrap.
+ <p>If the log size is decreased, for example, to save space,
+ function
+ <seealso marker="#inc_wrap_file/1"><c>inc_wrap_file/1</c></seealso>
+ can be used to force the log to wrap.
</p>
</desc>
</func>
@@ -380,93 +396,85 @@
<type name="bchunk_ret"/>
<type name="chunk_error_rsn"/>
<desc>
- <p>The <c>chunk/2,3</c> and <c>bchunk/2,3</c> functions make
- it possible to efficiently read the terms which have been
- appended to an internally formatted log. It minimizes disk
- I/O by reading 64 kilobyte chunks from the file. The
- <c>bchunk/2,3</c> functions return the binaries read from
- the file; they do not call <c>binary_to_term</c>. Otherwise
- the work just like <c>chunk/2,3</c>.
+ <p>Efficiently reads the terms that are appended
+ to an internally formatted log. It minimizes disk
+ I/O by reading 64 kilobyte chunks from the file. Functions
+ <c>bchunk/2,3</c> return the binaries read from
+ the file; they do not call <c>binary_to_term()</c>. Apart from that,
+ they work just like <c>chunk/2,3</c>.
</p>
- <p>The first time <c>chunk</c> (or <c>bchunk</c>) is called,
+ <p>The first time <c>chunk()</c> (or <c>bchunk()</c>) is called,
an initial continuation, the atom <c>start</c>, must be
- provided. If there is a disk log process running on the
- current node, terms are read from that log, otherwise an
+ provided. If a disk log process is running on the
+ current node, terms are read from that log. Otherwise, an
individual distributed log on some other node is chosen, if
such a log exists.
</p>
<p>When <c>chunk/3</c> is called, <c><anno>N</anno></c> controls the
maximum number of terms that are read from the log in each
- chunk. Default is <c>infinity</c>, which means that all the
+ chunk. Defaults to <c>infinity</c>, which means that all the
terms contained in the 64 kilobyte chunk are read. If less than
<c><anno>N</anno></c> terms are returned, this does not necessarily mean
- that the end of the file has been reached.
+ that the end of the file is reached.
</p>
- <p>The <c>chunk</c> function returns a tuple
- <c>{<anno>Continuation2</anno>, <anno>Terms</anno>}</c>, where <c><anno>Terms</anno></c> is a list
+ <p><c>chunk()</c> returns a tuple
+ <c>{<anno>Continuation2</anno>, <anno>Terms</anno>}</c>, where
+ <c><anno>Terms</anno></c> is a list
of terms found in the log. <c><anno>Continuation2</anno></c> is yet
- another continuation which must be passed on to any
- subsequent calls to <c>chunk</c>. With a series of calls to
- <c>chunk</c> it is possible to extract all terms from a log.
+ another continuation, which must be passed on to any
+ subsequent calls to <c>chunk()</c>. With a series of calls to
+ <c>chunk()</c>, all terms from a log can be extracted.
</p>
- <p>The <c>chunk</c> function returns a tuple
- <c>{<anno>Continuation2</anno>, <anno>Terms</anno>, <anno>Badbytes</anno>}</c> if the log is opened
- in read-only mode and the read chunk is corrupt. <c><anno>Badbytes</anno></c>
- is the number of bytes in the file which were found not to be
- Erlang terms in the chunk. Note also that the log is not repaired.
+ <p><c>chunk()</c> returns a tuple
+ <c>{<anno>Continuation2</anno>, <anno>Terms</anno>, <anno>Badbytes</anno>}</c>
+ if the log is opened in read-only mode and the read chunk is corrupt.
+ <c><anno>Badbytes</anno></c> is the number of bytes in the file found not to be
+ Erlang terms in the chunk. Notice that the log is not repaired.
When trying to read chunks from a log opened in read-write mode,
- the tuple <c>{corrupt_log_file, <anno>FileName</anno>}</c> is returned if the
+ tuple <c>{corrupt_log_file, <anno>FileName</anno>}</c> is returned if the
read chunk is corrupt.
</p>
- <p><c>chunk</c> returns <c>eof</c> when the end of the log is
- reached, or <c>{error, <anno>Reason</anno>}</c> if an error occurs. Should
- a wrap log file be missing, a message is output on the error log.
+ <p><c>chunk()</c> returns <c>eof</c> when the end of the log is
+ reached, or <c>{error, <anno>Reason</anno>}</c> if an error occurs. If
+ a wrap log file is missing, a message is output on the error log.
</p>
<p>When <c>chunk/2,3</c> is used with wrap logs, the returned
- continuation may or may not be valid in the next call to
- <c>chunk</c>. This is because the log may wrap and delete
- the file into which the continuation points. To make sure
- this does not happen, the log can be blocked during the
- search.
+ continuation might not be valid in the next call to
+ <c>chunk()</c>. This is because the log can wrap and delete
+ the file into which the continuation points. To prevent this,
+ the log can be blocked during the search.
</p>
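+ <p>A sketch of a helper that extracts all terms from an internally
+ formatted log opened in read-write mode (so the three-element tuple
+ returned for corrupt chunks in read-only mode does not occur):</p>
+ <code type="none">
+ read_all(Log) ->
+     read_all(Log, start, []).
+
+ read_all(Log, Cont, Acc) ->
+     case disk_log:chunk(Log, Cont) of
+         eof ->
+             lists:append(lists:reverse(Acc));
+         {error, Reason} ->
+             {error, Reason};
+         {Cont2, Terms} ->
+             read_all(Log, Cont2, [Terms | Acc])
+     end.</code>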
</desc>
</func>
<func>
<name name="chunk_info" arity="1"/>
- <fsummary>Return information about a chunk continuation of a disk log.</fsummary>
+ <fsummary>Return information about a chunk continuation of a disk log.</fsummary>
<desc>
- <p>The <c>chunk_info/1</c> function returns the following pair
+ <p>Returns the pair <c>{node, <anno>Node</anno>}</c>,
describing the chunk continuation returned by
- <c>chunk/2,3</c>, <c>bchunk/2,3</c>, or <c>chunk_step/3</c>:
- </p>
- <list type="bulleted">
- <item>
- <p><c>{node, <anno>Node</anno>}</c>. Terms are read from
- the disk log running on <c><anno>Node</anno></c>.</p>
- </item>
- </list>
+ <c>chunk/2,3</c>, <c>bchunk/2,3</c>, or <c>chunk_step/3</c>.</p>
+ <p>Terms are read from the disk log running on <c><anno>Node</anno></c>.</p>
</desc>
</func>
<func>
<name name="chunk_step" arity="3"/>
- <fsummary>Step forward or backward among the wrap log files of a disk log.</fsummary>
+ <fsummary>Step forward or backward among the wrap log files of a disk log.</fsummary>
<desc>
- <p>The function <c>chunk_step</c> can be used in conjunction
- with <c>chunk/2,3</c> and <c>bchunk/2,3</c> to search
- through an internally formatted wrap log. It takes as
+ <p>Can be used with <c>chunk/2,3</c> and <c>bchunk/2,3</c>
+ to search through an internally formatted wrap log. It takes as
argument a continuation as returned by <c>chunk/2,3</c>,
<c>bchunk/2,3</c>, or <c>chunk_step/3</c>, and steps forward
(or backward) <c><anno>Step</anno></c> files in the wrap log. The
- continuation returned points to the first log item in the
+ continuation returned points to the first log item in the
new current file.
</p>
- <p>If the atom <c>start</c> is given as continuation, a disk log
+ <p>If atom <c>start</c> is specified as continuation, a disk log
to read terms from is chosen. A local or distributed disk log
on the current node is preferred to an
individual distributed log on some other node.
</p>
- <p>If the wrap log is not full because all files have not been
- used yet, <c>{error, end_of_log}</c> is returned if trying to
+ <p>If the wrap log is not full because not all files are yet
+ used, <c>{error, end_of_log}</c> is returned if trying to
step outside the log.
</p>
</desc>
@@ -476,21 +484,20 @@
<fsummary>Close a disk log.</fsummary>
<type name="close_error_rsn"/>
<desc>
- <p><marker id="close_1"></marker>The function <c>close/1</c> closes a
+ <p><marker id="close_1"></marker>Closes a
local or distributed disk log properly. An internally
formatted log must be closed before the Erlang system is
- stopped, otherwise the log is regarded as unclosed and the
- automatic repair procedure will be activated next time the
+ stopped. Otherwise, the log is regarded as unclosed and the
+ automatic repair procedure is activated next time the
log is opened.
</p>
- <p>The disk log process in not terminated as long as there are
- owners or users of the log. It should be stressed that each
- and every owner must close the log, possibly by terminating,
- and that any other process - not only the processes that have
- opened the log anonymously - can decrement the <c>users</c>
+ <p>The disk log process is not terminated as long as there are
+ owners or users of the log. All owners must close the log,
+ possibly by terminating. Also, any other process, not only the processes
+ that have opened the log anonymously, can decrement the <c>users</c>
counter by closing the log.
Attempts to close a log by a process that is
- not an owner are simply ignored if there are no users.
+ not an owner are ignored if there are no users.
</p>
<p>If the log is blocked by the closing process, the log is also
unblocked.
@@ -499,12 +506,14 @@
</func>
<func>
<name name="format_error" arity="1"/>
- <fsummary>Return an English description of a disk log error reply.</fsummary>
+ <fsummary>Return an English description of a disk log error reply.</fsummary>
<desc>
<p>Given the error returned by any function in this module,
- the function <c>format_error</c> returns a descriptive string
- of the error in English. For file errors, the function
- <c>format_error/1</c> in the <c>file</c> module is called.</p>
+ this function returns a descriptive string
+ of the error in English. For file errors, function
+ <c>format_error/1</c> in module
+ <seealso marker="file#format_error/1"><c>file</c></seealso>
+ is called.</p>
</desc>
</func>
<func>
@@ -513,16 +522,15 @@
<type name="inc_wrap_error_rsn"/>
<type name="invalid_header"/>
<desc>
- <p>The <c>inc_wrap_file/1</c> function forces the internally formatted
- disk log to start logging to the
- next log file. It can be used, for instance, in conjunction with
+ <p>Forces the internally formatted disk log to start logging to the
+ next log file. It can be used, for example, with
<c>change_size/2</c> to reduce the amount of disk space allocated
by the disk log.
</p>
- <p>The owners that subscribe to notifications will normally
- receive a <c>wrap</c> message, but in case of
- an error with a reason tag of <c>invalid_header</c> or
- <c>file_error</c> an <c>error_status</c> message will be sent.</p>
+ <p>Owners subscribing to notifications normally
+ receive a <c>wrap</c> message, but if
+ an error occurs with a reason tag of <c>invalid_header</c> or
+ <c>file_error</c>, an <c>error_status</c> message is sent.</p>
</desc>
</func>
<func>
@@ -530,132 +538,148 @@
<fsummary>Return information about a disk log.</fsummary>
<type name="dlog_info"/>
<desc>
- <p>The <c>info/1</c> function returns a list of <c>{Tag, Value}</c>
- pairs describing the log. If there is a disk log process running
- on the current node, that log is used as source of information,
- otherwise an individual distributed log on
- some other node is chosen, if such a log exists.
+ <p>Returns a list of <c>{Tag, Value}</c> pairs describing the log.
+ If a disk log process is running on the current node,
+ that log is used as source of information, otherwise an individual
+ distributed log on some other node is chosen, if such a log exists.
</p>
<p>The following pairs are returned for all logs:
</p>
- <list type="bulleted">
+ <taglist>
+ <tag><c>{name, <anno>Log</anno>}</c></tag>
<item>
- <p><c>{name, <anno>Log</anno>}</c>, where <c><anno>Log</anno></c> is the name of
- the log as given by the <c>open/1</c> option <c>name</c>.</p>
+ <p><c><anno>Log</anno></c> is the log name
+ as specified by the <c>open/1</c> option <c>name</c>.</p>
</item>
+ <tag><c>{file, <anno>File</anno>}</c></tag>
<item>
- <p><c>{file, <anno>File</anno>}</c>. For halt logs <c><anno>File</anno></c> is the
+ <p>For halt logs <c><anno>File</anno></c> is the
filename, and for wrap logs <c><anno>File</anno></c> is the base name.</p>
</item>
+ <tag><c>{type, <anno>Type</anno>}</c></tag>
<item>
- <p><c>{type, <anno>Type</anno>}</c>, where <c><anno>Type</anno></c> is the type of
- the log as given by the <c>open/1</c> option <c>type</c>.</p>
+ <p><c><anno>Type</anno></c> is the log type
+ as specified by the <c>open/1</c> option <c>type</c>.</p>
</item>
+ <tag><c>{format, <anno>Format</anno>}</c></tag>
<item>
- <p><c>{format, <anno>Format</anno>}</c>, where <c><anno>Format</anno></c> is the format
- of the log as given by the <c>open/1</c> option <c>format</c>.</p>
+ <p><c><anno>Format</anno></c> is the log format
+ as specified by the <c>open/1</c> option <c>format</c>.</p>
</item>
+ <tag><c>{size, <anno>Size</anno>}</c></tag>
<item>
- <p><c>{size, <anno>Size</anno>}</c>, where <c><anno>Size</anno></c> is the size
- of the log as given by the <c>open/1</c> option <c>size</c>,
+ <p><c><anno>Size</anno></c> is the log size
+ as specified by the <c>open/1</c> option <c>size</c>,
or the size set by <c>change_size/2</c>. The value set by
<c>change_size/2</c> is reflected immediately.</p>
</item>
+ <tag><c>{mode, <anno>Mode</anno>}</c></tag>
<item>
- <p><c>{mode, <anno>Mode</anno>}</c>, where <c><anno>Mode</anno></c> is the mode
- of the log as given by the <c>open/1</c> option <c>mode</c>.</p>
+ <p><c><anno>Mode</anno></c> is the log mode
+ as specified by the <c>open/1</c> option <c>mode</c>.</p>
</item>
+ <tag><c>{owners, [{pid(), <anno>Notify</anno>}]}</c></tag>
<item>
- <p><c>{owners, [{pid(), <anno>Notify</anno>}]}</c> where <c><anno>Notify</anno></c>
+ <p><c><anno>Notify</anno></c>
is the value set by the <c>open/1</c> option <c>notify</c>
- or the function <c>change_notify/3</c> for the owners of
+ or function <c>change_notify/3</c> for the owners of
the log.</p>
</item>
+ <tag><c>{users, <anno>Users</anno>}</c></tag>
<item>
- <p><c>{users, <anno>Users</anno>}</c> where <c><anno>Users</anno></c> is the number
+ <p><c><anno>Users</anno></c> is the number
of anonymous users of the log, see the <c>open/1</c> option
- <seealso marker="#linkto">linkto</seealso>.</p>
+ <seealso marker="#linkto"><c>linkto</c></seealso>.</p>
</item>
+ <tag><c>{status, <anno>Status</anno>}</c></tag>
<item>
- <p><c>{status, <anno>Status</anno>}</c>, where <c><anno>Status</anno></c> is <c>ok</c>
- or <c>{blocked, <anno>QueueLogRecords</anno>}</c> as set by the functions
+ <p><c><anno>Status</anno></c> is <c>ok</c>
+ or <c>{blocked, <anno>QueueLogRecords</anno>}</c> as set by functions
<c>block/1,2</c> and <c>unblock/1</c>.</p>
</item>
+ <tag><c>{node, <anno>Node</anno>}</c></tag>
<item>
- <p><c>{node, <anno>Node</anno>}</c>. The information returned by the
- current invocation of the <c>info/1</c> function has been
+ <p>The information returned by the
+ current invocation of function <c>info/1</c> is
gathered from the disk log process running on <c><anno>Node</anno></c>.</p>
</item>
+ <tag><c>{distributed, <anno>Dist</anno>}</c></tag>
<item>
- <p><c>{distributed, <anno>Dist</anno>}</c>. If the log is local on
- the current node, then <c><anno>Dist</anno></c> has the value <c>local</c>,
+ <p>If the log is local on
+ the current node, <c><anno>Dist</anno></c> has the value <c>local</c>,
otherwise all nodes where the log is distributed
are returned as a list.</p>
</item>
- </list>
+ </taglist>
<p>The following pairs are returned for all logs opened in
<c>read_write</c> mode:
</p>
- <list type="bulleted">
+ <taglist>
+ <tag><c>{head, <anno>Head</anno>}</c></tag>
<item>
- <p><c>{head, <anno>Head</anno>}</c>. Depending of the value of
- the <c>open/1</c> options <c>head</c> and <c>head_func</c>
- or set by the function <c>change_header/2</c>, the value
+ <p>Depending on the value of
+ the <c>open/1</c> options <c>head</c> and <c>head_func</c>,
+ or set by function <c>change_header/2</c>, the value
of <c><anno>Head</anno></c> is <c>none</c> (default),
- <c>{head, H}</c> (<c>head</c> option) or <c>{M,F,A}</c>
+ <c>{head, H}</c> (<c>head</c> option), or <c>{M,F,A}</c>
(<c>head_func</c> option).</p>
</item>
+ <tag><c>{no_written_items, <anno>NoWrittenItems</anno>}</c></tag>
<item>
- <p><c>{no_written_items, <anno>NoWrittenItems</anno>}</c>, where
- <c><anno>NoWrittenItems</anno></c> is the number of items
+ <p><c><anno>NoWrittenItems</anno></c> is the number of items
written to the log since the disk log process was created.</p>
</item>
- </list>
+ </taglist>
<p>The following pair is returned for halt logs opened in
<c>read_write</c> mode:
</p>
- <list type="bulleted">
+ <taglist>
+ <tag><c>{full, <anno>Full</anno>}</c></tag>
<item>
- <p><c>{full, <anno>Full</anno>}</c>, where <c><anno>Full</anno></c> is <c>true</c> or
+ <p><c><anno>Full</anno></c> is <c>true</c> or
<c>false</c> depending on whether the halt log is full or not.</p>
</item>
- </list>
+ </taglist>
<p>The following pairs are returned for wrap logs opened in
<c>read_write</c> mode:
</p>
- <list type="bulleted">
+ <taglist>
+ <tag><c>{no_current_bytes, integer() >= 0}</c></tag>
<item>
- <p><c>{no_current_bytes, integer() >= 0}</c> is the number
+ <p>The number
of bytes written to the current wrap log file.</p>
</item>
+ <tag><c>{no_current_items, integer() >= 0}</c></tag>
<item>
- <p><c>{no_current_items, integer() >= 0}</c> is the number
+ <p>The number
of items written to the current wrap log file, header
inclusive.</p>
</item>
+ <tag><c>{no_items, integer() >= 0}</c></tag>
<item>
- <p><c>{no_items, integer() >= 0}</c> is the total number
+ <p>The total number
of items in all wrap log files.</p>
</item>
+ <tag><c>{current_file, integer()}</c></tag>
<item>
- <p><c>{current_file, integer()}</c> is the ordinal for
+ <p>The ordinal for
the current wrap log file in the range <c>1..MaxNoFiles</c>,
- where <c>MaxNoFiles</c> is given by the <c>open/1</c> option
+ where <c>MaxNoFiles</c> is specified by the <c>open/1</c> option
<c>size</c> or set by <c>change_size/2</c>.</p>
</item>
+ <tag><c>{no_overflows, {<anno>SinceLogWasOpened</anno>, <anno>SinceLastInfo</anno>}}</c></tag>
<item>
- <p><c>{no_overflows, {<anno>SinceLogWasOpened</anno>, <anno>SinceLastInfo</anno>}}</c>,
- where <c><anno>SinceLogWasOpened</anno></c> (<c><anno>SinceLastInfo</anno></c>) is
- the number of times a wrap log file has been filled up and a
- new one opened or <c>inc_wrap_file/1</c> has been called since
+ <p><c><anno>SinceLogWasOpened</anno></c> (<c><anno>SinceLastInfo</anno></c>)
+ is the number of times a wrap log file has been filled up and a
+              new one has been opened or <c>inc_wrap_file/1</c> has been called since
the disk log was last opened (<c>info/1</c>
was last called). The first time <c>info/2</c> is called
after a log was (re)opened or truncated, the two values
are equal.</p>
</item>
- </list>
- <p>Note that the <c>chunk/2,3</c>, <c>bchunk/2,3</c>, and
- <c>chunk_step/3</c> functions do not affect any value
+ </taglist>
+ <p>Notice that functions <c>chunk/2,3</c>, <c>bchunk/2,3</c>, and
+ <c>chunk_step/3</c> do not affect any value
returned by <c>info/1</c>.
</p>
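+        <p>For illustration, assuming a wrap log named <c>my_log</c> is
+          open in <c>read_write</c> mode on the current node (the name is
+          an example only), some of the pairs can be picked out as
+          follows:</p>
+        <pre>
+Info = disk_log:info(my_log),
+{size, {MaxNoBytes, MaxNoFiles}} = lists:keyfind(size, 1, Info),
+{current_file, CurrentFile} = lists:keyfind(current_file, 1, Info)</pre>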
</desc>
@@ -666,17 +690,16 @@
<fsummary>Close a disk log on one node.</fsummary>
<type name="lclose_error_rsn"/>
<desc>
- <p>The function <c>lclose/1</c> closes a local log or an
- individual distributed log on the current node.
- The function <c>lclose/2</c> closes an individual
- distributed log on the specified node if the node
- is not the current one.
- <c>lclose(<anno>Log</anno>)</c> is equivalent to
+ <p><c>lclose/1</c> closes a local log or an individual distributed
+ log on the current node.</p>
+ <p><c>lclose/2</c> closes an individual distributed log on the
+ specified node if the node is not the current one.</p>
+ <p><c>lclose(<anno>Log</anno>)</c> is equivalent to
<c>lclose(<anno>Log</anno>,&nbsp;node())</c>.
- See also <seealso marker="#close_1">close/1</seealso>.
+ See also <seealso marker="#close_1"><c>close/1</c></seealso>.
</p>
- <p>If there is no log with the given name
- on the specified node, <c>no_such_log</c> is returned.
+        <p>If no log with the specified name exists on the specified node,
+ <c>no_such_log</c> is returned.
</p>
</desc>
</func>
@@ -689,25 +712,27 @@
<type variable="Bytes"/>
<type name="log_error_rsn"/>
<desc>
- <p>The <c>log/2</c> and <c>blog/2</c> functions synchronously
- append a term to a disk log. They return <c>ok</c> or
- <c>{error, <anno>Reason</anno>}</c> when the term has been written to
- disk. If the log is distributed, <c>ok</c> is always
- returned, unless all nodes are down. Terms are written by
- means of the ordinary <c>write()</c> function of the
- operating system. Hence, there is no guarantee that the term
- has actually been written to the disk, it might linger in
- the operating system kernel for a while. To make sure the
- item is actually written to disk, the <c>sync/1</c> function
+ <p>Synchronously
+ appends a term to a disk log. Returns <c>ok</c> or
+ <c>{error, <anno>Reason</anno>}</c> when the term is written to
+ disk. If the log is distributed, <c>ok</c> is returned,
+ unless all nodes are down. Terms are written by
+ the ordinary <c>write()</c> function of the
+ operating system. Hence, it is not guaranteed that the term
+          is written to disk; it can linger in
+ the operating system kernel for a while. To ensure that the
+ item is written to disk, function
+ <seealso marker="#sync/1"><c>sync/1</c></seealso>
must be called.
</p>
- <p>The <c>log/2</c> function is used for internally formatted logs,
+ <p><c>log/2</c> is used for internally formatted logs,
and <c>blog/2</c> for externally formatted logs.
- <c>blog/2</c> can be used
- for internally formatted logs as well provided the binary was
- constructed with a call to <c>term_to_binary/1</c>.
- </p>
- <p>The owners that subscribe to notifications will be notified
+ <c>blog/2</c> can also be used
+ for internally formatted logs if the binary is
+ constructed with a call to
+ <seealso marker="erts:erlang#term_to_binary/1">
+ <c>term_to_binary/1</c></seealso>.</p>
+ <p>Owners subscribing to notifications are notified
of an error with an <c>error_status</c> message if the error
reason tag is <c>invalid_header</c> or <c>file_error</c>.
</p>
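+        <p>For illustration, assuming <c>my_log</c> is an internally
+          formatted log and <c>my_ext_log</c> an externally formatted one
+          (both names are examples only), terms and bytes can be appended
+          and then forced to disk as follows:</p>
+        <pre>
+ok = disk_log:log(my_log, {event, erlang:timestamp(), started}),
+ok = disk_log:blog(my_ext_log, "a chunk of bytes"),
+ok = disk_log:sync(my_log)</pre>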
@@ -716,27 +741,27 @@
<func>
<name name="log_terms" arity="2"/>
<name name="blog_terms" arity="2"/>
- <fsummary>Log several items onto a disk log.</fsummary>
+ <fsummary>Log many items onto a disk log.</fsummary>
<type variable="Log"/>
<type variable="TermList" name_i="1"/>
<type variable="BytesList"/>
<type name="log_error_rsn"/>
<desc>
- <p>The <c>log_terms/2</c> and <c>blog_terms/2</c> functions
- synchronously append a list of items to the log. The benefit
- of using these functions rather than the <c>log/2</c> and
- <c>blog/2</c> functions is that of efficiency: the given
- list is split into as large sublists as possible (limited by
- the size of wrap log files), and each sublist is logged as
- one single item, which reduces the overhead.
+ <p>Synchronously appends a list of items to the log. It is more
+ efficient to use these functions instead of functions <c>log/2</c>
+ and <c>blog/2</c>. The specified list is split into as large
+ sublists as possible (limited by the size of wrap log files),
+ and each sublist is logged as one single item, which reduces
+ the overhead.
</p>
- <p>The <c>log_terms/2</c> function is used for internally formatted
+ <p><c>log_terms/2</c> is used for internally formatted
logs, and <c>blog_terms/2</c> for externally formatted logs.
- <c>blog_terms/2</c> can be used
- for internally formatted logs as well provided the binaries were
- constructed with calls to <c>term_to_binary/1</c>.
- </p>
- <p>The owners that subscribe to notifications will be notified
+ <c>blog_terms/2</c> can also be used
+ for internally formatted logs if the binaries are
+ constructed with calls to
+ <seealso marker="erts:erlang#term_to_binary/1">
+ <c>term_to_binary/1</c></seealso>.</p>
+ <p>Owners subscribing to notifications are notified
of an error with an <c>error_status</c> message if the error
reason tag is <c>invalid_header</c> or <c>file_error</c>.
</p>
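+        <p>For illustration, a batch of terms can be appended in one call
+          (the log name and terms are examples only):</p>
+        <pre>
+Terms = lists:map(fun(N) -> {sample, N} end, lists:seq(1, 1000)),
+ok = disk_log:log_terms(my_log, Terms)</pre>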
@@ -755,110 +780,119 @@
<type name="dlog_optattr"/>
<type name="dlog_size"/>
<desc>
- <p>The <c><anno>ArgL</anno></c> parameter is a list of options which have
- the following meanings:</p>
- <list type="bulleted">
+ <p>Parameter <c><anno>ArgL</anno></c> is a list of the following
+ options:</p>
+ <taglist>
+ <tag><c>{name, <anno>Log</anno>}</c></tag>
<item>
- <p><c>{name, <anno>Log</anno>}</c> specifies the name of the log.
- This is the name which must be passed on as a parameter in
+ <p>Specifies the log name.
+ This name must be passed on as a parameter in
all subsequent logging operations. A name must always
be supplied.
</p>
</item>
+ <tag><c>{file, <anno>FileName</anno>}</c></tag>
<item>
- <p><c>{file, <anno>FileName</anno>}</c> specifies the name of the
- file which will be used for logged terms. If this value is
- omitted and the name of the log is either an atom or a string,
- the file name will default to <c>lists:concat([<anno>Log</anno>, ".LOG"])</c> for halt logs. For wrap logs, this will be
- the base name of the files. Each file in a wrap log
- will be called <c><![CDATA[<base_name>.N]]></c>, where <c>N</c> is an
- integer. Each wrap log will also have two files called
+ <p>Specifies the name of the
+ file to be used for logged terms. If this value is
+ omitted and the log name is an atom or a string,
+ the filename defaults to <c>lists:concat([<anno>Log</anno>, ".LOG"])</c>
+ for halt logs.</p>
+ <p>For wrap logs, this is the base name of the files. Each file in
+ a wrap log is called <c><![CDATA[<base_name>.N]]></c>, where <c>N</c>
+ is an integer. Each wrap log also has two files called
<c><![CDATA[<base_name>.idx]]></c> and <c><![CDATA[<base_name>.siz]]></c>.
</p>
</item>
+ <tag><c>{linkto, <anno>LinkTo</anno>}</c><marker id="linkto"></marker></tag>
<item>
- <p><c>{linkto, <anno>LinkTo</anno>}</c>. <marker id="linkto"></marker>
-If
- <c><anno>LinkTo</anno></c> is a pid, that pid becomes an owner of the
- log. If <c><anno>LinkTo</anno></c> is <c>none</c> the log records
+ <p>If <c><anno>LinkTo</anno></c> is a pid, it becomes an owner of the
+ log. If <c><anno>LinkTo</anno></c> is <c>none</c>, the log records
that it is used anonymously by some process by
incrementing the <c>users</c> counter. By default, the
- process which calls <c>open/1</c> owns the log.
+ process that calls <c>open/1</c> owns the log.
</p>
</item>
+ <tag><c>{repair, <anno>Repair</anno>}</c></tag>
<item>
- <p><c>{repair, <anno>Repair</anno>}</c>. If <c><anno>Repair</anno></c> is <c>true</c>,
- the current log file will be repaired, if needed. As the
+ <p>If <c><anno>Repair</anno></c> is <c>true</c>,
+ the current log file is repaired, if needed. As the
restoration is initiated, a message is output on the error log.
- If <c>false</c> is given,
- no automatic repair will be attempted. Instead, the
+ If <c>false</c> is specified,
+ no automatic repair is attempted. Instead, the
tuple <c>{error, {need_repair, <anno>Log</anno>}}</c> is returned if an
attempt is made to open a corrupt log file.
- If <c>truncate</c> is given, the log file will
- be truncated, creating an empty log. Default is
+            If <c>truncate</c> is specified, the log file is
+ truncated, creating an empty log. Defaults to
<c>true</c>, which has no effect on logs opened in
read-only mode.
</p>
</item>
+ <tag><c>{type, <anno>Type</anno>}</c></tag>
<item>
- <p><c>{type, <anno>Type</anno>}</c> is the type of the log. Default
- is <c>halt</c>.
+ <p>The log type. Defaults to <c>halt</c>.
</p>
</item>
+ <tag><c>{format, <anno>Format</anno>}</c></tag>
<item>
- <p><c>{format, <anno>Format</anno>}</c> specifies the format of the
- disk log. Default is <c>internal</c>.
+ <p>Disk log format. Defaults to <c>internal</c>.
</p>
</item>
+ <tag><c>{size, <anno>Size</anno>}</c></tag>
<item>
- <p><c>{size, <anno>Size</anno>}</c> specifies the size of the log.
- When a halt log has reached its maximum size, all attempts to
- log more items are rejected. The default size is
+ <p>Log size.</p>
+ <p>When a halt log has reached its maximum size, all attempts to
+ log more items are rejected. Defaults to
<c>infinity</c>, which for halt implies that there is no
- maximum size. For wrap logs, the <c><anno>Size</anno></c> parameter
- may be either a pair
- <c>{<anno>MaxNoBytes</anno>, <anno>MaxNoFiles</anno>}</c> or <c>infinity</c>. In the
- latter case, if the files of an already existing wrap log
+ maximum size.</p>
+ <p>For wrap logs, parameter <c><anno>Size</anno></c>
+ can be a pair
+ <c>{<anno>MaxNoBytes</anno>, <anno>MaxNoFiles</anno>}</c> or
+ <c>infinity</c>.
+ In the latter case, if the files of an existing wrap log
with the same name can be found, the size is read
- from the existing wrap log, otherwise an error is returned.
- Wrap logs write at most <c><anno>MaxNoBytes</anno></c> bytes on each file
- and use <c><anno>MaxNoFiles</anno></c> files before starting all over with
- the first wrap log file. Regardless of <c><anno>MaxNoBytes</anno></c>,
+ from the existing wrap log, otherwise an error is returned.</p>
+ <p>Wrap logs write at most <c><anno>MaxNoBytes</anno></c>
+ bytes on each file and use <c><anno>MaxNoFiles</anno></c>
+ files before starting all over with the first wrap log
+ file. Regardless of <c><anno>MaxNoBytes</anno></c>,
at least the header (if there is one) and one
- item is written on each wrap log file before
- wrapping to the next file.
- When opening an existing wrap log, it is not
- necessary to supply a value for the option <c>Size</c>, but any
- supplied value must equal the current size of the log, otherwise
- the tuple <c>{error, {size_mismatch, <anno>CurrentSize</anno>, <anno>NewSize</anno>}}</c>
- is returned.
- </p>
+ item are written on each wrap log file before
+ wrapping to the next file.</p>
+ <p>When opening an existing wrap log, it is not
+ necessary to supply a value for option <c>Size</c>, but any
+ supplied value must equal the current log size, otherwise
+ the tuple <c>{error, {size_mismatch, <anno>CurrentSize</anno>,
+ <anno>NewSize</anno>}}</c> is returned.</p>
</item>
+ <tag><c>{distributed, <anno>Nodes</anno>}</c></tag>
<item>
- <p><c>{distributed, <anno>Nodes</anno>}</c>. This option can be used for
- adding members to a distributed disk log. The
- default value is <c>[]</c>, which means that
+ <p>This option can be used for
+ adding members to a distributed disk log.
+ Defaults to <c>[]</c>, which means that
the log is local on the current node.
</p>
</item>
+ <tag><c>{notify, boolean()}</c><marker id="notify"></marker></tag>
<item>
- <marker id="notify"></marker>
- <p><c>{notify, bool()}</c>. If <c>true</c>, the owners of the
- log are notified when certain events occur in the log.
- Default is <c>false</c>. The owners are sent one of the
+ <p>If <c>true</c>, the log owners
+ are notified when certain log events occur.
+ Defaults to <c>false</c>. The owners are sent one of the
following messages when an event occurs:
</p>
- <list type="bulleted">
+ <taglist>
+ <tag><c>{disk_log, Node, Log, {wrap, NoLostItems}}</c></tag>
<item>
- <p><c>{disk_log, Node, Log, {wrap, NoLostItems}}</c> is sent when a wrap log has
+ <p>Sent when a wrap log has
filled up one of its files and a new file is
opened. <c>NoLostItems</c> is the number of
- previously logged items that have been lost when
+ previously logged items that were lost when
truncating existing files.
</p>
</item>
+ <tag><c>{disk_log, Node, Log, {truncated, NoLostItems}}</c></tag>
<item>
- <p><c>{disk_log, Node, Log, {truncated, NoLostItems}}</c> is sent when a log has been
+ <p>Sent when a log is
truncated or reopened. For halt logs <c>NoLostItems</c>
is the number of items written on the log since the
disk log process was created. For wrap logs
@@ -866,127 +900,138 @@ If
wrap log files.
</p>
</item>
+ <tag><c>{disk_log, Node, Log, {read_only, Items}}</c></tag>
<item>
- <p><c>{disk_log, Node, Log, {read_only, Items}}</c>
- is sent when an asynchronous log attempt is made to
+ <p>Sent when an asynchronous log attempt is made to
a log file opened in read-only mode.
<c>Items</c> is the items from the log attempt.
</p>
</item>
+ <tag><c>{disk_log, Node, Log, {blocked_log, Items}}</c></tag>
<item>
- <p><c>{disk_log, Node, Log, {blocked_log, Items}}</c>
- is sent when an asynchronous log attempt is made to
+ <p>Sent when an asynchronous log attempt is made to
a blocked log that does not queue log attempts.
<c>Items</c> is the items from the log attempt.
</p>
</item>
+ <tag><c>{disk_log, Node, Log, {format_external, Items}}</c></tag>
<item>
- <p><c>{disk_log, Node, Log, {format_external, Items}}</c>
- is sent when <c>alog/2</c> or <c>alog_terms/2</c> is
+ <p>Sent when function <c>alog/2</c> or <c>alog_terms/2</c> is
used for internally formatted logs. <c>Items</c> is the
items from the log attempt.
</p>
</item>
+ <tag><c>{disk_log, Node, Log, full}</c></tag>
<item>
- <p><c>{disk_log, Node, Log, full}</c> is sent when
+ <p>Sent when
an attempt to log items to a wrap log would write more
- bytes than the limit set by the <c>size</c> option.
+ bytes than the limit set by option <c>size</c>.
</p>
</item>
+ <tag><c>{disk_log, Node, Log, {error_status, Status}}</c></tag>
<item>
- <p><c>{disk_log, Node, Log, {error_status, Status}}</c>
- is sent when the error status changes. The error status
+ <p>Sent when the error status changes. The error status
is defined by the outcome of the last attempt to log
- items to a the log or to truncate the log or the last
- use of <c>sync/1</c>, <c>inc_wrap_file/1</c> or
- <c>change_size/2</c>. <c>Status</c> is one of <c>ok</c> and
- <c>{error, Error}</c>, the former being the initial value.
+ items to the log, or to truncate the log, or the last
+ use of function <c>sync/1</c>, <c>inc_wrap_file/1</c>, or
+ <c>change_size/2</c>. <c>Status</c> is either <c>ok</c> or
+                <c>{error, Error}</c>, where the former is the initial value.
</p>
</item>
- </list>
+ </taglist>
</item>
+ <tag><c>{head, <anno>Head</anno>}</c></tag>
<item>
- <p><c>{head, <anno>Head</anno>}</c> specifies a header to be
+ <p>Specifies a header to be
written first on the log file. If the log is a wrap
log, the item <c><anno>Head</anno></c> is written first in each new file.
- <c><anno>Head</anno></c> should be a term if the format is
- <c>internal</c>, and a deep list of bytes (or a binary)
- otherwise. Default is <c>none</c>, which means that
+ <c><anno>Head</anno></c> is to be a term if the format is
+ <c>internal</c>, otherwise a sequence of bytes.
+ Defaults to <c>none</c>, which means that
no header is written first on the file.
</p>
</item>
+ <tag><c>{head_func, {M,F,A}}</c></tag>
<item>
- <p><c>{head_func, {M,F,A}}</c> specifies a function
+ <p>Specifies a function
to be called each time a new log file is opened.
The call <c>M:F(A)</c> is assumed to return <c>{ok, Head}</c>.
The item <c>Head</c> is written first in each file.
- <c>Head</c> should be a term if the format is
- <c>internal</c>, and a deep list of bytes (or a binary)
- otherwise.
+ <c>Head</c> is to be a term if the format is
+ <c>internal</c>, otherwise a sequence of bytes.
</p>
</item>
+ <tag><c>{mode, <anno>Mode</anno>}</c></tag>
<item>
- <p><c>{mode, <anno>Mode</anno>}</c> specifies if the log is to be
- opened in read-only or read-write mode. It defaults to
+ <p>Specifies if the log is to be
+ opened in read-only or read-write mode. Defaults to
<c>read_write</c>.
</p>
</item>
- </list>
- <p>The <c>open/1</c> function returns <c>{ok, <anno>Log</anno>}</c> if the
- log file was successfully opened. If the file was
- successfully repaired, the tuple <c>{repaired, <anno>Log</anno>, {recovered, <anno>Rec</anno>}, {badbytes, <anno>Bad</anno>}}</c> is returned, where
- <c><anno>Rec</anno></c> is the number of whole Erlang terms found in the
- file and <c><anno>Bad</anno></c> is the number of bytes in the file which
- were non-Erlang terms. If the <c>distributed</c> parameter
- was given, <c>open/1</c> returns a list of
+ <tag><c>{quiet, Boolean}</c></tag>
+ <item>
+            <p>Specifies whether messages are sent to
+              <c>error_logger</c> on recoverable errors with
+ the log files. Defaults to <c>false</c>.</p>
+ </item>
+ </taglist>
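+        <p>For illustration, a wrap log over five files of at most
+          100&nbsp;kB each can be opened as follows (the name, path, and
+          size are examples only):</p>
+        <pre>
+{ok, my_log} = disk_log:open([{name, my_log},
+                              {file, "/var/log/my_app/my_log"},
+                              {type, wrap},
+                              {size, {100000, 5}}])</pre>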
+ <p><c>open/1</c> returns <c>{ok, <anno>Log</anno>}</c> if the
+ log file is successfully opened. If the file is
+ successfully repaired, the tuple <c>{repaired, <anno>Log</anno>,
+ {recovered, <anno>Rec</anno>}, {badbytes, <anno>Bad</anno>}}</c>
+ is returned, where <c><anno>Rec</anno></c> is the number of
+ whole Erlang terms found in the file and <c><anno>Bad</anno></c>
+ is the number of bytes in the file that
+ are non-Erlang terms. If the parameter <c>distributed</c>
+ is specified, <c>open/1</c> returns a list of
successful replies and a list of erroneous replies. Each
reply is tagged with the node name.
</p>
<p>When a disk log is opened in read-write mode, any existing
- log file is checked for. If there is none a new empty
+ log file is checked for. If there is none, a new empty
log is created, otherwise the existing file is opened at the
position after the last logged item, and the logging of items
- will commence from there. If the format is <c>internal</c>
+ starts from there. If the format is <c>internal</c>
and the existing file is not recognized as an internally
- formatted log, a tuple <c>{error, {not_a_log_file, <anno>FileName</anno>}}</c>
+ formatted log, a tuple
+ <c>{error, {not_a_log_file, <anno>FileName</anno>}}</c>
is returned.
</p>
- <p>The <c>open/1</c> function cannot be used for changing the
- values of options of an already open log; when there are prior
+ <p><c>open/1</c> cannot be used for changing the
+ values of options of an open log. When there are prior
owners or users of a log, all option values except <c>name</c>,
- <c>linkto</c> and <c>notify</c> are just checked against
- the values that have been supplied before as option values
- to <c>open/1</c>, <c>change_header/2</c>, <c>change_notify/3</c>
- or <c>change_size/2</c>. As a consequence,
+ <c>linkto</c>, and <c>notify</c> are only checked against
+ the values supplied before as option values
+ to function <c>open/1</c>, <c>change_header/2</c>, <c>change_notify/3</c>,
+ or <c>change_size/2</c>. Thus,
none of the options except <c>name</c> is mandatory. If some
- given value differs from the current value, a tuple
+ specified value differs from the current value, a tuple
<c>{error, {arg_mismatch, <anno>OptionName</anno>, <anno>CurrentValue</anno>, <anno>Value</anno>}}</c>
- is returned. Caution: an owner's attempt to open a log
- as owner once again is acknowledged with the return value
+ is returned.</p>
+ <note><p>If an owner attempts to open a log
+ as owner once again, it is acknowledged with the return value
<c>{ok, <anno>Log</anno>}</c>, but the state of the disk log is not
- affected in any way.
- </p>
- <p>If a log with a given name is local on some node,
+ affected.</p></note>
+ <p>If a log with a specified name is local on some node,
and one tries to open the log distributed on the same node,
- then the tuple <c>{error, {node_already_open, <anno>Log</anno>}}</c> is
+ the tuple <c>{error, {node_already_open, <anno>Log</anno>}}</c> is
returned. The same tuple is returned if the log is distributed on
some node, and one tries to open the log locally on the same node.
Opening individual distributed disk logs for the first time
adds those logs to a (possibly empty) distributed disk log.
- The option values supplied are used
- on all nodes mentioned by the <c>distributed</c> option.
+ The supplied option values are used
+ on all nodes mentioned by option <c>distributed</c>.
Individual distributed logs know nothing
about each other's option values, so each node can be
given unique option values by creating a distributed
- log with several calls to <c>open/1</c>.
+ log with many calls to <c>open/1</c>.
</p>
- <p>It is possible to open a log file more than once by giving
- different values to the option <c>name</c> or by using the
+ <p>A log file can be opened more than once by giving
+ different values to option <c>name</c> or by using the
same file when distributing a log on different nodes.
- It is up to the user of the <c>disk_log</c>
- module to ensure that no more than one
- disk log process has write access to any file, or the
- the file may be corrupted.
+ It is up to the user of module <c>disk_log</c>
+ to ensure that not more than one disk log process has write
+ access to any file, otherwise the file can be corrupted.
</p>
<p>If an attempt to open a log file for the first time fails,
the disk log process terminates with the EXIT message
@@ -999,9 +1044,9 @@ If
<name name="pid2name" arity="1"/>
<fsummary>Return the name of the disk log handled by a pid.</fsummary>
<desc>
- <p>The <c>pid2name/1</c> function returns the name of the log
+ <p>Returns the log name
given the pid of a disk log process on the current node, or
- <c>undefined</c> if the given pid is not a disk log process.
+ <c>undefined</c> if the specified pid is not a disk log process.
</p>
<p>This function is meant to be used for debugging only.
</p>
@@ -1018,25 +1063,25 @@ If
<type variable="BHead"/>
<type name="reopen_error_rsn"/>
<desc>
- <p>The <c>reopen</c> functions first rename the log file
- to <c><anno>File</anno></c> and then re-create a new log file.
- In case of a wrap log, <c><anno>File</anno></c> is used as the base name
+ <p>Renames the log file
+          to <c><anno>File</anno></c> and then creates a new log file.
+          For wrap logs, <c><anno>File</anno></c> is used as the base name
of the renamed files.
By default the header given to <c>open/1</c> is written first in
- the newly opened log file, but if the <c><anno>Head</anno></c> or the
- <c><anno>BHead</anno></c> argument is given, this item is used instead.
- The header argument is used once only; next time a wrap log file
+ the newly opened log file, but if argument <c><anno>Head</anno></c> or
+ <c><anno>BHead</anno></c> is specified, this item is used instead.
+ The header argument is used only once. Next time a wrap log file
is opened, the header given to <c>open/1</c> is used.
</p>
- <p>The <c>reopen/2,3</c> functions are used for internally formatted
+ <p><c>reopen/2,3</c> are used for internally formatted
logs, and <c>breopen/3</c> for externally formatted logs.
</p>
- <p>The owners that subscribe to notifications will receive
+ <p>Owners subscribing to notifications receive
a <c>truncate</c> message.
</p>
<p>Upon failure to reopen the log, the disk log process terminates
- with the EXIT message <c>{{failed,Error},[{disk_log,Fun,Arity}]}</c>,
- and other processes that have requests queued receive the message
+ with the EXIT message <c>{{failed,Error},[{disk_log,Fun,Arity}]}</c>.
+ Other processes having requests queued receive the message
<c>{disk_log, Node, {error, disk_log_stopped}}</c>.
</p>
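+        <p>For illustration, the current contents of an internally
+          formatted log can be moved aside while logging continues to a
+          fresh file (the names are examples only):</p>
+        <pre>
+ok = disk_log:reopen(my_log, "my_log.old")</pre>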
</desc>
@@ -1046,8 +1091,7 @@ If
<fsummary>Flush the contents of a disk log to the disk.</fsummary>
<type name="sync_error_rsn"/>
<desc>
- <p>The <c>sync/1</c> function ensures that the contents of the
- log are actually written to the disk.
+ <p>Ensures that the contents of the log are written to the disk.
This is usually a rather expensive operation.
</p>
</desc>
@@ -1062,24 +1106,24 @@ If
<type variable="BHead"/>
<type name="trunc_error_rsn"/>
<desc>
- <p>The <c>truncate</c> functions remove all items from a disk log.
- If the <c><anno>Head</anno></c> or the <c><anno>BHead</anno></c> argument is
- given, this item is written first in the newly truncated
+ <p>Removes all items from a disk log.
+ If argument <c><anno>Head</anno></c> or <c><anno>BHead</anno></c> is
+ specified, this item is written first in the newly truncated
log, otherwise the header given to <c>open/1</c> is used.
- The header argument is only used once; next time a wrap log file
+ The header argument is used only once. Next time a wrap log file
is opened, the header given to <c>open/1</c> is used.
</p>
- <p>The <c>truncate/1,2</c> functions are used for internally
+ <p><c>truncate/1,2</c> are used for internally
formatted logs, and <c>btruncate/2</c> for externally formatted
logs.
</p>
- <p>The owners that subscribe to notifications will receive
+ <p>Owners subscribing to notifications receive
a <c>truncate</c> message.
</p>
<p>If the attempt to truncate the log fails, the disk log process
terminates with the EXIT message
- <c>{{failed,Reason},[{disk_log,Fun,Arity}]}</c>, and
- other processes that have requests queued receive the message
+ <c>{{failed,Reason},[{disk_log,Fun,Arity}]}</c>.
+ Other processes having requests queued receive the message
<c>{disk_log, Node, {error, disk_log_stopped}}</c>.
</p>
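+        <p>For illustration, an internally formatted log can be emptied
+          and given a new header term (the log name and header are
+          examples only):</p>
+        <pre>
+ok = disk_log:truncate(my_log, {truncated_at, erlang:localtime()})</pre>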
</desc>
@@ -1089,7 +1133,7 @@ If
<fsummary>Unblock a disk log.</fsummary>
<type name="unblock_error_rsn"/>
<desc>
- <p>The <c>unblock/1</c> function unblocks a log.
+ <p>Unblocks a log.
A log can only be unblocked by the blocking process.
</p>
</desc>
@@ -1098,8 +1142,8 @@ If
<section>
<title>See Also</title>
- <p><seealso marker="file">file(3)</seealso>,
- <seealso marker="pg2">pg2(3)</seealso>,
- <seealso marker="wrap_log_reader">wrap_log_reader(3)</seealso></p>
+ <p><seealso marker="file"><c>file(3)</c></seealso>,
+ <seealso marker="pg2"><c>pg2(3)</c></seealso>,
+ <seealso marker="wrap_log_reader"><c>wrap_log_reader(3)</c></seealso></p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/erl_boot_server.xml b/lib/kernel/doc/src/erl_boot_server.xml
index a8015fa453..4109251387 100644
--- a/lib/kernel/doc/src/erl_boot_server.xml
+++ b/lib/kernel/doc/src/erl_boot_server.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2013</year>
+ <year>1996</year><year>2016</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -29,72 +29,73 @@
<rev></rev>
</header>
<module>erl_boot_server</module>
- <modulesummary>Boot Server for Other Erlang Machines</modulesummary>
+ <modulesummary>Boot server for other Erlang machines.</modulesummary>
<description>
- <p>This server is used to assist diskless Erlang nodes which fetch
+ <p>This server is used to assist diskless Erlang nodes that fetch
all Erlang code from another machine.</p>
<p>This server is used to fetch all code, including the start
script, if an Erlang runtime system is started with
- the <c>-loader inet</c> command line flag. All hosts specified
- with the <c>-hosts Host</c> command line flag must have one
+ command-line flag <c>-loader inet</c>. All hosts specified
+ with command-line flag <c>-hosts Host</c> must have one
instance of this server running.</p>
- <p>This server can be started with the <c>kernel</c> configuration
+ <p>This server can be started with the Kernel configuration
parameter <c>start_boot_server</c>.</p>
- <p>The <c>erl_boot_server</c> can both read regular files as well as
- files in archives. See <seealso marker="code">code(3)</seealso>
- and <seealso marker="erts:erl_prim_loader">erl_prim_loader(3)</seealso>.</p>
- <warning><p>The support for loading of code from archive files is
- experimental. The sole purpose of releasing it before it is ready
- is to obtain early feedback. The file format, semantics,
- interfaces etc. may be changed in a future release.</p></warning>
+ <p>The <c>erl_boot_server</c> can read regular files and
+ files in archives. See <seealso marker="code"><c>code(3)</c></seealso>
+ and
+ <seealso marker="erts:erl_prim_loader"><c>erl_prim_loader(3)</c></seealso>
+ in ERTS.</p>
+ <warning><p>The support for loading code from archive files is
+    experimental. It is released before it is ready, in order
+    to obtain early feedback. The file format, semantics,
+ interfaces, and so on, can be changed in a future release.</p></warning>
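+    <p>For illustration, the boot server can be started on the machine
+      holding the code, listing the addresses of the diskless nodes that
+      are allowed to fetch code (the address is an example only):</p>
+    <pre>
+erl_boot_server:start([{192,168,1,12}])</pre>
+    <p>A diskless node is then started with, for example,
+      <c>erl -loader inet -hosts 192.168.1.10</c>, where
+      <c>192.168.1.10</c> is assumed to be the address of the machine
+      running the boot server.</p>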
</description>
<funcs>
<func>
- <name name="start" arity="1"/>
- <fsummary>Start the boot server</fsummary>
+ <name name="add_slave" arity="1"/>
+ <fsummary>Add a slave to the list of allowed slaves.</fsummary>
<desc>
- <p>Starts the boot server. <c><anno>Slaves</anno></c> is a list of IP
- addresses for hosts which are allowed to use this server as a
- boot server.</p>
+ <p>Adds a <c><anno>Slave</anno></c> node to the list of allowed slave hosts.</p>
</desc>
</func>
<func>
- <name name="start_link" arity="1"/>
- <fsummary>Start the boot server and links the caller</fsummary>
+ <name name="delete_slave" arity="1"/>
+ <fsummary>Delete a slave from the list of allowed slaves.</fsummary>
<desc>
- <p>Starts the boot server and links to the caller. This function
- is used to start the server if it is included in a supervision
- tree.</p>
+ <p>Deletes a <c><anno>Slave</anno></c> node from the list of allowed slave
+ hosts.</p>
</desc>
</func>
<func>
- <name name="add_slave" arity="1"/>
- <fsummary>Add a slave to the list of allowed slaves</fsummary>
+ <name name="start" arity="1"/>
+ <fsummary>Start the boot server.</fsummary>
<desc>
- <p>Adds a <c><anno>Slave</anno></c> node to the list of allowed slave hosts.</p>
+ <p>Starts the boot server. <c><anno>Slaves</anno></c> is a list of
+        IP addresses for hosts that are allowed to use this server as a
+ boot server.</p>
</desc>
</func>
<func>
- <name name="delete_slave" arity="1"/>
- <fsummary>Delete a slave from the list of allowed slaves</fsummary>
+ <name name="start_link" arity="1"/>
+      <fsummary>Start the boot server and link to the caller.</fsummary>
<desc>
- <p>Deletes a <c><anno>Slave</anno></c> node from the list of allowed slave
- hosts.</p>
+ <p>Starts the boot server and links to the caller. This function
+ is used to start the server if it is included in a supervision
+ tree.</p>
</desc>
</func>
<func>
<name name="which_slaves" arity="0"/>
- <fsummary>Return the current list of allowed slave hosts</fsummary>
+ <fsummary>Return the current list of allowed slave hosts.</fsummary>
<desc>
<p>Returns the current list of allowed slave hosts.</p>
</desc>
</func>
</funcs>
-
<section>
<title>SEE ALSO</title>
- <p><seealso marker="erts:init">init(3)</seealso>,
- <seealso marker="erts:erl_prim_loader">erl_prim_loader(3)</seealso></p>
+ <p><seealso marker="erts:init"><c>erts:init(3)</c></seealso>,
+ <seealso marker="erts:erl_prim_loader"><c>erts:erl_prim_loader(3)</c></seealso></p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/erl_ddll.xml b/lib/kernel/doc/src/erl_ddll.xml
index 8d71883cf4..75114e015c 100644
--- a/lib/kernel/doc/src/erl_ddll.xml
+++ b/lib/kernel/doc/src/erl_ddll.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1997</year><year>2013</year>
+ <year>1997</year><year>2016</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -29,84 +29,91 @@
<rev></rev>
</header>
<module>erl_ddll</module>
- <modulesummary>Dynamic Driver Loader and Linker</modulesummary>
+ <modulesummary>Dynamic driver loader and linker.</modulesummary>
<description>
- <p>The <c>erl_ddll</c> module provides an interface for loading
- and unloading <em>erlang linked in drivers</em> in runtime.</p>
+ <p>This module provides an interface for loading
+ and unloading <em>Erlang linked-in drivers</em> in runtime.</p>
<note>
- <p>This is a large reference document. For casual use of the
- module, as well as for most real world applications, the
- descriptions of the functions <seealso marker="#load/2">load/2</seealso> and <seealso marker="#unload/1">unload/1</seealso> are enough to get
- going. </p>
+ <p>This is a large reference document. For casual use of this
+ module, and for most real world applications, the
+ descriptions of functions
+ <seealso marker="#load/2"><c>load/2</c></seealso> and
+ <seealso marker="#unload/1"><c>unload/1</c></seealso>
+      are enough to get started.</p>
</note>
- <p>The driver should be provided as a dynamically linked library
- in a object code format specific for the platform in use,
- i. e. <c>.so</c> files on most Unix systems and <c>.ddl</c>
- files on windows. An erlang linked in driver has to provide
+ <p>The driver is to be provided as a dynamically linked library
+ in an object code format specific for the platform in use,
+ that is, <c>.so</c> files on most Unix systems and <c>.ddl</c>
+ files on Windows. An Erlang linked-in driver must provide
specific interfaces to the emulator, so this module is not
- designed for loading arbitrary dynamic libraries. For further
- information about erlang drivers, refer to the ERTS reference
- manual section <seealso marker="erts:erl_driver">erl_driver</seealso>.</p>
+ designed for loading arbitrary dynamic libraries. For more
+ information about Erlang drivers, see
+      <seealso marker="erts:erl_driver"><c>erts:erl_driver</c></seealso>.</p>
<marker id="users"></marker>
- <p>When describing a set of functions, (i.e. a module, a part of a
- module or an application) executing in a process and wanting to
- use a ddll-driver, we use the term <em>user</em>. There can be
- several users in one process (different modules needing the same
- driver) and several processes running the same code, making up
- several <em>users</em> of a driver. In the basic scenario, each
- user loads the driver before starting to use it and unloads the
- driver when done. The reference counting keeps track of
- processes as well as the number of loads by each process, so that
- the driver will only be unloaded when no one wants it
- (it has no user). The driver also keeps track of ports that are
- opened towards it, so that one can delay unloading until all
- ports are closed or kill all ports using the driver when it is
- unloaded. </p>
+ <p>When describing a set of functions (that is, a module, a part of a
+ module, or an application), executing in a process and wanting to
+ use a ddll-driver, we use the term <em>user</em>. A process can
+ have many users (different modules needing the same
+ driver) and many processes running the same code, making up
+ many <em>users</em> of a driver.</p>
+ <p>In the basic scenario, each user loads the driver before
+ starting to use it and unloads the driver when done.
+ The reference counting keeps track of
+ processes and the number of loads by each process. This way
+ the driver is only unloaded when no one wants it (it has no user).
+ The driver also keeps track of ports that are
+ opened to it. This enables delay of unloading until all
+ ports are closed, or killing of all ports that use the driver when
+ it is unloaded.</p>
<marker id="scenarios"></marker>
<p>The interface supports two basic scenarios of loading and
unloading. Each scenario can also have the option of either
killing ports when the driver is unloading, or waiting for the
- ports to close themselves. The scenarios are:</p>
+ ports to close themselves. The scenarios are as follows:</p>
<taglist>
- <tag><em>Load and unload on a "when needed basis"</em></tag>
+ <tag><em>Load and Unload on a "When Needed Basis"</em></tag>
<item>
<p>This (most common) scenario simply supports that each
<seealso marker="#users">user</seealso> of the driver loads
- it when it is needed and unloads it when the <seealso marker="#users">user</seealso> no longer have any use for
- it. The driver is always reference counted and as long as a
+ it when needed and unloads it when no longer needed.
+ The driver is always reference counted and as long as a
process keeping the driver loaded is still alive, the driver
is present in the system.</p>
<p>Each <seealso marker="#users">user</seealso> of the driver
          uses <em>literally</em> the same pathname for the driver when
- demanding load, but the <seealso marker="#users">users</seealso> are not really concerned
- with if the driver is already loaded from the filesystem or
- if the object code has to be loaded from filesystem.</p>
- <p>Two pairs of functions support this scenario:</p>
+ demanding load, but the
+ <seealso marker="#users">users</seealso> are not concerned
+          with whether the driver is already loaded from the file system or
+          whether the object code must be loaded from the file system.</p>
+ <p>The following two pairs of functions support this scenario:</p>
<taglist>
<tag><em>load/2 and unload/1</em></tag>
<item>
<p>When using the <c>load/unload</c> interfaces, the
- driver will not <em>actually</em> get unloaded until the
- <em>last port</em> using the driver is closed. The function
- <c>unload/1</c> can return immediately, as the <seealso marker="#users">users</seealso> are not really concerned
- with when the actual unloading occurs. The
- driver will actually get unloaded when no one needs it any longer.</p>
- <p>If a process having the driver loaded dies, it will have
- the same effect as if unloading was done. </p>
- <p>When loading, the function <c>load/2</c> returns
- <c>ok</c> as soon as there is any instance of the driver
- present, so that if a driver is waiting to get unloaded
- (due to open ports), it will simply change state to no
+ driver is not unloaded until the
+ <em>last port</em> using the driver is closed. Function
+ <c>unload/1</c> can return immediately, as the
+ <seealso marker="#users">users</seealso>
+              have no interest in when the unloading occurs. The
+ driver is unloaded when no one needs it any longer.</p>
+ <p>If a process having the driver loaded dies, it has
+ the same effect as if unloading is done.</p>
+ <p>When loading, function <c>load/2</c> returns
+ <c>ok</c> when any instance of the driver is
+ present. Thus, if a driver is waiting to get unloaded
+ (because of open ports), it simply changes state to no
longer need unloading.</p>
</item>
<tag><em>load_driver/2 and unload_driver/1</em></tag>
<item>
- <p>These interfaces is intended to be used when it is considered an
- error that ports are open towards a driver that no <seealso marker="#users">user</seealso>
- has loaded. The ports still open when the
+ <p>These interfaces are intended to be used when it is considered an
+ error that ports are open to a driver that no
+ <seealso marker="#users">user</seealso>
+ has loaded. The ports that are still open when the
last <seealso marker="#users">user</seealso> calls
<c>unload_driver/1</c> or when the last process having the
- driver loaded dies, will get killed with reason
+ driver loaded dies, are killed with reason
<c>driver_unloaded</c>.</p>
<p>The function names <c>load_driver</c> and
<c>unload_driver</c> are kept for backward
@@ -114,60 +121,66 @@
</item>
</taglist>
</item>
- <tag><em>Loading and reloading for code replacement</em></tag>
+ <tag><em>Loading and Reloading for Code Replacement</em></tag>
<item>
- <p>This scenario occurs when the driver code might need
+ <p>This scenario can occur if the driver code needs
replacement during operation of the Erlang
- emulator. Implementing driver code replacement is somewhat
- more tedious than beam code replacement, as one driver
- cannot be loaded as both "old" and "new" code. All <seealso marker="#users">users</seealso> of a driver must have it
+ emulator. Implementing driver code replacement is a little
+ more tedious than Beam code replacement, as one driver
+ cannot be loaded as both "old" and "new" code. All
+ <seealso marker="#users">users</seealso> of a driver must have it
closed (no open ports) before the old code can be unloaded
and the new code can be loaded.</p>
- <p>The actual unloading/loading is done as one atomic
+ <p>The unloading/loading is done as one atomic
operation, blocking all processes in the system from using
- the driver concerned while in progress.</p>
+ the driver in question while in progress.</p>
<p>The preferred way to do driver code replacement is to let
<em>one single process</em> keep track of the driver. When
- the process start, the driver is loaded. When replacement
+ the process starts, the driver is loaded. When replacement
is required, the driver is reloaded. Unload is probably never
- done, or done when the process exits. If more than one <seealso marker="#users">user</seealso> has a driver loaded when code
- replacement is demanded, the replacement cannot occur until
- the last "other" <seealso marker="#users">user</seealso> has
+ done, or done when the process exits. If more than one
+ <seealso marker="#users">user</seealso> has a driver
+ loaded when code replacement is demanded, the replacement cannot
+ occur until the last "other"
+ <seealso marker="#users">user</seealso> has
unloaded the driver.</p>
<p>Demanding reload when a reload is already in progress is
- always an error. Using the high level functions, it is also
- an error to demand reloading when more than one <seealso marker="#users">user</seealso> has the driver loaded. To
- simplify driver replacement, avoid designing your system so
- that more than than one <seealso marker="#users">user</seealso> has the driver loaded.</p>
- <p>The two functions for reloading drivers should be used
- together with corresponding load functions, to support the two
+ always an error. Using the high-level functions, it is also
+ an error to demand reloading when more than one
+ <seealso marker="#users">user</seealso> has the driver loaded.</p>
+ <p>To simplify driver replacement, avoid designing your system so
+ that more than one
+ <seealso marker="#users">user</seealso> has the driver loaded.</p>
+ <p>The two functions for reloading drivers are to be used
+ together with corresponding load functions to support the two
different behaviors concerning open ports:</p>
<taglist>
<tag><em>load/2 and reload/2</em></tag>
<item>
- <p>This pair of functions is used when reloading should be
- done after the last open port towards the driver is
+ <p>This pair of functions is used when reloading is to be
+ done after the last open port to the driver is
closed.</p>
- <p>As <c>reload/2</c> actually waits for the reloading to
- occur, a misbehaving process keeping open ports towards
- the driver (or keeping the driver loaded) might cause
- infinite waiting for reload. Timeouts has to be provided
+ <p>As <c>reload/2</c> waits for the reloading to
+ occur, a misbehaving process keeping open ports to
+ the driver (or keeping the driver loaded) can cause
+ infinite waiting for reload. Time-outs must be provided
outside of the process demanding the reload or by using
- the low-level interface <seealso marker="#try_load/3">try_load/3</seealso> in combination
- with driver monitors (see below).</p>
+ the low-level interface
+ <seealso marker="#try_load/3"><c>try_load/3</c></seealso>
+ in combination with driver monitors.</p>
</item>
<tag><em>load_driver/2 and reload_driver/2</em></tag>
<item>
- <p>This pair of functions are used when open ports towards
- the driver should be killed with reason
+ <p>This pair of functions are used when open ports to
+ the driver are to be killed with reason
<c>driver_unloaded</c> to allow for new driver code to
get loaded.</p>
- <p>If, however, another process has the driver loaded,
- calling <c>reload_driver</c> returns the error code
+ <p>However, if another process has the driver loaded,
+ calling <c>reload_driver</c> returns error code
<c>pending_process</c>. As stated earlier,
- the recommended design is to not allow other <seealso marker="#users">users</seealso> than the "driver
- reloader" to actually demand loading of the concerned
- driver.</p>
+              the recommended design is to not allow
+              <seealso marker="#users">users</seealso> other than the "driver
+              reloader" to demand loading of the driver in question.</p>
</item>
</taglist>
</item>
@@ -184,903 +197,982 @@
<funcs>
<func>
<name name="demonitor" arity="1"/>
- <fsummary>Remove a monitor for a driver</fsummary>
+ <fsummary>Remove a monitor for a driver.</fsummary>
<desc>
<p>Removes a driver monitor in much the same way as
- <seealso marker="erts:erlang#erlang:demonitor/1">erlang:demonitor/1</seealso> does with process
- monitors. See <seealso marker="#monitor/2">monitor/2</seealso>, <seealso marker="#try_load/3">try_load/3</seealso> and <seealso marker="#try_unload/2">try_unload/2</seealso> for details
- about how to create driver monitors.</p>
+ <seealso marker="erts:erlang#erlang:demonitor/1"><c>erlang:demonitor/1</c></seealso>
+ in ERTS
+ does with process monitors. For details about how to create
+ driver monitors, see
+ <seealso marker="#monitor/2"><c>monitor/2</c></seealso>,
+ <seealso marker="#try_load/3"><c>try_load/3</c></seealso>, and
+ <seealso marker="#try_unload/2"><c>try_unload/2</c></seealso>.</p>
<p>The function throws a <c>badarg</c> exception if the
- parameter is not a reference(). </p>
+ parameter is not a <c>reference()</c>.</p>
+ </desc>
+ </func>
+ <func>
+ <name name="format_error" arity="1"/>
+ <fsummary>Format an error descriptor.</fsummary>
+ <desc>
+ <p>Takes an <c><anno>ErrorDesc</anno></c> returned by load, unload, or
+ reload functions and returns a string that
+ describes the error or warning.</p>
+ <note>
+ <p>Because of peculiarities in the dynamic loading interfaces on
+ different platforms, the returned string is only guaranteed
+          to describe the correct error <em>if format_error/1 is called
+          in the same instance of the Erlang virtual machine in which the
+          error appeared</em> (meaning the same operating
+ system process).</p>
+ </note>
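+        <p>For illustration, a failed load attempt can be reported as
+          follows (the path and driver name are examples only, and the
+          exact error term is system-dependent):</p>
+        <pre>
+case erl_ddll:load("/opt/my_app/priv/lib", "my_drv") of
+    ok ->
+        ok;
+    {error, ErrorDesc} ->
+        io:format("could not load driver: ~s~n",
+                  [erl_ddll:format_error(ErrorDesc)])
+end</pre>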
</desc>
</func>
<func>
<name name="info" arity="0"/>
- <fsummary>Retrieve information about all drivers</fsummary>
+ <fsummary>Retrieve information about all drivers.</fsummary>
<desc>
- <p>Returns a list of tuples <c>{<anno>DriverName</anno>, <anno>InfoList</anno>}</c>, where
- <c><anno>InfoList</anno></c> is the result of calling <seealso marker="#info/1">info/1</seealso> for that
- <c><anno>DriverName</anno></c>. Only dynamically linked in drivers are
+ <p>Returns a list of tuples <c>{<anno>DriverName</anno>, <anno>InfoList</anno>}</c>,
+ where <c><anno>InfoList</anno></c> is the result of calling
+ <seealso marker="#info/1"><c>info/1</c></seealso> for that
+ <c><anno>DriverName</anno></c>. Only dynamically linked-in drivers are
included in the list.</p>
</desc>
</func>
<func>
<name name="info" arity="1"/>
- <fsummary>Retrieve information about one driver</fsummary>
+ <fsummary>Retrieve information about one driver.</fsummary>
<desc>
- <p>Returns a list of tuples <c>{<anno>Tag</anno>, <anno>Value</anno>}</c>, where
- <c><anno>Tag</anno></c> is the information item and <c><anno>Value</anno></c> is the result
- of calling <seealso marker="#info/2">info/2</seealso> with this driver name and
- this tag. The result being a tuple list containing all
- information available about a driver. </p>
- <p>The different tags that will appear in the list are:</p>
+ <p>Returns a list of tuples <c>{<anno>Tag</anno>, <anno>Value</anno>}</c>,
+ where <c><anno>Tag</anno></c> is the information item and
+ <c><anno>Value</anno></c> is the result of calling
+ <seealso marker="#info/2"><c>info/2</c></seealso> with this driver
+ name and this tag. The result is a tuple list containing all information
+ available about a driver.</p>
+      <p>The following tags appear in the list:</p>
<list type="bulleted">
- <item>processes</item>
- <item>driver_options</item>
- <item>port_count</item>
- <item>linked_in_driver</item>
- <item>permanent</item>
- <item>awaiting_load</item>
- <item>awaiting_unload</item>
+ <item><c>processes</c></item>
+ <item><c>driver_options</c></item>
+ <item><c>port_count</c></item>
+ <item><c>linked_in_driver</c></item>
+ <item><c>permanent</c></item>
+ <item><c>awaiting_load</c></item>
+ <item><c>awaiting_unload</c></item>
</list>
- <p>For a detailed description of each value, please read the
- description of <seealso marker="#info/2">info/2</seealso> below.</p>
+ <p>For a detailed description of each value, see
+ <seealso marker="#info/2"><c>info/2</c></seealso>.</p>
<p>The function throws a <c>badarg</c> exception if the driver
is not present in the system.</p>
</desc>
</func>
<func>
<name name="info" arity="2"/>
- <fsummary>Retrieve specific information about one driver</fsummary>
+ <fsummary>Retrieve specific information about one driver.</fsummary>
<desc>
- <p>This function returns specific information about one aspect
- of a driver. The <c><anno>Tag</anno></c> parameter specifies which aspect
- to get information about. The <c><anno>Value</anno></c> return differs
+ <p>Returns specific information about one aspect of a driver.
+ Parameter <c><anno>Tag</anno></c> specifies which aspect
+ to get information about. The return <c><anno>Value</anno></c> differs
between different tags:</p>
<taglist>
- <tag><em>processes</em></tag>
+ <tag><c>processes</c></tag>
<item>
- <p>Return all processes containing <seealso marker="#users">users</seealso> of the specific drivers
- as a list of tuples <c>{pid(),integer() >= 0}</c>, where the
- <c>integer()</c> denotes the number of users in the process
+ <p>Returns all processes containing
+ <seealso marker="#users">users</seealso> of the specific drivers
+ as a list of tuples <c>{pid(),integer() >= 0}</c>, where
+ <c>integer()</c> denotes the number of users in process
<c>pid()</c>.</p>
</item>
- <tag><em>driver_options</em></tag>
+ <tag><c>driver_options</c></tag>
<item>
- <p>Return a list of the driver options provided when
- loading, as well as any options set by the driver itself
- during initialization. The currently only valid option
- being <c>kill_ports</c>.</p>
+ <p>Returns a list of the driver options provided when
+ loading, and any options set by the driver
+ during initialization. The only valid option
+ is <c>kill_ports</c>.</p>
</item>
- <tag><em>port_count</em></tag>
+ <tag><c>port_count</c></tag>
<item>
- <p>Return the number of ports (an <c>integer >= 0()</c>) using the driver.</p>
+ <p>Returns the number of ports (an <c>integer() >= 0</c>)
+ using the driver.</p>
</item>
- <tag><em>linked_in_driver</em></tag>
+ <tag><c>linked_in_driver</c></tag>
<item>
- <p>Return a <c>boolean()</c>, being <c>true</c> if the driver is a
- statically linked in one and <c>false</c> otherwise.</p>
+ <p>Returns a <c>boolean()</c>, which is <c>true</c> if the driver is a
+ statically linked-in one, otherwise <c>false</c>.</p>
</item>
- <tag><em>permanent</em></tag>
+ <tag><c>permanent</c></tag>
<item>
- <p>Return a <c>boolean()</c>, being <c>true</c> if the driver has made
- itself permanent (and is <em>not</em> a statically
- linked in driver). <c>false</c> otherwise.</p>
+ <p>Returns a <c>boolean()</c>, which is <c>true</c> if the driver has
+ made itself permanent (and is <em>not</em> a statically
+ linked-in driver), otherwise <c>false</c>.</p>
</item>
- <tag><em>awaiting_load</em></tag>
+ <tag><c>awaiting_load</c></tag>
<item>
- <p>Return a list of all processes having monitors for
- <c>loading</c> active, each process returned as
- <c>{pid(),integer() >= 0}</c>, where the <c>integer()</c> is the
- number of monitors held by the process <c>pid()</c>.</p>
+ <p>Returns a list of all processes having monitors for
+ <c>loading</c> active. Each process is returned as
+ <c>{pid(),integer() >= 0}</c>, where <c>integer()</c> is the
+ number of monitors held by process <c>pid()</c>.</p>
</item>
- <tag><em>awaiting_unload</em></tag>
+ <tag><c>awaiting_unload</c></tag>
<item>
- <p>Return a list of all processes having monitors for
- <c>unloading</c> active, each process returned as
- <c>{pid(),integer() >= 0}</c>, where the <c>integer()</c> is the
- number of monitors held by the process <c>pid()</c>.</p>
+ <p>Returns a list of all processes having monitors for
+ <c>unloading</c> active. Each process is returned as
+ <c>{pid(),integer() >= 0}</c>, where <c>integer()</c> is the
+ number of monitors held by process <c>pid()</c>.</p>
</item>
</taglist>
- <p>If the options <c>linked_in_driver</c> or <c>permanent</c>
- return true, all other options will return the value
- <c>linked_in_driver</c> or <c>permanent</c> respectively.</p>
+ <p>If option <c>linked_in_driver</c> or <c>permanent</c>
+ returns <c>true</c>, all other options return
+ <c>linked_in_driver</c> or <c>permanent</c>, respectively.</p>
<p>The function throws a <c>badarg</c> exception if the driver
- is not present in the system or the tag is not supported.</p>
+ is not present in the system or if the tag is not supported.</p>
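+          <p>A minimal sketch of querying single aspects (the driver
+            name <c>"my_drv"</c> is hypothetical):</p>
+          <pre>
+%% true if "my_drv" is statically linked-in, otherwise false.
+Static = erl_ddll:info("my_drv", linked_in_driver),
+%% Number of ports currently using the driver.
+NPorts = erl_ddll:info("my_drv", port_count).
+          </pre>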
</desc>
</func>
<func>
<name name="load" arity="2"/>
- <fsummary>Load a driver</fsummary>
+ <fsummary>Load a driver.</fsummary>
<desc>
- <p>Loads and links the dynamic driver <c><anno>Name</anno></c>. <c><anno>Path</anno></c>
+ <p>Loads and links the dynamic driver <c><anno>Name</anno></c>.
+ <c><anno>Path</anno></c>
is a file path to the directory containing the driver.
<c><anno>Name</anno></c> must be a sharable object/dynamic library. Two
drivers with different <c><anno>Path</anno></c> parameters cannot be
- loaded under the same name. The <c><anno>Name</anno></c> is a string or
+ loaded under the same name. <c><anno>Name</anno></c> is a string or
atom containing at least one character.</p>
- <p>The <c><anno>Name</anno></c> given should correspond to the filename
- of the actual dynamically loadable object file residing in
- the directory given as <c><anno>Path</anno></c>, but <em>without</em> the
- extension (i.e. <c>.so</c>). The driver name provided in
+ <p>The <c><anno>Name</anno></c> specified is to correspond to the filename
+ of the dynamically loadable object file residing in
+ the directory specified as <c><anno>Path</anno></c>, but <em>without</em> the
+ extension (that is, <c>.so</c>). The driver name provided in
the driver initialization routine must correspond with the
- filename, in much the same way as erlang module names
+ filename, in much the same way as Erlang module names
correspond to the names of the <c>.beam</c> files.</p>
- <p>If the driver has been previously unloaded, but is still
- present due to open ports against it, a call to
- <c>load/2</c> will stop the unloading and keep the driver
- (as long as the <c><anno>Path</anno></c> is the same) and <c>ok</c> is
- returned. If one actually wants the object code to be
- reloaded, one uses <seealso marker="#reload/2">reload/2</seealso> or the low-level
- interface <seealso marker="#try_load/3">try_load/3</seealso>
- instead. Please refer to the description of <seealso marker="#scenarios">different scenarios</seealso> for
+ <p>If the driver was previously unloaded, but is still
+ present because of open ports to it, a call to
+ <c>load/2</c> stops the unloading and keeps the driver
+ (as long as <c><anno>Path</anno></c> is the same), and <c>ok</c> is
+ returned. If you really want the object code to be
+ reloaded, use <seealso marker="#reload/2"><c>reload/2</c></seealso>
+ or the low-level interface
+ <seealso marker="#try_load/3"><c>try_load/3</c></seealso> instead.
+ See also the description of
+ <seealso marker="#scenarios"><c>different scenarios</c></seealso> for
loading/unloading in the introduction.</p>
<p>If more than one process tries to load an already loaded
- driver withe the same <c><anno>Path</anno></c>, or if the same process
- tries to load it several times, the function will return
- <c>ok</c>. The emulator will keep track of the
+ driver with the same <c><anno>Path</anno></c>, or if the same process
+ tries to load it many times, the function returns
+ <c>ok</c>. The emulator keeps track of the
<c>load/2</c> calls, so that a corresponding number of
- <c>unload/2</c> calls will have to be done from the same
- process before the driver will actually get unloaded. It is
+ <c>unload/2</c> calls must be done from the same
+ process before the driver gets unloaded. It is
therefore safe for an application to load a driver that is
shared between processes or applications when needed. It can
safely be unloaded without causing trouble for other
- parts of the system. </p>
- <p>It is not allowed to load
- several drivers with the same name but with different
- <c>Path</c> parameters.</p>
+ parts of the system.</p>
+ <p>It is not allowed to load multiple drivers with
+ the same name but with different <c>Path</c> parameters.</p>
<note>
- <p>Note especially that the <c><anno>Path</anno></c> is interpreted
- literally, so that all loaders of the same driver needs to
- give the same <em>literal</em><c><anno>Path</anno></c> string, even
- though different paths might point out the same directory
- in the filesystem (due to use of relative paths and
+ <p><c><anno>Path</anno></c> is interpreted
+ literally, so that all loaders of the same driver must
+ specify the same <em>literal</em> <c><anno>Path</anno></c> string,
+ although different paths can point out the same directory
+ in the file system (because of use of relative paths and
links).</p>
</note>
<p>On success, the function returns <c>ok</c>. On
failure, the return value is <c>{error,<anno>ErrorDesc</anno>}</c>,
where <c><anno>ErrorDesc</anno></c> is an opaque term to be
- translated into human readable form by the <seealso marker="#format_error/1">format_error/1</seealso>
- function.</p>
- <p>For more control over the error handling, again use the
- <seealso marker="#try_load/3">try_load/3</seealso>
+ translated into human readable form by function
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>.</p>
+ <p>For more control over the error handling, use the
+ <seealso marker="#try_load/3"><c>try_load/3</c></seealso>
interface instead.</p>
<p>The function throws a <c>badarg</c> exception if the
- parameters are not given as described above. </p>
+ parameters are not specified as described here.</p>
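+          <p>A minimal usage sketch, assuming a loadable object
+            <c>my_drv.so</c> in the hypothetical directory
+            <c>/opt/drivers</c>:</p>
+          <pre>
+case erl_ddll:load("/opt/drivers", "my_drv") of
+    ok ->
+        %% The driver can now be used to open ports.
+        Port = open_port({spawn_driver, "my_drv"}, []),
+        {ok, Port};
+    {error, Reason} ->
+        {error, erl_ddll:format_error(Reason)}
+end.
+          </pre>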
</desc>
</func>
<func>
<name name="load_driver" arity="2"/>
- <fsummary>Load a driver</fsummary>
+ <fsummary>Load a driver.</fsummary>
<desc>
- <p>Works essentially as <c>load/2</c>, but will load the driver
- with other options. All ports that are using the
- driver will get killed with the reason
- <c>driver_unloaded</c> when the driver is to be unloaded.</p>
- <p>The number of loads and unloads by different <seealso marker="#users">users</seealso> influence the actual loading
- and unloading of a driver file. The port killing will
- therefore only happen when the <em>last</em> <seealso marker="#users">user</seealso> unloads the driver, or the
- last process having loaded the driver exits.</p>
+ <p>Works essentially as <c>load/2</c>, but loads the driver
+ with other options. All ports using the
+ driver are killed with reason <c>driver_unloaded</c>
+ when the driver is to be unloaded.</p>
+ <p>The number of loads and unloads by different
+ <seealso marker="#users">users</seealso> influences the loading
+ and unloading of a driver file. The port killing
+ therefore only occurs when the <em>last</em>
+ <seealso marker="#users">user</seealso> unloads the driver,
+ or when the last process having loaded the driver exits.</p>
<p>This interface (or at least the name of the functions) is
- kept for backward compatibility. Using <seealso marker="#try_load/3">try_load/3</seealso> with
- <c>{driver_options,[kill_ports]} </c> in the option list will
- give the same effect regarding the port killing.</p>
+ kept for backward compatibility.
+ Using <seealso marker="#try_load/3"><c>try_load/3</c></seealso> with
+ <c>{driver_options,[kill_ports]}</c> in the option list
+ gives the same effect regarding the port killing.</p>
<p>The function throws a <c>badarg</c> exception if the
- parameters are not given as described above. </p>
+ parameters are not specified as described here.</p>
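+          <p>A sketch of the equivalent call through the low-level
+            interface (path and driver name are hypothetical):</p>
+          <pre>
+%% Same port-killing behavior as load_driver/2.
+{ok, _Status} = erl_ddll:try_load("/opt/drivers", "my_drv",
+                                  [{driver_options, [kill_ports]}]).
+          </pre>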
+ </desc>
+ </func>
+ <func>
+ <name name="loaded_drivers" arity="0"/>
+ <fsummary>List loaded drivers.</fsummary>
+ <desc>
+ <p>Returns a list of all the available drivers, both
+ (statically) linked-in and dynamically loaded ones.</p>
+ <p>The driver names are returned as a list of strings rather
+ than a list of atoms for historical reasons.</p>
+ <p>For more information about drivers, see
+ <seealso marker="#info/0"><c>info</c></seealso>.</p>
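+          <p>A minimal sketch (the driver name checked for is
+            hypothetical):</p>
+          <pre>
+{ok, Drivers} = erl_ddll:loaded_drivers(),
+%% Drivers is a list of strings.
+lists:member("my_drv", Drivers).
+          </pre>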
</desc>
</func>
<func>
<name name="monitor" arity="2"/>
- <fsummary>Create a monitor for a driver</fsummary>
+ <fsummary>Create a monitor for a driver.</fsummary>
<desc>
- <p>This function creates a driver monitor and works in many
- ways as the function <seealso marker="erts:erlang#erlang:monitor/2">erlang:monitor/2</seealso>,
+ <p>Creates a driver monitor and works in many
+ ways as
+ <seealso marker="erts:erlang#erlang:monitor/2"><c>erlang:monitor/2</c></seealso>
+          in ERTS
does for processes. When a driver changes state, the monitor
- results in a monitor-message being sent to the calling
- process. The <c><anno>MonitorRef</anno></c> returned by this function is
+ results in a monitor message that is sent to the calling
+ process. <c><anno>MonitorRef</anno></c> returned by this function is
included in the message sent.</p>
- <p>As with process monitors, each driver monitor set will only
- generate <em>one single message</em>. The monitor is
- "destroyed" after the message is sent and there is then no
- need to call <seealso marker="#demonitor/1">demonitor/1</seealso>.</p>
- <p>The <c><anno>MonitorRef</anno></c> can also be used in subsequent calls
- to <seealso marker="#demonitor/1">demonitor/1</seealso> to
+ <p>As with process monitors, each driver monitor set only
+ generates <em>one single message</em>. The monitor is
+          "destroyed" after the message is sent, so there is no
+          need to call
+ <seealso marker="#demonitor/1"><c>demonitor/1</c></seealso>.</p>
+ <p><c><anno>MonitorRef</anno></c> can also be used in subsequent calls
+ to <seealso marker="#demonitor/1"><c>demonitor/1</c></seealso> to
remove a monitor.</p>
<p>The function accepts the following parameters:</p>
<taglist>
- <tag><em><c><anno>Tag</anno></c></em></tag>
+ <tag><c><anno>Tag</anno></c></tag>
<item>
- <p>The monitor tag is always <c>driver</c> as this function
+ <p>The monitor tag is always <c>driver</c>, as this function
can only be used to create driver monitors. In the future,
driver monitors will be integrated with process monitors,
- why this parameter has to be given for consistence.</p>
+            which is why this parameter must be specified for consistency.</p>
</item>
- <tag><em><c><anno>Item</anno></c></em></tag>
+ <tag><c><anno>Item</anno></c></tag>
<item>
- <p>The <c><anno>Item</anno></c> parameter specifies which driver one
- wants to monitor (the name of the driver) as well as
- which state change one wants to monitor. The parameter
+ <p>Parameter <c><anno>Item</anno></c> specifies
+ which driver to monitor (the driver name) and
+ which state change to monitor. The parameter
is a tuple of arity two whose first element is the
- driver name and second element is either of:</p>
+ driver name and second element is one of the following:</p>
<taglist>
- <tag><em>loaded</em></tag>
+ <tag><c>loaded</c></tag>
<item>
- <p>Notify me when the driver is reloaded (or loaded if
+ <p>Notifies when the driver is reloaded (or loaded if
loading is underway). It only makes sense to monitor
drivers that are in the process of being loaded or
- reloaded. One cannot monitor a future-to-be driver
- name for loading, that will only result in a
- <c>'DOWN'</c> message being immediately sent.
+            reloaded. Monitoring the loading of a driver that is not
+            present in the system is pointless; it only results in a
+            <c>'DOWN'</c> message being sent immediately.
Monitoring for loading is therefore most useful when
- triggered by the <seealso marker="#try_load/3">try_load/3</seealso> function,
+ triggered by function
+ <seealso marker="#try_load/3"><c>try_load/3</c></seealso>,
where the monitor is created <em>because</em> the
driver is in such a pending state.</p>
- <p>Setting a driver monitor for <c>loading</c> will
- eventually lead to one of the following messages
+ <p>Setting a driver monitor for <c>loading</c>
+ eventually leads to one of the following messages
being sent:</p>
<taglist>
- <tag><em>{'UP', reference(), driver, Name, loaded}</em></tag>
+ <tag><c>{'UP', reference(), driver, Name, loaded}</c></tag>
<item>
- <p>This message is sent, either immediately if the
+ <p>This message is sent either immediately if the
driver is already loaded and no reloading is
pending, or when reloading is executed if
reloading is pending. </p>
<p>The <seealso marker="#users">user</seealso> is
- expected to know if reloading is demanded prior
- to creating a monitor for loading.</p>
+ expected to know if reloading is demanded before
+ creating a monitor for loading.</p>
</item>
- <tag><em>{'UP', reference(), driver, Name, permanent}</em></tag>
+ <tag><c>{'UP', reference(), driver, Name, permanent}</c></tag>
<item>
- <p>This message will be sent if reloading was
+ <p>This message is sent if reloading was
expected, but the (old) driver made itself
- permanent prior to reloading. It will also be
+ permanent before reloading. It is also
sent if the driver was permanent or statically
- linked in when trying to create the monitor.</p>
+ linked-in when trying to create the monitor.</p>
</item>
- <tag><em>{'DOWN', reference(), driver, Name, load_cancelled}</em></tag>
+ <tag><c>{'DOWN', reference(), driver, Name, load_cancelled}</c></tag>
<item>
- <p>This message will arrive if reloading was
- underway, but the <seealso marker="#users">user</seealso> having requested
- reload cancelled it by either dying or calling
- <seealso marker="#try_unload/2">try_unload/2</seealso>
+ <p>This message arrives if reloading was
+ underway, but the requesting
+ <seealso marker="#users">user</seealso>
+ cancelled it by dying or calling
+ <seealso marker="#try_unload/2"><c>try_unload/2</c></seealso>
(or <c>unload/1</c>/<c>unload_driver/1</c>)
again before it was reloaded.</p>
</item>
- <tag><em>{'DOWN', reference(), driver, Name, {load_failure, Failure}}</em></tag>
+ <tag><c>{'DOWN', reference(), driver, Name, {load_failure, Failure}}</c></tag>
<item>
- <p>This message will arrive if reloading was
+ <p>This message arrives if reloading was
underway but the loading for some reason
failed. The <c>Failure</c> term is one of the
- errors that can be returned from <seealso marker="#try_load/3">try_load/3</seealso>. The
- error term can be passed to <seealso marker="#format_error/1">format_error/1</seealso>
- for translation into human readable form. Note
- that the translation has to be done in the same
- running erlang virtual machine as the error
+ errors that can be returned from
+ <seealso marker="#try_load/3"><c>try_load/3</c></seealso>.
+ The error term can be passed to
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>
+ for translation into human readable form. Notice
+ that the translation must be done in the same
+ running Erlang virtual machine as the error
was detected in.</p>
</item>
</taglist>
</item>
- <tag><em>unloaded</em></tag>
+ <tag><c>unloaded</c></tag>
<item>
- <p>Monitor when a driver gets unloaded. If one
+ <p>Monitors when a driver gets unloaded. If one
monitors a driver that is not present in the system,
- one will immediately get notified that the driver got
+ one immediately gets notified that the driver got
unloaded. There is no guarantee that the driver was
- actually ever loaded.</p>
- <p>A driver monitor for unload will eventually result
+ ever loaded.</p>
+ <p>A driver monitor for unload eventually results
in one of the following messages being sent:</p>
<taglist>
- <tag><em>{'DOWN', reference(), driver, Name, unloaded}</em></tag>
+ <tag><c>{'DOWN', reference(), driver, Name, unloaded}</c></tag>
<item>
- <p>The driver instance monitored is now
- unloaded. As the unload might have been due to a
- <c>reload/2</c> request, the driver might once
+ <p>The monitored driver instance is now
+ unloaded. As the unload can be a result of a
+ <c>reload/2</c> request, the driver can once
again have been loaded when this message
arrives.</p>
</item>
- <tag><em>{'UP', reference(), driver, Name, unload_cancelled}</em></tag>
+ <tag><c>{'UP', reference(), driver, Name, unload_cancelled}</c></tag>
<item>
- <p>This message will be sent if unloading was
+ <p>This message is sent if unloading was
expected, but while the driver was waiting for
- all ports to get closed, a new <seealso marker="#users">user</seealso> of the driver
- appeared and the unloading was cancelled.</p>
- <p>This message appears when an <c>{ok, pending_driver}</c>) was returned from <seealso marker="#try_unload/2">try_unload/2</seealso>)
- for the last <seealso marker="#users">user</seealso> of the driver and
- then a <c>{ok, already_loaded}</c> is returned
- from a call to <seealso marker="#try_load/3">try_load/3</seealso>.</p>
- <p>If one wants to <em>really</em> monitor when the
- driver gets unloaded, this message will distort
- the picture, no unloading was really done.
- The <c>unloaded_only</c> option creates a monitor
- similar to an <c>unloaded</c> monitor, but does
- never result in this message.</p>
+ all ports to get closed, a new
+ <seealso marker="#users">user</seealso> of the driver
+ appeared, and the unloading was cancelled.</p>
+ <p>This message appears if <c>{ok, pending_driver}</c>
+ was returned from
+ <seealso marker="#try_unload/2"><c>try_unload/2</c></seealso>
+ for the last <seealso marker="#users">user</seealso>
+ of the driver, and then <c>{ok, already_loaded}</c> is returned
+ from a call to
+ <seealso marker="#try_load/3"><c>try_load/3</c></seealso>.</p>
+ <p>If one <em>really</em> wants to monitor when the
+ driver gets unloaded, this message distorts
+ the picture, because no unloading was done.
+ Option <c>unloaded_only</c> creates a monitor
+ similar to an <c>unloaded</c> monitor, but
+ never results in this message.</p>
</item>
- <tag><em>{'UP', reference(), driver, Name, permanent}</em></tag>
+ <tag><c>{'UP', reference(), driver, Name, permanent}</c></tag>
<item>
- <p>This message will be sent if unloading was
+ <p>This message is sent if unloading was
expected, but the driver made itself
- permanent prior to unloading. It will also be
+ permanent before unloading. It is also
sent if trying to monitor a permanent or
- statically linked in driver.</p>
+ statically linked-in driver.</p>
</item>
</taglist>
</item>
- <tag><em>unloaded_only</em></tag>
+ <tag><c>unloaded_only</c></tag>
<item>
<p>A monitor created as <c>unloaded_only</c> behaves
- exactly as one created as <c>unloaded</c> with the
- exception that the <c>{'UP', reference(), driver, Name, unload_cancelled}</c> message will never be
- sent, but the monitor instead persists until the
- driver <em>really</em> gets unloaded.</p>
+ exactly as one created as <c>unloaded</c>
+ except that the
+ <c>{'UP', reference(), driver, Name, unload_cancelled}</c>
+ message is never sent, but the monitor instead persists until
+ the driver <em>really</em> gets unloaded.</p>
</item>
</taglist>
</item>
</taglist>
<p>The function throws a <c>badarg</c> exception if the
- parameters are not given as described above. </p>
+ parameters are not specified as described here.</p>
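+        <p>A minimal sketch of waiting for a driver to get unloaded
+          (the driver name is hypothetical):</p>
+        <pre>
+Ref = erl_ddll:monitor(driver, {"my_drv", unloaded}),
+receive
+    {'DOWN', Ref, driver, _Name, unloaded} ->
+        driver_is_gone
+after 5000 ->
+        timeout
+end.
+        </pre>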
</desc>
</func>
<func>
<name name="reload" arity="2"/>
- <fsummary>Replace a driver</fsummary>
+ <fsummary>Replace a driver.</fsummary>
<desc>
<p>Reloads the driver named <c><anno>Name</anno></c> from a possibly
- different <c><anno>Path</anno></c> than was previously used. This
- function is used in the code change <seealso marker="#scenarios">scenario</seealso> described in the
+ different <c><anno>Path</anno></c> than previously used. This
+ function is used in the code change
+ <seealso marker="#scenarios"><c>scenario</c></seealso> described in the
introduction.</p>
<p>If there are other <seealso marker="#users">users</seealso>
- of this driver, the function will return <c>{error, pending_process}</c>, but if there are no more users, the
- function call will hang until all open ports are closed.</p>
+ of this driver, the function returns <c>{error, pending_process}</c>,
+ but if there are no other users, the function call hangs until all
+ open ports are closed.</p>
<note>
- <p>Avoid mixing
- several <seealso marker="#users">users</seealso>
- with driver reload requests.</p>
- </note>
- <p>If one wants to avoid hanging on open ports, one should use
- the <seealso marker="#try_load/3">try_load/3</seealso>
- function instead.</p>
- <p>The <c><anno>Name</anno></c> and <c><anno>Path</anno></c> parameters have exactly the
- same meaning as when calling the plain <seealso marker="#load/2">load/2</seealso> function.</p>
- <note>
- <p>Avoid mixing
- several <seealso marker="#users">users</seealso>
- with driver reload requests.</p>
+ <p>Avoid mixing multiple
+ <seealso marker="#users">users</seealso>
+ with driver reload requests.</p>
</note>
+ <p>To avoid hanging on open ports, use function
+ <seealso marker="#try_load/3"><c>try_load/3</c></seealso>
+ instead.</p>
+ <p>The <c><anno>Name</anno></c> and <c><anno>Path</anno></c> parameters
+ have exactly the same meaning as when calling the plain function
+ <seealso marker="#load/2"><c>load/2</c></seealso>.</p>
+
<p>On success, the function returns <c>ok</c>. On
- failure, the function returns an opaque error, with the
- exception of the <c>pending_process</c> error described
- above. The opaque errors are to be translated into human
- readable form by the <seealso marker="#format_error/1">format_error/1</seealso> function.</p>
- <p>For more control over the error handling, again use the
- <seealso marker="#try_load/3">try_load/3</seealso>
+ failure, the function returns an opaque error,
+ except the <c>pending_process</c> error described
+ earlier. The opaque errors are to be translated into human
+ readable form by function
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>.</p>
+ <p>For more control over the error handling, use the
+ <seealso marker="#try_load/3"><c>try_load/3</c></seealso>
interface instead.</p>
<p>The function throws a <c>badarg</c> exception if the
- parameters are not given as described above. </p>
+ parameters are not specified as described here.</p>
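+        <p>A sketch of a code upgrade, assuming this process previously
+          loaded <c>"my_drv"</c> and a new object file is placed in the
+          hypothetical directory <c>/opt/drivers/v2</c>:</p>
+        <pre>
+case erl_ddll:reload("/opt/drivers/v2", "my_drv") of
+    ok ->
+        ok;
+    {error, pending_process} ->
+        %% Other users still have the driver loaded.
+        {error, pending_process};
+    {error, Error} ->
+        {error, erl_ddll:format_error(Error)}
+end.
+        </pre>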
</desc>
</func>
<func>
<name name="reload_driver" arity="2"/>
- <fsummary>Replace a driver</fsummary>
+ <fsummary>Replace a driver.</fsummary>
<desc>
- <p>Works exactly as <seealso marker="#reload/2">reload/2</seealso>, but for drivers
- loaded with the <seealso marker="#load_driver/2">load_driver/2</seealso> interface. </p>
- <p>As this interface implies that ports are being killed when
- the last user disappears, the function wont hang waiting for
+ <p>Works exactly as <seealso marker="#reload/2"><c>reload/2</c></seealso>,
+ but for drivers loaded with the
+ <seealso marker="#load_driver/2"><c>load_driver/2</c></seealso> interface.</p>
+ <p>As this interface implies that ports are killed when
+ the last user disappears, the function does not hang waiting for
ports to get closed.</p>
- <p>For further details, see the <seealso marker="#scenarios">scenarios</seealso> in the module
- description and refer to the <seealso marker="#reload/2">reload/2</seealso> function description.</p>
+ <p>For more details, see
+ <seealso marker="#scenarios"><c>scenarios</c></seealso> in this module
+ description and the function description for
+ <seealso marker="#reload/2"><c>reload/2</c></seealso>.</p>
<p>The function throws a <c>badarg</c> exception if the
- parameters are not given as described above. </p>
+ parameters are not specified as described here.</p>
</desc>
</func>
<func>
<name name="try_load" arity="3"/>
- <fsummary>Load a driver</fsummary>
+ <fsummary>Load a driver.</fsummary>
<desc>
- <p>This function provides more control than the
+ <p>Provides more control than the
<c>load/2</c>/<c>reload/2</c> and
<c>load_driver/2</c>/<c>reload_driver/2</c> interfaces. It
- will never wait for completion of other operations related
- to the driver, but immediately return the status of the
- driver as either:</p>
+ never waits for completion of other operations related
+ to the driver, but immediately returns the status of the
+ driver as one of the following:</p>
<taglist>
- <tag><em>{ok, loaded}</em></tag>
+ <tag><c>{ok, loaded}</c></tag>
<item>
- <p>The driver was actually loaded and is immediately
- usable.</p>
+ <p>The driver was loaded and is immediately usable.</p>
</item>
- <tag><em>{ok, already_loaded}</em></tag>
+ <tag><c>{ok, already_loaded}</c></tag>
<item>
<p>The driver was already loaded by another process
- and/or is in use by a living port. The load by you is
+ or is in use by a living port, or both. The load by you is
registered and a corresponding <c>try_unload</c> is
expected sometime in the future.</p>
</item>
- <tag><em>{ok, pending_driver}</em>or <em>{ok, pending_driver, reference()}</em></tag>
+          <tag><c>{ok, pending_driver}</c> or <c>{ok, pending_driver, reference()}</c></tag>
<item>
<p>The load request is registered, but the loading is
- delayed due to the fact that an earlier instance of the
- driver is still waiting to get unloaded (there are open
- ports using it). Still, unload is expected when you are
- done with the driver. This return value will
- <em>mostly</em> happen when the
+ delayed because an earlier instance of the
+ driver is still waiting to get unloaded (open
+ ports use it). Still, unload is expected when you are
+ done with the driver. This return value
+ <em>mostly</em> occurs when options
<c>{reload,pending_driver}</c> or
- <c>{reload,pending}</c> options are used, but
- <em>can</em> happen when another <seealso marker="#users">user</seealso> is unloading a driver in
- parallel and the <c>kill_ports</c> driver option is
- set. In other words, this return value will always need
- to be handled!</p>
+ <c>{reload,pending}</c> are used, but
+ <em>can</em> occur when another
+ <seealso marker="#users">user</seealso> is unloading a
+ driver in parallel and driver option <c>kill_ports</c> is set.
+ In other words, this return value always needs
+ to be handled.</p>
</item>
- <tag><em>{ok, pending_process}</em>or <em>{ok, pending_process, reference()}</em></tag>
+          <tag><c>{ok, pending_process}</c> or <c>{ok, pending_process, reference()}</c></tag>
<item>
<p>The load request is registered, but the loading is
- delayed due to the fact that an earlier instance of the
+ delayed because an earlier instance of the
driver is still waiting to get unloaded by another
<seealso marker="#users">user</seealso> (not only by a
port, in which case <c>{ok,pending_driver}</c> would
have been returned). Still, unload is expected when you
- are done with the driver. This return value will
- <em>only</em> happen when the <c>{reload,pending}</c>
- option is used.</p>
+ are done with the driver. This return value
+ <em>only</em> occurs when option <c>{reload,pending}</c>
+ is used.</p>
</item>
</taglist>
<p>When the function returns <c>{ok, pending_driver}</c> or
- <c>{ok, pending_process}</c>, one might want to get information
- about when the driver is <em>actually</em> loaded. This can
- be achieved by using the <c>{monitor, <anno>MonitorOption</anno>}</c> option.</p>
- <p>When monitoring is requested, and a corresponding <c>{ok, pending_driver}</c> or <c>{ok, pending_process}</c> would be
- returned, the function will instead return a tuple <c>{ok, <anno>PendingStatus</anno>, reference()}</c> and the process will, at a later
- time when the driver actually gets loaded, get a monitor
- message. The monitor message one can expect is described in
- the <seealso marker="#monitor/2">monitor/2</seealso>
- function description. </p>
+ <c>{ok, pending_process}</c>, one can get information
+ about when the driver is <em>actually</em> loaded by using
+ option <c>{monitor, <anno>MonitorOption</anno>}</c>.</p>
+ <p>When monitoring is requested, and a corresponding
+ <c>{ok, pending_driver}</c> or <c>{ok, pending_process}</c> would
+ be returned, the function instead returns a tuple
+ <c>{ok, <anno>PendingStatus</anno>, reference()}</c>
+ and the process then gets a monitor message later, when the
+ driver gets loaded. The monitor message to expect is described in
+ the function description of
+ <seealso marker="#monitor/2"><c>monitor/2</c></seealso>.</p>
<note>
- <p>Note that in case of loading, monitoring can
- <em>not</em> only get triggered by using the <c>{reload, <anno>ReloadOption</anno>}</c> option, but also in special cases where
- the load-error is transient, why <c>{monitor, pending_driver}</c> should be used under basically
- <em>all</em> real world circumstances!</p>
+ <p>In case of loading, monitoring can <em>not</em> only get
+ triggered by using option <c>{reload, <anno>ReloadOption</anno>}</c>,
+ but also in special cases where the load error is transient. Thus,
+ <c>{monitor, pending_driver}</c> is to be used under basically
+ <em>all</em> real world circumstances.</p>
</note>
<p>The function accepts the following parameters:</p>
<taglist>
- <tag><em><c><anno>Path</anno></c></em></tag>
+ <tag><c><anno>Path</anno></c></tag>
<item>
- <p>The filesystem path to the directory where the driver
- object file is situated. The filename of the object file
+ <p>The file system path to the directory where the driver
+ object file is located. The filename of the object file
(minus extension) must correspond to the driver name
- (used in the name parameter) and the driver must
- identify itself with the very same name. The
- <c><anno>Path</anno></c> might be provided as an <em>iolist()</em>,
+ (used in parameter <c><anno>Name</anno></c>) and the driver must
+ identify itself with the same name.
+ <c><anno>Path</anno></c> can be provided as an <em>iolist()</em>,
meaning it can be a list of other <c>iolist()</c>s, characters
- (eight bit integers) or binaries, all to be flattened
+ (8-bit integers), or binaries, all to be flattened
into a sequence of characters.</p>
<p>The (possibly flattened) <c><anno>Path</anno></c> parameter must be
- consistent throughout the system, a driver should, by
+ consistent throughout the system. A driver is to, by
all <seealso marker="#users">users</seealso>, be loaded
- using the same <em>literal</em><c><anno>Path</anno></c>. The
- exception is when <em>reloading</em> is requested, in
- which case the <c><anno>Path</anno></c> may be specified
- differently. Note that all <seealso marker="#users">users</seealso> trying to load the
- driver at a later time will need to use the <em>new</em><c><anno>Path</anno></c> if the <c><anno>Path</anno></c> is changed using a
- <c>reload</c> option. This is yet another reason
+ using the same <em>literal</em> <c><anno>Path</anno></c>.
+ The exception is when <em>reloading</em> is requested,
+ in which case <c><anno>Path</anno></c> can be specified
+ differently. Notice that all
+ <seealso marker="#users">users</seealso> trying to load the
+ driver later need to use the
+ <em>new</em> <c><anno>Path</anno></c> if <c><anno>Path</anno></c>
+ is changed using a <c>reload</c> option. This is yet another reason
to have <em>only one loader</em> of a driver one wants to
- upgrade in a running system! </p>
+ upgrade in a running system.</p>
</item>
- <tag><em><c><anno>Name</anno></c></em></tag>
+ <tag><c><anno>Name</anno></c></tag>
<item>
- <p>The name parameter is the name of the driver to be used
- in subsequent calls to <seealso marker="erts:erlang#open_port/2">open_port</seealso>. The
- name can be specified either as an <c>iolist()</c> or
- as an <c>atom()</c>. The name given when loading is used
- to find the actual object file (with the
- help of the <c><anno>Path</anno></c> and the system implied
- extension suffix, i.e. <c>.so</c>). The name by which
- the driver identifies itself must also be consistent
- with this <c><anno>Name</anno></c> parameter, much as a beam-file's
- module name much correspond to its filename.</p>
+ <p>This parameter is the name of the driver
+ to be used in subsequent calls to function
+ <seealso marker="erts:erlang#open_port/2"><c>erlang:open_port</c></seealso>
+ in ERTS.
+ The name can be specified as an <c>iolist()</c> or
+ an <c>atom()</c>. The name specified when loading is used
+ to find the object file (with the help of <c><anno>Path</anno></c>
+ and the system-implied extension suffix, that is, <c>.so</c>).
+ The name by which the driver identifies itself must also be consistent
+ with this <c><anno>Name</anno></c> parameter, much as
+            the module name of a Beam file must correspond to its filename.</p>
</item>
- <tag><em><c><anno>OptionList</anno></c></em></tag>
+ <tag><c><anno>OptionList</anno></c></tag>
<item>
- <p>A number of options can be specified to control the
- loading operation. The options are given as a list of
- two-tuples, the tuples having the following values and
+ <p>Some options can be specified to control the
+ loading operation. The options are specified as a list of
+ two-tuples. The tuples have the following values and
meanings:</p>
<taglist>
- <tag><em>{driver_options, <c><anno>DriverOptionList</anno></c>}</em></tag>
+ <tag><c>{driver_options, <anno>DriverOptionList</anno>}</c></tag>
<item>
- <p>This option is to provide options that will change
- its general behavior and will "stick" to the driver
+            <p>This is to provide options that change the general
+              behavior of the driver and "stick" to it
throughout its lifespan.</p>
- <p>The driver options for a given driver name need
- always to be consistent, <em>even when the driver is reloaded</em>, meaning that they are as much a part
- of the driver as the actual name.</p>
- <p>Currently the only allowed driver option is
+ <p>The driver options for a specified driver name need
+ always to be consistent, <em>even when the driver is reloaded</em>,
+ meaning that they are as much a part of the driver as the name.</p>
+ <p>The only allowed driver option is
<c>kill_ports</c>, which means that all ports opened
- towards the driver are killed with the exit-reason
+ to the driver are killed with exit reason
<c>driver_unloaded</c> when no process any longer
has the driver loaded. This situation arises either
- when the last <seealso marker="#users">user</seealso> calls <seealso marker="#try_unload/2">try_unload/2</seealso>, or
- the last process having loaded the driver exits.</p>
+ when the last <seealso marker="#users">user</seealso> calls
+ <seealso marker="#try_unload/2"><c>try_unload/2</c></seealso>, or
+ when the last process having loaded the driver exits.</p>
</item>
- <tag><em>{monitor, <c><anno>MonitorOption</anno></c>}</em></tag>
+ <tag><c>{monitor, <anno>MonitorOption</anno>}</c></tag>
<item>
<p>A <c><anno>MonitorOption</anno></c> tells <c>try_load/3</c> to
trigger a driver monitor under certain
conditions. When the monitor is triggered, the
- function will return a three-tuple <c>{ok, <anno>PendingStatus</anno>, reference()}</c>, where the <c>reference()</c> is
- the monitor ref for the driver monitor.</p>
- <p>Only one <c><anno>MonitorOption</anno></c> can be specified and
- it is either the atom <c>pending</c>, which means
- that a monitor should be created whenever a load
- operation is delayed, and the atom
- <c>pending_driver</c>, in which a monitor is
- created whenever the operation is delayed due to
- open ports towards an otherwise unused driver. The
- <c>pending_driver</c> option is of little use, but
- is present for completeness, it is very well defined
- which reload-options might give rise to which
- delays. It might, however, be a good idea to use the
- same <c><anno>MonitorOption</anno></c> as the <c><anno>ReloadOption</anno></c>
- if present.</p>
- <p>If reloading is not requested, it might still be
- useful to specify the <c>monitor</c> option, as
- forced unloads (<c>kill_ports</c> driver option or
- the <c>kill_ports</c> option to <seealso marker="#try_unload/2">try_unload/2</seealso>) will
- trigger a transient state where driver loading
+ function returns a three-tuple
+ <c>{ok, <anno>PendingStatus</anno>, reference()}</c>, where
+ <c>reference()</c> is the monitor reference for the driver monitor.</p>
+ <p>Only one <c><anno>MonitorOption</anno></c> can be specified.
+ It is one of the following:</p>
+ <list type="bulleted">
+ <item>
+ <p>The atom <c>pending</c>, which means
+ that a monitor is to be created whenever a load
+                    operation is delayed.</p>
+ </item>
+ <item>
+                  <p>The atom <c>pending_driver</c>, in which case a monitor
+ is created whenever the operation is delayed because
+ of open ports to an otherwise unused driver.</p>
+ </item>
+ </list>
+ <p>Option <c>pending_driver</c> is of little use, but
+ is present for completeness, as it is well defined which
+                reload options can give rise to which delays.
+ However, it can be a good idea to use the same
+ <c><anno>MonitorOption</anno></c> as the
+ <c><anno>ReloadOption</anno></c>, if present.</p>
+ <p>If reloading is not requested, it can still be
+ useful to specify option <c>monitor</c>, as
+ forced unloads (driver option <c>kill_ports</c> or
+ option <c>kill_ports</c> to
+ <seealso marker="#try_unload/2"><c>try_unload/2</c></seealso>)
+ trigger a transient state where driver loading
cannot be performed until all closing ports are
- actually closed. So, as <c>try_unload</c> can, in
- almost all situations, return <c>{ok, pending_driver}</c>, one should always specify at least
- <c>{monitor, pending_driver}</c> in production
- code (see the monitor discussion above). </p>
+ closed. Thus, as <c>try_unload</c> can, in
+ almost all situations, return <c>{ok, pending_driver}</c>,
+ always specify at least <c>{monitor, pending_driver}</c>
+ in production code (see the monitor discussion earlier).</p>
</item>
- <tag><em>{reload, <c><anno>ReloadOption</anno></c>}</em></tag>
+ <tag><c>{reload, <anno>ReloadOption</anno>}</c></tag>
<item>
- <p>This option is used when one wants to
+ <p>This option is used to
<em>reload</em> a driver from disk, most often in a
code upgrade scenario. Having a <c>reload</c> option
- also implies that the <c><anno>Path</anno></c> parameter need
- <em>not</em> be consistent with earlier loads of
+ also implies that parameter <c><anno>Path</anno></c> does
+ <em>not</em> need to be consistent with earlier loads of
the driver.</p>
- <p>To reload a driver, the process needs to have previously
- loaded the driver, i.e there has to be an active <seealso marker="#users">user</seealso> of the driver in the process. </p>
- <p>The <c>reload</c> option can be either the atom
- <c>pending</c>, in which reloading is requested for
- any driver and will be effectuated when <em>all</em>
- ports opened against the driver are closed. The
- replacement of the driver will in this case take
- place regardless of if there are still
- pending <seealso marker="#users">users</seealso>
- having the driver loaded!
- The option also triggers port-killing (if the
- <c>kill_ports</c> driver option is used) even though
- there are pending users, making it usable for forced
- driver replacement, but laying a lot of
- responsibility on the driver <seealso marker="#users">users</seealso>. The pending option is
- seldom used as one does not want other <seealso marker="#users">users</seealso> to have loaded the
- driver when code change is underway. </p>
- <p>The more useful option is <c>pending_driver</c>,
- which means that reloading will be queued if the
- driver is <em>not</em> loaded by any other <seealso marker="#users">users</seealso>, but the driver has
- opened ports, in which case <c>{ok, pending_driver}</c> will be returned (a
- <c>monitor</c> option is of course recommended).</p>
- <p>If the driver is unloaded (not present in the
- system), the error code
- <c>not_loaded</c> will be returned. The
- <c>reload</c> option is intended for when the user
+ <p>To reload a driver, the process must have loaded the driver
+ before, that is, there must be an active
+ <seealso marker="#users">user</seealso> of the driver
+ in the process.</p>
+ <p>The <c>reload</c> option can be either of the following:</p>
+ <taglist>
+ <tag><c>pending</c></tag>
+ <item>
+ <p>With the atom <c>pending</c>, reloading is requested
+ for any driver and is effectuated when <em>all</em>
+ ports opened to the driver are closed. The driver
+ replacement in this case takes
+                  place regardless of whether there are still
+ pending <seealso marker="#users">users</seealso>
+ having the driver loaded.</p>
+ <p>The option also triggers port-killing (if driver
+ option <c>kill_ports</c> is used) although
+ there are pending users, making it usable for forced
+ driver replacement, but laying much
+ responsibility on the driver
+ <seealso marker="#users">users</seealso>.
+ The pending option is seldom used as one does not want other
+ <seealso marker="#users">users</seealso> to have loaded
+ the driver when code change is underway.</p></item>
+ <tag><c>pending_driver</c></tag>
+ <item>
+ <p>This option is more useful. Here, reloading is queued
+ if the driver is <em>not</em> loaded by any other
+ <seealso marker="#users">users</seealso>, but the
+ driver has opened ports, in which case
+ <c>{ok, pending_driver}</c> is returned
+ (a <c>monitor</c> option is recommended).</p></item>
+ </taglist>
+ <p>If the driver is unloaded (not present in the system),
+ error code <c>not_loaded</c> is returned. Option
+ <c>reload</c> is intended for when the user
has already loaded the driver in advance.</p>
</item>
</taglist>
</item>
</taglist>
- <p>The function might return numerous errors, of which some
- only can be returned given a certain combination of options.</p>
- <p>A number of errors are opaque and can only be interpreted by
- passing them to the <seealso marker="#format_error/1">format_error/1</seealso> function,
+        <p>The function can return numerous errors, some of which
+          can only be returned given a certain combination of options.</p>
+ <p>Some errors are opaque and can only be interpreted by
+ passing them to function
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>,
but some can be interpreted directly:</p>
<taglist>
- <tag><em>{error,linked_in_driver}</em></tag>
+ <tag><c>{error,linked_in_driver}</c></tag>
<item>
- <p>The driver with the specified name is an erlang
- statically linked in driver, which cannot be manipulated
+ <p>The driver with the specified name is an Erlang
+ statically linked-in driver, which cannot be manipulated
with this API.</p>
</item>
- <tag><em>{error,inconsistent}</em></tag>
+ <tag><c>{error,inconsistent}</c></tag>
<item>
- <p>The driver has already been loaded with either other
- <c><anno>DriverOptionList</anno></c> or a different <em>literal</em><c>Path</c> argument.</p>
- <p>This can happen even if a <c>reload</c> option is given,
- if the <c>DriverOptionList</c> differ from the current.</p>
+ <p>The driver is already loaded with other
+ <c><anno>DriverOptionList</anno></c> or a different
+ <em>literal</em> <c>Path</c> argument.</p>
+ <p>This can occur even if a <c>reload</c> option is specified,
+ if <c>DriverOptionList</c> differs from the current.</p>
</item>
- <tag><em>{error, permanent}</em></tag>
+ <tag><c>{error, permanent}</c></tag>
<item>
<p>The driver has requested itself to be permanent, making
- it behave like an erlang linked in driver and it can no
+            it behave like an Erlang linked-in driver, and it can no
longer be manipulated with this API.</p>
</item>
- <tag><em>{error, pending_process}</em></tag>
+ <tag><c>{error, pending_process}</c></tag>
<item>
- <p>The driver is loaded by other <seealso marker="#users">users</seealso> when the <c>{reload, pending_driver}</c> option was given.</p>
+ <p>The driver is loaded by other
+ <seealso marker="#users">users</seealso> when
+ option <c>{reload, pending_driver}</c> was specified.</p>
</item>
- <tag><em>{error, pending_reload}</em></tag>
+ <tag><c>{error, pending_reload}</c></tag>
<item>
- <p>Driver reload is already requested by another <seealso marker="#users">user</seealso> when the <c>{reload, <anno>ReloadOption</anno>}</c> option was given.</p>
+ <p>Driver reload is already requested by another
+ <seealso marker="#users">user</seealso> when option
+ <c>{reload, <anno>ReloadOption</anno>}</c> was specified.</p>
</item>
- <tag><em>{error, not_loaded_by_this_process}</em></tag>
+ <tag><c>{error, not_loaded_by_this_process}</c></tag>
<item>
- <p>Appears when the <c>reload</c> option is given. The
- driver <c><anno>Name</anno></c> is present in the system, but there is no
- <seealso marker="#users">user</seealso> of it in this
+ <p>Appears when option <c>reload</c> is specified. The
+ driver <c><anno>Name</anno></c> is present in the system, but there
+ is no <seealso marker="#users">user</seealso> of it in this
process.</p>
</item>
- <tag><em>{error, not_loaded}</em></tag>
+ <tag><c>{error, not_loaded}</c></tag>
<item>
- <p>Appears when the <c>reload</c> option is given. The
+ <p>Appears when option <c>reload</c> is specified. The
driver <c><anno>Name</anno></c> is not in the system. Only drivers
loaded by this process can be reloaded.</p>
</item>
</taglist>
- <p>All other error codes are to be translated by the <seealso marker="#format_error/1">format_error/1</seealso>
- function. Note that calls to <c>format_error</c> should be
- performed from the same running instance of the erlang
- virtual machine as the error was detected in, due to system
- dependent behavior concerning error values.</p>
- <p>If the arguments or options are malformed, the function will
- throw a <c>badarg</c> exception.</p>
+ <p>All other error codes are to be translated by function
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>.
+ Notice that calls to <c>format_error</c> are to be
+ performed from the same running instance of the Erlang
+ virtual machine as the error is detected in, because of
+ system-dependent behavior concerning error values.</p>
+ <p>If the arguments or options are malformed, the function
+ throws a <c>badarg</c> exception.</p>
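+        <p>A minimal sketch of loading with a monitor for the pending
+          case (path, driver name, and result handling are illustrative
+          only):</p>
+        <pre>
+case erl_ddll:try_load("/opt/drivers", "my_drv",
+                       [{monitor, pending_driver}]) of
+    {ok, loaded} ->
+        ok;
+    {ok, already_loaded} ->
+        ok;
+    {ok, pending_driver, Ref} ->
+        %% Wait for the old driver instance to go away.
+        receive
+            {'UP', Ref, driver, _, loaded} -> ok;
+            {'UP', Ref, driver, _, permanent} -> {error, permanent};
+            {'DOWN', Ref, driver, _, {load_failure, F}} -> {error, F}
+        end;
+    {error, Error} ->
+        {error, erl_ddll:format_error(Error)}
+end.
+        </pre>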
</desc>
</func>
<func>
<name name="try_unload" arity="2"/>
- <fsummary>Unload a driver</fsummary>
+ <fsummary>Unload a driver.</fsummary>
<desc>
- <p>This is the low level function to unload (or decrement
+ <p>This is the low-level function to unload (or decrement
reference counts of) a driver. It can be used to force port
killing, in much the same way as the driver option
- <c>kill_ports</c> implicitly does, and it can trigger a
- monitor either due to other <seealso marker="#users">users</seealso> still having the driver
- loaded or that there are open ports using the driver.</p>
+ <c>kill_ports</c> implicitly does. Also, it can trigger a
+ monitor either because other
+ <seealso marker="#users">users</seealso> still have the driver
+ loaded or because open ports use the driver.</p>
<p>Unloading can be described as the process of telling the
emulator that this particular part of the code in this
- particular process (i.e. this <seealso marker="#users">user</seealso>) no longer needs the
- driver. That can, if there are no other users, trigger
- actual unloading of the driver, in which case the driver
- name disappears from the system and (if possible) the memory
- occupied by the driver executable code is reclaimed. If the
- driver has the <c>kill_ports</c> option set, or if
- <c>kill_ports</c> was specified as an option to this
- function, all pending ports using this driver will get
- killed when unloading is done by the last <seealso marker="#users">user</seealso>. If no port-killing is
- involved and there are open ports, the actual unloading
- is delayed until there are no more open ports using the
- driver. If, in this case, another <seealso marker="#users">user</seealso> (or even this user) loads the
- driver again before the driver is actually unloaded, the
- unloading will never take place.</p>
- <p>To allow the <seealso marker="#users">user</seealso> that
- <em>requests unloading</em> to wait for <em>actual unloading</em> to
- take place, <c>monitor</c> triggers can be specified in much
- the same way as when loading. As <seealso marker="#users">users</seealso> of this function however
- seldom are interested in more than decrementing the
- reference counts, monitoring is more seldom needed. If the
- <c>kill_ports</c> option is used however, monitor trigging is
- crucial, as the ports are not guaranteed to have been killed
- until the driver is unloaded, why a monitor should be
- triggered for at least the <c>pending_driver</c> case.</p>
- <p>The possible monitor messages that can be expected are the
- same as when using the <c>unloaded</c> option to the
- <seealso marker="#monitor/2">monitor/2</seealso> function.</p>
- <p>The function will return one of the following statuses upon
+ particular process (that is, this
+ <seealso marker="#users">user</seealso>) no longer needs
+ the driver. That can, if there are no other users, trigger
+ unloading of the driver, in which case the driver name
+ disappears from the system and (if possible) the memory
+ occupied by the driver executable code is reclaimed.</p>
+ <p>If the driver has option <c>kill_ports</c> set, or if
+ <c>kill_ports</c> is specified as an option to this
+ function, all pending ports using this driver are
+ killed when unloading is done by the last
+ <seealso marker="#users">user</seealso>. If no port-killing
+ is involved and there are open ports, the unloading
+ is delayed until no more open ports use the
+ driver. If, in this case, another
+ <seealso marker="#users">user</seealso> (or even this user)
+ loads the driver again before the driver is unloaded, the
+ unloading never takes place.</p>
+        <p>To allow the <seealso marker="#users">user</seealso> that
+          <em>requests unloading</em> to wait for <em>actual unloading</em>,
+ <c>monitor</c> triggers can be specified in much the same way as
+ when loading. However, as <seealso marker="#users">users</seealso>
+ of this function seldom are interested in more than decrementing the
+ reference counts, monitoring is seldom needed.</p>
+          <note><p>If option <c>kill_ports</c> is used, monitor triggering is crucial,
+ as the ports are not guaranteed to be killed until the driver is unloaded.
+ Thus, a monitor must be triggered for at least the <c>pending_driver</c>
+ case.</p></note>
+ <p>The possible monitor messages to expect are the
+ same as when using option <c>unloaded</c> to function
+ <seealso marker="#monitor/2"><c>monitor/2</c></seealso>.</p>
+ <p>The function returns one of the following statuses upon
success:</p>
<taglist>
- <tag><em>{ok, unloaded}</em></tag>
+ <tag><c>{ok, unloaded}</c></tag>
<item>
<p>The driver was immediately unloaded, meaning that the
driver name is now free to use by other drivers and, if
the underlying OS permits it, the memory occupied by the
driver object code is now reclaimed.</p>
<p>The driver can only be unloaded when there are no open
- ports using it and there are no more <seealso marker="#users">users</seealso> requiring it to be
+ ports using it and no more
+ <seealso marker="#users">users</seealso> require it to be
loaded.</p>
</item>
- <tag><em>{ok, pending_driver}</em>or <em>{ok, pending_driver, reference()}</em></tag>
+          <tag><c>{ok, pending_driver}</c> or
+ <c>{ok, pending_driver, reference()}</c></tag>
<item>
- <p>This return value indicates that this call removed the
- last <seealso marker="#users">user</seealso> from the
+ <p>Indicates that this call removed the last
+ <seealso marker="#users">user</seealso> from the
driver, but there are still open ports using it.
- When all ports are closed and no new <seealso marker="#users">users</seealso> have arrived, the driver
- will actually be reloaded and the name and memory
+ When all ports are closed and no new
+ <seealso marker="#users">users</seealso> have arrived,
+            the driver is unloaded and the name and memory
reclaimed.</p>
- <p>This return value is valid even when the option
- <c>kill_ports</c> was used, as killing ports may not be
- a process that completes immediately. The condition is,
- in that case, however transient. Monitors are as always
- useful to detect when the driver is really unloaded.</p>
+ <p>This return value is valid even if option <c>kill_ports</c>
+ was used, as killing ports can be a process that does not
+ complete immediately. However, the condition is in that case
+ transient. Monitors are always useful to detect when the driver
+ is really unloaded.</p>
</item>
- <tag><em>{ok, pending_process}</em>or <em>{ok, pending_process, reference()}</em></tag>
+          <tag><c>{ok, pending_process}</c> or
+ <c>{ok, pending_process, reference()}</c></tag>
<item>
- <p>The unload request is registered, but there are still
- other <seealso marker="#users">users</seealso> holding
- the driver. Note that the term <c>pending_process</c>
- might refer to the running process, there might be more
+ <p>The unload request is registered, but
+ other <seealso marker="#users">users</seealso> still hold
+ the driver. Notice that the term <c>pending_process</c>
+ can refer to the running process; there can be more
than one <seealso marker="#users">user</seealso> in the
same process.</p>
- <p>This is a normal, healthy return value if the call was
+          <p>This is a normal, healthy return value if the call was
just placed to inform the emulator that you have no
- further use of the driver. It is actually the most
- common return value in the most common <seealso marker="#scenarios">scenario</seealso>
+ further use of the driver. It is the most
+ common return value in the most common
+ <seealso marker="#scenarios"><c>scenario</c></seealso>
described in the introduction.</p>
</item>
</taglist>
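+        <p>A minimal sketch of removing a
+          <seealso marker="#users">user</seealso> and waiting for the
+          driver to really get unloaded (the driver name is
+          hypothetical):</p>
+        <pre>
+case erl_ddll:try_unload("my_drv", [{monitor, pending_driver}]) of
+    {ok, unloaded} ->
+        ok;
+    {ok, pending_driver, Ref} ->
+        receive
+            {'DOWN', Ref, driver, _, unloaded} -> ok;
+            {'UP', Ref, driver, _, unload_cancelled} -> cancelled
+        end;
+    {ok, _Pending} ->
+        ok;
+    {error, Error} ->
+        {error, Error}
+end.
+        </pre>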
<p>The function accepts the following parameters:</p>
<taglist>
- <tag><em><c><anno>Name</anno></c></em></tag>
+ <tag><c><anno>Name</anno></c></tag>
<item>
- <p>The name parameter is the name of the driver to be
- unloaded. The name can be specified either as an
- <c>iolist()</c> or as an <c>atom()</c>. </p>
+ <p><c><anno>Name</anno></c> is the name of the
+ driver to be unloaded. The name can be specified as an
+ <c>iolist()</c> or as an <c>atom()</c>.</p>
</item>
- <tag><em><c><anno>OptionList</anno></c></em></tag>
+ <tag><c><anno>OptionList</anno></c></tag>
<item>
- <p>The <c><anno>OptionList</anno></c> argument can be used to specify
- certain behavior regarding ports as well as triggering
+ <p>Argument <c><anno>OptionList</anno></c> can be used to specify
+ certain behavior regarding ports and triggering
monitors under certain conditions:</p>
<taglist>
- <tag><em>kill_ports</em></tag>
+ <tag><c>kill_ports</c></tag>
<item>
- <p>Force killing of all ports opened using this driver,
- with the exit reason <c>driver_unloaded</c>, if you are
- the <em>last</em><seealso marker="#users">user</seealso> of the driver.</p>
- <p>If there are other <seealso marker="#users">users</seealso> having the driver
- loaded, this option will have no effect.</p>
- <p>If one wants the consistent behavior of killing ports
+ <p>Forces killing of all ports opened using this driver,
+ with exit reason <c>driver_unloaded</c>, if you are
+ the <em>last</em> <seealso marker="#users">user</seealso>
+ of the driver.</p>
+ <p>If other <seealso marker="#users">users</seealso>
+ have the driver loaded, this option has no effect.</p>
+ <p>To get the consistent behavior of killing ports
when the last <seealso marker="#users">user</seealso>
- unloads, one should use the driver option
+ unloads, use driver option
<c>kill_ports</c> when loading the driver instead.</p>
</item>
- <tag><em>{monitor, <c><anno>MonitorOption</anno></c>}</em></tag>
+ <tag><c>{monitor, <anno>MonitorOption</anno>}</c></tag>
<item>
- <p>This option creates a driver monitor if the condition
- given in <c><anno>MonitorOption</anno></c> is true. The valid
+ <p>Creates a driver monitor if the condition
+ specified in <c><anno>MonitorOption</anno></c> is true. The valid
options are:</p>
<taglist>
- <tag><em>pending_driver</em></tag>
+ <tag><c>pending_driver</c></tag>
<item>
- <p>Create a driver monitor if the return value is to
+ <p>Creates a driver monitor if the return value is to
be <c>{ok, pending_driver}</c>.</p>
</item>
- <tag><em>pending</em></tag>
+ <tag><c>pending</c></tag>
<item>
- <p>Create a monitor if the return value will be either
+ <p>Creates a monitor if the return value is
<c>{ok, pending_driver}</c> or <c>{ok, pending_process}</c>.</p>
</item>
</taglist>
- <p>The <c>pending_driver</c> <c><anno>MonitorOption</anno></c> is by far
- the most useful and it has to be used to ensure that the
- driver has really been unloaded and the ports closed
- whenever the <c>kill_ports</c> option is used or the
- driver may have been loaded with the <c>kill_ports</c>
- driver option.</p>
- <p>By using the monitor-triggers in the call to
- <c>try_unload</c> one can be sure that the monitor is
- actually added before the unloading is executed, meaning
- that the monitor will always get properly triggered,
- which would not be the case if one called
- <c>erl_ddll:monitor/2</c> separately.</p>
+ <p>The <c>pending_driver</c> <c><anno>MonitorOption</anno></c> is
+ by far the most useful. It must be used to ensure that the
+ driver really is unloaded and the ports closed
+            whenever option <c>kill_ports</c> is used, or when the
+            driver might have been loaded with driver option
+            <c>kill_ports</c>.</p>
+ <p>Using the monitor triggers in the call to
+ <c>try_unload</c> ensures that the monitor is
+ added before the unloading is executed, meaning
+ that the monitor is always properly triggered,
+ which is not the case if <c>monitor/2</c> is called
+ separately.</p>
</item>
</taglist>
</item>
</taglist>
- <p>The function may return several error conditions, of which
- all are well specified (no opaque values):</p>
+ <p>The function can return the following error conditions,
+ all well specified (no opaque values):</p>
<taglist>
- <tag><em>{error, linked_in_driver}</em></tag>
+ <tag><c>{error, linked_in_driver}</c></tag>
<item>
- <p>You were trying to unload an erlang statically linked in
+ <p>You were trying to unload an Erlang statically linked-in
driver, which cannot be manipulated with this interface
(and cannot be unloaded at all).</p>
</item>
- <tag><em>{error, not_loaded}</em></tag>
+ <tag><c>{error, not_loaded}</c></tag>
<item>
<p>The driver <c><anno>Name</anno></c> is not present in the system.</p>
</item>
- <tag><em>{error, not_loaded_by_this_process}</em></tag>
+ <tag><c>{error, not_loaded_by_this_process}</c></tag>
<item>
<p>The driver <c><anno>Name</anno></c> is present in the system, but
there is no <seealso marker="#users">user</seealso> of
it in this process. </p>
<p>As a special case, drivers can be unloaded from
- processes that has done no corresponding call to
- <c>try_load/3</c> if, and only if, there are <em>no users of the driver at all</em>, which may happen if the
+ processes that have done no corresponding call to
+ <c>try_load/3</c> if, and only if, there are
+ <em>no users of the driver at all</em>, which can occur if the
process containing the last user dies.</p>
</item>
- <tag><em>{error, permanent}</em></tag>
+ <tag><c>{error, permanent}</c></tag>
<item>
<p>The driver has made itself permanent, in which case it
can no longer be manipulated by this interface (much
- like a statically linked in driver).</p>
+ like a statically linked-in driver).</p>
</item>
</taglist>
<p>The function throws a <c>badarg</c> exception if the
- parameters are not given as described above. </p>
+ parameters are not specified as described here.</p>
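+        <p>The following is a minimal sketch of how the monitor obtained
+          through option <c>{monitor, pending_driver}</c> can be used to
+          wait until the driver is really unloaded. For brevity, the sketch
+          ignores the possibility that the unload is cancelled by a new
+          <seealso marker="#users">user</seealso> before all ports are
+          closed:</p>
+        <pre>
+unload_and_wait(Name) ->
+    case erl_ddll:try_unload(Name, [{monitor, pending_driver}]) of
+        {ok, unloaded} ->
+            %% No open ports were left; the driver is already gone.
+            ok;
+        {ok, pending_driver, Ref} ->
+            %% Wait for the monitor message sent when the last port
+            %% is closed and the driver is finally unloaded.
+            receive
+                {'DOWN', Ref, driver, _, unloaded} ->
+                    ok
+            end;
+        Other ->
+            %% For example {ok, pending_process} or {error, _}.
+            Other
+    end.</pre>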
</desc>
</func>
<func>
<name name="unload" arity="1"/>
- <fsummary>Unload a driver</fsummary>
+ <fsummary>Unload a driver.</fsummary>
<desc>
<p>Unloads, or at least dereferences the driver named
- <c><anno>Name</anno></c>. If the caller is the last <seealso marker="#users">user</seealso> of the driver, and there
- are no more open ports using the driver, the driver will
- actually get unloaded. In all other cases, actual unloading
- will be delayed until all ports are closed and there are no
- remaining <seealso marker="#users">users</seealso>.</p>
- <p>If there are other <seealso marker="#users">users</seealso> of the driver, the reference
- counts of the driver is merely decreased, so that the caller
- is no longer considered a user of the driver. For usage
- scenarios, see the <seealso marker="#scenarios">description</seealso> in the beginning
- of this document. </p>
+ <c><anno>Name</anno></c>. If the caller is the last
+ <seealso marker="#users">user</seealso> of the driver,
+ and no more open ports use the driver, the driver
+ gets unloaded. Otherwise, unloading
+ is delayed until all ports are closed and no
+ <seealso marker="#users">users</seealso> remain.</p>
+ <p>If there are other <seealso marker="#users">users</seealso>
+      of the driver, the reference count of the driver is merely decreased,
+ so that the caller is no longer considered a
+ <seealso marker="#users">user</seealso> of the driver. For use
+      scenarios, see the <seealso marker="#scenarios">description</seealso>
+ in the beginning of this module.</p>
<p>The <c><anno>ErrorDesc</anno></c> returned is an opaque value to be
- passed further on to the <seealso marker="#format_error/1">format_error/1</seealso>
- function. For more control over the operation, use the
- <seealso marker="#try_unload/2">try_unload/2</seealso>
+ passed further on to function
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>.
+ For more control over the operation, use the
+ <seealso marker="#try_unload/2"><c>try_unload/2</c></seealso>
interface.</p>
<p>The function throws a <c>badarg</c> exception if the
- parameters are not given as described above. </p>
+ parameters are not specified as described here.</p>
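+        <p><em>Example</em> (a sketch only; the driver name <c>my_drv</c>
+          is hypothetical):</p>
+        <pre>
+case erl_ddll:unload(my_drv) of
+    ok ->
+        ok;
+    {error, ErrorDesc} ->
+        %% ErrorDesc is opaque; format_error/1 turns it into a string.
+        io:format("unload failed: ~s~n",
+                  [erl_ddll:format_error(ErrorDesc)])
+end.</pre>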
</desc>
</func>
<func>
<name name="unload_driver" arity="1"/>
- <fsummary>Unload a driver</fsummary>
+ <fsummary>Unload a driver.</fsummary>
<desc>
<p>Unloads, or at least dereferences the driver named
- <c><anno>Name</anno></c>. If the caller is the last <seealso marker="#users">user</seealso> of the driver, all
- remaining open ports using the driver will get killed with
- the reason <c>driver_unloaded</c> and the driver will
- eventually get unloaded.</p>
+ <c><anno>Name</anno></c>. If the caller is the last
+ <seealso marker="#users">user</seealso> of the driver, all
+ remaining open ports using the driver are killed with
+ reason <c>driver_unloaded</c> and the driver
+ eventually gets unloaded.</p>
<p>If there are other <seealso marker="#users">users</seealso>
of the driver, the reference counts of the driver is merely
decreased, so that the caller is no longer considered a
<seealso marker="#users">user</seealso>. For
- usage scenarios, see the <seealso marker="#scenarios">description</seealso> in the beginning
- of this document.</p>
+ use scenarios, see the
+      <seealso marker="#scenarios">description</seealso> in the
+ beginning of this module.</p>
<p>The <c><anno>ErrorDesc</anno></c> returned is an opaque value to be
- passed further on to the <seealso marker="#format_error/1">format_error/1</seealso>
- function. For more control over the operation, use the
- <seealso marker="#try_unload/2">try_unload/2</seealso>
+ passed further on to function
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>.
+ For more control over the operation, use the
+ <seealso marker="#try_unload/2"><c>try_unload/2</c></seealso>
interface.</p>
<p>The function throws a <c>badarg</c> exception if the
- parameters are not given as described above. </p>
- </desc>
- </func>
- <func>
- <name name="loaded_drivers" arity="0"/>
- <fsummary>List loaded drivers</fsummary>
- <desc>
- <p>Returns a list of all the available drivers, both
- (statically) linked-in and dynamically loaded ones.</p>
- <p>The driver names are returned as a list of strings rather
- than a list of atoms for historical reasons.</p>
- <p>More information about drivers can be obtained using one of
- the <seealso marker="#info/0">info</seealso> functions.</p>
- </desc>
- </func>
- <func>
- <name name="format_error" arity="1"/>
- <fsummary>Format an error descriptor</fsummary>
- <desc>
- <p>Takes an <c><anno>ErrorDesc</anno></c> returned by load, unload or
- reload functions and returns a string which
- describes the error or warning.</p>
- <note>
- <p>Due to peculiarities in the dynamic loading interfaces on
- different platform, the returned string is only guaranteed
- to describe the correct error <em>if format_error/1 is called in the same instance of the erlang virtual machine as the error appeared in</em> (meaning the same operating
- system process)!</p>
- </note>
+ parameters are not specified as described here.</p>
</desc>
</func>
</funcs>
-
<section>
- <title>SEE ALSO</title>
- <p>erl_driver(4), driver_entry(4)</p>
+ <title>See Also</title>
+ <p><seealso marker="erts:erl_driver"><c>erts:erl_driver(4)</c></seealso>,
+ <seealso marker="erts:driver_entry"><c>erts:driver_entry(4)</c></seealso></p>
</section>
</erlref>
-
diff --git a/lib/kernel/doc/src/erl_epmd.xml b/lib/kernel/doc/src/erl_epmd.xml
new file mode 100644
index 0000000000..8b076cd2d7
--- /dev/null
+++ b/lib/kernel/doc/src/erl_epmd.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>erl_epmd</title>
+ <prepared>Timmo Verlaan</prepared>
+ <docno>1</docno>
+ <date>2018-02-19</date>
+ <rev>A</rev>
+ </header>
+ <module>erl_epmd</module>
+ <modulesummary>
+    Erlang interface towards epmd.
+ </modulesummary>
+ <description>
+    <p>This module communicates with the EPMD daemon, see <seealso
+    marker="erts:epmd">epmd</seealso>. To implement your own epmd module,
+    see <seealso marker="erts:alt_disco">ERTS User's Guide: How to Implement an
+    Alternative Service Discovery for Erlang Distribution</seealso>.</p>
+ </description>
+
+ <funcs>
+ <func>
+ <name name="start_link" arity="0"/>
+ <fsummary>Callback for erl_distribution supervisor.</fsummary>
+ <desc>
+      <p>This function is invoked when this module is added as a child of the
+      <c>erl_distribution</c> supervisor.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="register_node" arity="2"/>
+ <name name="register_node" arity="3"/>
+ <fsummary>Registers the node with <c>epmd</c>.</fsummary>
+ <desc>
+      <p>Registers the node with <c>epmd</c> and tells <c>epmd</c> what port
+      is used for the current node. It returns a creation number. This number
+      is incremented on each registration, to help identify whether a node is
+      reconnecting to <c>epmd</c>.</p>
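+      <p><em>Example</em> (a sketch only; the node name, port number, and
+      returned creation number are illustrative):</p>
+      <pre>
+1> <input>erl_epmd:register_node("arne", 40262).</input>
+{ok,1}</pre>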
+ </desc>
+ </func>
+
+ <func>
+ <name name="port_please" arity="2"/>
+ <name name="port_please" arity="3"/>
+ <fsummary>Returns the port number for a given node.</fsummary>
+ <desc>
+      <p>Requests the distribution port for the given node from an EPMD
+      instance. Together with the port, it returns the distribution protocol
+      version, which has been 5 since Erlang/OTP R6.</p>
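+      <p><em>Example</em> (a sketch only; the node name and the returned port
+      number are illustrative):</p>
+      <pre>
+1> <input>erl_epmd:port_please("arne", "localhost").</input>
+{port,40262,5}</pre>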
+ </desc>
+ </func>
+
+ <func>
+ <name name="address_please" arity="3"/>
+ <fsummary>Returns address and port.</fsummary>
+ <desc>
+ <p>Called by the distribution module. Resolves the <c>Host</c> to an IP
+ address.</p>
+      <p>An alternative epmd module can also return the port and the
+      distribution protocol version.</p>
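+      <p><em>Example</em> (a sketch only; with the default <c>erl_epmd</c>
+      module, only the resolved address is returned):</p>
+      <pre>
+1> <input>erl_epmd:address_please("arne", "localhost", inet).</input>
+{ok,{127,0,0,1}}</pre>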
+ </desc>
+ </func>
+
+ <func>
+ <name name="names" arity="1"/>
+ <fsummary>Names of Erlang nodes at a host.</fsummary>
+ <desc>
+ <p>Called by <seealso marker="net_adm"><c>net_adm:names/0</c></seealso>.
+ <c>Host</c> defaults to the localhost. Returns the names and associated
+ port numbers of the Erlang nodes that <c>epmd</c> registered at the
+ specified host. Returns <c>{error, address}</c> if <c>epmd</c> is not
+ operational.</p>
+ <p><em>Example:</em></p>
+ <pre>
+(arne@dunn)1> <input>erl_epmd:names(localhost).</input>
+{ok,[{"arne",40262}]}</pre>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
diff --git a/lib/kernel/doc/src/erl_prim_loader_stub.xml b/lib/kernel/doc/src/erl_prim_loader_stub.xml
index 8d5f58dc3a..f3189e2a66 100644
--- a/lib/kernel/doc/src/erl_prim_loader_stub.xml
+++ b/lib/kernel/doc/src/erl_prim_loader_stub.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>1997</year>
- <year>2013</year>
+ <year>2016</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -37,7 +37,7 @@
The module erl_prim_loader is moved to the runtime system
application. Please see <seealso
marker="erts:erl_prim_loader">erl_prim_loader(3)</seealso> in the
- erts reference manual instead.
+ ERTS reference manual instead.
</p></description>
</erlref>
diff --git a/lib/kernel/doc/src/erlang_stub.xml b/lib/kernel/doc/src/erlang_stub.xml
index b9867806bd..afd353e438 100644
--- a/lib/kernel/doc/src/erlang_stub.xml
+++ b/lib/kernel/doc/src/erlang_stub.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>1997</year>
- <year>2013</year>
+ <year>2016</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -37,7 +37,7 @@
The module erlang is moved to the runtime system
application. Please see <seealso
marker="erts:erlang">erlang(3)</seealso> in the
- erts reference manual instead.
+ ERTS reference manual instead.
</p></description>
</erlref>
diff --git a/lib/kernel/doc/src/error_handler.xml b/lib/kernel/doc/src/error_handler.xml
index 6aec5c77d5..e5639487dc 100644
--- a/lib/kernel/doc/src/error_handler.xml
+++ b/lib/kernel/doc/src/error_handler.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>1996</year>
- <year>2013</year>
+ <year>2016</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -31,35 +31,48 @@
<rev></rev>
</header>
<module>error_handler</module>
- <modulesummary>Default System Error Handler</modulesummary>
+ <modulesummary>Default system error handler.</modulesummary>
<description>
- <p>The error handler module defines what happens when certain types
+ <p>This module defines what happens when certain types
of errors occur.</p>
</description>
<funcs>
<func>
+ <name name="raise_undef_exception" arity="3"/>
+ <fsummary>Raise an undef exception.</fsummary>
+ <type_desc variable="Args">
+ A (possibly empty) list of arguments <c>Arg1,..,ArgN</c>
+ </type_desc>
+ <desc>
+ <p>Raises an <c>undef</c> exception with a stacktrace, indicating
+ that <c><anno>Module</anno>:<anno>Function</anno>/N</c> is
+ undefined.
+ </p>
+ </desc>
+ </func>
+ <func>
<name name="undefined_function" arity="3"/>
- <fsummary>Called when an undefined function is encountered</fsummary>
+ <fsummary>Called when an undefined function is encountered.</fsummary>
<type_desc variable="Args">
A (possibly empty) list of arguments <c>Arg1,..,ArgN</c>
</type_desc>
<desc>
- <p>This function is called by the run-time system if a call is made to
+ <p>This function is called by the runtime system if a call is made to
<c><anno>Module</anno>:<anno>Function</anno>(Arg1,.., ArgN)</c> and
- <c><anno>Module</anno>:<anno>Function</anno>/N</c> is undefined. Note that
- <c>undefined_function/3</c> is evaluated inside the process
+ <c><anno>Module</anno>:<anno>Function</anno>/N</c> is undefined.
+ Notice that this function is evaluated inside the process
making the original call.</p>
- <p>This function will first attempt to autoload
+ <p>This function first attempts to autoload
<c><anno>Module</anno></c>. If that is not possible,
- an <c>undef</c> exception will be raised.</p>
+ an <c>undef</c> exception is raised.</p>
- <p>If it was possible to load <c><anno>Module</anno></c>
- and the function <c><anno>Function</anno>/N</c> is exported,
- it will be called.</p>
+ <p>If it is possible to load <c><anno>Module</anno></c>
+ and function <c><anno>Function</anno>/N</c> is exported,
+ it is called.</p>
- <p>Otherwise, if the function <c>'$handle_undefined_function'/2</c>
- is exported, it will be called as
+ <p>Otherwise, if function <c>'$handle_undefined_function'/2</c>
+ is exported, it is called as
<c>'$handle_undefined_function'(</c><anno>Function</anno>,
<anno>Args</anno>).
</p>
@@ -67,61 +80,48 @@
<p>Defining <c>'$handle_undefined_function'/2</c> in
ordinary application code is highly discouraged. It is very
easy to make subtle errors that can take a long time to
- debug. Furthermore, none of the tools for static code
+ debug. Furthermore, none of the tools for static code
analysis (such as Dialyzer and Xref) supports the use of
<c>'$handle_undefined_function'/2</c> and no such support
will be added. Only use this function after having carefully
- considered other, less dangerous, solutions. One example of
+ considered other, less dangerous, solutions. One example of
potential legitimate use is creating stubs for other
sub-systems during testing and debugging.
</p>
</warning>
- <p>Otherwise an <c>undef</c> exception will be raised.</p>
- </desc>
- </func>
- <func>
- <name name="raise_undef_exception" arity="3"/>
- <fsummary>Raise an undef exception</fsummary>
- <type_desc variable="Args">
- A (possibly empty) list of arguments <c>Arg1,..,ArgN</c>
- </type_desc>
- <desc>
- <p>Raise an <c>undef</c> exception with a stacktrace indicating
- that <c><anno>Module</anno>:<anno>Function</anno>/N</c> is
- undefined.
- </p>
+ <p>Otherwise an <c>undef</c> exception is raised.</p>
</desc>
</func>
<func>
<name name="undefined_lambda" arity="3"/>
- <fsummary>Called when an undefined lambda (fun) is encountered</fsummary>
+ <fsummary>Called when an undefined lambda (fun) is encountered.</fsummary>
<type_desc variable="Args">
A (possibly empty) list of arguments <c>Arg1,..,ArgN</c>
</type_desc>
<desc>
<p>This function is evaluated if a call is made to
- <c><anno>Fun</anno>(Arg1,.., ArgN)</c> when the module defining the fun is
- not loaded. The function is evaluated inside the process
+ <c><anno>Fun</anno>(Arg1,.., ArgN)</c> when the module defining
+ the fun is not loaded. The function is evaluated inside the process
making the original call.</p>
<p>If <c><anno>Module</anno></c> is interpreted, the interpreter is invoked
and the return value of the interpreted
<c><anno>Fun</anno>(Arg1,.., ArgN)</c> call is returned.</p>
<p>Otherwise, it returns, if possible, the value of
- <c>apply(<anno>Fun</anno>, <anno>Args</anno>)</c> after an attempt has been made to
- autoload <c><anno>Module</anno></c>. If this is not possible, the call
- fails with exit reason <c>undef</c>.</p>
+ <c>apply(<anno>Fun</anno>, <anno>Args</anno>)</c> after an attempt
+ is made to autoload <c><anno>Module</anno></c>. If this is not possible,
+ the call fails with exit reason <c>undef</c>.</p>
</desc>
</func>
</funcs>
<section>
<title>Notes</title>
- <p>The code in <c>error_handler</c> is complex and should not be
- changed without fully understanding the interaction between
+ <p>The code in <c>error_handler</c> is complex. Do not
+ change it without fully understanding the interaction between
the error handler, the <c>init</c> process of the code server,
and the I/O mechanism of the code.</p>
- <p>Changes in the code which may seem small can cause a deadlock
- as unforeseen consequences may occur. The use of <c>input</c> is
+ <p>Code changes that seem small can cause a deadlock,
+ as unforeseen consequences can occur. The use of <c>input</c> is
dangerous in this type of code.</p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/error_logger.xml b/lib/kernel/doc/src/error_logger.xml
index 92e14c2bef..c3d68fd79f 100644
--- a/lib/kernel/doc/src/error_logger.xml
+++ b/lib/kernel/doc/src/error_logger.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2013</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -29,41 +29,45 @@
<rev></rev>
</header>
<module>error_logger</module>
- <modulesummary>Erlang Error Logger</modulesummary>
+ <modulesummary>Erlang error logger.</modulesummary>
<description>
+
+ <note>
+ <p>In Erlang/OTP 21.0, a new API for logging was added. The
+ old <c>error_logger</c> module can still be used by legacy
+ code, but log events are redirected to the new Logger API. New
+ code should use the Logger API directly.</p>
+ <p><c>error_logger</c> is no longer started by default, but is
+ automatically started when an event handler is added
+ with <c>error_logger:add_report_handler/1,2</c>. The <c>error_logger</c>
+ module is then also added as a handler to the new logger.</p>
+ <p>See <seealso marker="logger"><c>logger(3)</c></seealso> and
+ the <seealso marker="logger_chapter">Logging</seealso> chapter
+ in the User's Guide for more information.</p>
+ </note>
+
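+    <p>For example, where legacy code calls
+      <c>error_logger:error_msg("An error occurred in ~p", [a_module])</c>,
+      new code can log the same event through the Logger API, either with
+      the <c>?LOG_ERROR</c> macro or by calling <c>logger:error/2</c>
+      directly (a minimal sketch; the module name is hypothetical):</p>
+    <pre>
+-module(my_module).
+-include_lib("kernel/include/logger.hrl").
+
+-export([do_something/0]).
+
+do_something() ->
+    %% Macro form; adds location metadata automatically.
+    ?LOG_ERROR("An error occurred in ~p", [?MODULE]),
+    %% Plain function form.
+    logger:error("An error occurred in ~p", [?MODULE]).</pre>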
<p>The Erlang <em>error logger</em> is an event manager (see
<seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso> and
- <seealso marker="stdlib:gen_event">gen_event(3)</seealso>),
- registered as <c>error_logger</c>. Error, warning and info events
- are sent to the error logger from the Erlang runtime system and
- the different Erlang/OTP applications. The events are, by default,
- logged to tty. Note that an event from a process <c>P</c> is
- logged at the node of the group leader of <c>P</c>. This means
- that log output is directed to the node from which a process was
- created, which not necessarily is the same node as where it is
- executing.</p>
- <p>Initially, <c>error_logger</c> only has a primitive event
- handler, which buffers and prints the raw event messages. During
- system startup, the application Kernel replaces this with a
- <em>standard event handler</em>, by default one which writes
- nicely formatted output to tty. Kernel can also be configured so
- that events are logged to file instead, or not logged at all, see
- <seealso marker="kernel_app">kernel(6)</seealso>.</p>
- <p>Also the SASL application, if started, adds its own event
- handler, which by default writes supervisor, crash and progress
- reports to tty. See
- <seealso marker="sasl:sasl_app">sasl(6)</seealso>.</p>
- <p>It is recommended that user defined applications should report
- errors through the error logger, in order to get uniform reports.
- User defined event handlers can be added to handle application
- specific events. (<c>add_report_handler/1,2</c>). Also, there is
- a useful event handler in STDLIB for multi-file logging of events,
- see <c>log_mf_h(3)</c>.</p>
+ <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>),
+ registered as <c>error_logger</c>.</p>
+ <p>Error logger is no longer started by default, but is
+ automatically started when an event handler is added
+ with <seealso marker="#add_report_handler/1">
+ <c>add_report_handler/1,2</c></seealso>. The <c>error_logger</c>
+ module is then also added as a handler to the new logger,
+ causing log events to be forwarded from logger to error logger,
+ and consequently to all installed error logger event
+ handlers.</p>
+ <p>User-defined event handlers can be added to handle application-specific
+ events.</p>
+ <p>Existing event handlers provided by STDLIB and SASL are still
+ available, but are no longer used by OTP.</p>
<p>Warning events were introduced in Erlang/OTP R9C and are enabled
- by default as of 18.0. To retain backwards compatibility with existing
- user defined event handlers, these may be tagged as errors or info
- using the command line flag <c><![CDATA[+W <e | i | w>]]></c>, thus
- showing up as error or info reports in the logs.</p>
+ by default as from Erlang/OTP 18.0. To retain backwards compatibility
+ with existing user-defined event handlers, the warning events can be
+ tagged as <c>errors</c> or <c>info</c> using command-line flag
+ <c><![CDATA[+W <e | i | w>]]></c>, thus showing up as
+ <c>ERROR REPORT</c> or <c>INFO REPORT</c> in the logs.</p>
</description>
<datatypes>
<datatype>
@@ -72,234 +76,228 @@
</datatypes>
<funcs>
<func>
+ <name name="add_report_handler" arity="1"/>
+ <name name="add_report_handler" arity="2"/>
+ <fsummary>Add an event handler to the error logger.</fsummary>
+ <desc>
+ <p>Adds a new event handler to the error logger. The event
+ handler must be implemented as a <c>gen_event</c> callback
+ module, see
+ <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>.</p>
+ <p><c><anno>Handler</anno></c> is typically the name of the callback module
+ and <c><anno>Args</anno></c> is an optional term (defaults to []) passed
+ to the initialization callback function <c><anno>Handler</anno>:init/1</c>.
+ The function returns <c>ok</c> if successful.</p>
+ <p>The event handler must be able to handle the events in this module, see
+ section <seealso marker="#events">Events</seealso>.</p>
+ <p>The first time this function is called,
+ <c>error_logger</c> is added as a Logger handler, and
+ the <c>error_logger</c> process is started.</p>
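+      <p>The following is a minimal sketch of an event handler module
+        (the module name <c>my_report_handler</c> is hypothetical):</p>
+      <pre>
+-module(my_report_handler).
+-behaviour(gen_event).
+
+-export([init/1, handle_event/2, handle_call/2, handle_info/2]).
+
+init(Args) ->
+    {ok, Args}.
+
+handle_event(Event, State) ->
+    %% The possible events are listed in section Events below.
+    %% This sketch simply prints every event, including the
+    %% system-internal ones.
+    io:format("~p~n", [Event]),
+    {ok, State}.
+
+handle_call(_Request, State) ->
+    {ok, ok, State}.
+
+handle_info(_Info, State) ->
+    {ok, State}.</pre>
+      <p>The handler is then added with:</p>
+      <pre>
+1> <input>error_logger:add_report_handler(my_report_handler, []).</input>
+ok</pre>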
+ </desc>
+ </func>
+ <func>
+ <name name="delete_report_handler" arity="1"/>
+ <fsummary>Delete an event handler from the error logger.</fsummary>
+ <desc>
+ <p>Deletes an event handler from the error logger by calling
+ <c>gen_event:delete_handler(error_logger, <anno>Handler</anno>, [])</c>,
+ see <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>.</p>
+ <p>If no more event handlers exist after the deletion,
+ <c>error_logger</c> is removed as a Logger handler, and
+ the <c>error_logger</c> process is stopped.</p>
+ </desc>
+ </func>
+ <func>
<name name="error_msg" arity="1"/>
<name name="error_msg" arity="2"/>
<name name="format" arity="2"/>
- <fsummary>Send an standard error event to the error logger</fsummary>
+ <fsummary>Log a standard error event.</fsummary>
<desc>
- <p>Sends a standard error event to the error logger.
- The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments are the same as
- the arguments of <c>io:format/2</c>. The event is handled by
- the standard event handler.</p>
+ <p>Log a standard error event. The <c><anno>Format</anno></c>
+ and <c><anno>Data</anno></c> arguments are the same as the
+ arguments of
+ <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso>
+ in STDLIB.</p>
+ <p>Error logger forwards the event to Logger, including
+ metadata that allows backwards compatibility with legacy
+ error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+ <p>These functions are kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_ERROR</c></seealso> macro or
+ <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso>
+ instead.</p>
+ <p><em>Example:</em></p>
<pre>
-1> <input>error_logger:error_msg("An error occurred in ~p~n", [a_module]).</input>
-
-=ERROR REPORT==== 11-Aug-2005::14:03:19 ===
+1> <input>error_logger:error_msg("An error occurred in ~p", [a_module]).</input>
+=ERROR REPORT==== 22-May-2018::11:18:43.376917 ===
An error occurred in a_module
ok</pre>
<warning>
- <p>If called with bad arguments, this function can crash
- the standard event handler, meaning no further events are
- logged. When in doubt, use <c>error_report/1</c> instead.</p>
+ <p>If the Unicode translation modifier (<c>t</c>) is used in
+ the format string, all event handlers must ensure that the
+ formatted output is correctly encoded for the I/O
+ device.</p>
</warning>
</desc>
</func>
<func>
<name name="error_report" arity="1"/>
- <fsummary>Send a standard error report event to the error logger</fsummary>
+ <fsummary>Log a standard error event.</fsummary>
<desc>
- <p>Sends a standard error report event to the error logger.
- The event is handled by the standard event handler.</p>
+ <p>Log a standard error event. Error logger forwards the event
+ to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_ERROR</c></seealso> macro or
+ <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso>
+ instead.</p>
+ <p><em>Example:</em></p>
<pre>
2> <input>error_logger:error_report([{tag1,data1},a_term,{tag2,data}]).</input>
-
-=ERROR REPORT==== 11-Aug-2005::13:45:41 ===
+=ERROR REPORT==== 22-May-2018::11:24:23.699306 ===
tag1: data1
a_term
tag2: data
ok
3> <input>error_logger:error_report("Serious error in my module").</input>
-
-=ERROR REPORT==== 11-Aug-2005::13:45:49 ===
+=ERROR REPORT==== 22-May-2018::11:24:45.972445 ===
Serious error in my module
ok</pre>
</desc>
</func>
<func>
<name name="error_report" arity="2"/>
- <fsummary>Send a user defined error report event to the error logger</fsummary>
+ <fsummary>Log a user-defined error event.</fsummary>
<desc>
- <p>Sends a user defined error report event to the error logger.
- An event handler to handle the event is supposed to have been
- added. The event is ignored by the standard event handler.</p>
+ <p>Log a user-defined error event. Error logger forwards the
+ event to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>Error logger also adds a <c>domain</c> field with
+ value <c>[<anno>Type</anno>]</c> to this event's metadata,
+ causing the filters of the default Logger handler to discard
+ the event. A different Logger handler, or an error logger
+ event handler, must be added to handle this event.</p>
<p>It is recommended that <c><anno>Report</anno></c> follows the same
- structure as for <c>error_report/1</c>.</p>
- </desc>
- </func>
- <func>
- <name name="warning_map" arity="0"/>
- <fsummary>Return the current mapping for warning events</fsummary>
- <desc>
- <p>Returns the current mapping for warning events. Events sent
- using <c>warning_msg/1,2</c> or <c>warning_report/1,2</c>
- are tagged as errors, warnings (default) or info, depending
- on the value of the command line flag <c>+W</c>.</p>
- <pre>
-os$ <input>erl</input>
-Erlang (BEAM) emulator version 5.4.8 [hipe] [threads:0] [kernel-poll]
-
-Eshell V5.4.8 (abort with ^G)
-1> <input>error_logger:warning_map().</input>
-warning
-2> <input>error_logger:warning_msg("Warnings tagged as: ~p~n", [warning]).</input>
-
-=WARNING REPORT==== 11-Aug-2005::15:31:55 ===
-Warnings tagged as: warning
-ok
-3>
-User switch command
- --> q
-os$ <input>erl +W e</input>
-Erlang (BEAM) emulator version 5.4.8 [hipe] [threads:0] [kernel-poll]
-
-Eshell V5.4.8 (abort with ^G)
-1> <input>error_logger:warning_map().</input>
-error
-2> <input>error_logger:warning_msg("Warnings tagged as: ~p~n", [error]).</input>
-
-=ERROR REPORT==== 11-Aug-2005::15:31:23 ===
-Warnings tagged as: error
-ok</pre>
- </desc>
- </func>
- <func>
- <name name="warning_msg" arity="1"/>
- <name name="warning_msg" arity="2"/>
- <fsummary>Send a standard warning event to the error logger</fsummary>
- <desc>
- <p>Sends a standard warning event to the error logger.
- The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments are the same as
- the arguments of <c>io:format/2</c>. The event is handled by
- the standard event handler. It is tagged either as an error,
- warning or info, see
- <seealso marker="#warning_map/0">warning_map/0</seealso>.</p>
- <warning>
- <p>If called with bad arguments, this function can crash
- the standard event handler, meaning no further events are
- logged. When in doubt, use <c>warning_report/1</c> instead.</p>
- </warning>
- </desc>
- </func>
- <func>
- <name name="warning_report" arity="1"/>
- <fsummary>Send a standard warning report event to the error logger</fsummary>
- <desc>
- <p>Sends a standard warning report event to the error logger.
- The event is handled by the standard event handler. It is
- tagged either as an error, warning or info, see
- <seealso marker="#warning_map/0">warning_map/0</seealso>.</p>
+ structure as for
+ <seealso marker="#error_report/1"><c>error_report/1</c></seealso>.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_ERROR</c></seealso> macro or
+ <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
- <name name="warning_report" arity="2"/>
- <fsummary>Send a user defined warning report event to the error logger</fsummary>
+ <name name="get_format_depth" arity="0"/>
+ <fsummary>Get the value of the Kernel application variable
+ <c>error_logger_format_depth</c>.</fsummary>
<desc>
- <p>Sends a user defined warning report event to the error
- logger. An event handler to handle the event is supposed to
- have been added. The event is ignored by the standard event
- handler. It is tagged either as an error, warning or info,
- depending on the value of
- <seealso marker="#warning_map/0">warning_map/0</seealso>.</p>
+ <p>Returns <c>max(10, Depth)</c>, where <c>Depth</c> is the
+ value of <c>error_logger_format_depth</c>
+        in the Kernel application, if <c>Depth</c> is an integer. Otherwise,
+ <c>unlimited</c> is returned.</p>
+ <note>
+ <p>The <c>error_logger_format_depth</c> variable
+ is <seealso marker="kernel_app#deprecated-configuration-parameters">
+ deprecated</seealso> since
+ the <seealso marker="logger">Logger API</seealso> was
+ introduced in Erlang/OTP 21.0. The variable, and this
+ function, are kept for backwards compatibility since they
+ still might be used by legacy report handlers.</p>
+ </note>
</desc>
</func>
<func>
<name name="info_msg" arity="1"/>
<name name="info_msg" arity="2"/>
- <fsummary>Send a standard information event to the error logger</fsummary>
+ <fsummary>Log a standard information event.</fsummary>
<desc>
- <p>Sends a standard information event to the error logger.
- The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments are the same as
- the arguments of <c>io:format/2</c>. The event is handled by
- the standard event handler.</p>
+ <p>Log a standard information event. The <c><anno>Format</anno></c>
+ and <c><anno>Data</anno></c> arguments are the same as the
+ arguments of
+ <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso>
+ in STDLIB.</p>
+ <p>Error logger forwards the event to Logger, including
+ metadata that allows backwards compatibility with legacy
+ error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+ <p>These functions are kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_INFO</c></seealso> macro or
+ <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso>
+ instead.</p>
+ <p><em>Example:</em></p>
<pre>
-1> <input>error_logger:info_msg("Something happened in ~p~n", [a_module]).</input>
-
-=INFO REPORT==== 11-Aug-2005::14:06:15 ===
+1> <input>error_logger:info_msg("Something happened in ~p", [a_module]).</input>
+=INFO REPORT==== 22-May-2018::12:03:32.612462 ===
Something happened in a_module
ok</pre>
<warning>
- <p>If called with bad arguments, this function can crash
- the standard event handler, meaning no further events are
- logged. When in doubt, use <c>info_report/1</c> instead.</p>
+ <p>If the Unicode translation modifier (<c>t</c>) is used in
+ the format string, all event handlers must ensure that the
+ formatted output is correctly encoded for the I/O
+ device.</p>
</warning>
</desc>
</func>
<func>
<name name="info_report" arity="1"/>
- <fsummary>Send a standard information report event to the error logger</fsummary>
+ <fsummary>Log a standard information event.</fsummary>
<desc>
- <p>Sends a standard information report event to the error
- logger. The event is handled by the standard event handler.</p>
+ <p>Log a standard information event. Error logger forwards the
+ event to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_INFO</c></seealso> macro or
+ <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso>
+ instead.</p>
+ <p><em>Example:</em></p>
<pre>
2> <input>error_logger:info_report([{tag1,data1},a_term,{tag2,data}]).</input>
-
-=INFO REPORT==== 11-Aug-2005::13:55:09 ===
+=INFO REPORT==== 22-May-2018::12:06:35.994440 ===
tag1: data1
a_term
tag2: data
ok
3> <input>error_logger:info_report("Something strange happened").</input>
-
-=INFO REPORT==== 11-Aug-2005::13:55:36 ===
+=INFO REPORT==== 22-May-2018::12:06:49.066872 ===
Something strange happened
ok</pre>
</desc>
</func>
<func>
<name name="info_report" arity="2"/>
- <fsummary>Send a user defined information report event to the error logger</fsummary>
+ <fsummary>Log a user-defined information event.</fsummary>
<desc>
- <p>Sends a user defined information report event to the error
- logger. An event handler to handle the event is supposed to
- have been added. The event is ignored by the standard event
- handler.</p>
+ <p>Log a user-defined information event. Error logger forwards
+ the event to Logger, including metadata that allows
+ backwards compatibility with legacy error logger event
+ handlers.</p>
+ <p>Error logger also adds a <c>domain</c> field with
+ value <c>[<anno>Type</anno>]</c> to this event's metadata,
+ causing the filters of the default Logger handler to discard
+ the event. A different Logger handler, or an error logger
+ event handler, must be added to handle this event.</p>
<p>It is recommended that <c><anno>Report</anno></c> follows the same
- structure as for <c>info_report/1</c>.</p>
- </desc>
- </func>
- <func>
- <name name="add_report_handler" arity="1"/>
- <name name="add_report_handler" arity="2"/>
- <fsummary>Add an event handler to the error logger</fsummary>
- <desc>
- <p>Adds a new event handler to the error logger. The event
- handler must be implemented as a <c>gen_event</c> callback
- module, see
- <seealso marker="stdlib:gen_event">gen_event(3)</seealso>.</p>
- <p><c><anno>Handler</anno></c> is typically the name of the callback module
- and <c><anno>Args</anno></c> is an optional term (defaults to []) passed
- to the initialization callback function <c><anno>Handler</anno>:init/1</c>.
- The function returns <c>ok</c> if successful.</p>
- <p>The event handler must be able to handle the
- <seealso marker="#events">events</seealso> described below.</p>
- </desc>
- </func>
- <func>
- <name name="delete_report_handler" arity="1"/>
- <fsummary>Delete an event handler from the error logger</fsummary>
- <desc>
- <p>Deletes an event handler from the error logger by calling
- <c>gen_event:delete_handler(error_logger, <anno>Handler</anno>, [])</c>,
- see <seealso marker="stdlib:gen_event">gen_event(3)</seealso>.</p>
- </desc>
- </func>
- <func>
- <name name="tty" arity="1"/>
- <fsummary>Enable or disable printouts to the tty</fsummary>
- <desc>
- <p>Enables (<c><anno>Flag</anno> == true</c>) or disables
- (<c><anno>Flag</anno> == false</c>) printout of standard events to the tty.</p>
- <p>This is done by adding or deleting the standard event handler
- for output to tty, thus calling this function overrides
- the value of the Kernel <c>error_logger</c> configuration
- parameter.</p>
+ structure as for
+ <seealso marker="#info_report/1"><c>info_report/1</c></seealso>.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_INFO</c></seealso> macro or
+ <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
<name name="logfile" arity="1" clause_i="1"/>
<name name="logfile" arity="1" clause_i="2"/>
<name name="logfile" arity="1" clause_i="3"/>
- <fsummary>Enable or disable error printouts to a file</fsummary>
+ <fsummary>Enable or disable error printouts to a file.</fsummary>
<type variable="Filename"/>
<type variable="OpenReason" name_i="1"/>
<type variable="CloseReason" name_i="2"/>
@@ -307,23 +305,31 @@ ok</pre>
<type name="open_error"/>
<desc>
<p>Enables or disables printout of standard events to a file.</p>
- <p>This is done by adding or deleting the standard event handler
- for output to file, thus calling this function overrides
- the value of the Kernel <c>error_logger</c> configuration
- parameter.</p>
- <p>Enabling file logging can be used in combination with calling
- <c>tty(false)</c>, in order to have a silent system, where
- all standard events are logged to a file only.
- There can only be one active log file at a time.</p>
- <p><c>Request</c> is one of:</p>
+ <p>This is done by adding or deleting
+ the <c>error_logger_file_h</c> event handler, and thus
+ indirectly adding <c>error_logger</c> as a Logger
+ handler.</p>
+ <p>Notice that this function does not manipulate the Logger
+ configuration directly, meaning that if the default Logger
+ handler is already logging to a file, this function can
+ potentially cause logging to a second file.</p>
+ <p>This function is useful as a shortcut during development
+ and testing, but must not be used in a production
+ system. See
+ section <seealso marker="logger_chapter">Logging</seealso>
+ in the Kernel User's Guide, and
+ the <seealso marker="logger"><c>logger(3)</c></seealso>
+ manual page for information about how to configure Logger
+ for live systems.</p>
+ <p><c>Request</c> is one of the following:</p>
<taglist>
<tag><c>{open, <anno>Filename</anno>}</c></tag>
<item>
- <p>Opens the log file <c><anno>Filename</anno></c>. Returns <c>ok</c> if
+ <p>Opens log file <c><anno>Filename</anno></c>. Returns <c>ok</c> if
successful, or <c>{error, allready_have_logfile}</c> if
logging to file is already enabled, or an error tuple if
- another error occurred. For example, if <c><anno>Filename</anno></c>
- could not be opened.</p>
+ another error occurred (for example, if <c><anno>Filename</anno></c>
+ cannot be opened). The file is opened with encoding UTF-8.</p>
</item>
<tag><c>close</c></tag>
<item>
@@ -339,6 +345,130 @@ ok</pre>
</taglist>
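+        <p><em>Example</em> (the filename is an example only):</p>
+        <pre>
+1> <input>error_logger:logfile({open, "kernel.log"}).</input>
+ok
+2> <input>error_logger:logfile(filename).</input>
+"kernel.log"
+3> <input>error_logger:logfile(close).</input>
+ok</pre>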
</desc>
</func>
+ <func>
+ <name name="tty" arity="1"/>
+ <fsummary>Enable or disable printouts to the terminal.</fsummary>
+ <desc>
+ <p>Enables (<c><anno>Flag</anno> == true</c>) or disables
+ (<c><anno>Flag</anno> == false</c>) printout of standard events
+ to the terminal.</p>
+ <p>This is done by manipulating the Logger configuration. The
+ function is useful as a shortcut during development and
+ testing, but must not be used in a production system. See
+ section <seealso marker="logger_chapter">Logging</seealso>
+ in the Kernel User's Guide, and
+ the <seealso marker="logger"><c>logger(3)</c></seealso>
+ manual page for information about how to configure Logger
+ for live systems.</p>
+ </desc>
+ </func>
+ <func>
+ <name name="warning_map" arity="0"/>
+ <fsummary>Return the current mapping for warning events.</fsummary>
+ <desc>
+ <p>Returns the current mapping for warning events. Events sent
+ using <c>warning_msg/1,2</c> or <c>warning_report/1,2</c>
+ are tagged as errors, warnings (default), or info, depending
+ on the value of command-line flag <c>+W</c>.</p>
+ <p><em>Example:</em></p>
+ <pre>
+os$ <input>erl</input>
+Erlang (BEAM) emulator version 5.4.8 [hipe] [threads:0] [kernel-poll]
+
+Eshell V5.4.8 (abort with ^G)
+1> <input>error_logger:warning_map().</input>
+warning
+2> <input>error_logger:warning_msg("Warnings tagged as: ~p~n", [warning]).</input>
+
+=WARNING REPORT==== 11-Aug-2005::15:31:55 ===
+Warnings tagged as: warning
+ok
+3>
+User switch command
+ --> q
+os$ <input>erl +W e</input>
+Erlang (BEAM) emulator version 5.4.8 [hipe] [threads:0] [kernel-poll]
+
+Eshell V5.4.8 (abort with ^G)
+1> <input>error_logger:warning_map().</input>
+error
+2> <input>error_logger:warning_msg("Warnings tagged as: ~p~n", [error]).</input>
+
+=ERROR REPORT==== 11-Aug-2005::15:31:23 ===
+Warnings tagged as: error
+ok</pre>
+ </desc>
+ </func>
+ <func>
+ <name name="warning_msg" arity="1"/>
+ <name name="warning_msg" arity="2"/>
+ <fsummary>Log a standard warning event.</fsummary>
+ <desc>
+ <p>Log a standard warning event. The <c><anno>Format</anno></c>
+ and <c><anno>Data</anno></c> arguments are the same as the
+ arguments of
+ <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso>
+ in STDLIB.</p>
+ <p>Error logger forwards the event to Logger, including
+ metadata that allows backwards compatibility with legacy
+ error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler. The log
+ level can be changed to error or info, see
+ <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p>
+ <p>These functions are kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_WARNING</c></seealso> macro or
+ <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso>
+ instead.</p>
+ <warning>
+ <p>If the Unicode translation modifier (<c>t</c>) is used in
+ the format string, all event handlers must ensure that the
+ formatted output is correctly encoded for the I/O
+ device.</p>
+ </warning>
+ </desc>
+ </func>
+ <func>
+ <name name="warning_report" arity="1"/>
+ <fsummary>Log a standard warning event.</fsummary>
+ <desc>
+ <p>Log a standard warning event. Error logger forwards the event
+ to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler. The log
+ level can be changed to error or info, see
+ <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_WARNING</c></seealso> macro or
+ <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso>
+ instead.</p>
+ </desc>
+ </func>
+ <func>
+ <name name="warning_report" arity="2"/>
+ <fsummary>Log a user-defined warning event.</fsummary>
+ <desc>
+ <p>Log a user-defined warning event. Error logger forwards the
+ event to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>Error logger also adds a <c>domain</c> field with
+ value <c>[<anno>Type</anno>]</c> to this event's metadata,
+ causing the filters of the default Logger handler to discard
+ the event. A different Logger handler, or an error logger
+ event handler, must be added to handle this event.</p>
+ <p>The log level can be changed to error or info, see
+ <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p>
+ <p>It is recommended that <c><anno>Report</anno></c> follows the same
+ structure as for
+ <seealso marker="#warning_report/1"><c>warning_report/1</c></seealso>.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_WARNING</c></seealso> macro or
+ <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso>
+ instead.</p>
+ </desc>
+ </func>
</funcs>
<section>
@@ -346,8 +476,8 @@ ok</pre>
<title>Events</title>
<p>All event handlers added to the error logger must handle
the following events. <c>Gleader</c> is the group leader pid of
- the process which sent the event, and <c>Pid</c> is the process
- which sent the event.</p>
+ the process that sent the event, and <c>Pid</c> is the process
+ that sent the event.</p>
<taglist>
<tag><c>{error, Gleader, {Pid, Format, Data}}</c></tag>
<item>
@@ -364,18 +494,18 @@ ok</pre>
</item>
<tag><c>{warning_msg, Gleader, {Pid, Format, Data}}</c></tag>
<item>
- <p>Generated when <c>warning_msg/1,2</c> is called, provided
- that warnings are set to be tagged as warnings.</p>
+ <p>Generated when <c>warning_msg/1,2</c> is called
+ if warnings are set to be tagged as warnings.</p>
</item>
<tag><c>{warning_report, Gleader, {Pid, std_warning, Report}}</c></tag>
<item>
- <p>Generated when <c>warning_report/1</c> is called, provided
- that warnings are set to be tagged as warnings.</p>
+ <p>Generated when <c>warning_report/1</c> is called
+ if warnings are set to be tagged as warnings.</p>
</item>
<tag><c>{warning_report, Gleader, {Pid, Type, Report}}</c></tag>
<item>
- <p>Generated when <c>warning_report/2</c> is called, provided
- that warnings are set to be tagged as warnings.</p>
+ <p>Generated when <c>warning_report/2</c> is called
+ if warnings are set to be tagged as warnings.</p>
</item>
<tag><c>{info_msg, Gleader, {Pid, Format, Data}}</c></tag>
<item>
@@ -390,17 +520,20 @@ ok</pre>
<p>Generated when <c>info_report/2</c> is called.</p>
</item>
</taglist>
- <p>Note that also a number of system internal events may be
- received, a catch-all clause last in the definition of
+ <p>Notice that some system-internal events can also be
+ received. Therefore a catch-all clause last in the definition of
the event handler callback function <c>Module:handle_event/2</c>
- is necessary. This also holds true for
- <c>Module:handle_info/2</c>, as there are a number of system
- internal messages the event handler must take care of as well.</p>
+ is necessary. This also applies for
+ <c>Module:handle_info/2</c>, as the event handler must also take care of
+ some system-internal messages.</p>
</section>
-
<section>
- <title>SEE ALSO</title>
- <p>gen_event(3), log_mf_h(3), kernel(6), sasl(6)</p>
+ <title>See Also</title>
+ <p><seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>,
+ <seealso marker="kernel:logger"><c>logger(3)</c></seealso>,
+ <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>,
+ <seealso marker="kernel_app"><c>kernel(6)</c></seealso>,
+ <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/fascicules.xml b/lib/kernel/doc/src/fascicules.xml
deleted file mode 100644
index fadd37eefb..0000000000
--- a/lib/kernel/doc/src/fascicules.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-<?xml version="1.0" encoding="utf-8" ?>
-<!DOCTYPE fascicules SYSTEM "fascicules.dtd">
-
-<fascicules>
- <fascicule file="ref_man" href="ref_man_frame.html" entry="yes">
- Reference Manual
- </fascicule>
- <fascicule file="part_notes" href="part_notes_frame.html" entry="no">
- Release Notes
- </fascicule>
- <fascicule file="" href="../../../../doc/print.html" entry="no">
- Off-Print
- </fascicule>
-</fascicules>
-
diff --git a/lib/kernel/doc/src/file.xml b/lib/kernel/doc/src/file.xml
index 831ef1c22a..9acaf6b41e 100644
--- a/lib/kernel/doc/src/file.xml
+++ b/lib/kernel/doc/src/file.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2014</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -29,56 +29,70 @@
<rev></rev>
</header>
<module>file</module>
- <modulesummary>File Interface Module</modulesummary>
+ <modulesummary>File interface module.</modulesummary>
<description>
- <p>The module <c>file</c> provides an interface to the file system.</p>
- <p>On operating systems with thread support, it is possible to let
- file operations be performed in threads of their own, allowing
- other Erlang processes to continue executing in parallel with
- the file operations. See the command line flag
- <c>+A</c> in <seealso marker="erts:erl">erl(1)</seealso>.</p>
+ <p>This module provides an interface to the file system.</p>
- <p>With regard to file name encoding, the Erlang VM can operate in
- two modes. The current mode can be queried using the <seealso
- marker="#native_name_encoding">native_name_encoding/0</seealso>
- function. It returns either <c>latin1</c> or <c>utf8</c>.</p>
+ <warning>
+ <p>File operations are only guaranteed to appear atomic when going
+ through the same file server. A NIF or other OS process may observe
+      through the same file server. A NIF or other OS process can observe
+      intermediate steps of certain operations on some operating systems,
+      for example, renaming an existing file on Windows, or
+ </seealso> on any OS at the time of writing.</p>
+ </warning>
- <p>In the <c>latin1</c> mode, the Erlang VM does not change the
- encoding of file names. In the <c>utf8</c> mode, file names can
- contain Unicode characters greater than 255 and the VM will
- convert file names back and forth to the native file name encoding
+ <p>Regarding filename encoding, the Erlang VM can operate in
+ two modes. The current mode can be queried using function
+ <seealso marker="#native_name_encoding/0"><c>native_name_encoding/0</c></seealso>.
+ It returns <c>latin1</c> or <c>utf8</c>.</p>
+
+ <p>In <c>latin1</c> mode, the Erlang VM does not change the
+ encoding of filenames. In <c>utf8</c> mode, filenames can
+ contain Unicode characters greater than 255 and the VM
+ converts filenames back and forth to the native filename encoding
(usually UTF-8, but UTF-16 on Windows).</p>
<p>The default mode depends on the operating system. Windows and
- MacOS X enforce consistent file name encoding and therefore the
- VM uses the <c>utf8</c> mode.</p>
+ MacOS X enforce consistent filename encoding and therefore the
+ VM uses <c>utf8</c> mode.</p>
- <p>On operating systems with transparent naming (i.e. all Unix
- systems except MacOS X), the default will be <c>utf8</c> if the
- terminal supports UTF-8, otherwise <c>latin1</c>. The default may
- be overridden using the <c>+fnl</c> (to force <c>latin1</c> mode)
- or <c>+fnu</c> (to force <c>utf8</c> mode) when starting <seealso
- marker="erts:erl">erl</seealso>.</p>
+ <p>On operating systems with transparent naming (for example, all Unix
+ systems except MacOS X), default is <c>utf8</c> if the
+ terminal supports UTF-8, otherwise <c>latin1</c>. The default can
+ be overridden using <c>+fnl</c> (to force <c>latin1</c> mode)
+ or <c>+fnu</c> (to force <c>utf8</c> mode) when starting
+ <seealso marker="erts:erl"><c>erl</c></seealso>.</p>
- <p>On operating systems with transparent naming, files could be
- inconsistently named, i.e. some files are encoded in UTF-8 while
- others are encoded in (for example) iso-latin1. To be able to
- handle file systems with inconsistent naming when running in the
- <c>utf8</c> mode, the concept of "raw file names" has been
- introduced.</p>
+ <p>On operating systems with transparent naming, files can be
+ inconsistently named, for example, some files are encoded in UTF-8 while
+ others are encoded in ISO Latin-1. The concept of <em>raw filenames</em> is
+ introduced to handle file systems with inconsistent naming when running in
+ <c>utf8</c> mode.</p>
- <p>A raw file name is a file name given as a binary. The Erlang VM
- will perform no translation of a file name given as a binary on
+ <p>A <em>raw filename</em> is a filename specified as a binary. The Erlang VM
+ does not translate a filename specified as a binary on
systems with transparent naming.</p>
- <p>When running in the <c>utf8</c> mode, the
- <c>file:list_dir/1</c> and <c>file:read_link/1</c> functions will
- never return raw file names. Use the <seealso
- marker="#list_dir_all">list_dir_all/1</seealso> and <seealso
- marker="#read_link_all">read_link_all/1</seealso> functions to
- return all file names including raw file names.</p>
+ <p>When running in <c>utf8</c> mode, functions
+ <seealso marker="#list_dir/1"><c>list_dir/1</c></seealso> and
+ <seealso marker="#read_link/1"><c>read_link/1</c></seealso>
+ never return raw filenames. To return all filenames including raw filenames,
+ use functions
+ <seealso marker="#list_dir_all"><c>list_dir_all/1</c></seealso> and
+ <seealso marker="#read_link_all"><c>read_link_all/1</c></seealso>.</p>
+
+ <p>See also section <seealso marker="stdlib:unicode_usage#notes-about-raw-filenames">Notes About Raw Filenames</seealso> in the STDLIB User's Guide.</p>
- <p>Also see <seealso marker="stdlib:unicode_usage#notes-about-raw-filenames">Notes about raw file names</seealso>.</p>
+ <note><p>
+ File operations used to accept filenames containing
+ null characters (integer value zero). This caused
+ the name to be truncated and in some cases arguments
+ to primitive operations to be mixed up. Filenames
+ containing null characters inside the filename
+ are now <em>rejected</em> and cause primitive
+ file operations to fail.
+ </p></note>
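+ <p>As an illustration, a minimal sketch (the directory and its contents are
+ hypothetical) showing that, in <c>utf8</c> mode, a file whose name is not
+ valid UTF-8 is only visible through the <c>_all</c> variants:</p>
+<code type="none">
+utf8 = file:native_name_encoding(),
+{ok, Translatable} = file:list_dir("/tmp/mixed"),    %% raw names are omitted here
+{ok, AllNames} = file:list_dir_all("/tmp/mixed").    %% raw names appear as binaries</code>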
</description>
@@ -89,22 +103,33 @@
<datatype>
<name>fd()</name>
<desc>
- <p><marker id="type-fd"/>
- A file descriptor representing a file opened in <seealso
- marker="#raw">raw</seealso> mode.</p>
+ <p>A file descriptor representing a file opened in
+ <seealso marker="#raw"><c>raw</c></seealso> mode.</p>
</desc>
</datatype>
<datatype>
<name name="filename"/>
+ <desc>
+ <p>
+ See also the documentation of the
+ <seealso marker="#type-name_all"><c>name_all()</c></seealso> type.
+ </p>
+ </desc>
</datatype>
<datatype>
<name name="filename_all"/>
+ <desc>
+ <p>
+ See also the documentation of the
+ <seealso marker="#type-name_all"><c>name_all()</c></seealso> type.
+ </p>
+ </desc>
</datatype>
<datatype>
<name name="io_device"/>
<desc>
<p>As returned by
- <seealso marker="#open/2">file:open/2</seealso>;
+ <seealso marker="#open/2"><c>open/2</c></seealso>;
<c>pid()</c> is a process handling I/O-protocols.</p>
</desc>
</datatype>
@@ -112,28 +137,30 @@
<name name="name"/>
<desc>
          <p>If the VM is in Unicode filename mode, <c>string()</c> and <c>char()</c>
- are allowed to be > 255.
+ are allowed to be &gt; 255. See also the documentation of the
+ <seealso marker="#type-name_all"><c>name_all()</c></seealso> type.
</p>
</desc>
</datatype>
<datatype>
<name name="name_all"/>
<desc>
- <p>If VM is in Unicode filename mode, <c>string()</c> and <c>char()</c>
- are allowed to be > 255.
+ <p>If the VM is in Unicode filename mode, characters
+ are allowed to be &gt; 255.
<c><anno>RawFilename</anno></c> is a filename not subject to
Unicode translation,
meaning that it can contain characters not conforming to
- the Unicode encoding expected from the filesystem
- (i.e. non-UTF-8 characters although the VM is started
- in Unicode filename mode).
+ the Unicode encoding expected from the file system
+ (that is, non-UTF-8 characters although the VM is started
+ in Unicode filename mode). Null characters (integer value zero)
+ are <em>not</em> allowed in filenames (not even at the end).
</p>
</desc>
</datatype>
<datatype>
<name name="posix"/>
<desc>
- <p>An atom which is named from the POSIX error codes used in
+ <p>An atom that is named from the POSIX error codes used in
Unix, and in the runtime libraries of most C compilers.</p>
</desc>
</datatype>
@@ -160,7 +187,7 @@
<funcs>
<func>
<name name="advise" arity="4"/>
- <fsummary>Predeclare an access pattern for file data</fsummary>
+ <fsummary>Predeclare an access pattern for file data.</fsummary>
<type name="posix_file_advise"/>
<desc>
<p><c>advise/4</c> can be used to announce an intention to access file
@@ -171,80 +198,80 @@
</func>
<func>
<name name="allocate" arity="3"/>
- <fsummary>Allocate file space</fsummary>
+ <fsummary>Allocate file space.</fsummary>
<desc>
<p><c>allocate/3</c> can be used to preallocate space for a file.</p>
- <p>This function only succeeds in platforms that implement this
+ <p>This function only succeeds in platforms that provide this
feature. When it succeeds, space is preallocated for the file but
the file size might not be updated. This behaviour depends on the
- preallocation implementation. To guarantee the file size is updated
- one must truncate the file to the new size.</p>
+ preallocation implementation. To guarantee that the file size is updated,
+ truncate the file to the new size.</p>
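+ <p>A minimal sketch (filename hypothetical) of preallocating 10 MB and then
+ truncating to that size so that the file size is updated:</p>
+<code type="none">
+{ok, Fd} = file:open("preallocated.dat", [write, raw]),
+ok = file:allocate(Fd, 0, 10*1024*1024),    %% fails with {error, enotsup} on some platforms
+{ok, _} = file:position(Fd, 10*1024*1024),
+ok = file:truncate(Fd),                     %% make the new size visible
+ok = file:close(Fd).</code>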
</desc>
</func>
<func>
<name name="change_group" arity="2"/>
- <fsummary>Change group of a file</fsummary>
+ <fsummary>Change group of a file.</fsummary>
<desc>
<p>Changes group of a file. See
- <seealso marker="#write_file_info/2">write_file_info/2</seealso>.</p>
+ <seealso marker="#write_file_info/2"><c>write_file_info/2</c></seealso>.</p>
</desc>
</func>
<func>
<name name="change_mode" arity="2"/>
- <fsummary>Change permissions of a file</fsummary>
+ <fsummary>Change permissions of a file.</fsummary>
<desc>
<p>Changes permissions of a file. See
- <seealso marker="#write_file_info/2">write_file_info/2</seealso>.</p>
+ <seealso marker="#write_file_info/2"><c>write_file_info/2</c></seealso>.</p>
</desc>
</func>
<func>
<name name="change_owner" arity="2"/>
- <fsummary>Change owner of a file</fsummary>
+ <fsummary>Change owner of a file.</fsummary>
<desc>
<p>Changes owner of a file. See
- <seealso marker="#write_file_info/2">write_file_info/2</seealso>.</p>
+ <seealso marker="#write_file_info/2"><c>write_file_info/2</c></seealso>.</p>
</desc>
</func>
<func>
<name name="change_owner" arity="3"/>
- <fsummary>Change owner and group of a file</fsummary>
+ <fsummary>Change owner and group of a file.</fsummary>
<desc>
<p>Changes owner and group of a file. See
- <seealso marker="#write_file_info/2">write_file_info/2</seealso>.</p>
+ <seealso marker="#write_file_info/2"><c>write_file_info/2</c></seealso>.</p>
</desc>
</func>
<func>
<name name="change_time" arity="2"/>
- <fsummary>Change the modification time of a file</fsummary>
+ <fsummary>Change the modification time of a file.</fsummary>
<desc>
<p>Changes the modification and access times of a file. See
- <seealso marker="#write_file_info/2">write_file_info/2</seealso>.</p>
+ <seealso marker="#write_file_info/2"><c>write_file_info/2</c></seealso>.</p>
</desc>
</func>
<func>
<name name="change_time" arity="3"/>
- <fsummary>Change the modification and last access time of a file</fsummary>
+ <fsummary>Change the modification and last access time of a file.</fsummary>
<desc>
<p>Changes the modification and last access times of a file. See
- <seealso marker="#write_file_info/2">write_file_info/2</seealso>.</p>
+ <seealso marker="#write_file_info/2"><c>write_file_info/2</c></seealso>.</p>
</desc>
</func>
<func>
<name name="close" arity="1"/>
- <fsummary>Close a file</fsummary>
+ <fsummary>Close a file.</fsummary>
<desc>
<p>Closes the file referenced by <c><anno>IoDevice</anno></c>. It mostly
- returns <c>ok</c>, expect for some severe errors such as out
+ returns <c>ok</c>, except for some severe errors such as out
of memory.</p>
- <p>Note that if the option <c>delayed_write</c> was
- used when opening the file, <c>close/1</c> might return an
+ <p>Notice that if option <c>delayed_write</c> was
+ used when opening the file, <c>close/1</c> can return an
old write error and not even try to close the file. See
- <seealso marker="#open/2">open/2</seealso>.</p>
+ <seealso marker="#open/2"><c>open/2</c></seealso>.</p>
</desc>
</func>
<func>
<name name="consult" arity="1"/>
- <fsummary>Read Erlang terms from a file</fsummary>
+ <fsummary>Read Erlang terms from a file.</fsummary>
<desc>
<p>Reads Erlang terms, separated by '.', from
<c><anno>Filename</anno></c>. Returns one of the following:</p>
@@ -256,42 +283,44 @@
<tag><c>{error, atom()}</c></tag>
<item>
<p>An error occurred when opening the file or reading it.
- See <seealso marker="#open/2">open/2</seealso> for a list
- of typical error codes.</p>
+ For a list of typical error codes, see
+ <seealso marker="#open/2"><c>open/2</c></seealso>.</p>
</item>
<tag><c>{error, {<anno>Line</anno>, <anno>Mod</anno>,
<anno>Term</anno>}}</c></tag>
<item>
<p>An error occurred when interpreting the Erlang terms in
- the file. Use <c>format_error/1</c> to convert
- the three-element tuple to an English description of
- the error.</p>
+ the file. To convert the three-element tuple to an English
+ description of the error, use
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>.</p>
</item>
</taglist>
- <p>Example:</p>
-<code type="none">f.txt: {person, "kalle", 25}.
+ <p><em>Example:</em></p>
+<code type="none">
+f.txt: {person, "kalle", 25}.
{person, "pelle", 30}.</code>
-<pre>1> <input>file:consult("f.txt").</input>
+<pre>
+1> <input>file:consult("f.txt").</input>
{ok,[{person,"kalle",25},{person,"pelle",30}]}</pre>
- <p>The encoding of of <c><anno>Filename</anno></c> can be set
- by a comment as described in <seealso
- marker="stdlib:epp#encoding">epp(3)</seealso>.</p>
+ <p>The encoding of <c><anno>Filename</anno></c> can be set
+ by a comment, as described in
+ <seealso marker="stdlib:epp#encoding"><c>epp(3)</c></seealso>.</p>
</desc>
</func>
<func>
<name name="copy" arity="2"/>
<name name="copy" arity="3"/>
- <fsummary>Copy file contents</fsummary>
+ <fsummary>Copy file contents.</fsummary>
<desc>
<p>Copies <c><anno>ByteCount</anno></c> bytes from
<c><anno>Source</anno></c> to <c><anno>Destination</anno></c>.
<c><anno>Source</anno></c> and <c><anno>Destination</anno></c> refer
- to either filenames or IO devices from e.g. <c>open/2</c>.
+ to either filenames or IO devices from, for example, <c>open/2</c>.
<c><anno>ByteCount</anno></c> defaults to <c>infinity</c>, denoting an
infinite number of bytes.</p>
- <p>The argument <c><anno>Modes</anno></c> is a list of possible modes,
- see <seealso marker="#open/2">open/2</seealso>, and defaults to
- [].</p>
+ <p>Argument <c><anno>Modes</anno></c> is a list of possible modes,
+ see <seealso marker="#open/2"><c>open/2</c></seealso>, and defaults to
+ <c>[]</c>.</p>
<p>If both <c><anno>Source</anno></c> and
<c><anno>Destination</anno></c> refer to
filenames, the files are opened with <c>[read, binary]</c>
@@ -303,25 +332,51 @@
<p>If <c><anno>Destination</anno></c> refers to a filename, it is opened
with <c>write</c> mode prepended to the mode list before
the copy, and closed when done.</p>
- <p>Returns <c>{ok, <anno>BytesCopied</anno>}</c> where
+ <p>Returns <c>{ok, <anno>BytesCopied</anno>}</c>, where
<c><anno>BytesCopied</anno></c> is
- the number of bytes that actually was copied, which may be
+ the number of bytes that was copied, which can be
less than <c><anno>ByteCount</anno></c> if end of file was
encountered on the source. If the operation fails,
<c>{error, <anno>Reason</anno>}</c> is returned.</p>
- <p>Typical error reasons: As for <c>open/2</c> if a file had to
- be opened, and as for <c>read/2</c> and <c>write/2</c>.</p>
+ <p>Typical error reasons: as for
+ <seealso marker="#open/2"><c>open/2</c></seealso> if a file
+ had to be opened, and as for
+ <seealso marker="#read/2"><c>read/2</c></seealso> and
+ <seealso marker="#write/2"><c>write/2</c></seealso>.</p>
+ </desc>
+ </func>
+ <func>
+ <name name="datasync" arity="1"/>
+ <fsummary>Synchronize the in-memory data of a file, ignoring most of its metadata, with that on the physical medium.</fsummary>
+ <desc>
+ <p>Ensures that any buffers kept by the operating system
+ (not by the Erlang runtime system) are written to disk. In
+ many ways it resembles <c>fsync</c> but it does not update
+ some of the metadata of the file, such as the access time. On
+ some platforms this function has no effect.</p>
+ <p>Applications that access databases or log files often write
+ a tiny data fragment (for example, one line in a log file) and then
+ call <c>fsync()</c> immediately to ensure that the written
+ data is physically stored on the hard disk. Unfortunately, <c>fsync()</c>
+ always initiates two write operations: one for the newly
+ written data and another one to update the modification
+ time stored in the <c>inode</c>. If the modification time is not a part
+ of the transaction concept, <c>fdatasync()</c> can be used to avoid
+ unnecessary <c>inode</c> disk write operations.</p>
+ <p><c>fdatasync()</c> is available only in some POSIX systems. In systems
+ not providing the <c>fdatasync()</c> syscall, this call results in a call
+ to <c>fsync()</c>, or has no effect.</p>
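+ <p>A minimal sketch (filename hypothetical) of appending a record and making
+ sure the data itself, but not necessarily all metadata, reaches the disk:</p>
+<code type="none">
+{ok, Fd} = file:open("transactions.log", [append, raw]),
+ok = file:write(Fd, "txn 4711 committed\n"),
+ok = file:datasync(Fd),
+ok = file:close(Fd).</code>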
</desc>
</func>
<func>
<name name="del_dir" arity="1"/>
- <fsummary>Delete a directory</fsummary>
+ <fsummary>Delete a directory.</fsummary>
<desc>
- <p>Tries to delete the directory <c><anno>Dir</anno></c>.
+ <p>Tries to delete directory <c><anno>Dir</anno></c>.
The directory must
be empty before it can be deleted. Returns <c>ok</c> if
successful.</p>
- <p>Typical error reasons are:</p>
+ <p>Typical error reasons:</p>
<taglist>
<tag><c>eacces</c></tag>
<item>
@@ -351,11 +406,11 @@
</func>
<func>
<name name="delete" arity="1"/>
- <fsummary>Delete a file</fsummary>
+ <fsummary>Delete a file.</fsummary>
<desc>
- <p>Tries to delete the file <c><anno>Filename</anno></c>.
+ <p>Tries to delete file <c><anno>Filename</anno></c>.
Returns <c>ok</c> if successful.</p>
- <p>Typical error reasons are:</p>
+ <p>Typical error reasons:</p>
<taglist>
<tag><c>enoent</c></tag>
<item>
@@ -367,32 +422,32 @@
</item>
<tag><c>eperm</c></tag>
<item>
- <p>The file is a directory and the user is not super-user.</p>
+ <p>The file is a directory and the user is not superuser.</p>
</item>
<tag><c>enotdir</c></tag>
<item>
- <p>A component of the file name is not a directory. On some
+ <p>A component of the filename is not a directory. On some
platforms, <c>enoent</c> is returned instead.</p>
</item>
<tag><c>einval</c></tag>
<item>
- <p><c><anno>Filename</anno></c> had an improper type, such as tuple.</p>
+ <p><c><anno>Filename</anno></c> has an improper type, such as a tuple.</p>
</item>
</taglist>
<warning>
- <p>In a future release, a bad type for the
- <c><anno>Filename</anno></c> argument will probably generate
+ <p>In a future release, a bad type for argument
+ <c><anno>Filename</anno></c> will probably generate
an exception.</p>
</warning>
</desc>
</func>
<func>
<name name="eval" arity="1"/>
- <fsummary>Evaluate Erlang expressions in a file</fsummary>
+ <fsummary>Evaluate Erlang expressions in a file.</fsummary>
<desc>
<p>Reads and evaluates Erlang expressions, separated by '.' (or
- ',', a sequence of expressions is also an expression), from
- <c><anno>Filename</anno></c>. The actual result of the evaluation
+ ',', a sequence of expressions is also an expression) from
+ <c><anno>Filename</anno></c>. The result of the evaluation
is not returned; any expression sequence in the file must be there
for its side effect. Returns one of the following:</p>
<taglist>
@@ -403,35 +458,36 @@
<tag><c>{error, atom()}</c></tag>
<item>
<p>An error occurred when opening the file or reading it.
- See <c>open/2</c> for a list of typical error codes.</p>
+ For a list of typical error codes, see
+ <seealso marker="#open/2"><c>open/2</c></seealso>.</p>
</item>
<tag><c>{error, {<anno>Line</anno>, <anno>Mod</anno>,
<anno>Term</anno>}}</c></tag>
<item>
<p>An error occurred when interpreting the Erlang
- expressions in the file. Use <c>format_error/1</c> to
- convert the three-element tuple to an English description
- of the error.</p>
+ expressions in the file. To convert the three-element tuple
+ to an English description of the error, use
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>.</p>
</item>
</taglist>
- <p>The encoding of of <c><anno>Filename</anno></c> can be set
- by a comment as described in <seealso
- marker="stdlib:epp#encoding">epp(3)</seealso>.</p>
+ <p>The encoding of <c><anno>Filename</anno></c> can be set
+ by a comment, as described in
+ <seealso marker="stdlib:epp#encoding"><c>epp(3)</c></seealso>.</p>
</desc>
</func>
<func>
<name name="eval" arity="2"/>
- <fsummary>Evaluate Erlang expressions in a file</fsummary>
+ <fsummary>Evaluate Erlang expressions in a file.</fsummary>
<desc>
- <p>The same as <c>eval/1</c> but the variable bindings
- <c><anno>Bindings</anno></c> are used in the evaluation. See
- <seealso marker="stdlib:erl_eval">erl_eval(3)</seealso> about
- variable bindings.</p>
+ <p>The same as <c>eval/1</c>, but the variable bindings
+ <c><anno>Bindings</anno></c> are used in the evaluation. For information
+ about the variable bindings, see
+ <seealso marker="stdlib:erl_eval"><c>erl_eval(3)</c></seealso>.</p>
</desc>
</func>
<func>
<name name="format_error" arity="1"/>
- <fsummary>Return a descriptive string for an error reason</fsummary>
+ <fsummary>Return a descriptive string for an error reason.</fsummary>
<desc>
<p>Given the error reason returned by any function in this
module, returns a descriptive string of the error in English.</p>
@@ -439,17 +495,17 @@
</func>
<func>
<name name="get_cwd" arity="0"/>
- <fsummary>Get the current working directory</fsummary>
+ <fsummary>Get the current working directory.</fsummary>
<desc>
<p>Returns <c>{ok, <anno>Dir</anno>}</c>, where <c><anno>Dir</anno></c>
is the current
working directory of the file server.</p>
<note>
<p>In rare circumstances, this function can fail on Unix.
- It may happen if read permission does not exist for
+ It can occur if read permission does not exist for
the parent directories of the current directory.</p>
</note>
- <p>Typical error reasons are:</p>
+ <p>A typical error reason:</p>
<taglist>
<tag><c>eacces</c></tag>
<item>
@@ -461,17 +517,19 @@
</func>
<func>
<name name="get_cwd" arity="1"/>
- <fsummary>Get the current working directory for the drive specified</fsummary>
+ <fsummary>Get the current working directory for the specified drive.</fsummary>
<desc>
- <p><c><anno>Drive</anno></c> should be of the form
- "<c>Letter</c><c>:</c>",
- for example "c:". Returns <c>{ok, <anno>Dir</anno>}</c> or
+ <p>Returns <c>{ok, <anno>Dir</anno>}</c> or
<c>{error, <anno>Reason</anno>}</c>, where <c><anno>Dir</anno></c>
- is the current
- working directory of the drive specified.</p>
- <p>This function returns <c>{error, enotsup}</c> on platforms
- which have no concept of current drive (Unix, for example).</p>
- <p>Typical error reasons are:</p>
+ is the current working directory of the specified drive.</p>
+
+ <p><c><anno>Drive</anno></c> is to be of the form
+ "<c>Letter</c><c>:</c>", for example, "c:".</p>
+
+ <p>Returns <c>{error, enotsup}</c> on platforms
+ that have no concept of current drive (Unix, for example).</p>
+
+ <p>Typical error reasons:</p>
<taglist>
<tag><c>enotsup</c></tag>
<item>
@@ -490,16 +548,16 @@
</func>
<func>
<name name="list_dir" arity="1"/>
- <fsummary>List files in a directory</fsummary>
+ <fsummary>List files in a directory.</fsummary>
<desc>
<p>Lists all files in a directory, <em>except</em> files
- with "raw" names. Returns
- <c>{ok, <anno>Filenames</anno>}</c> if successful.
- Otherwise, it returns <c>{error, <anno>Reason</anno>}</c>.
+ with raw filenames. Returns
+ <c>{ok, <anno>Filenames</anno>}</c> if successful,
+ otherwise <c>{error, <anno>Reason</anno>}</c>.
<c><anno>Filenames</anno></c> is a list of
the names of all the files in the directory. The names are
not sorted.</p>
- <p>Typical error reasons are:</p>
+ <p>Typical error reasons:</p>
<taglist>
<tag><c>eacces</c></tag>
<item>
@@ -513,24 +571,24 @@
<tag><c>{no_translation, <anno>Filename</anno>}</c></tag>
<item>
<p><c><anno>Filename</anno></c> is a <c>binary()</c> with
- characters coded in ISO-latin-1 and the VM was started
- with the parameter <c>+fnue</c>.</p>
+ characters coded in ISO Latin-1 and the VM was started
+ with parameter <c>+fnue</c>.</p>
</item>
</taglist>
</desc>
</func>
<func>
<name name="list_dir_all" arity="1"/>
- <fsummary>List all files in a directory</fsummary>
+ <fsummary>List all files in a directory.</fsummary>
<desc>
<p><marker id="list_dir_all"/>Lists all the files in a directory,
- including files with "raw" names.
- Returns <c>{ok, <anno>Filenames</anno>}</c> if successful.
- Otherwise, it returns <c>{error, <anno>Reason</anno>}</c>.
+ including files with raw filenames.
+ Returns <c>{ok, <anno>Filenames</anno>}</c> if successful,
+ otherwise <c>{error, <anno>Reason</anno>}</c>.
<c><anno>Filenames</anno></c> is a list of
the names of all the files in the directory. The names are
not sorted.</p>
- <p>Typical error reasons are:</p>
+ <p>Typical error reasons:</p>
<taglist>
<tag><c>eacces</c></tag>
<item>
@@ -546,12 +604,12 @@
</func>
<func>
<name name="make_dir" arity="1"/>
- <fsummary>Make a directory</fsummary>
+ <fsummary>Make a directory.</fsummary>
<desc>
- <p>Tries to create the directory <c><anno>Dir</anno></c>. Missing parent
+ <p>Tries to create directory <c><anno>Dir</anno></c>. Missing parent
directories are <em>not</em> created. Returns <c>ok</c> if
successful.</p>
- <p>Typical error reasons are:</p>
+ <p>Typical error reasons:</p>
<taglist>
<tag><c>eacces</c></tag>
<item>
@@ -560,7 +618,7 @@
</item>
<tag><c>eexist</c></tag>
<item>
- <p>There is already a file or directory named <c><anno>Dir</anno></c>.</p>
+ <p>A file or directory named <c><anno>Dir</anno></c> exists already.</p>
</item>
<tag><c>enoent</c></tag>
<item>
@@ -568,7 +626,7 @@
</item>
<tag><c>enospc</c></tag>
<item>
- <p>There is a no space left on the device.</p>
+ <p>No space is left on the device.</p>
</item>
<tag><c>enotdir</c></tag>
<item>
@@ -580,13 +638,13 @@
</func>
<func>
<name name="make_link" arity="2"/>
- <fsummary>Make a hard link to a file</fsummary>
+ <fsummary>Make a hard link to a file.</fsummary>
<desc>
<p>Makes a hard link from <c><anno>Existing</anno></c> to
- <c><anno>New</anno></c>, on
- platforms that support links (Unix and Windows). This function returns
- <c>ok</c> if the link was successfully created, or
- <c>{error, <anno>Reason</anno>}</c>. On platforms that do not support
+ <c><anno>New</anno></c> on
+ platforms supporting links (Unix and Windows). This function returns
+ <c>ok</c> if the link was successfully created, otherwise
+ <c>{error, <anno>Reason</anno>}</c>. On platforms not supporting
links, <c>{error,enotsup}</c> is returned.</p>
<p>Typical error reasons:</p>
<taglist>
@@ -609,17 +667,16 @@
</func>
<func>
<name name="make_symlink" arity="2"/>
- <fsummary>Make a symbolic link to a file or directory</fsummary>
+ <fsummary>Make a symbolic link to a file or directory.</fsummary>
<desc>
- <p>This function creates a symbolic link <c><anno>New</anno></c> to
- the file or directory <c><anno>Existing</anno></c>, on platforms that
- support symbolic links (most Unix systems and Windows beginning with
+ <p>Creates a symbolic link <c><anno>New</anno></c> to
+ the file or directory <c><anno>Existing</anno></c> on platforms
+ supporting symbolic links (most Unix systems and Windows, beginning with
Vista).
- <c><anno>Existing</anno></c> need not exist.
- This function returns <c>ok</c> if the link was
- successfully created, or <c>{error, <anno>Reason</anno>}</c>.
- On platforms
- that do not support symbolic links, <c>{error, enotsup}</c>
+ <c><anno>Existing</anno></c> does not need to exist.
+ Returns <c>ok</c> if the link is
+ successfully created, otherwise <c>{error, <anno>Reason</anno>}</c>.
+ On platforms not supporting symbolic links, <c>{error, enotsup}</c>
is returned.</p>
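+ <p>For illustration (paths hypothetical), notice the argument order: the link
+ <c>New</c> is created and points to <c>Existing</c>:</p>
+<code type="none">
+ok = file:make_symlink("releases/app-1.2.3", "current").</code>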
<p>Typical error reasons:</p>
<taglist>
@@ -646,23 +703,23 @@
</func>
<func>
<name name="native_name_encoding" arity="0"/>
- <fsummary>Return the VM's configured filename encoding</fsummary>
+ <fsummary>Return the configured filename encoding of the VM.</fsummary>
<desc>
- <p><marker id="native_name_encoding"/>This function returns
- the file name encoding mode. If it is <c>latin1</c>, the
- system does no translation of file names. If it is
- <c>utf8</c>, file names will be converted back and forth to
- the native file name encoding (usually UTF-8, but UTF-16 on
+ <p><marker id="native_name_encoding"/>Returns
+ the filename encoding mode. If it is <c>latin1</c>, the
+ system translates no filenames. If it is
+ <c>utf8</c>, filenames are converted back and forth to
+ the native filename encoding (usually UTF-8, but UTF-16 on
Windows).</p>
</desc>
</func>
<func>
<name name="open" arity="2"/>
- <fsummary>Open a file</fsummary>
+ <fsummary>Open a file.</fsummary>
<desc>
- <p>Opens the file <c><anno>File</anno></c> in the mode determined
- by <c><anno>Modes</anno></c>, which may contain one or more of the
- following items:</p>
+ <p>Opens file <c><anno>File</anno></c> in the mode determined
+ by <c><anno>Modes</anno></c>, which can contain one or more of the
+ following options:</p>
<taglist>
<tag><c>read</c></tag>
<item>
@@ -671,98 +728,100 @@
<tag><c>write</c></tag>
<item>
<p>The file is opened for writing. It is created if it does
- not exist. If the file exists, and if <c>write</c> is not
- combined with <c>read</c>, the file will be truncated.</p>
+ not exist. If the file exists and <c>write</c> is not
+ combined with <c>read</c>, the file is truncated.</p>
</item>
<tag><c>append</c></tag>
<item>
- <p>The file will be opened for writing, and it will be
- created if it does not exist. Every write operation to a
- file opened with <c>append</c> will take place at
- the end of the file.</p>
+ <p>The file is opened for writing. It is created if it does
+ not exist. Every write operation to a file opened with
+ <c>append</c> takes place at the end of the file.</p>
</item>
<tag><c>exclusive</c></tag>
<item>
- <p>The file, when opened for writing, is created if it
- does not exist. If the file exists, open will return
- <c>{error, eexist}</c>.</p>
+ <p>The file is opened for writing. It is created if it does
+ not exist. If the file exists, <c>{error, eexist}</c>
+ is returned.</p>
<warning><p>This option does not guarantee exclusiveness on
- file systems that do not support O_EXCL properly,
+ file systems not supporting <c>O_EXCL</c> properly,
such as NFS. Do not depend on this option unless you
know that the file system supports it (in general, local
- file systems should be safe).</p></warning>
+ file systems are safe).</p></warning>
</item>
<tag><c>raw</c></tag>
<item>
<p><marker id="raw"/>
- The <c>raw</c> option allows faster access to a file,
- because no Erlang process is needed to handle the file.
+ Allows faster access to a file,
+ as no Erlang process is needed to handle the file.
However, a file opened in this way has the following
limitations:</p>
<list type="bulleted">
- <item>The functions in the <c>io</c> module cannot be used,
- because they can only talk to an Erlang process.
- Instead, use the <c>read/2</c>, <c>read_line/1</c> and
- <c>write/2</c>
- functions.</item>
- <item>Especially if <c>read_line/1</c> is to be used on a <c>raw</c> file, it is recommended to combine this option with the <c>{read_ahead, Size}</c> option as line oriented I/O is inefficient without buffering.</item>
- <item>Only the Erlang process which opened the file can use
- it.</item>
- <item>A remote Erlang file server cannot be used;
- the computer on which the Erlang node is running must
+ <item><p>The functions in the <c>io</c> module cannot be used,
+ as they can only talk to an Erlang process.
+ Instead, use functions
+ <seealso marker="#read/2"><c>read/2</c></seealso>,
+ <seealso marker="#read_line/1"><c>read_line/1</c></seealso>, and
+ <seealso marker="#write/2"><c>write/2</c></seealso>.</p></item>
+ <item><p>Especially if <c>read_line/1</c> is to be used on a <c>raw</c>
+ file, it is recommended to combine this option with option
+ <c>{read_ahead, Size}</c> as line-oriented I/O is inefficient
+ without buffering.</p></item>
+ <item><p>Only the Erlang process that opened the file can use
+ it.</p></item>
+ <item><p>A remote Erlang file server cannot be used.
+ The computer on which the Erlang node is running must
have access to the file system (directly or through
- NFS).</item>
+ NFS).</p></item>
</list>
</item>
<tag><c>binary</c></tag>
<item>
- <p>When this option has been given, read operations on the file
- will return binaries rather than lists.</p>
+ <p>Read operations on the file return binaries rather than lists.</p>
</item>
<tag><c>{delayed_write, Size, Delay}</c></tag>
<item>
- <p>If this option is used, the data in subsequent
- <c>write/2</c> calls is buffered until there are at least
- <c>Size</c> bytes buffered, or until the oldest buffered
+ <p>Data in subsequent
+ <c>write/2</c> calls is buffered until at least
+ <c>Size</c> bytes are buffered, or until the oldest buffered
data is <c>Delay</c> milliseconds old. Then all buffered
data is written in one operating system call.
The buffered data is also flushed before some other file
operation than <c>write/2</c> is executed.</p>
<p>The purpose of this option is to increase performance
- by reducing the number of operating system calls, so the
- <c>write/2</c> calls should be for sizes significantly
- less than <c>Size</c>, and not interspersed by to many
- other file operations, for this to happen.</p>
+ by reducing the number of operating system calls. Thus, the
+ <c>write/2</c> calls must be for sizes significantly
+ less than <c>Size</c>, and not interspersed by too many
+ other file operations.</p>
<p>When this option is used, the result of <c>write/2</c>
- calls may prematurely be reported as successful, and if
- a write error should actually occur the error is
- reported as the result of the next file operation, which
- is not executed.</p>
+ calls can prematurely be reported as successful, and if
+ a write error occurs, the error is reported as the result
+ of the next file operation, which is not executed.</p>
<p>For example, when <c>delayed_write</c> is used, after a
- number of <c>write/2</c> calls, <c>close/1</c> might
- return <c>{error, enospc}</c> because there was not enough
- space on the disc for previously written data, and
- <c>close/1</c> should probably be called again since the
+ number of <c>write/2</c> calls, <c>close/1</c> can
+ return <c>{error, enospc}</c>, as there is not enough
+ space on the disk for previously written data.
+ <c>close/1</c> probably must be called again, as the
file is still open.</p>
</item>
<tag><c>delayed_write</c></tag>
<item>
<p>The same as <c>{delayed_write, Size, Delay}</c> with
reasonable default values for <c>Size</c> and
- <c>Delay</c>. (Roughly some 64 KBytes, 2 seconds)</p>
+ <c>Delay</c> (roughly some 64 KB, 2 seconds).</p>
</item>
<tag><c>{read_ahead, Size}</c></tag>
<item>
- <p>This option activates read data buffering. If
+ <p>Activates read data buffering. If
<c>read/2</c> calls are for significantly less than
- <c>Size</c> bytes, read operations towards the operating
+ <c>Size</c> bytes, read operations to the operating
system are still performed for blocks of <c>Size</c>
bytes. The extra data is buffered and returned in
subsequent <c>read/2</c> calls, giving a performance gain
- since the number of operating system calls is reduced.</p>
- <p>The <c>read_ahead</c> buffer is also highly utilized
- by the <c>read_line/1</c> function in <c>raw</c> mode,
- why this option is recommended (for performance reasons)
+ as the number of operating system calls is reduced.</p>
+ <p>The <c>read_ahead</c> buffer is also heavily used
+ by function <c>read_line/1</c> in <c>raw</c> mode.
+ Therefore, this option is recommended (for performance reasons)
+ when accessing raw files using that function; see also the
+ example after this option list.</p>
<p>If <c>read/2</c> calls are for sizes not significantly
less than, or even greater than <c>Size</c> bytes, no
@@ -771,93 +830,141 @@
<tag><c>read_ahead</c></tag>
<item>
<p>The same as <c>{read_ahead, Size}</c> with a reasonable
- default value for <c>Size</c>. (Roughly some 64 KBytes)</p>
+ default value for <c>Size</c> (roughly some 64 KB).</p>
</item>
<tag><c>compressed</c></tag>
<item>
<p>Makes it possible to read or write gzip compressed
- files. The <c>compressed</c> option must be combined
- with either <c>read</c> or <c>write</c>, but not both.
- Note that the file size obtained with
- <c>read_file_info/1</c> will most probably not match the
- number of bytes that can be read from a compressed file.</p>
+ files. Option <c>compressed</c> must be combined
+ with <c>read</c> or <c>write</c>, but not both.
+ Notice that the file size obtained with
+ <seealso marker="#read_file_info/1"><c>read_file_info/1</c></seealso>
+ does probably not match the number of bytes that can be
+ read from a compressed file.</p>
</item>
<tag><c>{encoding, Encoding}</c></tag>
<item>
- <p>Makes the file perform automatic translation of characters to and from a specific (Unicode) encoding. Note that the data supplied to file:write or returned by file:read still is byte oriented, this option only denotes how data is actually stored in the disk file.</p>
- <p>Depending on the encoding, different methods of reading and writing data is preferred. The default encoding of <c>latin1</c> implies using this (the file) module for reading and writing data, as the interfaces provided here work with byte-oriented data, while using other (Unicode) encodings makes the <seealso marker="stdlib:io">io(3)</seealso> module's <c>get_chars</c>, <c>get_line</c> and <c>put_chars</c> functions more suitable, as they can work with the full Unicode range.</p>
- <p>If data is sent to an <c>io_device()</c> in a format that cannot be converted to the specified encoding, or if data is read by a function that returns data in a format that cannot cope with the character range of the data, an error occurs and the file will be closed.</p>
- <p>The allowed values for <c>Encoding</c> are:</p>
+ <p>Makes the file perform automatic translation of characters to
+ and from a specific (Unicode) encoding. Notice that the data supplied
+ to
+ <seealso marker="#write/2"><c>write/2</c></seealso>
+ or returned by
+ <seealso marker="#read/2"><c>read/2</c></seealso>
+ still is byte-oriented; this option
+ denotes only how data is stored in the disk file.</p>
+ <p>Depending on the encoding, different methods of reading and writing
+ data are preferred. The default encoding of <c>latin1</c> implies using
+ this module (<c>file</c>) for reading and writing data as the interfaces
+ provided here work with byte-oriented data. Using other (Unicode)
+ encodings makes the
+ <seealso marker="stdlib:io"><c>io(3)</c></seealso> functions
+ <c>get_chars</c>, <c>get_line</c>, and <c>put_chars</c> more suitable,
+ as they can work with the full Unicode range.</p>
+ <p>If data is sent to an <c>io_device()</c> in a format that cannot be
+ converted to the specified encoding, or if data is read by a function
+ that returns data in a format that cannot cope with the character range
+ of the data, an error occurs and the file is closed.</p>
+ <p>Allowed values for <c>Encoding</c>:</p>
<taglist>
<tag><c>latin1</c></tag>
<item>
- <p>The default encoding. Bytes supplied to i.e. file:write are written as is on the file, likewise bytes read from the file are returned to i.e. file:read as is. If the <seealso marker="stdlib:io">io(3)</seealso> module is used for writing, the file can only cope with Unicode characters up to codepoint 255 (the ISO-latin-1 range).</p>
+ <p>The default encoding. Bytes supplied to the file, that is, with
+ <seealso marker="#write/2"><c>write/2</c></seealso>,
+ are written "as is" to the file. Likewise, bytes read from the file,
+ that is, with
+ <seealso marker="#read/2"><c>read/2</c></seealso>, are
+ returned "as is". If module
+ <seealso marker="stdlib:io"><c>io(3)</c></seealso> is used for
+ writing, the file can only cope with Unicode characters up to code point
+ 255 (the ISO Latin-1 range).</p>
</item>
- <tag><c>unicode</c> or <c>utf8</c></tag>
+ <tag><c>unicode or utf8</c></tag>
<item>
- <p>Characters are translated to and from the UTF-8 encoding before being written to or read from the file. A file opened in this way might be readable using the file:read function, as long as no data stored on the file lies beyond the ISO-latin-1 range (0..255), but failure will occur if the data contains Unicode codepoints beyond that range. The file is best read with the functions in the Unicode aware <seealso marker="stdlib:io">io(3)</seealso> module.</p>
- <p>Bytes written to the file by any means are translated to UTF-8 encoding before actually being stored on the disk file.</p>
+ <p>Characters are translated to and from UTF-8 encoding before they are
+ written to or read from the file. A file opened in this way can be
+ read using function
+ <seealso marker="#read/2"><c>read/2</c></seealso>,
+ as long as no data stored on
+ the file lies beyond the ISO Latin-1 range (0..255), but failure occurs
+ if the data contains Unicode code points beyond that range. The file is
+ best read with the functions in the Unicode aware module
+ <seealso marker="stdlib:io"><c>io(3)</c></seealso>.</p>
+ <p>Bytes written to the file by any means are translated to UTF-8 encoding
+ before being stored on the disk file.</p>
</item>
- <tag><c>utf16</c> or <c>{utf16,big}</c></tag>
+ <tag><c>utf16 or {utf16,big}</c></tag>
<item>
- <p>Works like <c>unicode</c>, but translation is done to and from big endian UTF-16 instead of UTF-8.</p>
+ <p>Works like <c>unicode</c>, but translation is done to and from big
+ endian UTF-16 instead of UTF-8.</p>
</item>
<tag><c>{utf16,little}</c></tag>
<item>
- <p>Works like <c>unicode</c>, but translation is done to and from little endian UTF-16 instead of UTF-8.</p>
+ <p>Works like <c>unicode</c>, but translation is done to and from little
+ endian UTF-16 instead of UTF-8.</p>
</item>
- <tag><c>utf32</c> or <c>{utf32,big}</c></tag>
+ <tag><c>utf32 or {utf32,big}</c></tag>
<item>
- <p>Works like <c>unicode</c>, but translation is done to and from big endian UTF-32 instead of UTF-8.</p>
+ <p>Works like <c>unicode</c>, but translation is done to and from big
+ endian UTF-32 instead of UTF-8.</p>
</item>
<tag><c>{utf32,little}</c></tag>
<item>
- <p>Works like <c>unicode</c>, but translation is done to and from little endian UTF-32 instead of UTF-8.</p>
+ <p>Works like <c>unicode</c>, but translation is done to and from little
+ endian UTF-32 instead of UTF-8.</p>
</item>
</taglist>
- <p>The Encoding can be changed for a file "on the fly" by using the <seealso marker="stdlib:io#setopts/2">io:setopts/2</seealso> function, why a file can be analyzed in latin1 encoding for i.e. a BOM, positioned beyond the BOM and then be set for the right encoding before further reading.See the <seealso marker="stdlib:unicode">unicode(3)</seealso> module for functions identifying BOM's.</p>
+ <p>The encoding can be changed for a file "on the fly" by using function
+ <seealso marker="stdlib:io#setopts/2"><c>io:setopts/2</c></seealso>.
+ Thus a file can be analyzed in <c>latin1</c> encoding for, for example, a BOM,
+ positioned beyond the BOM, and then be set to the right encoding before
+ further reading. For functions identifying BOMs, see module
+ <seealso marker="stdlib:unicode"><c>unicode(3)</c></seealso>.</p>
<p>This option is not allowed on <c>raw</c> files.</p>
</item>
<tag><c>ram</c></tag>
<item>
- <p><c>File</c> must be <c>iodata()</c>. Returns an <c>fd()</c> which lets the <c>file</c> module operate on the data in-memory as if it is a file.</p>
+ <p><c>File</c> must be <c>iodata()</c>. Returns an <c>fd()</c>, which lets
+ module <c>file</c> operate on the data in memory as if it is a file.</p>
</item>
<tag><c>sync</c></tag>
<item>
- <p>On platforms that support it, enables the POSIX <c>O_SYNC</c> synchronous I/O flag or its platform-dependent
- equivalent (e.g., <c>FILE_FLAG_WRITE_THROUGH</c> on Windows) so that writes to the file block until the data has
- been physically written to disk. Be aware, though, that the exact semantics of this flag differ from platform to
- platform; for example, neither Linux nor Windows guarantees that all file metadata are also written before the call
- returns. For precise semantics, check the details of your platform's documentation. On platforms with no
- support for POSIX <c>O_SYNC</c> or equivalent, use of the <c>sync</c> flag causes <c>open</c> to return
- <c>{error, enotsup}</c>.</p>
+ <p>On platforms supporting it, enables the POSIX <c>O_SYNC</c> synchronous
+ I/O flag or its platform-dependent equivalent (for example,
+ <c>FILE_FLAG_WRITE_THROUGH</c> on Windows) so that writes to the file
+ block until the data is physically written to disk. However, be aware
+ that the exact semantics of this flag differ from platform to
+ platform. For example, neither Linux nor Windows guarantees that all file
+ metadata are also written before the call returns. For precise semantics,
+ check the details of your platform documentation. On platforms with no
+ support for POSIX <c>O_SYNC</c> or equivalent, use of the <c>sync</c>
+ flag causes <c>open</c> to return <c>{error, enotsup}</c>.</p>
</item>
</taglist>
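+ <p>For illustration, a minimal sketch (filenames hypothetical) combining some
+ of the options above: a raw binary file with read-ahead buffering for
+ line-oriented reading, and a UTF-8 encoded file written through the
+ <c>io</c> module:</p>
+<code type="none">
+%% Line-oriented reading from a raw file, with read-ahead buffering:
+{ok, Log} = file:open("access.log", [read, raw, binary, {read_ahead, 64*1024}]),
+{ok, FirstLine} = file:read_line(Log),
+ok = file:close(Log),
+%% Writing Unicode text through the io module to a UTF-8 encoded file:
+{ok, Out} = file:open("notes.txt", [write, {encoding, utf8}]),
+ok = io:put_chars(Out, [1090, 1077, 1089, 1090, $\n]),  %% code points above 255
+ok = file:close(Out).</code>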
<p>Returns:</p>
<taglist>
<tag><c>{ok, <anno>IoDevice</anno>}</c></tag>
<item>
- <p>The file has been opened in the requested mode.
+ <p>The file is opened in the requested mode.
<c><anno>IoDevice</anno></c> is a reference to the file.</p>
</item>
<tag><c>{error, <anno>Reason</anno>}</c></tag>
<item>
- <p>The file could not be opened.</p>
+ <p>The file cannot be opened.</p>
</item>
</taglist>
- <p><c><anno>IoDevice</anno></c> is really the pid of the process which
+ <p><c><anno>IoDevice</anno></c> is really the pid of the process that
handles the file. This process is linked to the process
- which originally opened the file. If any process to which
- the <c><anno>IoDevice</anno></c> is linked terminates, the file will
- be closed and the process itself will be terminated.
+ that originally opened the file. If any process to which
+ the <c><anno>IoDevice</anno></c> is linked terminates, the file is
+ closed and the process itself is terminated.
An <c><anno>IoDevice</anno></c> returned from this call can be used
- as an argument to the IO functions (see
- <seealso marker="stdlib:io">io(3)</seealso>).</p>
+ as an argument to the I/O functions (see
+ <seealso marker="stdlib:io"><c>io(3)</c></seealso>).</p>
<note>
- <p>In previous versions of <c>file</c>, modes were given
+ <p>In previous versions of <c>file</c>, modes were specified
as one of the atoms <c>read</c>, <c>write</c>, or
<c>read_write</c> instead of a list. This is still allowed
- for reasons of backwards compatibility, but should not be
+ for reasons of backwards compatibility, but is not to be
used for new code. Also note that <c>read_write</c> is not
allowed in a mode list.</p>
</note>
@@ -874,17 +981,16 @@
</item>
<tag><c>eisdir</c></tag>
<item>
- <p>The named file is not a regular file. It may be a
- directory, a fifo, or a device.</p>
+ <p>The named file is a directory.</p>
</item>
<tag><c>enotdir</c></tag>
<item>
- <p>A component of the file name is not a directory. On some
+ <p>A component of the filename is not a directory. On some
platforms, <c>enoent</c> is returned instead.</p>
</item>
<tag><c>enospc</c></tag>
<item>
- <p>There is a no space left on the device (if <c>write</c>
+ <p>There is no space left on the device (if <c>write</c>
access was specified).</p>
</item>
</taglist>
@@ -892,182 +998,186 @@
</func>
<func>
<name name="path_consult" arity="2"/>
- <fsummary>Read Erlang terms from a file</fsummary>
+ <fsummary>Read Erlang terms from a file.</fsummary>
<desc>
<p>Searches the path <c><anno>Path</anno></c> (a list of directory
names) until the file <c><anno>Filename</anno></c> is found.
If <c><anno>Filename</anno></c>
is an absolute filename, <c><anno>Path</anno></c> is ignored.
- Then reads Erlang terms, separated by '.', from the file.
- Returns one of the following:</p>
+ Then reads Erlang terms, separated by '.', from the file.</p>
+ <p>Returns one of the following:</p>
<taglist>
<tag><c>{ok, <anno>Terms</anno>, <anno>FullName</anno>}</c></tag>
<item>
- <p>The file was successfully read. <c><anno>FullName</anno></c> is
+ <p>The file is successfully read. <c><anno>FullName</anno></c> is
the full name of the file.</p>
</item>
<tag><c>{error, enoent}</c></tag>
<item>
- <p>The file could not be found in any of the directories in
+ <p>The file cannot be found in any of the directories in
<c><anno>Path</anno></c>.</p>
</item>
<tag><c>{error, atom()}</c></tag>
<item>
<p>An error occurred when opening the file or reading it.
- See <seealso marker="#open/2">open/2</seealso> for a list
- of typical error codes.</p>
+ For a list of typical error codes, see
+ <seealso marker="#open/2"><c>open/2</c></seealso>.</p>
</item>
<tag><c>{error, {<anno>Line</anno>, <anno>Mod</anno>,
<anno>Term</anno>}}</c></tag>
<item>
<p>An error occurred when interpreting the Erlang terms in
- the file. Use <c>format_error/1</c> to convert
- the three-element tuple to an English description of
+ the file. Use
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>
+ to convert the three-element tuple to an English description of
the error.</p>
</item>
</taglist>
- <p>The encoding of of <c><anno>Filename</anno></c> can be set
- by a comment as described in <seealso
- marker="stdlib:epp#encoding">epp(3)</seealso>.</p>
+ <p>The encoding of <c><anno>Filename</anno></c> can be set
+ by a comment as described in
+ <seealso marker="stdlib:epp#encoding"><c>epp(3)</c></seealso>.</p>
</desc>
</func>
<func>
<name name="path_eval" arity="2"/>
- <fsummary>Evaluate Erlang expressions in a file</fsummary>
+ <fsummary>Evaluate Erlang expressions in a file.</fsummary>
<desc>
<p>Searches the path <c><anno>Path</anno></c> (a list of directory
names) until the file <c><anno>Filename</anno></c> is found.
- If <c><anno>Filename</anno></c> is an absolute file name,
+ If <c><anno>Filename</anno></c> is an absolute filename,
<c><anno>Path</anno></c> is ignored. Then reads
and evaluates Erlang expressions, separated by '.' (or ',', a
sequence of expressions is also an expression), from the file.
- The actual result of evaluation is not returned; any
+ The result of evaluation is not returned; any
expression sequence in the file must be there for its side
- effect. Returns one of the following:</p>
+ effect.</p>
+ <p>Returns one of the following:</p>
<taglist>
<tag><c>{ok, <anno>FullName</anno>}</c></tag>
<item>
- <p>The file was read and evaluated. <c><anno>FullName</anno></c> is
+ <p>The file is read and evaluated. <c><anno>FullName</anno></c> is
the full name of the file.</p>
</item>
<tag><c>{error, enoent}</c></tag>
<item>
- <p>The file could not be found in any of the directories in
+ <p>The file cannot be found in any of the directories in
<c><anno>Path</anno></c>.</p>
</item>
<tag><c>{error, atom()}</c></tag>
<item>
<p>An error occurred when opening the file or reading it.
- See <seealso marker="#open/2">open/2</seealso> for a list
- of typical error codes.</p>
+ For a list of typical error codes, see
+ <seealso marker="#open/2"><c>open/2</c></seealso>.</p>
</item>
<tag><c>{error, {<anno>Line</anno>, <anno>Mod</anno>,
<anno>Term</anno>}}</c></tag>
<item>
<p>An error occurred when interpreting the Erlang
- expressions in the file. Use <c>format_error/1</c> to
- convert the three-element tuple to an English description
+ expressions in the file. Use
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>
+ to convert the three-element tuple to an English description
of the error.</p>
</item>
</taglist>
- <p>The encoding of of <c><anno>Filename</anno></c> can be set
- by a comment as described in <seealso
- marker="stdlib:epp#encoding">epp(3)</seealso>.</p>
+ <p>The encoding of <c><anno>Filename</anno></c> can be set
+ by a comment as described in
+ <seealso marker="stdlib:epp#encoding"><c>epp(3)</c></seealso>.</p>
</desc>
</func>
<func>
<name name="path_open" arity="3"/>
- <fsummary>Open a file</fsummary>
+ <fsummary>Open a file.</fsummary>
<desc>
<p>Searches the path <c><anno>Path</anno></c> (a list of directory
names) until the file <c><anno>Filename</anno></c> is found.
If <c><anno>Filename</anno></c>
- is an absolute file name, <c><anno>Path</anno></c> is ignored.
- Then opens the file in the mode determined by <c><anno>Modes</anno></c>.
- Returns one of the following:</p>
+ is an absolute filename, <c><anno>Path</anno></c> is ignored.
+ Then opens the file in the mode determined by <c><anno>Modes</anno></c>.</p>
+ <p>Returns one of the following:</p>
<taglist>
<tag><c>{ok, <anno>IoDevice</anno>, <anno>FullName</anno>}</c></tag>
<item>
- <p>The file has been opened in the requested mode.
+ <p>The file is opened in the requested mode.
<c><anno>IoDevice</anno></c> is a reference to the file and
<c><anno>FullName</anno></c> is the full name of the file.</p>
</item>
<tag><c>{error, enoent}</c></tag>
<item>
- <p>The file could not be found in any of the directories in
+ <p>The file cannot be found in any of the directories in
<c><anno>Path</anno></c>.</p>
</item>
<tag><c>{error, atom()}</c></tag>
<item>
- <p>The file could not be opened.</p>
+ <p>The file cannot be opened.</p>
</item>
</taglist>
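+ <p>A minimal sketch (paths hypothetical) of opening the first matching file
+ found along a search path:</p>
+<code type="none">
+{ok, Fd, FullName} = file:path_open(["/usr/local/etc/myapp", "/etc/myapp"],
+                                    "myapp.conf", [read]),
+ok = file:close(Fd).</code>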
</desc>
</func>
<func>
<name name="path_script" arity="2"/>
- <fsummary>Evaluate and return the value of Erlang expressions in a file</fsummary>
+ <fsummary>Evaluate and return the value of Erlang expressions in a file.</fsummary>
<desc>
<p>Searches the path <c><anno>Path</anno></c> (a list of directory
names) until the file <c><anno>Filename</anno></c> is found.
- If <c><anno>Filename</anno></c> is an absolute file name,
+ If <c><anno>Filename</anno></c> is an absolute filename,
<c><anno>Path</anno></c> is ignored. Then reads
and evaluates Erlang expressions, separated by '.' (or ',', a
- sequence of expressions is also an expression), from the file.
- Returns one of the following:</p>
+ sequence of expressions is also an expression), from the file.</p>
+ <p>Returns one of the following:</p>
<taglist>
<tag><c>{ok, <anno>Value</anno>, <anno>FullName</anno>}</c></tag>
<item>
- <p>The file was read and evaluated. <c><anno>FullName</anno></c> is
+ <p>The file is read and evaluated. <c><anno>FullName</anno></c> is
the full name of the file and <c><anno>Value</anno></c> the value of
the last expression.</p>
</item>
<tag><c>{error, enoent}</c></tag>
<item>
- <p>The file could not be found in any of the directories in
+ <p>The file cannot be found in any of the directories in
<c><anno>Path</anno></c>.</p>
</item>
<tag><c>{error, atom()}</c></tag>
<item>
<p>An error occurred when opening the file or reading it.
- See <seealso marker="#open/2">open/2</seealso> for a list
- of typical error codes.</p>
+ For a list of typical error codes, see
+ <seealso marker="#open/2"><c>open/2</c></seealso>.</p>
</item>
<tag><c>{error, {<anno>Line</anno>, <anno>Mod</anno>,
<anno>Term</anno>}}</c></tag>
<item>
<p>An error occurred when interpreting the Erlang
- expressions in the file. Use <c>format_error/1</c> to
- convert the three-element tuple to an English description
+ expressions in the file. Use
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>
+ to convert the three-element tuple to an English description
of the error.</p>
</item>
</taglist>
- <p>The encoding of of <c><anno>Filename</anno></c> can be set
- by a comment as described in <seealso
- marker="stdlib:epp#encoding">epp(3)</seealso>.</p>
+ <p>The encoding of <c><anno>Filename</anno></c> can be set
+ by a comment as described in
+ <seealso marker="stdlib:epp#encoding"><c>epp(3)</c></seealso>.</p>
</desc>
</func>
<func>
<name name="path_script" arity="3"/>
- <fsummary>Evaluate and return the value of Erlang expressions in a file</fsummary>
+ <fsummary>Evaluate and return the value of Erlang expressions in a file.</fsummary>
<desc>
<p>The same as <c>path_script/2</c> but the variable bindings
<c><anno>Bindings</anno></c> are used in the evaluation. See
- <seealso marker="stdlib:erl_eval">erl_eval(3)</seealso> about
+ <seealso marker="stdlib:erl_eval"><c>erl_eval(3)</c></seealso> about
variable bindings.</p>
</desc>
</func>
<func>
<name name="pid2name" arity="1"/>
- <fsummary>Return the name of the file handled by a pid</fsummary>
+ <fsummary>Return the name of the file handled by a pid.</fsummary>
<desc>
- <p>If <c><anno>Pid</anno></c> is an IO device, that is, a pid returned from
+ <p>If <c><anno>Pid</anno></c> is an I/O device, that is, a pid returned from
<c>open/2</c>, this function returns the filename, or rather:</p>
<taglist>
<tag><c>{ok, <anno>Filename</anno>}</c></tag>
<item>
- <p>If this node's file server is not a slave, the file was
- opened by this node's file server, (this implies that
+ <p>If the file server of this node is not a slave, the file was
+ opened by the file server of this node (this implies that
<c><anno>Pid</anno></c> must be a local pid) and the file is not
closed. <c><anno>Filename</anno></c> is the filename in flat string
format.</p>
@@ -1084,13 +1194,12 @@
</func>
<func>
<name name="position" arity="2"/>
- <fsummary>Set position in a file</fsummary>
+ <fsummary>Set position in a file.</fsummary>
<desc>
<p>Sets the position of the file referenced by <c><anno>IoDevice</anno></c>
- to <c><anno>Location</anno></c>. Returns
- <c>{ok, <anno>NewPosition</anno>}</c> (as
- absolute offset) if successful, otherwise
- <c>{error, <anno>Reason</anno>}</c>. <c><anno>Location</anno></c> is
+ to <c><anno>Location</anno></c>. Returns <c>{ok, <anno>NewPosition</anno>}</c>
+ (as absolute offset) if successful, otherwise
+ <c>{error, <anno>Reason</anno>}</c>. <c><anno>Location</anno></c> is
one of the following:</p>
<taglist>
<tag><c>Offset</c></tag>
@@ -1114,14 +1223,21 @@
<p>The same as above with <c>Offset</c> 0.</p>
</item>
</taglist>
- <p>Note that offsets are counted in bytes, not in characters. If the file is opened using some other <c>encoding</c> than <c>latin1</c>, one byte does not correspond to one character. Positioning in such a file can only be done to known character boundaries, i.e. to a position earlier retrieved by getting a current position, to the beginning/end of the file or to some other position <em>known</em> to be on a correct character boundary by some other means (typically beyond a byte order mark in the file, which has a known byte-size).</p>
- <p>Typical error reasons are:</p>
+ <p>Notice that offsets are counted in bytes, not in characters. If the file
+ is opened using some other <c>encoding</c> than <c>latin1</c>, one byte
+ does not correspond to one character. Positioning in such a file can only
+ be done to known character boundaries. That is, to a position earlier retrieved
+ by getting a current position, to the beginning/end of the file or to some
+ other position <em>known</em> to be on a correct character boundary by some
+ other means (typically beyond a byte order mark in the file, which has a
+ known byte-size).</p>
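+ <p>For illustration, a minimal sketch (filename hypothetical) of a few
+ positioning operations on a file opened in <c>raw</c> mode:</p>
+<code type="none">
+{ok, Fd}   = file:open("data.bin", [read, raw, binary]),
+{ok, Size} = file:position(Fd, eof),        %% absolute offset of end of file
+{ok, 0}    = file:position(Fd, bof),        %% back to the beginning
+{ok, 512}  = file:position(Fd, {cur, 512}), %% skip 512 bytes forward
+ok = file:close(Fd).</code>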
+ <p>A typical error reason is:</p>
<taglist>
<tag><c>einval</c></tag>
<item>
- <p>Either <c><anno>Location</anno></c> was illegal, or it
+ <p>Either <c><anno>Location</anno></c> is illegal, or it
+ evaluates to a
- negative offset in the file. Note that if the resulting
+ negative offset in the file. Notice that if the resulting
position is a negative value, the result is an error, and
after the call the file position is undefined.</p>
</item>
@@ -1130,7 +1246,7 @@
</func>
<func>
<name name="pread" arity="2"/>
- <fsummary>Read from a file at certain positions</fsummary>
+ <fsummary>Read from a file at certain positions.</fsummary>
<desc>
<p>Performs a sequence of <c>pread/3</c> in one operation,
which is more efficient than calling them one at a time.
@@ -1139,70 +1255,94 @@
where each <c><anno>Data</anno></c>, the result of the corresponding
<c>pread</c>, is either a list or a binary depending on
the mode of the file, or <c>eof</c> if the requested position
- was beyond end of file.</p>
- <p>As the position is given as a byte-offset, special caution has to be taken when working with files where <c>encoding</c> is set to something else than <c>latin1</c>, as not every byte position will be a valid character boundary on such a file.</p>
+ is beyond end of file.</p>
+ <p>As the position is specified as a byte-offset, take special caution
+ when working with files where <c>encoding</c> is set to something else
+ than <c>latin1</c>, as not every byte position is a valid character
+ boundary on such a file.</p>
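+ <p>A minimal sketch (filename and record layout hypothetical) of reading two
+ fixed-size records in one call from a <c>raw</c> file:</p>
+<code type="none">
+{ok, Fd} = file:open("records.dat", [read, raw, binary]),
+{ok, [Rec1, Rec3]} = file:pread(Fd, [{0, 32}, {64, 32}]),
+ok = file:close(Fd).</code>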
</desc>
</func>
<func>
<name name="pread" arity="3"/>
- <fsummary>Read from a file at a certain position</fsummary>
+ <fsummary>Read from a file at a certain position.</fsummary>
<desc>
<p>Combines <c>position/2</c> and <c>read/2</c> in one
operation, which is more efficient than calling them one at a
- time. If <c><anno>IoDevice</anno></c> has been opened in raw mode,
- some restrictions apply: <c><anno>Location</anno></c> is only allowed
- to be an
- integer; and the current position of the file is undefined
- after the operation.</p>
- <p>As the position is given as a byte-offset, special caution has to be taken when working with files where <c>encoding</c> is set to something else than <c>latin1</c>, as not every byte position will be a valid character boundary on such a file.</p>
+ time. If <c><anno>IoDevice</anno></c> is opened in <c>raw</c> mode,
+ some restrictions apply:</p>
+ <list type="bulleted">
+ <item><c><anno>Location</anno></c> is only allowed to be an
+ integer.</item>
+ <item>The current position of the file is undefined after the
+ operation.</item>
+ </list>
+ <p>As the position is specified as a byte-offset, take special caution
+ when working with files where <c>encoding</c> is set to something else
+ than <c>latin1</c>, as not every byte position is a valid character
+ boundary on such a file.</p>
</desc>
</func>
<func>
<name name="pwrite" arity="2"/>
- <fsummary>Write to a file at certain positions</fsummary>
+ <fsummary>Write to a file at certain positions.</fsummary>
<desc>
<p>Performs a sequence of <c>pwrite/3</c> in one operation,
which is more efficient than calling them one at a time.
Returns <c>ok</c> or <c>{error, {<anno>N</anno>,
<anno>Reason</anno>}}</c>, where
- <c><anno>N</anno></c> is the number of successful writes that was done
+ <c><anno>N</anno></c> is the number of successful writes done
before the failure.</p>
- <p>When positioning in a file with other <c>encoding</c> than <c>latin1</c>, caution must be taken to set the position on a correct character boundary, see <seealso marker="#position/2">position/2</seealso> for details.</p>
+ <p>When positioning in a file with other <c>encoding</c> than <c>latin1</c>,
+ caution must be taken to set the position on a correct character boundary.
+ For details, see <seealso marker="#position/2"><c>position/2</c></seealso>.</p>
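+        <p>For example, two regions of a file opened in <c>raw</c> binary
+          mode can be overwritten in a single call. A sketch; the offsets
+          and data are arbitrary:</p>
+        <code type="none"><![CDATA[
+write_two_regions(Fd) ->
+    %% Write 4 bytes at offset 0 and 4 bytes at offset 16 in one operation.
+    ok = file:pwrite(Fd, [{0, <<1,2,3,4>>}, {16, <<"abcd">>}]).]]></code>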
</desc>
</func>
<func>
<name name="pwrite" arity="3"/>
- <fsummary>Write to a file at a certain position</fsummary>
+ <fsummary>Write to a file at a certain position.</fsummary>
<desc>
<p>Combines <c>position/2</c> and <c>write/2</c> in one
operation, which is more efficient than calling them one at a
- time. If <c><anno>IoDevice</anno></c> has been opened in raw mode,
- some restrictions apply: <c><anno>Location</anno></c> is only allowed
- to be an
- integer; and the current position of the file is undefined
- after the operation.</p>
- <p>When positioning in a file with other <c>encoding</c> than <c>latin1</c>, caution must be taken to set the position on a correct character boundary, see <seealso marker="#position/2">position/2</seealso> for details.</p>
+          time. If <c><anno>IoDevice</anno></c> is opened in <c>raw</c> mode,
+ some restrictions apply:</p>
+ <list type="bulleted">
+ <item><c><anno>Location</anno></c> is only allowed to be an
+ integer.</item>
+ <item>The current position of the file is undefined after the
+ operation.</item>
+ </list>
+ <p>When positioning in a file with other <c>encoding</c> than <c>latin1</c>,
+ caution must be taken to set the position on a correct character boundary.
+ For details, see <seealso marker="#position/2"><c>position/2</c></seealso>.</p>
</desc>
</func>
<func>
<name name="read" arity="2"/>
- <fsummary>Read from a file</fsummary>
+ <fsummary>Read from a file.</fsummary>
<desc>
<p>Reads <c><anno>Number</anno></c> bytes/characters from the file
referenced by <c><anno>IoDevice</anno></c>. The functions
- <c>read/2</c>, <c>pread/3</c>
- and <c>read_line/1</c> are the only ways to read from a file
- opened in raw mode (although they work for normally opened
- files, too).</p>
- <p>For files where <c>encoding</c> is set to something else than <c>latin1</c>, one character might be represented by more than one byte on the file. The parameter <c>Number</c> always denotes the number of <em>characters</em> read from the file, while the position in the file might be moved much more than this number when reading a Unicode file.</p>
- <p>Also, if <c>encoding</c> is set to something else than <c>latin1</c>, the <c>read/3</c> call will fail if the data contains characters larger than 255, which is why the <seealso marker="stdlib:io">io(3)</seealso> module is to be preferred when reading such a file.</p>
+ <seealso marker="#read/2"><c>read/2</c></seealso>,
+ <seealso marker="#pread/3"><c>pread/3</c></seealso>, and
+ <seealso marker="#read_line/1"><c>read_line/1</c></seealso>
+ are the only ways to read from a file opened in <c>raw</c> mode
+ (although they work for normally opened files, too).</p>
+ <p>For files where <c>encoding</c> is set to something else than <c>latin1</c>,
+ one character can be represented by more than one byte on the file.
+ The parameter <c>Number</c> always denotes the number of <em>characters</em>
+ read from the file, while the position in the file can be moved much more than
+ this number when reading a Unicode file.</p>
+ <p>Also, if <c>encoding</c> is set to something else than <c>latin1</c>,
+        the <c>read/2</c> call fails if the data contains characters larger than 255,
+ which is why module <seealso marker="stdlib:io"><c>io(3)</c></seealso>
+ is to be preferred when reading such a file.</p>
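+        <p>For example, a file opened in <c>raw</c> binary mode can be
+          copied in fixed-size chunks. A sketch; the chunk size is
+          arbitrary:</p>
+        <code type="none"><![CDATA[
+copy_chunks(InFd, OutFd) ->
+    case file:read(InFd, 65536) of
+        {ok, Data} ->
+            ok = file:write(OutFd, Data),
+            copy_chunks(InFd, OutFd);
+        eof ->
+            ok
+    end.]]></code>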
<p>The function returns:</p>
<taglist>
<tag><c>{ok, <anno>Data</anno>}</c></tag>
<item>
<p>If the file was opened in binary mode, the read bytes are
returned in a binary, otherwise in a list. The list or
- binary will be shorter than the number of bytes requested
+ binary is shorter than the number of bytes requested
if end of file was reached.</p>
</item>
<tag><c>eof</c></tag>
@@ -1223,14 +1363,16 @@
</item>
<tag><c>{no_translation, unicode, latin1}</c></tag>
<item>
- <p>The file was opened with another <c>encoding</c> than <c>latin1</c> and the data in the file can not be translated to the byte-oriented data that this function returns.</p>
+ <p>The file is opened with another <c>encoding</c> than <c>latin1</c> and
+ the data in the file cannot be translated to the byte-oriented data that
+ this function returns.</p>
</item>
</taglist>
</desc>
</func>
<func>
<name name="read_file" arity="1"/>
- <fsummary>Read a file</fsummary>
+ <fsummary>Read a file.</fsummary>
<desc>
<p>Returns <c>{ok, <anno>Binary</anno>}</c>, where
<c><anno>Binary</anno></c> is a binary
@@ -1254,7 +1396,7 @@
</item>
<tag><c>enotdir</c></tag>
<item>
- <p>A component of the file name is not a directory. On some
+ <p>A component of the filename is not a directory. On some
platforms, <c>enoent</c> is returned instead.</p>
</item>
<tag><c>enomem</c></tag>
@@ -1267,7 +1409,7 @@
<func>
<name name="read_file_info" arity="1"/>
<name name="read_file_info" arity="2"/>
- <fsummary>Get information about a file</fsummary>
+ <fsummary>Retrieve information about a file.</fsummary>
<desc>
<p>Retrieves information about a file. Returns
<c>{ok, <anno>FileInfo</anno>}</c> if successful, otherwise
@@ -1277,24 +1419,32 @@
<c>file.hrl</c>. Include the following directive in the module
from which the function is called:</p>
<code type="none">
--include_lib("kernel/include/file.hrl").</code>
- <p>The time type returned in <c>atime</c>, <c>mtime</c> and <c>ctime</c>
- is dependent on the time type set in <c>Opts :: {time, Type}</c>.
- Type <c>local</c> will return local time, <c>universal</c> will
- return universal time and <c>posix</c> will return seconds since
- or before unix time epoch which is 1970-01-01 00:00 UTC.
- Default is <c>{time, local}</c>.
- </p>
- <p>If the <c>raw</c> option is set, the file server will not be called
- and only informations about local files will be returned.</p>
- <note>
- <p>
- Since file times is stored in posix time on most OS it is
- faster to query file information with the <c>posix</c> option.
- </p>
- </note>
+ -include_lib("kernel/include/file.hrl").</code>
+ <p>The time type returned in <c>atime</c>, <c>mtime</c>, and <c>ctime</c>
+ is dependent on the time type set in <c>Opts :: {time, Type}</c> as
+ follows:</p>
+ <taglist>
+ <tag><c>local</c></tag>
+ <item><p>Returns local time.</p></item>
+ <tag><c>universal</c></tag>
+ <item><p>Returns universal time.</p></item>
+ <tag><c>posix</c></tag>
+ <item><p>Returns seconds since or before Unix time epoch,
+ which is 1970-01-01 00:00 UTC.</p></item>
+ </taglist>
+ <p>Default is <c>{time, local}</c>.</p>
+ <p>If the option <c>raw</c> is set, the file server is not called and
+ only information about local files is returned. Note that this will
+ break this module's atomicity guarantees as it can race with a
+ concurrent call to
+        <seealso marker="#write_file_info/2"><c>write_file_info/1,2</c></seealso>.</p>
+ <note>
+ <p>As file times are stored in POSIX time on most OS, it is faster to
+ query file information with option <c>posix</c>.</p>
+ </note>
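+      <p>A minimal usage sketch; the function name is only an example:</p>
+      <code type="none"><![CDATA[
+-include_lib("kernel/include/file.hrl").
+
+print_size_and_mtime(Name) ->
+    case file:read_file_info(Name, [{time, posix}]) of
+        {ok, #file_info{size = Size, mtime = MTime}} ->
+            io:format("~ts: ~p bytes, mtime ~p~n", [Name, Size, MTime]);
+        {error, Reason} ->
+            {error, Reason}
+    end.]]></code>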
- <p>The record <c>file_info</c> contains the following fields.</p>
+ <p>The record <c>file_info</c> contains the following fields:</p>
<taglist>
<tag><c>size = integer() >= 0</c></tag>
<item>
@@ -1308,19 +1458,25 @@
<item>
<p>The current system access to the file.</p>
</item>
- <tag><c>atime = </c><seealso marker="#type-date_time">date_time()</seealso><c> | integer() >= 0</c></tag>
+ <tag><c>atime = </c>
+ <seealso marker="#type-date_time"><c>date_time()</c></seealso><c> |
+ integer() >= 0</c></tag>
<item>
<p>The last time the file was read.</p>
</item>
- <tag><c>mtime = </c><seealso marker="#type-date_time">date_time()</seealso><c> | integer() >= 0</c></tag>
+ <tag><c>mtime = </c>
+ <seealso marker="#type-date_time"><c>date_time()</c></seealso><c> |
+ integer() >= 0</c></tag>
<item>
<p>The last time the file was written.</p>
</item>
- <tag><c>ctime = </c><seealso marker="#type-date_time">date_time()</seealso><c> | integer() >=0</c></tag>
+ <tag><c>ctime = </c>
+ <seealso marker="#type-date_time"><c>date_time()</c></seealso><c> |
+ integer() >=0</c></tag>
<item>
<p>The interpretation of this time field depends on
the operating system. On Unix, it is the last time
- the file or the inode was changed. In Windows, it is
+ the file or the <c>inode</c> was changed. In Windows, it is
the create time.</p>
</item>
<tag><c>mode = integer() >= 0</c></tag>
@@ -1328,36 +1484,36 @@
<p>The file permissions as the sum of the following bit
values:</p>
<taglist>
- <tag>8#00400</tag>
- <item>read permission: owner</item>
- <tag>8#00200</tag>
- <item>write permission: owner</item>
- <tag>8#00100</tag>
- <item>execute permission: owner</item>
- <tag>8#00040</tag>
- <item>read permission: group</item>
- <tag>8#00020</tag>
- <item>write permission: group</item>
- <tag>8#00010</tag>
- <item>execute permission: group</item>
- <tag>8#00004</tag>
- <item>read permission: other</item>
- <tag>8#00002</tag>
- <item>write permission: other</item>
- <tag>8#00001</tag>
- <item>execute permission: other</item>
- <tag>16#800</tag>
- <item>set user id on execution</item>
- <tag>16#400</tag>
- <item>set group id on execution</item>
+ <tag><c>8#00400</c></tag>
+ <item><p>read permission: owner</p></item>
+ <tag><c>8#00200</c></tag>
+ <item><p>write permission: owner</p></item>
+ <tag><c>8#00100</c></tag>
+ <item><p>execute permission: owner</p></item>
+ <tag><c>8#00040</c></tag>
+ <item><p>read permission: group</p></item>
+ <tag><c>8#00020</c></tag>
+ <item><p>write permission: group</p></item>
+ <tag><c>8#00010</c></tag>
+ <item><p>execute permission: group</p></item>
+ <tag><c>8#00004</c></tag>
+ <item><p>read permission: other</p></item>
+ <tag><c>8#00002</c></tag>
+ <item><p>write permission: other</p></item>
+ <tag><c>8#00001</c></tag>
+ <item><p>execute permission: other</p></item>
+ <tag><c>16#800</c></tag>
+ <item><p>set user id on execution</p></item>
+ <tag><c>16#400</c></tag>
+ <item><p>set group id on execution</p></item>
</taglist>
<p>On Unix platforms, other bits than those listed above
- may be set.</p>
+ may be set.</p>
</item>
<tag><c>links = integer() >= 0</c></tag>
<item>
- <p>Number of links to the file (this will always be 1 for
- file systems which have no concept of links).</p>
+ <p>Number of links to the file (this is always 1 for
+ file systems that have no concept of links).</p>
</item>
<tag><c>major_device = integer() >= 0</c></tag>
<item>
@@ -1373,17 +1529,17 @@
<tag><c>inode = integer() >= 0</c></tag>
<item>
<p>Gives the <c>inode</c> number. On non-Unix file systems,
- this field will be zero.</p>
+ this field is zero.</p>
</item>
<tag><c>uid = integer() >= 0</c></tag>
<item>
- <p>Indicates the owner of the file. Will be zero for
- non-Unix file systems.</p>
+ <p>Indicates the owner of the file. On non-Unix file systems,
+ this field is zero.</p>
</item>
<tag><c>gid = integer() >= 0</c></tag>
<item>
<p>Gives the group that the owner of the file belongs to.
- Will be zero for non-Unix file systems.</p>
+ On non-Unix file systems, this field is zero.</p>
</item>
</taglist>
<p>Typical error reasons:</p>
@@ -1399,7 +1555,7 @@
</item>
<tag><c>enotdir</c></tag>
<item>
- <p>A component of the file name is not a directory. On some
+ <p>A component of the filename is not a directory. On some
platforms, <c>enoent</c> is returned instead.</p>
</item>
</taglist>
@@ -1407,18 +1563,34 @@
</func>
<func>
<name name="read_line" arity="1"/>
- <fsummary>Read a line from a file</fsummary>
+ <fsummary>Read a line from a file.</fsummary>
<desc>
<p>Reads a line of bytes/characters from the file referenced by
- <c><anno>IoDevice</anno></c>. Lines are defined to be delimited by the linefeed (LF, <c>\n</c>) character, but any carriage return (CR, <c>\r</c>) followed by a newline is also treated as a single LF character (the carriage return is silently ignored). The line is returned <em>including</em> the LF, but excluding any CR immediately followed by a LF. This behaviour is consistent with the behaviour of <seealso marker="stdlib:io#get_line/2">io:get_line/2</seealso>. If end of file is reached without any LF ending the last line, a line with no trailing LF is returned.</p>
- <p>The function can be used on files opened in <c>raw</c> mode. It is however inefficient to use it on <c>raw</c> files if the file is not opened with the option <c>{read_ahead, Size}</c> specified, why combining <c>raw</c> and <c>{read_ahead, Size}</c> is highly recommended when opening a text file for raw line oriented reading.</p>
- <p>If <c>encoding</c> is set to something else than <c>latin1</c>, the <c>read_line/1</c> call will fail if the data contains characters larger than 255, why the <seealso marker="stdlib:io">io(3)</seealso> module is to be preferred when reading such a file.</p>
+ <c><anno>IoDevice</anno></c>. Lines are defined to be delimited by the
+ linefeed (LF, <c>\n</c>) character, but any carriage return (CR, <c>\r</c>)
+ followed by a newline is also treated as a single LF character (the carriage
+ return is silently ignored). The line is returned <em>including</em> the LF,
+ but excluding any CR immediately followed by an LF. This behaviour is
+ consistent with the behaviour of
+ <seealso marker="stdlib:io#get_line/2"><c>io:get_line/2</c></seealso>.
+ If end of file is reached without any LF ending the last line, a line with no
+ trailing LF is returned.</p>
+ <p>The function can be used on files opened in <c>raw</c> mode. However, it is
+ inefficient to use it on <c>raw</c> files if the file is not opened with
+ option <c>{read_ahead, Size}</c> specified. Thus, combining <c>raw</c> and
+ <c>{read_ahead, Size}</c> is highly recommended when opening a text file for
+ raw line-oriented reading.</p>
+ <p>If <c>encoding</c> is set to something else than <c>latin1</c>, the
+ <c>read_line/1</c> call fails if the data contains characters larger than 255,
+        which is why module <seealso marker="stdlib:io"><c>io(3)</c></seealso> is to be
+ preferred when reading such a file.</p>
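+        <p>A sketch of the recommended <c>raw</c> line-oriented reading;
+          the <c>read_ahead</c> size is arbitrary:</p>
+        <code type="none"><![CDATA[
+read_lines(Name) ->
+    {ok, Fd} = file:open(Name, [read, raw, {read_ahead, 65536}]),
+    try
+        read_lines_1(Fd, [])
+    after
+        file:close(Fd)
+    end.
+
+read_lines_1(Fd, Acc) ->
+    case file:read_line(Fd) of
+        {ok, Line} -> read_lines_1(Fd, [Line | Acc]);
+        eof -> {ok, lists:reverse(Acc)};
+        {error, Reason} -> {error, Reason}
+    end.]]></code>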
<p>The function returns:</p>
<taglist>
<tag><c>{ok, <anno>Data</anno>}</c></tag>
<item>
- <p>One line from the file is returned, including the trailing LF, but with CRLF sequences replaced by a single LF (see above).</p>
- <p>If the file was opened in binary mode, the read bytes are
+ <p>One line from the file is returned, including the trailing LF,
+ but with CRLF sequences replaced by a single LF (see above).</p>
+ <p>If the file is opened in binary mode, the read bytes are
returned in a binary, otherwise in a list.</p>
</item>
<tag><c>eof</c></tag>
@@ -1439,22 +1611,24 @@
</item>
<tag><c>{no_translation, unicode, latin1}</c></tag>
<item>
- <p>The file is was opened with another <c>encoding</c> than <c>latin1</c> and the data on the file can not be translated to the byte-oriented data that this function returns.</p>
+ <p>The file is opened with another <c>encoding</c> than <c>latin1</c> and
+ the data on the file cannot be translated to the byte-oriented data that
+ this function returns.</p>
</item>
</taglist>
</desc>
</func>
<func>
<name name="read_link" arity="1"/>
- <fsummary>See what a link is pointing to</fsummary>
+ <fsummary>See what a link is pointing to.</fsummary>
<desc>
- <p><marker id="read_link_all"/>This function returns
+ <p><marker id="read_link_all"/>Returns
<c>{ok, <anno>Filename</anno>}</c> if
<c><anno>Name</anno></c> refers to a symbolic link that is
- not a "raw" file name, or <c>{error, <anno>Reason</anno>}</c>
+ not a raw filename, or <c>{error, <anno>Reason</anno>}</c>
otherwise.
On platforms that do not support symbolic links, the return
- value will be <c>{error,enotsup}</c>.</p>
+ value is <c>{error,enotsup}</c>.</p>
<p>Typical error reasons:</p>
<taglist>
<tag><c>einval</c></tag>
@@ -1476,14 +1650,14 @@
</func>
<func>
<name name="read_link_all" arity="1"/>
- <fsummary>See what a link is pointing to</fsummary>
+ <fsummary>See what a link is pointing to.</fsummary>
<desc>
- <p>This function returns <c>{ok, <anno>Filename</anno>}</c> if
+ <p>Returns <c>{ok, <anno>Filename</anno>}</c> if
<c><anno>Name</anno></c> refers to a symbolic link or
<c>{error, <anno>Reason</anno>}</c> otherwise.
On platforms that do not support symbolic links, the return
- value will be <c>{error,enotsup}</c>.</p>
- <p>Note that <c><anno>Filename</anno></c> can be either a list
+ value is <c>{error,enotsup}</c>.</p>
+ <p>Notice that <c><anno>Filename</anno></c> can be either a list
or a binary.</p>
<p>Typical error reasons:</p>
<taglist>
@@ -1505,31 +1679,34 @@
<func>
<name name="read_link_info" arity="1"/>
<name name="read_link_info" arity="2"/>
- <fsummary>Get information about a link or file</fsummary>
- <desc>
- <p>This function works like
- <seealso marker="#read_file_info/2">read_file_info/1,2</seealso> except that
- if <c><anno>Name</anno></c> is a symbolic link, information about
- the link will be returned in the <c>file_info</c> record and
- the <c>type</c> field of the record will be set to
- <c>symlink</c>.</p>
- <p>If the <c>raw</c> option is set, the file server will not be called
- and only informations about local files will be returned.</p>
+ <fsummary>Retrieve information about a link or file.</fsummary>
+ <desc>
+ <p>Works like
+ <seealso marker="#read_file_info/2"><c>read_file_info/1,2</c></seealso>
+ except that if <c><anno>Name</anno></c> is a symbolic link, information
+ about the link is returned in the <c>file_info</c> record and
+ the <c>type</c> field of the record is set to <c>symlink</c>.</p>
+ <p>If the option <c>raw</c> is set, the file server is not called and
+ only information about local files is returned. Note that this will
+ break this module's atomicity guarantees as it can race with a
+ concurrent call to
+        <seealso marker="#write_file_info/2"><c>write_file_info/1,2</c></seealso>.</p>
<p>If <c><anno>Name</anno></c> is not a symbolic link, this function returns
- exactly the same result as <c>read_file_info/1</c>.
+ the same result as <c>read_file_info/1</c>.
On platforms that do not support symbolic links, this function
is always equivalent to <c>read_file_info/1</c>.</p>
</desc>
</func>
<func>
<name name="rename" arity="2"/>
- <fsummary>Rename a file</fsummary>
+ <fsummary>Rename a file.</fsummary>
<desc>
<p>Tries to rename the file <c><anno>Source</anno></c> to
<c><anno>Destination</anno></c>.
It can be used to move files (and directories) between
directories, but it is not sufficient to specify
- the destination only. The destination file name must also be
+ the destination only. The destination filename must also be
specified. For example, if <c>bar</c> is a normal file and
<c>foo</c> and <c>baz</c> are directories,
<c>rename("foo/bar", "baz")</c> returns an error, but
@@ -1560,7 +1737,7 @@
<item>
<p><c><anno>Source</anno></c> is a root directory, or
<c><anno>Destination</anno></c>
- is a sub-directory of <c><anno>Source</anno></c>.</p>
+ is a subdirectory of <c><anno>Source</anno></c>.</p>
</item>
<tag><c>eisdir</c></tag>
<item>
@@ -1586,58 +1763,97 @@
</func>
<func>
<name name="script" arity="1"/>
- <fsummary>Evaluate and return the value of Erlang expressions in a file</fsummary>
+ <fsummary>Evaluate and return the value of Erlang expressions in a file.</fsummary>
<desc>
<p>Reads and evaluates Erlang expressions, separated by '.' (or
',', a sequence of expressions is also an expression), from
- the file. Returns one of the following:</p>
+ the file.</p>
+ <p>Returns one of the following:</p>
<taglist>
<tag><c>{ok, <anno>Value</anno>}</c></tag>
<item>
- <p>The file was read and evaluated. <c><anno>Value</anno></c> is
+ <p>The file is read and evaluated. <c><anno>Value</anno></c> is
the value of the last expression.</p>
</item>
<tag><c>{error, atom()}</c></tag>
<item>
<p>An error occurred when opening the file or reading it.
- See <seealso marker="#open/2">open/2</seealso> for a list
- of typical error codes.</p>
+ For a list of typical error codes, see
+ <seealso marker="#open/2"><c>open/2</c></seealso>.</p>
</item>
<tag><c>{error, {<anno>Line</anno>, <anno>Mod</anno>,
<anno>Term</anno>}}</c></tag>
<item>
<p>An error occurred when interpreting the Erlang
- expressions in the file. Use <c>format_error/1</c> to
- convert the three-element tuple to an English description
+ expressions in the file. Use
+ <seealso marker="#format_error/1"><c>format_error/1</c></seealso>
+ to convert the three-element tuple to an English description
of the error.</p>
</item>
</taglist>
- <p>The encoding of of <c><anno>Filename</anno></c> can be set
- by a comment as described in <seealso
- marker="stdlib:epp#encoding">epp(3)</seealso>.</p>
+ <p>The encoding of <c><anno>Filename</anno></c> can be set
+ by a comment as described in
+ <seealso marker="stdlib:epp#encoding"><c>epp(3)</c></seealso>.</p>
</desc>
</func>
<func>
<name name="script" arity="2"/>
- <fsummary>Evaluate and return the value of Erlang expressions in a file</fsummary>
+ <fsummary>Evaluate and return the value of Erlang expressions in a file.</fsummary>
<desc>
<p>The same as <c>script/1</c> but the variable bindings
<c><anno>Bindings</anno></c> are used in the evaluation. See
- <seealso marker="stdlib:erl_eval">erl_eval(3)</seealso> about
+ <seealso marker="stdlib:erl_eval"><c>erl_eval(3)</c></seealso> about
variable bindings.</p>
</desc>
</func>
<func>
+ <name name="sendfile" arity="2"/>
+ <fsummary>Send a file to a socket.</fsummary>
+ <desc>
+ <p>Sends the file <c>Filename</c> to <c>Socket</c>.
+ Returns <c>{ok, BytesSent}</c> if successful,
+ otherwise <c>{error, Reason}</c>.</p>
+ </desc>
+ </func>
+ <func>
+ <name name="sendfile" arity="5"/>
+ <fsummary>Send a file to a socket.</fsummary>
+ <type name="sendfile_option"/>
+ <desc>
+ <p>Sends <c>Bytes</c> from the file
+ referenced by <c>RawFile</c> beginning at <c>Offset</c> to
+ <c>Socket</c>.
+ Returns <c>{ok, BytesSent}</c> if successful,
+ otherwise <c>{error, Reason}</c>. If <c>Bytes</c> is set to
+        <c>0</c>, all data after the specified <c>Offset</c> is sent.</p>
+ <p>The file used must be opened using the <c>raw</c> flag, and the process
+ calling <c>sendfile</c> must be the controlling process of the socket.
+ See <seealso marker="gen_tcp#controlling_process-2"><c>gen_tcp:controlling_process/2</c></seealso>.</p>
+ <p>If the OS used does not support non-blocking <c>sendfile</c>, an
+ Erlang fallback using <seealso marker="#read/2"><c>read/2</c></seealso>
+ and <seealso marker="gen_tcp#send/2"><c>gen_tcp:send/2</c></seealso> is
+ used.</p>
+ <p>The option list can contain the following options:</p>
+ <taglist>
+ <tag><c>chunk_size</c></tag>
+ <item><p>The chunk size used by the Erlang fallback to send
+ data. If using the fallback, set this to a value
+          that comfortably fits in the system's memory. Default is 20 MB.</p></item>
+ </taglist>
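+        <p>A sketch of sending the first kilobyte of a file over an already
+          connected TCP socket; <c>Sock</c> is hypothetical and the calling
+          process is assumed to control it:</p>
+        <code type="none"><![CDATA[
+send_head(Sock, Name) ->
+    {ok, Fd} = file:open(Name, [read, raw, binary]),
+    try
+        file:sendfile(Fd, Sock, 0, 1024, [])
+    after
+        file:close(Fd)
+    end.]]></code>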
+ </desc>
+ </func>
+ <func>
<name name="set_cwd" arity="1"/>
- <fsummary>Set the current working directory</fsummary>
+ <fsummary>Set the current working directory.</fsummary>
<desc>
<p>Sets the current working directory of the file server to
<c><anno>Dir</anno></c>. Returns <c>ok</c> if successful.</p>
- <p>The functions in the <c>file</c> module usually treat binaries
- as raw filenames, i.e. they are passed as is even when the encoding
- of the binary does not agree with <c>file:native_name_encoding()</c>.
- This function however expects binaries to be encoded according to the
- value returned by <c>file:native_name_encoding()</c>.</p>
+ <p>The functions in the module <c>file</c> usually treat binaries
+ as raw filenames, that is, they are passed "as is" even when the
+ encoding of the binary does not agree with
+ <seealso marker="#native_name_encoding/0"><c>native_name_encoding()</c></seealso>.
+ However, this function expects binaries to be encoded according to the
+ value returned by <c>native_name_encoding()</c>.</p>
<p>Typical error reasons are:</p>
<taglist>
<tag><c>enoent</c></tag>
@@ -1656,31 +1872,31 @@
</item>
<tag><c>badarg</c></tag>
<item>
- <p><c><anno>Dir</anno></c> had an improper type,
+ <p><c><anno>Dir</anno></c> has an improper type,
such as tuple.</p>
</item>
<tag><c>no_translation</c></tag>
<item>
<p><c><anno>Dir</anno></c> is a <c>binary()</c> with
characters coded in ISO-latin-1 and the VM is operating
- with unicode file name encoding.</p>
+ with unicode filename encoding.</p>
</item>
</taglist>
<warning>
- <p>In a future release, a bad type for the
+ <p>In a future release, a bad type for argument
<c><anno>Dir</anno></c>
- argument will probably generate an exception.</p>
+ will probably generate an exception.</p>
</warning>
</desc>
</func>
<func>
<name name="sync" arity="1"/>
- <fsummary>Synchronizes the in-memory state of a file with that on the physical medium</fsummary>
+ <fsummary>Synchronize the in-memory state of a file with that on the physical medium.</fsummary>
<desc>
- <p>Makes sure that any buffers kept by the operating system
+ <p>Ensures that any buffers kept by the operating system
(not by the Erlang runtime system) are written to disk. On
some platforms, this function might have no effect.</p>
- <p>Typical error reasons are:</p>
+ <p>A typical error reason is:</p>
<taglist>
<tag><c>enospc</c></tag>
<item>
@@ -1690,90 +1906,28 @@
</desc>
</func>
<func>
- <name name="datasync" arity="1"/>
- <fsummary>Synchronizes the in-memory data of a file, ignoring most of its metadata, with that on the physical medium</fsummary>
- <desc>
- <p>Makes sure that any buffers kept by the operating system
- (not by the Erlang runtime system) are written to disk. In
- many ways it resembles fsync but it does not update
- some of the file's metadata such as the access time. On
- some platforms this function has no effect.</p>
- <p>Applications that access databases or log files often write
- a tiny data fragment (e.g., one line in a log file) and then
- call fsync() immediately in order to ensure that the written
- data is physically stored on the harddisk. Unfortunately, fsync()
- will always initiate two write operations: one for the newly
- written data and another one in order to update the modification
- time stored in the inode. If the modification time is not a part
- of the transaction concept, fdatasync() can be used to avoid
- unnecessary inode disk write operations.</p>
- <p>Available only in some POSIX systems, this call results in a
- call to fsync(), or has no effect in systems not implementing
- the fdatasync() syscall.</p>
- </desc>
- </func>
- <func>
<name name="truncate" arity="1"/>
- <fsummary>Truncate a file</fsummary>
+ <fsummary>Truncate a file.</fsummary>
<desc>
<p>Truncates the file referenced by <c><anno>IoDevice</anno></c> at
- the current position. Returns <c>ok</c> if successful,
+ the current position. Returns <c>ok</c> if successful,
otherwise <c>{error, <anno>Reason</anno>}</c>.</p>
</desc>
</func>
<func>
- <name name="sendfile" arity="2"/>
- <fsummary>send a file to a socket</fsummary>
- <desc>
- <p>Sends the file <c>Filename</c> to <c>Socket</c>.
- Returns <c>{ok, BytesSent}</c> if successful,
- otherwise <c>{error, Reason}</c>.</p>
- </desc>
- </func>
- <func>
- <name name="sendfile" arity="5"/>
- <fsummary>send a file to a socket</fsummary>
- <type name="sendfile_option"/>
- <desc>
- <p>Sends <c>Bytes</c> from the file
- referenced by <c>RawFile</c> beginning at <c>Offset</c> to
- <c>Socket</c>.
- Returns <c>{ok, BytesSent}</c> if successful,
- otherwise <c>{error, Reason}</c>. If <c>Bytes</c> is set to
- 0 all data after the given <c>Offset</c> is sent.</p>
- <p>The file used must be opened using the raw flag, and the process
- calling sendfile must be the controlling process of the socket.
- See <seealso marker="gen_tcp#controlling_process-2">gen_tcp:controlling_process/2</seealso></p>
- <p>If the OS used does not support sendfile, an Erlang fallback
- using file:read and gen_tcp:send is used.</p>
- <p>The option list can contain the following options:</p>
- <taglist>
- <tag><c>chunk_size</c></tag>
- <item>The chunk size used by the erlang fallback to send
- data. If using the fallback, this should be set to a value
- which comfortably fits in the systems memory. Default is 20 MB.</item>
- <tag><c>use_threads</c></tag>
- <item>Instruct the emulator to use the async thread pool for the
- sendfile system call. This could be usefull if the OS you are running
- on does not properly support non-blocking sendfile calls. Do note that
- using async threads potentially makes your system volnerable to slow
- client attacks. If set to true and no async threads are available,
- the sendfile call will return <c>{error,einval}</c>.
- Introduced in Erlang/OTP 17.0. Default is false.</item>
- </taglist>
- </desc>
- </func>
- <func>
<name name="write" arity="2"/>
- <fsummary>Write to a file</fsummary>
+ <fsummary>Write to a file.</fsummary>
<desc>
<p>Writes <c><anno>Bytes</anno></c> to the file referenced by
<c><anno>IoDevice</anno></c>. This function is the only way to write to a
- file opened in raw mode (although it works for normally
- opened files, too). Returns <c>ok</c> if successful, and
+ file opened in <c>raw</c> mode (although it works for normally opened
+ files too). Returns <c>ok</c> if successful, and
<c>{error, <anno>Reason</anno>}</c> otherwise.</p>
- <p>If the file is opened with <c>encoding</c> set to something else than <c>latin1</c>, each byte written might result in several bytes actually being written to the file, as the byte range 0..255 might represent anything between one and four bytes depending on value and UTF encoding type.</p>
- <p>Typical error reasons are:</p>
+ <p>If the file is opened with <c>encoding</c> set to something else than
+ <c>latin1</c>, each byte written can result in many bytes being written to
+ the file, as the byte range 0..255 can represent anything between one and
+ four bytes depending on value and UTF encoding type.</p>
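+          <p>For example, a byte written to a file opened with
+            <c>{encoding, utf8}</c> can occupy two bytes on disk. A sketch;
+            the filename is arbitrary:</p>
+          <code type="none"><![CDATA[
+write_example() ->
+    {ok, Fd} = file:open("out.txt", [write, {encoding, utf8}]),
+    %% Byte 255 is stored as the two-byte UTF-8 sequence 16#C3 16#BF.
+    ok = file:write(Fd, [255]),
+    file:close(Fd).]]></code>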
+ <p>Typical error reasons:</p>
<taglist>
<tag><c>ebadf</c></tag>
<item>
@@ -1781,34 +1935,35 @@
</item>
<tag><c>enospc</c></tag>
<item>
- <p>There is a no space left on the device.</p>
+ <p>No space is left on the device.</p>
</item>
</taglist>
</desc>
</func>
<func>
<name name="write_file" arity="2"/>
- <fsummary>Write a file</fsummary>
+ <fsummary>Write a file.</fsummary>
<desc>
- <p>Writes the contents of the iodata term <c><anno>Bytes</anno></c>
- to the file <c><anno>Filename</anno></c>.
- The file is created if it does not
- exist. If it exists, the previous contents are
- overwritten. Returns <c>ok</c>, or <c>{error, <anno>Reason</anno>}</c>.</p>
- <p>Typical error reasons are:</p>
+ <p>Writes the contents of the <c>iodata</c> term <c><anno>Bytes</anno></c>
+ to file <c><anno>Filename</anno></c>.
+ The file is created if it does not exist.
+ If it exists, the previous contents are overwritten.
+ Returns <c>ok</c> if successful, otherwise
+ <c>{error, <anno>Reason</anno>}</c>.</p>
+ <p>Typical error reasons:</p>
<taglist>
<tag><c>enoent</c></tag>
<item>
- <p>A component of the file name does not exist.</p>
+ <p>A component of the filename does not exist.</p>
</item>
<tag><c>enotdir</c></tag>
<item>
- <p>A component of the file name is not a directory. On some
+ <p>A component of the filename is not a directory. On some
platforms, <c>enoent</c> is returned instead.</p>
</item>
<tag><c>enospc</c></tag>
<item>
- <p>There is a no space left on the device.</p>
+ <p>No space is left on the device.</p>
</item>
<tag><c>eacces</c></tag>
<item>
@@ -1824,51 +1979,64 @@
</func>
<func>
<name name="write_file" arity="3"/>
- <fsummary>Write a file</fsummary>
+ <fsummary>Write a file.</fsummary>
<desc>
<p>Same as <c>write_file/2</c>, but takes a third argument
<c><anno>Modes</anno></c>, a list of possible modes, see
- <seealso marker="#open/2">open/2</seealso>. The mode flags
- <c>binary</c> and <c>write</c> are implicit, so they should
- not be used.</p>
+ <seealso marker="#open/2"><c>open/2</c></seealso>. The mode flags
+ <c>binary</c> and <c>write</c> are implicit, so they are
+ not to be used.</p>
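+          <p>For example, appending a line to a log file. A sketch; the
+            filename is arbitrary:</p>
+          <code type="none"><![CDATA[
+ok = file:write_file("my.log", <<"one more line\n">>, [append]).]]></code>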
</desc>
</func>
<func>
<name name="write_file_info" arity="2"/>
<name name="write_file_info" arity="3"/>
- <fsummary>Change information about a file</fsummary>
+ <fsummary>Change file information.</fsummary>
<desc>
- <p>Change file information. Returns <c>ok</c> if successful,
+ <p>Changes file information. Returns <c>ok</c> if successful,
otherwise <c>{error, <anno>Reason</anno>}</c>.
<c><anno>FileInfo</anno></c> is a record
<c>file_info</c>, defined in the Kernel include file
<c>file.hrl</c>. Include the following directive in the module
from which the function is called:</p>
<code type="none">
--include_lib("kernel/include/file.hrl").</code>
- <p>The time type set in <c>atime</c>, <c>mtime</c> and <c>ctime</c>
- is dependent on the time type set in <c>Opts :: {time, Type}</c>.
- Type <c>local</c> will interpret the time set as local, <c>universal</c> will
- interpret it as universal time and <c>posix</c> must be seconds since
- or before unix time epoch which is 1970-01-01 00:00 UTC.
- Default is <c>{time, local}</c>.</p>
- <p>If the <c>raw</c> option is set, the file server will not be called
- and only informations about local files will be returned.</p>
+ -include_lib("kernel/include/file.hrl").</code>
+ <p>The time type set in <c>atime</c>, <c>mtime</c>, and <c>ctime</c>
+ depends on the time type set in <c>Opts :: {time, Type}</c> as
+ follows:</p>
+ <taglist>
+ <tag><c>local</c></tag>
+ <item><p>Interprets the time set as local.</p></item>
+ <tag><c>universal</c></tag>
+ <item><p>Interprets it as universal time.</p></item>
+ <tag><c>posix</c></tag>
+ <item><p>Must be seconds since or before Unix time epoch,
+ which is 1970-01-01 00:00 UTC.</p></item>
+ </taglist>
+ <p>Default is <c>{time, local}</c>.</p>
+ <p>If the option <c>raw</c> is set, the file server is not called
+ and only information about local files is returned.</p>
<p>The following fields are used from the record, if they are
- given.</p>
+ specified:</p>
<taglist>
- <tag><c>atime = </c><seealso marker="#type-date_time">date_time()</seealso><c> | integer() >= 0</c></tag>
+ <tag><c>atime = </c>
+ <seealso marker="#type-date_time"><c>date_time()</c></seealso><c> |
+ integer() >= 0</c></tag>
<item>
<p>The last time the file was read.</p>
</item>
- <tag><c>mtime = </c><seealso marker="#type-date_time">date_time()</seealso><c> | integer() >= 0</c></tag>
+ <tag><c>mtime = </c>
+ <seealso marker="#type-date_time"><c>date_time()</c></seealso><c> |
+ integer() >= 0</c></tag>
<item>
<p>The last time the file was written.</p>
</item>
- <tag><c>ctime = </c><seealso marker="#type-date_time">date_time()</seealso><c> | integer() >= 0</c></tag>
+ <tag><c>ctime = </c>
+ <seealso marker="#type-date_time"><c>date_time()</c></seealso><c> |
+ integer() >= 0</c></tag>
<item>
- <p>On Unix, any value give for this field will be ignored
- (the "ctime" for the file will be set to the current
+ <p>On Unix, any value specified for this field is ignored
+ (the "ctime" for the file is set to the current
time). On Windows, this field is the new creation time to
set for the file.</p>
</item>
@@ -1877,40 +2045,40 @@
<p>The file permissions as the sum of the following bit
values:</p>
<taglist>
- <tag>8#00400</tag>
- <item>read permission: owner</item>
- <tag>8#00200</tag>
- <item>write permission: owner</item>
- <tag>8#00100</tag>
- <item>execute permission: owner</item>
- <tag>8#00040</tag>
- <item>read permission: group</item>
- <tag>8#00020</tag>
- <item>write permission: group</item>
- <tag>8#00010</tag>
- <item>execute permission: group</item>
- <tag>8#00004</tag>
- <item>read permission: other</item>
- <tag>8#00002</tag>
- <item>write permission: other</item>
- <tag>8#00001</tag>
- <item>execute permission: other</item>
- <tag>16#800</tag>
- <item>set user id on execution</item>
- <tag>16#400</tag>
- <item>set group id on execution</item>
+ <tag><c>8#00400</c></tag>
+ <item><p>Read permission: owner</p></item>
+ <tag><c>8#00200</c></tag>
+ <item><p>Write permission: owner</p></item>
+ <tag><c>8#00100</c></tag>
+ <item><p>Execute permission: owner</p></item>
+ <tag><c>8#00040</c></tag>
+ <item><p>Read permission: group</p></item>
+ <tag><c>8#00020</c></tag>
+ <item><p>Write permission: group</p></item>
+ <tag><c>8#00010</c></tag>
+ <item><p>Execute permission: group</p></item>
+ <tag><c>8#00004</c></tag>
+ <item><p>Read permission: other</p></item>
+ <tag><c>8#00002</c></tag>
+ <item><p>Write permission: other</p></item>
+ <tag><c>8#00001</c></tag>
+ <item><p>Execute permission: other</p></item>
+ <tag><c>16#800</c></tag>
+ <item><p>Set user id on execution</p></item>
+ <tag><c>16#400</c></tag>
+ <item><p>Set group id on execution</p></item>
</taglist>
<p>On Unix platforms, other bits than those listed above
- may be set.</p>
+ may be set.</p>
</item>
<tag><c>uid = integer() >= 0</c></tag>
<item>
- <p>Indicates the owner of the file. Ignored for non-Unix
+ <p>Indicates the file owner. Ignored for non-Unix
file systems.</p>
</item>
<tag><c>gid = integer() >= 0</c></tag>
<item>
- <p>Gives the group that the owner of the file belongs to.
+ <p>Gives the group that the file owner belongs to.
Ignored for non-Unix file systems.</p>
</item>
</taglist>
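+        <p>For example, making a file read-only for everyone. A sketch;
+          it assumes <c>file.hrl</c> is included as shown above:</p>
+        <code type="none"><![CDATA[
+make_read_only(Name) ->
+    {ok, Info} = file:read_file_info(Name),
+    file:write_file_info(Name, Info#file_info{mode = 8#00444}).]]></code>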
@@ -1927,7 +2095,7 @@
</item>
<tag><c>enotdir</c></tag>
<item>
- <p>A component of the file name is not a directory. On some
+ <p>A component of the filename is not a directory. On some
platforms, <c>enoent</c> is returned instead.</p>
</item>
</taglist>
@@ -1938,178 +2106,119 @@
<section>
<title>POSIX Error Codes</title>
<list type="bulleted">
- <item><c>eacces</c> - permission denied</item>
- <item><c>eagain</c> - resource temporarily unavailable</item>
- <item><c>ebadf</c> - bad file number</item>
- <item><c>ebusy</c> - file busy</item>
- <item><c>edquot</c> - disk quota exceeded</item>
- <item><c>eexist</c> - file already exists</item>
- <item><c>efault</c> - bad address in system call argument</item>
- <item><c>efbig</c> - file too large</item>
- <item><c>eintr</c> - interrupted system call</item>
- <item><c>einval</c> - invalid argument</item>
- <item><c>eio</c> - IO error</item>
- <item><c>eisdir</c> - illegal operation on a directory</item>
- <item><c>eloop</c> - too many levels of symbolic links</item>
- <item><c>emfile</c> - too many open files</item>
- <item><c>emlink</c> - too many links</item>
- <item><c>enametoolong</c> - file name too long</item>
- <item><c>enfile</c> - file table overflow</item>
- <item><c>enodev</c> - no such device</item>
- <item><c>enoent</c> - no such file or directory</item>
- <item><c>enomem</c> - not enough memory</item>
- <item><c>enospc</c> - no space left on device</item>
- <item><c>enotblk</c> - block device required</item>
- <item><c>enotdir</c> - not a directory</item>
- <item><c>enotsup</c> - operation not supported</item>
- <item><c>enxio</c> - no such device or address</item>
- <item><c>eperm</c> - not owner</item>
- <item><c>epipe</c> - broken pipe</item>
- <item><c>erofs</c> - read-only file system</item>
- <item><c>espipe</c> - invalid seek</item>
- <item><c>esrch</c> - no such process</item>
- <item><c>estale</c> - stale remote file handle</item>
- <item><c>exdev</c> - cross-domain link</item>
+ <item><c>eacces</c> - Permission denied</item>
+ <item><c>eagain</c> - Resource temporarily unavailable</item>
+ <item><c>ebadf</c> - Bad file number</item>
+ <item><c>ebusy</c> - File busy</item>
+ <item><c>edquot</c> - Disk quota exceeded</item>
+ <item><c>eexist</c> - File already exists</item>
+ <item><c>efault</c> - Bad address in system call argument</item>
+ <item><c>efbig</c> - File too large</item>
+ <item><c>eintr</c> - Interrupted system call</item>
+ <item><c>einval</c> - Invalid argument</item>
+ <item><c>eio</c> - I/O error</item>
+ <item><c>eisdir</c> - Illegal operation on a directory</item>
+ <item><c>eloop</c> - Too many levels of symbolic links</item>
+ <item><c>emfile</c> - Too many open files</item>
+ <item><c>emlink</c> - Too many links</item>
+ <item><c>enametoolong</c> - Filename too long</item>
+ <item><c>enfile</c> - File table overflow</item>
+ <item><c>enodev</c> - No such device</item>
+ <item><c>enoent</c> - No such file or directory</item>
+ <item><c>enomem</c> - Not enough memory</item>
+ <item><c>enospc</c> - No space left on device</item>
+ <item><c>enotblk</c> - Block device required</item>
+ <item><c>enotdir</c> - Not a directory</item>
+ <item><c>enotsup</c> - Operation not supported</item>
+ <item><c>enxio</c> - No such device or address</item>
+ <item><c>eperm</c> - Not owner</item>
+ <item><c>epipe</c> - Broken pipe</item>
+ <item><c>erofs</c> - Read-only file system</item>
+ <item><c>espipe</c> - Invalid seek</item>
+ <item><c>esrch</c> - No such process</item>
+ <item><c>estale</c> - Stale remote file handle</item>
+ <item><c>exdev</c> - Cross-domain link</item>
</list>
</section>
<section>
<title>Performance</title>
- <p>Some operating system file operations, for example a
- <c>sync/1</c> or <c>close/1</c> on a huge file, may block their
- calling thread for seconds. If this befalls the emulator main
- thread, the response time is no longer in the order of
- milliseconds, depending on the definition of "soft" in soft
- real-time system.</p>
- <p>If the device driver thread pool is active, file operations are
- done through those threads instead, so the emulator can go on
- executing Erlang processes. Unfortunately, the time for serving a
- file operation increases due to the extra scheduling required
- from the operating system.</p>
- <p>If the device driver thread pool is disabled or of size 0, large
- file reads and writes are segmented into several smaller, which
- enables the emulator so server other processes during the file
- operation. This gives the same effect as when using the thread
- pool, but with larger overhead. Other file operations, for
- example <c>sync/1</c> or <c>close/1</c> on a huge file, still are
- a problem.</p>
- <p>For increased performance, raw files are recommended. Raw files
- uses the file system of the node's host machine. For normal files
- (non-raw), the file server is used to find the files, and if
- the node is running its file server as slave to another node's,
- and the other node runs on some other host machine, they may have
- different file systems. This is seldom a problem, but you have
- now been warned.</p>
- <p>A normal file is really a process so it can be used as an IO
- device (see <c>io</c>). Therefore when data is written to a
- normal file, the sending of the data to the file process, copies
- all data that are not binaries. Opening the file in binary mode
- and writing binaries is therefore recommended. If the file is
- opened on another node, or if the file server runs as slave to
- another node's, also binaries are copied.</p>
- <p>Caching data to reduce the number of file operations, or rather
- the number of calls to the file driver, will generally increase
- performance. The following function writes 4 MBytes in 23
- seconds when tested:</p>
+ <p>For increased performance, raw files are recommended.</p>
+ <p>A normal file is really a process so it can be used as an I/O
+ device (see <seealso marker="stdlib:io"><c>io</c></seealso>).
+      Therefore, when data is written to a normal file, sending the data to
+      the file process copies all data that is not a binary. Opening
+      the file in binary mode and writing binaries is therefore recommended.
+      If the file is opened on another node, or if the file server runs as
+      slave to the file server of another node, binaries are copied as well.</p>
+ <note>
+ <p>Raw files use the file system of the host machine of the node.
+ For normal files (non-raw), the file server is used to find the files,
+ and if the node is running its file server as slave to the file server
+ of another node, and the other node runs on some other host machine,
+ they can have different file systems.
+ However, this is seldom a problem.</p>
+ </note>
+ <p><seealso marker="#open/2"><c>open/2</c></seealso> can be given the
+ options <c>delayed_write</c> and <c>read_ahead</c> to turn on caching,
+ which will reduce the number of operating system calls and greatly
+ improve performance for small reads and writes. However, the overhead
+      does not disappear completely and it is best to keep the number of file
+      operations to a minimum. As a contrived example, the following function
+      writes 4 MB in 2.5 seconds when tested:</p>
+
<code type="none"><![CDATA[
-create_file_slow(Name, N) when integer(N), N >= 0 ->
- {ok, FD} = file:open(Name, [raw, write, delayed_write, binary]),
- ok = create_file_slow(FD, 0, N),
- ok = ?FILE_MODULE:close(FD),
- ok.
-
-create_file_slow(FD, M, M) ->
+create_file_slow(Name) ->
+ {ok, Fd} = file:open(Name, [raw, write, delayed_write, binary]),
+ create_file_slow_1(Fd, 4 bsl 20),
+ file:close(Fd).
+
+create_file_slow_1(_Fd, 0) ->
ok;
-create_file_slow(FD, M, N) ->
- ok = file:write(FD, <<M:32/unsigned>>),
- create_file_slow(FD, M+1, N).]]></code>
- <p>The following, functionally equivalent, function collects 1024
- entries into a list of 128 32-byte binaries before each call to
- <c>file:write/2</c> and so does the same work in 0.52 seconds,
- which is 44 times faster.</p>
+create_file_slow_1(Fd, M) ->
+ ok = file:write(Fd, <<0>>),
+ create_file_slow_1(Fd, M - 1).]]></code>
+
+ <p>The following functionally equivalent code writes 128 bytes per call
+ to <seealso marker="#write/2"><c>write/2</c></seealso> and so does the
+ same work in 0.08 seconds, which is roughly 30 times faster:</p>
+
<code type="none"><![CDATA[
-create_file(Name, N) when integer(N), N >= 0 ->
- {ok, FD} = file:open(Name, [raw, write, delayed_write, binary]),
- ok = create_file(FD, 0, N),
- ok = ?FILE_MODULE:close(FD),
+create_file(Name) ->
+ {ok, Fd} = file:open(Name, [raw, write, delayed_write, binary]),
+ create_file_1(Fd, 4 bsl 20),
+ file:close(Fd),
ok.
-
-create_file(FD, M, M) ->
+
+create_file_1(_Fd, 0) ->
ok;
-create_file(FD, M, N) when M + 1024 =&lt; N ->
- create_file(FD, M, M + 1024, []),
- create_file(FD, M + 1024, N);
-create_file(FD, M, N) ->
- create_file(FD, M, N, []).
-
-create_file(FD, M, M, R) ->
- ok = file:write(FD, R);
-create_file(FD, M, N0, R) when M + 8 =&lt; N0 ->
- N1 = N0-1, N2 = N0-2, N3 = N0-3, N4 = N0-4,
- N5 = N0-5, N6 = N0-6, N7 = N0-7, N8 = N0-8,
- create_file(FD, M, N8,
- [<<N8:32/unsigned, N7:32/unsigned,
- N6:32/unsigned, N5:32/unsigned,
- N4:32/unsigned, N3:32/unsigned,
- N2:32/unsigned, N1:32/unsigned>> | R]);
-create_file(FD, M, N0, R) ->
- N1 = N0-1,
- create_file(FD, M, N1, [<<N1:32/unsigned>> | R]).]]></code>
- <note>
- <p>Trust only your own benchmarks. If the list length in
- <c>create_file/2</c> above is increased, it will run slightly
- faster, but consume more memory and cause more memory
- fragmentation. How much this affects your application is
- something that this simple benchmark can not predict.</p>
- <p>If the size of each binary is increased to 64 bytes, it will
- also run slightly faster, but the code will be twice as clumsy.
- In the current implementation are binaries larger than 64 bytes
- stored in memory common to all processes and not copied when
- sent between processes, while these smaller binaries are stored
- on the process heap and copied when sent like any other term.</p>
- <p>So, with a binary size of 68 bytes <c>create_file/2</c> runs
- 30 percent slower then with 64 bytes, and will cause much more
- memory fragmentation. Note that if the binaries were to be sent
- between processes (for example a non-raw file) the results
- would probably be completely different.</p>
- </note>
- <p>A raw file is really a port. When writing data to a port, it is
- efficient to write a list of binaries. There is no need to
- flatten a deep list before writing. On Unix hosts, scatter output,
- which writes a set of buffers in one operation, is used when
- possible. In this way <c>file:write(FD, [Bin1, Bin2 | Bin3])</c>
- will write the contents of the binaries without copying the data
- at all except for perhaps deep down in the operating system
- kernel.</p>
- <p>For raw files, <c>pwrite/2</c> and <c>pread/2</c> are
- efficiently implemented. The file driver is called only once for
- the whole operation, and the list iteration is done in the file
- driver.</p>
- <p>The options <c>delayed_write</c> and <c>read_ahead</c> to
- <c>file:open/2</c> makes the file driver cache data to reduce
- the number of operating system calls. The function
- <c>create_file/2</c> in the example above takes 60 seconds
- seconds without the <c>delayed_write</c> option, which is 2.6
- times slower.</p>
- <p>And, as a really bad example, <c>create_file_slow/2</c> above
- without the <c>raw</c>, <c>binary</c> and <c>delayed_write</c>
- options, that is it calls <c>file:open(Name, [write])</c>, needs
- 1 min 20 seconds for the job, which is 3.5 times slower than
- the first example, and 150 times slower than the optimized
- <c>create_file/2</c>. </p>
- </section>
+create_file_1(Fd, M) when M >= 128 ->
+ ok = file:write(Fd, <<0:(128)/unit:8>>),
+ create_file_1(Fd, M - 128);
+create_file_1(Fd, M) ->
+    %% Write the remaining M (< 128) bytes in one final call.
+    ok = file:write(Fd, <<0:(M)/unit:8>>),
+    create_file_1(Fd, 0).]]></code>
- <section>
- <title>Warnings</title>
- <p>If an error occurs when accessing an open file with the <c>io</c>
- module, the process which handles the file will exit. The dead
- file process might hang if a process tries to access it later.
- This will be fixed in a future release.</p>
+    <p>When writing data, it is generally more efficient to write a list of
+       binaries rather than a list of integers. There is no need to
+ flatten a deep list before writing. On Unix hosts, scatter output,
+ which writes a set of buffers in one operation, is used when
+ possible. In this way <c>write(FD, [Bin1, Bin2 | Bin3])</c>
+ writes the contents of the binaries without copying the data
+ at all, except for perhaps deep down in the operating system
+ kernel.</p>
+ <warning>
+ <p>If an error occurs when accessing an open file with module
+ <seealso marker="stdlib:io"><c>io</c></seealso>, the process
+ handling the file exits. The dead file process can hang if a process
+ tries to access it later. This will be fixed in a future release.
+ </p>
+ </warning>
</section>
<section>
- <title>SEE ALSO</title>
- <p><seealso marker="stdlib:filename">filename(3)</seealso></p>
+ <title>See Also</title>
+ <p><seealso marker="stdlib:filename"><c>filename(3)</c></seealso></p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/gen_sctp.xml b/lib/kernel/doc/src/gen_sctp.xml
index 456108a2fe..1e08b25f66 100644
--- a/lib/kernel/doc/src/gen_sctp.xml
+++ b/lib/kernel/doc/src/gen_sctp.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2007</year><year>2013</year>
+ <year>2007</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -30,79 +30,69 @@
<checked></checked>
<date>2007-03-21</date>
<rev>A</rev>
- <file>gen_sctp.sgml</file>
+ <file>gen_sctp.xml</file>
</header>
<module>gen_sctp</module>
- <modulesummary>The gen_sctp module provides functions for communicating with sockets using the SCTP protocol.</modulesummary>
+ <modulesummary>Functions for communicating with sockets using the SCTP
+ protocol.</modulesummary>
<description>
- <p>The <c>gen_sctp</c> module provides functions for communicating with
+ <p>This module provides functions for communicating with
sockets using the SCTP protocol. The implementation assumes that
the OS kernel supports SCTP
- <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">(RFC2960)</url> through the user-level
- <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets API Extensions.</url>
- During development this implementation was tested on
- Linux Fedora Core 5.0 (kernel 2.6.15-2054 or later is needed),
- and on Solaris 10, 11. During OTP adaptation it was tested on
- SUSE Linux Enterprise Server 10 (x86_64) kernel 2.6.16.27-0.6-smp,
- with lksctp-tools-1.0.6, briefly on Solaris 10, and later on
- SUSE Linux Enterprise Server 10 Service Pack 1 (x86_64)
- kernel 2.6.16.54-0.2.3-smp with lksctp-tools-1.0.7,
- and later also on FreeBSD 8.2.
- </p>
- <p>
- This module was written for one-to-many style sockets
+ <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">(RFC 2960)</url>
+ through the user-level
+ <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets API Extensions</url>.</p>
+ <p>During development, this implementation was tested on:</p>
+ <list type="bulleted">
+ <item>Linux Fedora Core 5.0 (kernel 2.6.15-2054 or later is needed)</item>
+ <item>Solaris 10, 11</item>
+ </list>
+ <p>During OTP adaptation it was tested on:</p>
+ <list type="bulleted">
+ <item>SUSE Linux Enterprise Server 10 (x86_64) kernel 2.6.16.27-0.6-smp,
+ with lksctp-tools-1.0.6</item>
+ <item>Briefly on Solaris 10</item>
+ <item>SUSE Linux Enterprise Server 10 Service Pack 1 (x86_64)
+ kernel 2.6.16.54-0.2.3-smp with lksctp-tools-1.0.7</item>
+ <item>FreeBSD 8.2</item>
+ </list>
+ <p>This module was written for one-to-many style sockets
(type <c>seqpacket</c>). With the addition of
- <seealso marker="#peeloff/2">peeloff/2</seealso>, one-to-one style
- sockets (type <c>stream</c>) were introduced.
- </p>
- <p>Record definitions for the <c>gen_sctp</c> module can be found using:</p>
-<pre> -include_lib("kernel/include/inet_sctp.hrl"). </pre>
+ <seealso marker="#peeloff/2"><c>peeloff/2</c></seealso>,
+ one-to-one style sockets (type <c>stream</c>) were introduced.</p>
+ <p>Record definitions for this module can be found using:</p>
+ <pre>
+-include_lib("kernel/include/inet_sctp.hrl").</pre>
<p>These record definitions use the "new" spelling 'adaptation',
not the deprecated 'adaption', regardless of which
spelling the underlying C API uses.</p>
</description>
- <section>
- <marker id="contents"></marker>
- <title>CONTENTS</title>
- <list type="bulleted">
- <item><seealso marker="#types">DATA TYPES</seealso></item>
- <item><seealso marker="#exports">EXPORTS</seealso></item>
- <item><seealso marker="#options">SCTP SOCKET OPTIONS</seealso></item>
- <item><seealso marker="#examples">SCTP EXAMPLES</seealso></item>
- <item><seealso marker="#seealso">SEE ALSO</seealso></item>
- </list>
- <marker id="types"></marker>
- </section>
-
<datatypes>
<datatype>
<name>assoc_id()</name>
<desc>
- <p><marker id="type-assoc_id"/>
- An opaque term returned in for example #sctp_paddr_change{}
- that identifies an association for an SCTP socket. The term
- is opaque except for the special value <c>0</c> that has a
- meaning such as "the whole endpoint" or "all future associations".
- </p>
+ <p>An opaque term returned in, for example, <c>#sctp_paddr_change{}</c>,
+ which identifies an association for an SCTP socket. The term
+ is opaque except for the special value <c>0</c>, which has a
+ meaning such as "the whole endpoint" or "all future associations".</p>
</desc>
</datatype>
<datatype>
<name name="option"/>
<desc>
- <p>One of the
- <seealso marker="#options">SCTP Socket Options.</seealso></p>
+ <p>One of the
+ <seealso marker="#options">SCTP Socket Options</seealso>.</p>
</desc>
</datatype>
<datatype>
<name name="option_name"/>
- <desc><marker id="type-sctp_socket"></marker></desc>
</datatype>
<datatype>
<name>sctp_socket()</name>
<desc>
- <p><marker id="type-sctp_socket"/>
- Socket identifier returned from <c>open/*</c>.</p>
+ <p>Socket identifier returned from
+ <seealso marker="#open/0"><c>open/*</c></seealso>.</p>
<marker id="exports"></marker>
</desc>
</datatype>
@@ -111,405 +101,482 @@
<funcs>
<func>
<name name="abort" arity="2"/>
- <fsummary>Abnormally terminate the association given by Assoc, without flushing of unsent data</fsummary>
+ <fsummary>Abnormally terminate the association specified by
+ <c>Assoc</c>, without flushing of unsent data.</fsummary>
<desc>
- <p>Abnormally terminates the association given by <c><anno>Assoc</anno></c>, without
+ <p>Abnormally terminates the association specified by
+ <c><anno>Assoc</anno></c>, without
flushing of unsent data. The socket itself remains open. Other
- associations opened on this socket are still valid, and it can be
- used in new associations.</p>
+ associations opened on this socket are still valid, and the socket
+ can be used in new associations.</p>
</desc>
</func>
+
<func>
<name name="close" arity="1"/>
- <fsummary>Completely close the socket and all associations on it</fsummary>
+ <fsummary>Close the socket and all associations on it.</fsummary>
<desc>
- <p>Completely closes the socket and all associations on it. The unsent
- data is flushed as in <c>eof/2</c>. The <c>close/1</c> call
+ <p>Closes the socket and all associations on it. The unsent
+ data is flushed as in <seealso marker="#eof/2"><c>eof/2</c></seealso>.
+ The <c>close/1</c> call
          is blocking or otherwise depending on the value of
- the <seealso marker="inet#option-linger">linger</seealso> socket
- <seealso marker="#options">option</seealso>.
- If <c>close</c> does not linger or linger timeout expires,
+ the <seealso marker="inet#option-linger"><c>linger</c></seealso>
+ socket <seealso marker="#options">option</seealso>.
+ If <c>close</c> does not linger or linger time-out expires,
the call returns and the data is flushed in the background.</p>
</desc>
</func>
+
<func>
<name name="connect" arity="4"/>
<fsummary>Same as <c>connect(Socket, Addr, Port, Opts, infinity)</c>.</fsummary>
<desc>
- <p>Same as <c>connect(<anno>Socket</anno>, <anno>Addr</anno>, <anno>Port</anno>, <anno>Opts</anno>, infinity)</c>.</p>
+ <p>Same as <c>connect(<anno>Socket</anno>, <anno>Addr</anno>,
+ <anno>Port</anno>, <anno>Opts</anno>, infinity)</c>.</p>
</desc>
</func>
+
<func>
<name name="connect" arity="5"/>
- <fsummary>Establish a new association for the socket <c>Socket</c>, with a peer (SCTP server socket)</fsummary>
+ <fsummary>Establish a new association for socket <c>Socket</c>, with a
+ peer (SCTP server socket).</fsummary>
<desc>
- <p>Establishes a new association for the socket <c><anno>Socket</anno></c>,
- with the peer (SCTP server socket) given by
- <c><anno>Addr</anno></c> and <c><anno>Port</anno></c>. The <c><anno>Timeout</anno></c>,
- is expressed in milliseconds. A socket can be associated with multiple peers.</p>
-
- <p><em>WARNING:</em>Using a value of <c><anno>Timeout</anno></c> less than
- the maximum time taken by the OS to establish an association (around 4.5 minutes
- if the default values from RFC 4960 are used) can result in
- inconsistent or incorrect return values. This is especially
- relevant for associations sharing the same <c><anno>Socket</anno></c>
- (i.e. source address and port) since the controlling process
- blocks until <c>connect/*</c> returns.
- <seealso marker="#connect_init/4">connect_init/*</seealso>
- provides an alternative not subject to this limitation.</p>
-
+ <p>Establishes a new association for socket <c><anno>Socket</anno></c>,
+ with the peer (SCTP server socket) specified by
+ <c><anno>Addr</anno></c> and <c><anno>Port</anno></c>.
+        <c><anno>Timeout</anno></c> is expressed in milliseconds.
+ A socket can be associated with multiple peers.</p>
+ <warning><p>Using a value of <c><anno>Timeout</anno></c> less than
+ the maximum time taken by the OS to establish an association (around
+ 4.5 minutes if the default values from
+ <url href="https://tools.ietf.org/html/rfc4960">RFC 4960</url>
+          are used) can result
+ in inconsistent or incorrect return values. This is especially
+ relevant for associations sharing the same <c><anno>Socket</anno></c>
+ (that is, source address and port), as the controlling process
+ blocks until <c>connect/*</c> returns.
+ <seealso marker="#connect_init/4"><c>connect_init/*</c></seealso>
+ provides an alternative without this limitation.</p>
+ </warning>
<p><marker id="record-sctp_assoc_change"></marker>
The result of <c>connect/*</c> is an <c>#sctp_assoc_change{}</c>
- event which contains, in particular, the new
- <seealso marker="#type-assoc_id">Association ID</seealso>.</p>
-<pre> #sctp_assoc_change{
- state = atom(),
- error = atom(),
- outbound_streams = integer(),
- inbound_streams = integer(),
- assoc_id = assoc_id()
- } </pre>
+ event that contains, in particular, the new
+ <seealso marker="#type-assoc_id">Association ID</seealso>:</p>
+ <pre>
+#sctp_assoc_change{
+ state = atom(),
+ error = atom(),
+ outbound_streams = integer(),
+ inbound_streams = integer(),
+ assoc_id = assoc_id()
+}</pre>
<p>The number of outbound and inbound streams can be set by
- giving an <c>sctp_initmsg</c> option to <c>connect</c>
- as in:</p>
-<pre> connect(Socket, Ip, Port>,
- [{sctp_initmsg,#sctp_initmsg{num_ostreams=OutStreams,
- max_instreams=MaxInStreams}}]) </pre>
+ giving an <c>sctp_initmsg</c> option to <c>connect</c> as in:</p>
+ <pre>
+connect(Socket, Ip, Port,
+ [{sctp_initmsg,#sctp_initmsg{num_ostreams=OutStreams,
+ max_instreams=MaxInStreams}}])</pre>
<p>All options <c><anno>Opt</anno></c> are set on the socket before the
- association is attempted. If an option record has got undefined
+ association is attempted. If an option record has undefined
field values, the options record is first read from the socket
- for those values. In effect, <c><anno>Opt</anno></c> option records only
- define field values to change before connecting.</p>
+ for those values. In effect, <c><anno>Opt</anno></c> option records
+ only define field values to change before connecting.</p>
<p>The returned <c>outbound_streams</c> and <c>inbound_streams</c>
- are the actual stream numbers on the socket, which may be different
- from the requested values (<c>OutStreams</c> and <c>MaxInStreams</c>
+ are the stream numbers on the socket. These can be different
+ from the requested values (<c>OutStreams</c> and <c>MaxInStreams</c>,
respectively) if the peer requires lower values.</p>
- <p>The following values of <c>state</c> are possible:</p>
- <list type="bulleted">
- <item>
- <p><c>comm_up</c>: association successfully established. This
- indicates a successful completion of <c>connect</c>.</p>
- </item>
- <item>
- <p><c>cant_assoc</c>: association cannot be established
- (<c>connect/*</c> failure).</p>
- </item>
- </list>
- <p>All other states do not normally occur in the output from
- <c>connect/*</c>. Rather, they may occur in
+ <p><c>state</c> can have the following values:</p>
+ <taglist>
+ <tag><c>comm_up</c></tag>
+ <item><p>Association is successfully established. This
+ indicates a successful completion of <c>connect</c>.</p></item>
+ <tag><c>cant_assoc</c></tag>
+ <item><p>The association cannot be established
+ (<c>connect/*</c> failure).</p></item>
+ </taglist>
+ <p>Other states do not normally occur in the output from
+ <c>connect/*</c>. Rather, they can occur in
<c>#sctp_assoc_change{}</c> events received instead of data in
- <seealso marker="#recv/1">recv/*</seealso> calls.
- All of them indicate losing the association due to various
- error conditions, and are listed here for the sake of completeness.
- The <c>error</c> field may provide more detailed diagnostics.</p>
- <list type="bulleted">
- <item>
- <p><c>comm_lost</c>;</p>
- </item>
- <item>
- <p><c>restart</c>;</p>
- </item>
- <item>
- <p><c>shutdown_comp</c>.</p>
- </item>
- </list>
+ <seealso marker="#recv/1"><c>recv/*</c></seealso> calls.
+ All of them indicate losing the association because of various error
+ conditions, and are listed here for the sake of completeness:</p>
+ <taglist>
+ <tag><c>comm_lost</c></tag>
+ <item></item>
+ <tag><c>restart</c></tag>
+ <item></item>
+ <tag><c>shutdown_comp</c></tag>
+ <item></item>
+ </taglist>
+ <p>Field <c>error</c> can provide more detailed diagnostics.</p>
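+          <p>For example, a minimal sketch of a blocking connect, where
+          <c>Socket</c> is assumed to be already opened and <c>SrvIP</c>
+          and <c>SrvPort</c> are placeholders for a listening peer, can
+          look as follows:</p>
+          <pre>
+%% Establish an association and pick up its identifier and the
+%% negotiated number of outbound streams.
+{ok, #sctp_assoc_change{state            = comm_up,
+                        assoc_id         = AssocId,
+                        outbound_streams = OutStreams}} =
+    gen_sctp:connect(Socket, SrvIP, SrvPort, []),</pre>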
</desc>
</func>
+
<func>
<name name="connect_init" arity="4"/>
- <fsummary>Same as <c>connect_init(Socket, Addr, Port, Opts, infinity)</c>.</fsummary>
+      <fsummary>Same as <c>connect_init(Socket, Addr, Port, Opts, infinity)</c>.</fsummary>
<desc>
- <p>Same as <c>connect_init(<anno>Socket</anno>, <anno>Addr</anno>, <anno>Port</anno>, <anno>Opts</anno>, infinity)</c>.</p>
+ <p>Same as <c>connect_init(<anno>Socket</anno>, <anno>Addr</anno>,
+ <anno>Port</anno>, <anno>Opts</anno>, infinity)</c>.</p>
</desc>
</func>
+
<func>
<name name="connect_init" arity="5"/>
- <fsummary>Initiate a new association for the socket <c>Socket</c>, with a peer (SCTP server socket)</fsummary>
+ <fsummary>Initiate a new association for socket <c>Socket</c>, with a
+ peer (SCTP server socket).</fsummary>
<desc>
- <p>Initiates a new association for the socket <c><anno>Socket</anno></c>,
- with the peer (SCTP server socket) given by
+ <p>Initiates a new association for socket <c><anno>Socket</anno></c>,
+ with the peer (SCTP server socket) specified by
<c><anno>Addr</anno></c> and <c><anno>Port</anno></c>.</p>
- <p>The fundamental difference between this API
- and <c>connect/*</c> is that the return value is that of the
- underlying OS connect(2) system call. If <c>ok</c> is returned
- then the result of the association establishement is received
- by the calling process as
- an <seealso marker="#record-sctp_assoc_change">
- #sctp_assoc_change{}</seealso>
- event. The calling process must be prepared to receive this, or
- poll for it using <c>recv/*</c> depending on the value of the
- active option.</p>
- <p>The parameters are as described
- in <seealso marker="#connect/5">connect/*</seealso>, with the
- exception of the <c><anno>Timeout</anno></c> value.</p>
- <p>The timer associated with <c><anno>Timeout</anno></c> only supervises
- IP resolution of <c><anno>Addr</anno></c></p>
+ <p>The fundamental difference between this API
+ and <c>connect/*</c> is that the return value is that of the
+ underlying OS <c>connect(2)</c> system call. If <c>ok</c> is returned,
+ the result of the association establishment is received
+ by the calling process as an
+ <seealso marker="#record-sctp_assoc_change"><c>#sctp_assoc_change{}</c></seealso>
+ event. The calling process must be prepared to receive this, or
+ poll for it using
+ <seealso marker="#recv/1"><c>recv/*</c></seealso>,
+ depending on the value of the active option.</p>
+ <p>The parameters are as described in
+ <seealso marker="#connect/5"><c>connect/*</c></seealso>,
+ except the <c><anno>Timeout</anno></c> value.</p>
+ <p>The timer associated with <c><anno>Timeout</anno></c> only supervises
+ IP resolution of <c><anno>Addr</anno></c>.</p>
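+        <p>For example, a minimal sketch of a non-blocking connect on a
+        socket in active mode, where <c>SrvIP</c> and <c>SrvPort</c> are
+        placeholders for the peer address, can look as follows:</p>
+        <pre>
+ok = gen_sctp:connect_init(Socket, SrvIP, SrvPort, []),
+%% The result of the association establishment arrives later as an
+%% #sctp_assoc_change{} event (here delivered as a message because
+%% the socket is in active mode).
+receive
+    {sctp, Socket, _FromIP, _FromPort,
+     {_AncData, #sctp_assoc_change{state = State, assoc_id = AssocId}}} -&gt;
+        {State, AssocId}
+end</pre>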
</desc>
</func>
+
<func>
<name name="controlling_process" arity="2"/>
- <fsummary>Assign a new controlling process pid to the socket</fsummary>
+ <fsummary>Assign a new controlling process pid to the socket.</fsummary>
<desc>
- <p>Assigns a new controlling process <c><anno>Pid</anno></c> to <c><anno>Socket</anno></c>. Same implementation
- as <c>gen_udp:controlling_process/2</c>.</p>
+ <p>Assigns a new controlling process <c><anno>Pid</anno></c> to
+ <c><anno>Socket</anno></c>. Same implementation as
+        <seealso marker="gen_udp#controlling_process/2"><c>gen_udp:controlling_process/2</c></seealso>.
+ </p>
</desc>
</func>
+
<func>
<name name="eof" arity="2"/>
- <fsummary>Gracefully terminate the association given by Assoc, with flushing of all unsent data</fsummary>
+ <fsummary>Gracefully terminate the association specified by <c>Assoc</c>,
+ with flushing of all unsent data.</fsummary>
<desc>
- <p>Gracefully terminates the association given by <c><anno>Assoc</anno></c>, with
+ <p>Gracefully terminates the association specified by
+ <c><anno>Assoc</anno></c>, with
flushing of all unsent data. The socket itself remains open. Other
- associations opened on this socket are still valid, and it can be
- used in new associations.</p>
+ associations opened on this socket are still valid. The socket can
+ be used in new associations.</p>
</desc>
</func>
+
+ <func>
+ <name name="error_string" arity="1"/>
+ <fsummary>Translate an SCTP error number into a string.</fsummary>
+ <desc>
+ <p>Translates an SCTP error number from, for example,
+ <c>#sctp_remote_error{}</c> or <c>#sctp_send_failed{}</c> into
+ an explanatory string, or one of the atoms <c>ok</c> for no
+ error or <c>undefined</c> for an unrecognized error.</p>
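+        <p>For example, a sketch of mapping such an error number to
+        something printable, where <c>ErrorNumber</c> is a placeholder
+        for the value of an <c>error</c> field:</p>
+        <pre>
+case gen_sctp:error_string(ErrorNumber) of
+    ok        -&gt; no_error;
+    undefined -&gt; {unknown_sctp_error, ErrorNumber};
+    String    -&gt; {sctp_error, String}      %% explanatory string
+end</pre>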
+ </desc>
+ </func>
+
<func>
<name name="listen" arity="2" clause_i="1"/>
<name name="listen" arity="2" clause_i="2"/>
<fsummary>Set up a socket to listen.</fsummary>
<desc>
<p>Sets up a socket to listen on the IP address and port number
- it is bound to.</p>
- <p>For type <c>seqpacket</c> sockets (the default)
- <c><anno>IsServer</anno></c> must be <c>true</c> or <c>false</c>.
- In contrast to TCP, in SCTP there is no listening queue length.
- If <c><anno>IsServer</anno></c> is <c>true</c> the socket accepts new associations, i.e.
- it will become an SCTP server socket.</p>
- <p>For type <c>stream</c> sockets <anno>Backlog</anno> defines
- the backlog queue length just like in TCP.</p>
+ it is bound to.</p>
+        <p>For type <c>seqpacket</c> sockets (the default),
+        <c><anno>IsServer</anno></c> must be <c>true</c> or <c>false</c>.
+        In contrast to TCP, there is no listening queue length in SCTP.
+        If <c><anno>IsServer</anno></c> is <c>true</c>, the socket accepts
+        new associations, that is, it becomes an SCTP server socket.</p>
+        <p>For type <c>stream</c> sockets, <anno>Backlog</anno> defines
+        the backlog queue length just like in TCP.</p>
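+        <p>For example, a minimal sketch of a one-to-many (type
+        <c>seqpacket</c>) server socket, using the placeholder port
+        <c>2006</c>:</p>
+        <pre>
+{ok, Server} = gen_sctp:open(2006, [{recbuf,65536}]),
+ok = gen_sctp:listen(Server, true),   %% accept new associations</pre>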
</desc>
</func>
+
<func>
<name name="open" arity="0"/>
<name name="open" arity="1" clause_i="1"/>
<name name="open" arity="1" clause_i="2"/>
<name name="open" arity="2"/>
- <fsummary>Create an SCTP socket and bind it to local addresses</fsummary>
+      <fsummary>Create an SCTP socket and bind it to local addresses.</fsummary>
<desc>
- <p>Creates an SCTP socket and binds it to the local addresses
- specified by all <c>{ip,<anno>IP</anno>}</c> (or synonymously <c>{ifaddr,<anno>IP</anno>}</c>)
- options (this feature is called SCTP multi-homing).
- The default <c><anno>IP</anno></c> and <c><anno>Port</anno></c> are <c>any</c>
+ <p>Creates an SCTP socket and binds it to the local addresses
+ specified by all <c>{ip,<anno>IP</anno>}</c> (or synonymously
+ <c>{ifaddr,<anno>IP</anno>}</c>)
+ options (this feature is called SCTP multi-homing). The default
+ <c><anno>IP</anno></c> and <c><anno>Port</anno></c> are <c>any</c>
and <c>0</c>, meaning bind to all local addresses on any
- one free port.</p>
-
- <p>Other options are:</p>
+ free port.</p>
+ <p>Other options:</p>
<taglist>
<tag><c>inet6</c></tag>
<item>
- <p>Set up the socket for IPv6.</p>
+ <p>Sets up the socket for IPv6.</p>
</item>
<tag><c>inet</c></tag>
<item>
- <p>Set up the socket for IPv4. This is the default.</p>
+ <p>Sets up the socket for IPv4. This is the default.</p>
</item>
</taglist>
-
<p>A default set of socket <seealso marker="#options">options</seealso>
- is used. In particular, the socket is opened in
+ is used. In particular, the socket is opened in
<seealso marker="#option-binary">binary</seealso> and
<seealso marker="#option-active">passive</seealso> mode,
- with <anno>SockType</anno> <c>seqpacket</c>,
- and with reasonably large
+ with <anno>SockType</anno> <c>seqpacket</c>, and with reasonably large
<seealso marker="inet#option-sndbuf">kernel</seealso> and driver
- <seealso marker="inet#option-buffer">buffers.</seealso></p>
+ <seealso marker="inet#option-buffer">buffers</seealso>.</p>
+ <p>
+ If the socket is in
+ <seealso marker="#option-active">passive</seealso>
+          mode, data can be received through the
+ <seealso marker="#recv/1"><c>recv/1,2</c></seealso>
+ calls.
+ </p>
+ <p>
+ If the socket is in
+ <seealso marker="#option-active">active</seealso>
+          mode, received data is delivered to the controlling process
+ as messages:
+ </p>
+ <code type="none">
+{sctp, <anno>Socket</anno>, FromIP, FromPort, {AncData, Data}}
+ </code>
+ <p>
+ See
+ <seealso marker="#recv/1"><c>recv/1,2</c></seealso>
+ for a description of the message fields.
+ </p>
+ <note>
+ <p>
+ This message format unfortunately differs slightly from the
+ <seealso marker="gen_udp#open/1"><c>gen_udp</c></seealso>
+ message format with ancillary data,
+ and from the
+ <seealso marker="#recv/1"><c>recv/1,2</c></seealso>
+ return tuple format.
+ </p>
+ </note>
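+          <p>For example, a minimal sketch of opening a multi-homed
+          socket on port 2006, where <c>Addr1</c> and <c>Addr2</c> are
+          placeholders for local addresses, overriding a few of the
+          defaults:</p>
+          <pre>
+{ok, Socket} =
+    gen_sctp:open(2006, [{ip,Addr1}, {ip,Addr2},   %% SCTP multi-homing
+                         {active,once}, {recbuf,65536}]),</pre>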
</desc>
</func>
+
<func>
<name name="peeloff" arity="2"/>
- <fsummary>
- Peel off a type <c>stream</c> socket from a type <c>seqpacket</c> one
- </fsummary>
+ <fsummary>Peel off a type <c>stream</c> socket from a type
+ <c>seqpacket</c> one.</fsummary>
<desc>
- <p>
- Branch off an existing association <anno>Assoc</anno>
- in a socket <anno>Socket</anno> of type <c>seqpacket</c>
- (one-to-many style) into
- a new socket <anno>NewSocket</anno> of type <c>stream</c>
- (one-to-one style).
- </p>
- <p>
- The existing association argument <anno>Assoc</anno>
- can be either a
- <seealso marker="#record-sctp_assoc_change">
- #sctp_assoc_change{}
- </seealso>
- record as returned from e.g
- <seealso marker="#recv-2">recv/*</seealso>,
- <seealso marker="#connect-5">connect/*</seealso> or
- from a listening socket in active mode. Or it can be just
- the field <c>assoc_id</c> integer from such a record.
- </p>
+ <p>Branches off an existing association <c><anno>Assoc</anno></c>
+ in a socket <c><anno>Socket</anno></c> of type <c>seqpacket</c>
+ (one-to-many style) into
+ a new socket <c><anno>NewSocket</anno></c> of type <c>stream</c>
+ (one-to-one style).</p>
+ <p>The existing association argument <c><anno>Assoc</anno></c>
+ can be either a
+ <seealso marker="#record-sctp_assoc_change"><c>#sctp_assoc_change{}</c></seealso>
+ record as returned from, for example,
+ <seealso marker="#recv-2"><c>recv/*</c></seealso>,
+ <seealso marker="#connect-5"><c>connect/*</c></seealso>, or
+ from a listening socket in active mode. It can also be just
+ the field <c>assoc_id</c> integer from such a record.</p>
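+        <p>For example, a sketch where an established association is
+        branched off and handed to a separate handler process
+        (<c>PeerIP</c>, <c>PeerPort</c>, and <c>HandlerPid</c> are
+        placeholders):</p>
+        <pre>
+{ok, AssocChange}  = gen_sctp:connect(Socket, PeerIP, PeerPort, []),
+{ok, StreamSocket} = gen_sctp:peeloff(Socket, AssocChange),
+ok = gen_sctp:controlling_process(StreamSocket, HandlerPid),</pre>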
</desc>
</func>
+
<func>
<name name="recv" arity="1"/>
<name name="recv" arity="2"/>
- <fsummary>Receive a message from a socket</fsummary>
+ <fsummary>Receive a message from a socket.</fsummary>
<desc>
- <p>Receives the <c><anno>Data</anno></c> message from any association of the socket.
- If the receive times out <c>{error,timeout</c> is returned.
- The default timeout is <c>infinity</c>.
- <c><anno>FromIP</anno></c> and <c><anno>FromPort</anno></c> indicate the sender's address.</p>
- <p><c><anno>AncData</anno></c> is a list of Ancillary Data items which
- may be received along with the main <c><anno>Data</anno></c>.
+ <p>Receives the <c><anno>Data</anno></c> message from any association
+ of the socket.
+ If the receive times out, <c>{error,timeout}</c> is returned.
+ The default time-out is <c>infinity</c>. <c><anno>FromIP</anno></c>
+ and <c><anno>FromPort</anno></c> indicate the address of the
+ sender.</p>
+ <p><c><anno>AncData</anno></c> is a list of ancillary data items that
+ can be received along with the main <c><anno>Data</anno></c>.
This list can be empty, or contain a single
- <seealso marker="#record-sctp_sndrcvinfo">#sctp_sndrcvinfo{}</seealso>
- record, if receiving of such ancillary data is enabled
- (see option
- <seealso marker="#option-sctp_events">sctp_events</seealso>).
- It is enabled by default, since such ancillary data
- provide an easy way of determining the association and stream
- over which the message has been received.
- (An alternative way would be to get the Association ID from the
- <c><anno>FromIP</anno></c> and <c><anno>FromPort</anno></c> using the
- <seealso marker="#option-sctp_get_peer_addr_info">sctp_get_peer_addr_info</seealso> socket option,
- but this would still not produce the Stream number).</p>
- <p>The actual <c><anno>Data</anno></c> received may be a <c>binary()</c>,
- or <c>list()</c> of bytes (integers in the range 0 through 255)
- depending on the socket mode, or an SCTP Event.
- <marker id="sctp_events"></marker>
-
- The following SCTP Events are possible:</p>
+ <seealso marker="#record-sctp_sndrcvinfo"><c>#sctp_sndrcvinfo{}</c></seealso>
+ record if receiving of such ancillary data is enabled (see option
+ <seealso marker="#option-sctp_events"><c>sctp_events</c></seealso>).
+ It is enabled by default, as such ancillary data
+ provides an easy way of determining the association and stream
+ over which the message is received.
+ (An alternative way is to get the association ID from
+ <c><anno>FromIP</anno></c> and <c><anno>FromPort</anno></c> using
+ socket option
+ <seealso marker="#option-sctp_get_peer_addr_info"><c>sctp_get_peer_addr_info</c></seealso>,
+          but this still does not produce the stream number).</p>
+ <p>
+ <c><anno>AncData</anno></c> may also contain
+ <seealso marker="inet#type-ancillary_data">
+ ancillary data
+ </seealso>
+ from the socket
+ <seealso marker="#type-option">options</seealso>
+ <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>,
+ <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso>
+ or
+ <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso>,
+ if that is supported by the platform for the socket.
+ </p>
+ <p>The <c><anno>Data</anno></c> received can be a <c>binary()</c>
+ or a <c>list()</c> of bytes (integers in the range 0 through 255)
+ depending on the socket mode, or an SCTP event.</p>
+ <marker id="sctp_events"></marker>
+ <p>Possible SCTP events:</p>
<list type="bulleted">
<item>
- <p><seealso marker="#record-sctp_sndrcvinfo">#sctp_sndrcvinfo{}</seealso></p>
+ <seealso marker="#record-sctp_sndrcvinfo"><c>#sctp_sndrcvinfo{}</c></seealso>
</item>
<item>
- <p><seealso marker="#record-sctp_assoc_change">#sctp_assoc_change{}</seealso>;</p>
+ <seealso marker="#record-sctp_assoc_change"><c>#sctp_assoc_change{}</c></seealso>
</item>
<item>
-<pre> #sctp_paddr_change{
- addr = {ip_address(),port()},
- state = atom(),
- error = integer(),
- assoc_id = assoc_id()
- } </pre>
- <p>Indicates change of the status of the peer's IP address given by
- <c>addr</c> within the association <c>assoc_id</c>.
- Possible values of <c>state</c> (mostly self-explanatory) include:</p>
- <list type="bulleted">
- <item>
- <p><c>addr_unreachable</c>;</p>
- </item>
- <item>
- <p><c>addr_available</c>;</p>
- </item>
- <item>
- <p><c>addr_removed</c>;</p>
- </item>
- <item>
- <p><c>addr_added</c>;</p>
+ <pre>
+#sctp_paddr_change{
+ addr = {ip_address(),port()},
+ state = atom(),
+ error = integer(),
+ assoc_id = assoc_id()
+}</pre>
+ <p>Indicates change of the status of the IP address of the peer
+ specified by
+ <c>addr</c> within association <c>assoc_id</c>. Possible
+ values of <c>state</c> (mostly self-explanatory) include:</p>
+ <taglist>
+ <tag><c>addr_unreachable</c></tag>
+ <item></item>
+ <tag><c>addr_available</c></tag>
+ <item></item>
+ <tag><c>addr_removed</c></tag>
+ <item></item>
+ <tag><c>addr_added</c></tag>
+ <item></item>
+ <tag><c>addr_made_prim</c></tag>
+ <item></item>
+ <tag><c>addr_confirmed</c></tag>
+ <item></item>
+ </taglist>
+ <p>In case of an error (for example, <c>addr_unreachable</c>),
+ field <c>error</c> provides more diagnostics. In such cases,
+ event <c>#sctp_paddr_change{}</c> is automatically
+ converted into an <c>error</c> term returned by
+ <seealso marker="#recv/1"><c>recv</c></seealso>.
+ The <c>error</c> field value can be converted into a string using
+ <seealso marker="#error_string/1"><c>error_string/1</c></seealso>.
+ </p>
+ </item>
+ <item>
+ <pre>
+#sctp_send_failed{
+ flags = true | false,
+ error = integer(),
+ info = #sctp_sndrcvinfo{},
+ assoc_id = assoc_id()
+ data = binary()
+}</pre>
+ <p>The sender can receive this event if a send operation fails.</p>
+ <taglist>
+ <tag><c>flags</c></tag>
+ <item><p>A Boolean specifying if the data has been transmitted
+ over the wire.</p></item>
+ <tag><c>error</c></tag>
+              <item><p>Provides extended diagnostics; use
+                <seealso marker="#error_string/1"><c>error_string/1</c></seealso>.</p>
</item>
- <item>
- <p><c>addr_made_prim</c>.</p>
+ <tag><c>info</c></tag>
+ <item><p>The original
+ <seealso marker="#record-sctp_sndrcvinfo"><c>#sctp_sndrcvinfo{}</c></seealso>
+ record used in the failed
+                <seealso marker="#send/3"><c>send/*</c></seealso>.</p>
</item>
- <item>
- <p><c>addr_confirmed</c>.</p>
+ <tag><c>data</c></tag>
+ <item><p>The whole original data chunk attempted to be sent.</p>
</item>
- </list>
- <p>In case of an error (e.g. <c>addr_unreachable</c>), the
- <c>error</c> field provides additional diagnostics. In such cases,
- the <c>#sctp_paddr_change{}</c> Event is automatically
- converted into an <c>error</c> term returned by
- <c>gen_sctp:recv</c>. The <c>error</c> field value can be
- converted into a string using <c>error_string/1</c>.</p>
- </item>
- <item>
-<pre> #sctp_send_failed{
- flags = true | false,
- error = integer(),
- info = #sctp_sndrcvinfo{},
- assoc_id = assoc_id()
- data = binary()
- } </pre>
- <p>The sender may receive this event if a send operation fails.
- The <c>flags</c> is a Boolean specifying whether the data have
- actually been transmitted over the wire; <c>error</c> provides
- extended diagnostics, use <c>error_string/1</c>;
- <c>info</c> is the original
- <seealso marker="#record-sctp_sndrcvinfo">#sctp_sndrcvinfo{}</seealso> record used in the failed
- <seealso marker="#send/3">send/*,</seealso> and <c>data</c>
- is the whole original data chunk attempted to be sent.</p>
+ </taglist>
<p>In the current implementation of the Erlang/SCTP binding,
- this Event is internally converted into an <c>error</c> term
- returned by <c>recv/*</c>.</p>
+ this event is internally converted into an <c>error</c> term
+ returned by
+ <seealso marker="#recv/1"><c>recv/*</c></seealso>.</p>
</item>
<item>
-<pre> #sctp_adaptation_event{
- adaptation_ind = integer(),
- assoc_id = assoc_id()
- } </pre>
- <p>Delivered when a peer sends an Adaptation Layer Indication
- parameter (configured through the option
- <seealso marker="#option-sctp_adaptation_layer">sctp_adaptation_layer</seealso>).
- Note that with the current implementation of
+ <pre>
+#sctp_adaptation_event{
+ adaptation_ind = integer(),
+ assoc_id = assoc_id()
+}</pre>
+ <p>Delivered when a peer sends an adaptation layer indication
+ parameter (configured through option
+ <seealso marker="#option-sctp_adaptation_layer"><c>sctp_adaptation_layer</c></seealso>).
+ Notice that with the current implementation of
the Erlang/SCTP binding, this event is disabled by default.</p>
</item>
<item>
-<pre> #sctp_pdapi_event{
- indication = sctp_partial_delivery_aborted,
- assoc_id = assoc_id()
- } </pre>
+ <pre>
+#sctp_pdapi_event{
+ indication = sctp_partial_delivery_aborted,
+ assoc_id = assoc_id()
+}</pre>
<p>A partial delivery failure. In the current implementation of
- the Erlang/SCTP binding, this Event is internally converted
- into an <c>error</c> term returned by <c>recv/*</c>.</p>
+ the Erlang/SCTP binding, this event is internally converted
+ into an <c>error</c> term returned by
+ <seealso marker="#recv/1"><c>recv/*</c></seealso>.</p>
</item>
</list>
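+        <p>For example, a sketch of a passive-mode receive loop on a
+        binary-mode socket, where <c>handle_data/2</c> is a placeholder
+        for application code:</p>
+        <pre>
+loop(Socket) -&gt;
+    case gen_sctp:recv(Socket) of
+        {ok, {_FromIP, _FromPort, _AncData,
+              #sctp_assoc_change{state = comm_up}}} -&gt;
+            loop(Socket);                        %% new association
+        {ok, {_FromIP, _FromPort,
+              [#sctp_sndrcvinfo{stream = Stream}], Data}}
+          when is_binary(Data) -&gt;
+            handle_data(Stream, Data),           %% ordinary data chunk
+            loop(Socket);
+        {error, Reason} -&gt;
+            {error, Reason}
+    end.</pre>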
</desc>
</func>
+
<func>
<name name="send" arity="3"/>
- <fsummary>Send a message using an <c>#sctp_sndrcvinfo{}</c>record</fsummary>
+      <fsummary>Send a message using an <c>#sctp_sndrcvinfo{}</c> record.</fsummary>
<desc>
- <p>Sends the <c><anno>Data</anno></c> message with all sending parameters from a
- <seealso marker="#record-sctp_sndrcvinfo">#sctp_sndrcvinfo{}</seealso> record.
- This way, the user can specify the PPID (passed to the remote end)
- and Context (passed to the local SCTP layer) which can be used
- for example for error identification.
+ <p>Sends the <c><anno>Data</anno></c> message with all sending
+ parameters from a
+ <seealso marker="#record-sctp_sndrcvinfo"><c>#sctp_sndrcvinfo{}</c></seealso>
+ record. This way, the user can specify the PPID (passed to the remote
+ end) and context (passed to the local SCTP layer), which can be used,
+ for example, for error identification.
However, such a fine level of user control is rarely required.
- The send/4 function is sufficient for most applications.</p>
+ The function <c>send/4</c> is sufficient for most applications.</p>
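+          <p>For example, a sketch of sending a message on stream 3 of an
+          association identified by the placeholder <c>AssocId</c>, with
+          an arbitrary PPID value:</p>
+          <pre>
+ok = gen_sctp:send(Socket,
+                   #sctp_sndrcvinfo{assoc_id = AssocId,
+                                    stream   = 3,
+                                    ppid     = 42},
+                   &lt;&lt;"some data"&gt;&gt;),</pre>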
</desc>
</func>
+
<func>
<name name="send" arity="4"/>
- <fsummary>Send a message over an existing association and given stream</fsummary>
- <desc>
- <p>Sends <c><anno>Data</anno></c> message over an existing association and given
- stream.</p>
- </desc>
- </func>
- <func>
- <name name="error_string" arity="1"/>
- <fsummary>Translate an SCTP error number into a string</fsummary>
+ <fsummary>Send a message over an existing association and specified
+ stream.</fsummary>
<desc>
- <p>Translates an SCTP error number from for example
- <c>#sctp_remote_error{}</c> or <c>#sctp_send_failed{}</c> into
- an explanatory string, or one of the atoms <c>ok</c> for no
- error and <c>undefined</c> for an unrecognized error.</p>
+ <p>Sends a <c><anno>Data</anno></c> message over an existing association
+ and specified stream.</p>
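+      <p>For example, assuming the placeholder <c>AssocId</c> identifies
+      an established association:</p>
+      <pre>
+ok = gen_sctp:send(Socket, AssocId, 0, &lt;&lt;"hello"&gt;&gt;),   %% stream 0</pre>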
</desc>
</func>
</funcs>
<section>
<marker id="options"></marker>
- <title>SCTP SOCKET OPTIONS</title>
+ <title>SCTP Socket Options</title>
<p>The set of admissible SCTP socket options is by construction
- orthogonal to the sets of TCP, UDP and generic INET options:
- only those options which are explicitly listed below are allowed
+ orthogonal to the sets of TCP, UDP, and generic <c>inet</c> options.
+ Only options listed here are allowed
for SCTP sockets. Options can be set on the socket using
- <seealso marker="#open/1"><c>gen_sctp:open/1,2</c></seealso>
- or <seealso marker="inet#setopts/2"><c>inet:setopts/2</c></seealso>,
- retrieved using <seealso marker="inet#getopts/2"><c>inet:getopts/2</c></seealso>,
- and when calling <seealso marker="#connect/4"><c>gen_sctp:connect/4,5</c></seealso>
- options can be changed.</p>
+ <seealso marker="#open/1"><c>open/1,2</c></seealso> or
+ <seealso marker="inet#setopts/2"><c>inet:setopts/2</c></seealso>,
+ retrieved using
+ <seealso marker="inet#getopts/2"><c>inet:getopts/2</c></seealso>.
+ Options can be changed when calling
+ <seealso marker="#connect/4"><c>connect/4,5</c></seealso>.</p>
<marker id="option-binary"></marker>
<marker id="option-list"></marker>
<taglist>
<tag><c>{mode, list|binary}</c> or just <c>list</c> or <c>binary</c></tag>
<item>
- <p>Determines the type of data returned from <c>gen_sctp:recv/1,2</c>.</p>
+ <p>Determines the type of data returned from
+ <seealso marker="#recv/1"><c>recv/1,2</c></seealso>.</p>
<marker id="option-active"></marker>
</item>
<tag><c>{active, true|false|once|N}</c></tag>
@@ -517,176 +584,190 @@
<list type="bulleted">
<item>
<p>If <c>false</c> (passive mode, the default),
- the caller needs to do an explicit <c>gen_sctp:recv</c> call
- in order to retrieve the available data from the socket.</p>
+ the caller must do an explicit
+ <seealso marker="#recv/1"><c>recv</c></seealso> call
+ to retrieve the available data from the socket.</p>
</item>
+ <item>
+ <p>
+              If <c>true|once|N</c> (active modes),
+ received data or events are sent to the owning process.
+ See <seealso marker="#open/0"><c>open/0..2</c></seealso>
+ for the message format.
+ </p>
+ </item>
<item>
- <p>If <c>true</c> (full active mode), the pending data or events are
- sent to the owning process.</p>
- <p><em>NB:</em> This can cause the message queue to overflow,
- as there is no way to throttle the sender in this case
- (no flow control!).</p>
+ <p>
+              If <c>true</c> (full active mode), there is no flow control.
+ </p>
+ <note>
+ <p>
+                This can cause the message queue to overflow, causing,
+                for example, the virtual machine to run out of memory
+                and crash.
+ </p>
+ </note>
</item>
<item>
<p>If <c>once</c>, only one message is automatically placed
in the message queue, and after that the mode is automatically
- reset to passive. This provides flow control as well as
+ reset to passive. This provides flow control and
the possibility for the receiver to listen for its incoming
SCTP data interleaved with other inter-process messages.</p>
</item>
<item>
<p>If <c>active</c> is specified as an integer <c>N</c> in the
- range -32768 to 32767 (inclusive), then that number is added to
- the socket's count of the number of data messages to be
+ range -32768 to 32767 (inclusive), that number is added to
+            the socket's count of data messages to be
delivered to the controlling process. If the result of the
- addition would be negative, the count is set to 0. Once the
- count reaches 0, either through the delivery of messages or by
- being explicitly set with <seealso
- marker="inet#setopts/2">inet:setopts/2</seealso>, the socket's
- mode is automatically reset to passive (<c>{active,
- false}</c>) mode. When a socket in this active mode transitions to
+ addition is negative, the count is set to <c>0</c>. Once the
+ count reaches <c>0</c>, either through the delivery of messages
+ or by being explicitly set with
+ <seealso marker="inet#setopts/2"><c>inet:setopts/2</c></seealso>,
+ the socket mode is automatically reset to passive (<c>{active,
+ false}</c>). When a socket in this active mode transitions to
passive mode, the message <c>{sctp_passive, Socket}</c> is sent
to the controlling process to notify it that if it wants to
receive more data messages from the socket, it must call
- <seealso marker="inet#setopts/2">inet:setopts/2</seealso> to set
- the socket back into an active mode.</p>
+ <seealso marker="inet#setopts/2"><c>inet:setopts/2</c></seealso>
+ to set the socket back into an active mode.</p>
</item>
</list>
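+            <p>For example, a sketch of a receive loop that re-arms the
+            socket with credit for ten more data messages each time it
+            drops back to passive mode (<c>handle/1</c> is a placeholder
+            for application code):</p>
+            <pre>
+%% The socket is assumed to have been set to {active,10}.
+loop(Socket) -&gt;
+    receive
+        {sctp, Socket, _FromIP, _FromPort, {_AncData, Data}} -&gt;
+            handle(Data),
+            loop(Socket);
+        {sctp_passive, Socket} -&gt;
+            %% Credit exhausted; hand out ten more messages.
+            ok = inet:setopts(Socket, [{active,10}]),
+            loop(Socket)
+    end.</pre>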
</item>
- <tag><c>{tos, integer()}</c></tag>
+ <tag><c>{tos, integer()}</c></tag>
<item>
- <p>Sets the Type-Of-Service field on the IP datagrams being sent,
- to the given value, which effectively determines a prioritization
+ <p>Sets the Type-Of-Service field on the IP datagrams that are sent,
+ to the specified value. This effectively determines a prioritization
policy for the outbound packets. The acceptable values
- are system-dependent. TODO: we do not provide
- symbolic names for these values yet.</p>
+ are system-dependent.</p>
</item>
<tag><c>{priority, integer()}</c></tag>
<item>
<p>A protocol-independent equivalent of <c>tos</c> above. Setting
- priority implies setting tos as well.</p>
+ priority implies setting <c>tos</c> as well.</p>
</item>
<tag><c>{dontroute, true|false}</c></tag>
<item>
- <p>By default <c>false</c>. If <c>true</c>, the kernel does not
- send packets via any gateway, only sends them to directly
+ <p>Defaults to <c>false</c>. If <c>true</c>, the kernel does not
+ send packets through any gateway, only sends them to directly
connected hosts.</p>
</item>
<tag><c>{reuseaddr, true|false}</c></tag>
<item>
- <p>By default <c>false</c>. If true, the local binding address
- <c>{IP,Port}</c> of the socket can be re-used immediately:
- no waiting in the CLOSE_WAIT state is performed (may be
+          <p>Defaults to <c>false</c>. If <c>true</c>, the local binding address
+ <c>{IP,Port}</c> of the socket can be reused immediately.
+ No waiting in state <c>CLOSE_WAIT</c> is performed (can be
required for high-throughput servers).</p>
</item>
- <tag><c>{sndbuf, integer()}</c></tag>
+ <tag><c>{sndbuf, integer()}</c></tag>
<item>
- <p>The size, in bytes, of the *kernel* send buffer for this socket.
+ <p>The size, in bytes, of the OS kernel send buffer for this socket.
Sending errors would occur for datagrams larger than
<c>val(sndbuf)</c>. Setting this option also adjusts
the size of the driver buffer (see <c>buffer</c> above).</p>
</item>
<tag><c>{recbuf, integer()}</c></tag>
<item>
- <p>The size, in bytes, of the *kernel* recv buffer for this socket.
+ <p>The size, in bytes, of the OS kernel receive buffer for this socket.
Sending errors would occur for datagrams larger than
- <c>val(sndbuf)</c>. Setting this option also adjusts
+ <c>val(recbuf)</c>. Setting this option also adjusts
the size of the driver buffer (see <c>buffer</c> above).</p>
</item>
-
- <tag><c>{sctp_module, module()}</c></tag>
- <item> <p>
- Override which callback module is used. Defaults to
- <c>inet_sctp</c> for IPv4 and <c>inet6_sctp</c> for IPv6.
- </p>
- </item>
-
-
+ <tag><c>{sctp_module, module()}</c></tag>
+ <item>
+ <p>Overrides which callback module is used. Defaults to
+ <c>inet_sctp</c> for IPv4 and <c>inet6_sctp</c> for IPv6.</p>
+ </item>
<tag><c>{sctp_rtoinfo, #sctp_rtoinfo{}}</c></tag>
<item>
-<pre> #sctp_rtoinfo{
- assoc_id = assoc_id(),
- initial = integer(),
- max = integer(),
- min = integer()
- } </pre>
- <p>Determines re-transmission time-out parameters, in milliseconds,
- for the association(s) given by <c>assoc_id</c>.
- If <c>assoc_id = 0</c> (default) indicates the whole endpoint. See
- <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">RFC2960</url> and
- <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets API Extensions for SCTP</url> for the exact semantics of the fields values.</p>
+ <pre>
+#sctp_rtoinfo{
+ assoc_id = assoc_id(),
+ initial = integer(),
+ max = integer(),
+ min = integer()
+}</pre>
+ <p>Determines retransmission time-out parameters, in milliseconds,
+ for the association(s) specified by <c>assoc_id</c>.</p>
+ <p><c>assoc_id = 0</c> (default) indicates the whole endpoint. See
+ <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">RFC
+ 2960</url> and
+ <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets
+ API Extensions for SCTP</url>
+ for the exact semantics of the field values.</p>
</item>
<tag><c>{sctp_associnfo, #sctp_assocparams{}}</c></tag>
<item>
-<pre> #sctp_assocparams{
- assoc_id = assoc_id(),
- asocmaxrxt = integer(),
- number_peer_destinations = integer(),
- peer_rwnd = integer(),
- local_rwnd = integer(),
- cookie_life = integer()
- } </pre>
- <p>Determines association parameters for the association(s) given by
- <c>assoc_id</c>. <c>assoc_id = 0</c> (default) indicates
- the whole endpoint. See
- <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets API Extensions for SCTP</url> for the discussion of their semantics. Rarely used.</p>
+ <pre>
+#sctp_assocparams{
+ assoc_id = assoc_id(),
+ asocmaxrxt = integer(),
+ number_peer_destinations = integer(),
+ peer_rwnd = integer(),
+ local_rwnd = integer(),
+ cookie_life = integer()
+}</pre>
+ <p>Determines association parameters for the association(s) specified by
+ <c>assoc_id</c>.</p>
+ <p><c>assoc_id = 0</c> (default) indicates the whole endpoint. See
+ <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets API Extensions for SCTP</url>
+ for the discussion of their semantics. Rarely used.</p>
</item>
<tag><c>{sctp_initmsg, #sctp_initmsg{}}</c></tag>
<item>
-<pre> #sctp_initmsg{
- num_ostreams = integer(),
- max_instreams = integer(),
- max_attempts = integer(),
- max_init_timeo = integer()
- } </pre>
- <p>Determines the default parameters which this socket attempts
+ <pre>
+#sctp_initmsg{
+ num_ostreams = integer(),
+ max_instreams = integer(),
+ max_attempts = integer(),
+ max_init_timeo = integer()
+}</pre>
+ <p>Determines the default parameters that this socket tries
to negotiate with its peer while establishing an association with it.
- Should be set after <c>open/*</c> but before the first
- <c>connect/*</c>. <c>#sctp_initmsg{}</c> can also be used
- as ancillary data with the first call of <c>send/*</c> to
+ Is to be set after
+ <seealso marker="#open/1"><c>open/*</c></seealso>
+ but before the first
+ <seealso marker="#connect/4"><c>connect/*</c></seealso>.
+ <c>#sctp_initmsg{}</c> can also be used
+ as ancillary data with the first call of
+ <seealso marker="#send/3"><c>send/*</c></seealso> to
a new peer (when a new association is created).</p>
- <list type="bulleted">
- <item>
- <p><c>num_ostreams</c>: number of outbound streams;</p>
- </item>
- <item>
- <p><c>max_instreams</c>: max number of in-bound streams;</p>
- </item>
- <item>
- <p><c>max_attempts</c>: max re-transmissions while
- establishing an association;</p>
- </item>
- <item>
- <p><c>max_init_timeo</c>: time-out in milliseconds
- for establishing an association.</p>
- </item>
- </list>
+ <taglist>
+ <tag><c>num_ostreams</c></tag>
+ <item>Number of outbound streams</item>
+ <tag><c>max_instreams</c></tag>
+ <item>Maximum number of inbound streams</item>
+ <tag><c>max_attempts</c></tag>
+ <item>Maximum retransmissions while establishing an association</item>
+ <tag><c>max_init_timeo</c></tag>
+ <item>Time-out, in milliseconds, for establishing an association</item>
+ </taglist>
</item>
<tag><c>{sctp_autoclose, integer() >= 0}</c></tag>
<item>
- <p>Determines the time (in seconds) after which an idle association is
+ <p>Determines the time, in seconds, after which an idle association is
automatically closed. <c>0</c> means that the association is
never automatically closed.</p>
</item>
<tag><c>{sctp_nodelay, true|false}</c></tag>
<item>
<p>Turns on|off the Nagle algorithm for merging small packets
- into larger ones (which improves throughput at the expense
- of latency).</p>
+ into larger ones. This improves throughput at the expense
+ of latency.</p>
</item>
<tag><c>{sctp_disable_fragments, true|false}</c></tag>
<item>
<p>If <c>true</c>, induces an error on an attempt to send
- a message which is larger than the current PMTU size
- (which would require fragmentation/re-assembling).
- Note that message fragmentation does not affect
+ a message larger than the current PMTU size
+ (which would require fragmentation/reassembling).
+ Notice that message fragmentation does not affect
the logical atomicity of its delivery; this option
is provided for performance reasons only.</p>
</item>
<tag><c>{sctp_i_want_mapped_v4_addr, true|false}</c></tag>
<item>
<p>Turns on|off automatic mapping of IPv4 addresses into IPv6 ones
- (if the socket address family is AF_INET6).</p>
+ (if the socket address family is <c>AF_INET6</c>).</p>
</item>
<tag><c>{sctp_maxseg, integer()}</c></tag>
<item>
@@ -695,176 +776,171 @@
</item>
<tag><c>{sctp_primary_addr, #sctp_prim{}}</c></tag>
<item>
-<pre> #sctp_prim{
- assoc_id = assoc_id(),
- addr = {IP, Port}
- }
- IP = ip_address()
- Port = port_number() </pre>
- <p>For the association given by <c>assoc_id</c>,
- <c>{IP,Port}</c> must be one of the peer's addresses.
- This option determines that the given address is
- treated by the local SCTP stack as the peer's primary address.</p>
+ <pre>
+#sctp_prim{
+ assoc_id = assoc_id(),
+ addr = {IP, Port}
+}
+ IP = ip_address()
+ Port = port_number()</pre>
+ <p>For the association specified by <c>assoc_id</c>,
+ <c>{IP,Port}</c> must be one of the peer addresses.
+ This option determines that the specified address is treated by
+ the local SCTP stack as the primary address of the peer.</p>
</item>
<tag><c>{sctp_set_peer_primary_addr, #sctp_setpeerprim{}}</c></tag>
<item>
-<pre> #sctp_setpeerprim{
- assoc_id = assoc_id(),
- addr = {IP, Port}
- }
- IP = ip_address()
- Port = port_number() </pre>
- <p>When set, informs the peer that it should use <c>{IP, Port}</c>
+ <pre>
+#sctp_setpeerprim{
+ assoc_id = assoc_id(),
+ addr = {IP, Port}
+}
+ IP = ip_address()
+ Port = port_number()</pre>
+ <p>When set, informs the peer to use <c>{IP, Port}</c>
as the primary address of the local endpoint for the association
- given by <c>assoc_id</c>.</p>
+ specified by <c>assoc_id</c>.</p>
<marker id="option-sctp_adaptation_layer"></marker>
</item>
<tag><c>{sctp_adaptation_layer, #sctp_setadaptation{}}</c></tag>
<item>
<marker id="record-sctp_setadaptation"></marker>
-<pre> #sctp_setadaptation{
- adaptation_ind = integer()
- } </pre>
- <p>When set, requests that the local endpoint uses the value given by
- <c>adaptation_ind</c> as the Adaptation Indication parameter for
- establishing new associations. See
- <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">RFC2960</url> and
- <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets API Extenstions for SCTP</url> for more details.</p>
+ <pre>
+#sctp_setadaptation{
+ adaptation_ind = integer()
+}</pre>
+ <p>When set, requests that the local endpoint uses the value specified
+ by <c>adaptation_ind</c> as the Adaptation Indication parameter for
+ establishing new associations. For details, see
+ <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">RFC 2960</url>
+ and
+ <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets
+          API Extensions for SCTP</url>.</p>
</item>
<tag><c>{sctp_peer_addr_params, #sctp_paddrparams{}}</c></tag>
<item>
-<pre> #sctp_paddrparams{
- assoc_id = assoc_id(),
- address = {IP, Port},
- hbinterval = integer(),
- pathmaxrxt = integer(),
- pathmtu = integer(),
- sackdelay = integer(),
- flags = list()
- }
- IP = ip_address()
- Port = port_number() </pre>
- <p>This option determines various per-address parameters for
- the association given by <c>assoc_id</c> and the peer address
- <c>address</c> (the SCTP protocol supports multi-homing,
- so more than 1 address can correspond to a given association).</p>
- <list type="bulleted">
- <item>
- <p><c>hbinterval</c>: heartbeat interval, in milliseconds;</p>
- </item>
- <item>
- <p><c>pathmaxrxt</c>: max number of retransmissions
- before this address is considered unreachable (and an
- alternative address is selected);</p>
- </item>
- <item>
- <p><c>pathmtu</c>: fixed Path MTU, if automatic discovery is
- disabled (see <c>flags</c> below);</p>
- </item>
- <item>
- <p><c>sackdelay</c>: delay in milliseconds for SAC messages
- (if the delay is enabled, see <c>flags</c> below);</p>
- </item>
- <item>
- <p><c>flags</c>: the following flags are available:</p>
- <list type="bulleted">
- <item>
- <p><c>hb_enable</c>: enable heartbeat; </p>
- </item>
- <item>
- <p><c>hb_disable</c>: disable heartbeat;</p>
- </item>
- <item>
- <p><c>hb_demand</c>: initiate heartbeat immediately;</p>
- </item>
- <item>
- <p><c>pmtud_enable</c>: enable automatic Path MTU discovery;</p>
- </item>
- <item>
- <p><c>pmtud_disable</c>: disable automatic Path MTU discovery;</p>
- </item>
- <item>
- <p><c>sackdelay_enable</c>: enable SAC delay;</p>
- </item>
- <item>
- <p><c>sackdelay_disable</c>: disable SAC delay.</p>
- </item>
- </list>
+ <pre>
+#sctp_paddrparams{
+ assoc_id = assoc_id(),
+ address = {IP, Port},
+ hbinterval = integer(),
+ pathmaxrxt = integer(),
+ pathmtu = integer(),
+ sackdelay = integer(),
+ flags = list()
+}
+IP = ip_address()
+Port = port_number()</pre>
+ <p>Determines various per-address parameters for
+ the association specified by <c>assoc_id</c> and the peer address
+ <c>address</c> (the SCTP protocol supports multi-homing, so
+ more than one address can correspond to a specified association).</p>
+ <taglist>
+ <tag><c>hbinterval</c></tag>
+ <item><p>Heartbeat interval, in milliseconds</p></item>
+ <tag><c>pathmaxrxt</c></tag>
+ <item><p>Maximum number of retransmissions before this address is
+ considered unreachable (and an alternative address is selected)</p>
</item>
- </list>
+ <tag><c>pathmtu</c></tag>
+ <item><p>Fixed Path MTU, if automatic discovery is disabled (see
+ <c>flags</c> below)</p></item>
+ <tag><c>sackdelay</c></tag>
+ <item><p>Delay, in milliseconds, for SAC messages (if the delay is
+ enabled, see <c>flags</c> below)</p></item>
+ <tag><c>flags</c></tag>
+ <item><p>The following flags are available:</p>
+ <taglist>
+ <tag><c>hb_enable</c></tag>
+ <item>Enables heartbeat</item>
+ <tag><c>hb_disable</c></tag>
+ <item>Disables heartbeat</item>
+ <tag><c>hb_demand</c></tag>
+ <item>Initiates heartbeat immediately</item>
+ <tag><c>pmtud_enable</c></tag>
+ <item>Enables automatic Path MTU discovery</item>
+ <tag><c>pmtud_disable</c></tag>
+ <item>Disables automatic Path MTU discovery</item>
+ <tag><c>sackdelay_enable</c></tag>
+ <item>Enables SAC delay</item>
+ <tag><c>sackdelay_disable</c></tag>
+ <item>Disables SAC delay</item>
+ </taglist></item>
+ </taglist>
</item>
<tag><c>{sctp_default_send_param, #sctp_sndrcvinfo{}}</c></tag>
<item>
<marker id="record-sctp_sndrcvinfo"></marker>
-<pre> #sctp_sndrcvinfo{
- stream = integer(),
- ssn = integer(),
- flags = list(),
- ppid = integer(),
- context = integer(),
- timetolive = integer(),
- tsn = integer(),
- cumtsn = integer(),
- assoc_id = assoc_id()
- } </pre>
+ <pre>
+#sctp_sndrcvinfo{
+ stream = integer(),
+ ssn = integer(),
+ flags = list(),
+ ppid = integer(),
+ context = integer(),
+ timetolive = integer(),
+ tsn = integer(),
+ cumtsn = integer(),
+ assoc_id = assoc_id()
+}</pre>
<p><c>#sctp_sndrcvinfo{}</c> is used both in this socket option, and as
ancillary data while sending or receiving SCTP messages. When
- set as an option, it provides a default values for subsequent
- <c>gen_sctp:send</c>calls on the association given by
- <c>assoc_id</c>. <c>assoc_id = 0</c> (default) indicates
- the whole endpoint. The following fields typically need
- to be specified by the sender:</p>
- <list type="bulleted">
- <item>
- <p><c>sinfo_stream</c>: stream number (0-base) within the association
- to send the messages through;</p>
- </item>
- <item>
- <p><c>sinfo_flags</c>: the following flags are recognised:</p>
- <list type="bulleted">
- <item>
- <p><c>unordered</c>: the message is to be sent unordered;</p>
- </item>
- <item>
- <p><c>addr_over</c>: the address specified in
- <c>gen_sctp:send</c> overwrites the primary peer address;</p>
- </item>
- <item>
- <p><c>abort</c>: abort the current association without
- flushing any unsent data;</p>
- </item>
- <item>
- <p><c>eof</c>: gracefully shut down the current
- association, with flushing of unsent data.</p>
- </item>
- </list>
- <p>Other fields are rarely used. See
- <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">RFC2960</url> and
- <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets API Extensions for SCTP</url> for full information.</p>
- </item>
- </list>
+ set as an option, it provides default values for subsequent
+ <seealso marker="#send/3"><c>send</c></seealso>
+ calls on the association specified by
+ <c>assoc_id</c>.</p>
+ <p><c>assoc_id = 0</c> (default) indicates
+ the whole endpoint.</p>
+ <p>The following fields typically must be specified by the sender:</p>
+ <taglist>
+ <tag><c>sinfo_stream</c></tag>
+            <item><p>Stream number (0-based) within the association
+              to send the messages through</p></item>
+ <tag><c>sinfo_flags</c></tag>
+ <item><p>The following flags are recognised:</p>
+ <taglist>
+ <tag><c>unordered</c></tag>
+ <item>The message is to be sent unordered</item>
+ <tag><c>addr_over</c></tag>
+ <item>The address specified in
+ <seealso marker="#send/3"><c>send</c></seealso>
+ overwrites the primary peer address</item>
+ <tag><c>abort</c></tag>
+ <item>Aborts the current association without flushing any unsent
+ data</item>
+ <tag><c>eof</c></tag>
+ <item>Gracefully shuts down the current association, with
+ flushing of unsent data</item>
+ </taglist>
+ <p>Other fields are rarely used. For complete information, see
+ <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">RFC 2960</url>
+ and
+ <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets
+ API Extensions for SCTP</url>.</p></item>
+ </taglist>
<marker id="option-sctp_events"></marker>
</item>
<tag><c>{sctp_events, #sctp_event_subscribe{}}</c></tag>
<item>
<marker id="record-sctp_event_subscribe"></marker>
-<pre> #sctp_event_subscribe{
- data_io_event = true | false,
- association_event = true | false,
- address_event = true | false,
- send_failure_event = true | false,
- peer_error_event = true | false,
- shutdown_event = true | false,
- partial_delivery_event = true | false,
- adaptation_layer_event = true | false
- } </pre>
+ <pre>
+#sctp_event_subscribe{
+ data_io_event = true | false,
+ association_event = true | false,
+ address_event = true | false,
+ send_failure_event = true | false,
+ peer_error_event = true | false,
+ shutdown_event = true | false,
+ partial_delivery_event = true | false,
+ adaptation_layer_event = true | false
+}</pre>
<p>This option determines which
<seealso marker="#sctp_events">SCTP Events</seealso> are to be
- received (via <seealso marker="#recv/1">recv/*</seealso>)
- along with the data. The only
- exception is <c>data_io_event</c> which enables or disables
- receiving of
- <seealso marker="#record-sctp_sndrcvinfo">#sctp_sndrcvinfo{}</seealso>
+ received (through
+ <seealso marker="#recv/1"><c>recv/*</c></seealso>)
+ along with the data. The only exception is <c>data_io_event</c>,
+ which enables or disables receiving of
+ <seealso marker="#record-sctp_sndrcvinfo"><c>#sctp_sndrcvinfo{}</c></seealso>
ancillary data, not events.
By default, all flags except <c>adaptation_layer_event</c> are
enabled, although <c>sctp_data_io_event</c> and
@@ -873,201 +949,185 @@
</item>
<tag><c>{sctp_delayed_ack_time, #sctp_assoc_value{}}</c></tag>
<item>
-<pre> #sctp_assoc_value{
- assoc_id = assoc_id(),
- assoc_value = integer()
- } </pre>
+ <pre>
+#sctp_assoc_value{
+ assoc_id = assoc_id(),
+ assoc_value = integer()
+}</pre>
<p>Rarely used. Determines the ACK time
- (given by <c>assoc_value</c> in milliseconds) for
- the given association or the whole endpoint
+ (specified by <c>assoc_value</c>, in milliseconds) for
+ the specified association or the whole endpoint
if <c>assoc_value = 0</c> (default).</p>
</item>
<tag><c>{sctp_status, #sctp_status{}}</c></tag>
<item>
-<pre> #sctp_status{
- assoc_id = assoc_id(),
- state = atom(),
- rwnd = integer(),
- unackdata = integer(),
- penddata = integer(),
- instrms = integer(),
- outstrms = integer(),
- fragmentation_point = integer(),
- primary = #sctp_paddrinfo{}
- } </pre>
+ <pre>
+#sctp_status{
+ assoc_id = assoc_id(),
+ state = atom(),
+ rwnd = integer(),
+ unackdata = integer(),
+ penddata = integer(),
+ instrms = integer(),
+ outstrms = integer(),
+ fragmentation_point = integer(),
+ primary = #sctp_paddrinfo{}
+}</pre>
<p>This option is read-only. It determines the status of
- the SCTP association given by <c>assoc_id</c>. Possible values of
- <c>state</c> follows. The state designations are mostly
- self-explanatory. <c>state_empty</c> is the default which means
- that no other state is active:</p>
- <list type="bulleted">
- <item>
- <p><c>sctp_state_empty</c></p>
- </item>
- <item>
- <p><c>sctp_state_closed</c></p>
- </item>
- <item>
- <p><c>sctp_state_cookie_wait</c></p>
- </item>
- <item>
- <p><c>sctp_state_cookie_echoed</c></p>
- </item>
- <item>
- <p><c>sctp_state_established</c></p>
- </item>
- <item>
- <p><c>sctp_state_shutdown_pending</c></p>
- </item>
- <item>
- <p><c>sctp_state_shutdown_sent</c></p>
- </item>
- <item>
- <p><c>sctp_state_shutdown_received</c></p>
- </item>
- <item>
- <p><c>sctp_state_shutdown_ack_sent</c></p>
- </item>
- </list>
- <p>The semantics of other fields is the following:</p>
- <list type="bulleted">
- <item>
- <p><c>sstat_rwnd</c>: the association peer's current receiver
- window size;</p>
- </item>
- <item>
- <p><c>sstat_unackdata</c>: number of unacked data chunks;</p>
- </item>
- <item>
- <p><c>sstat_penddata</c>: number of data chunks pending receipt;</p>
- </item>
- <item>
- <p><c>sstat_instrms</c>: number of inbound streams;</p>
- </item>
- <item>
- <p><c>sstat_outstrms</c>: number of outbound streams;</p>
- </item>
- <item>
- <p><c>sstat_fragmentation_point</c>: message size at which SCTP
- fragmentation will occur;</p>
- </item>
- <item>
- <p><c>sstat_primary</c>: information on the current primary peer
- address (see below for the format of <c>#sctp_paddrinfo{}</c>).</p>
- </item>
- </list>
+ the SCTP association specified by <c>assoc_id</c>.
+ The following are the
+ possible values of <c>state</c> (the state designations are mostly
+ self-explanatory):</p>
+ <taglist>
+ <tag><c>sctp_state_empty</c></tag>
+ <item>Default. Means that no other state is active.</item>
+ <tag><c>sctp_state_closed</c></tag>
+ <item></item>
+ <tag><c>sctp_state_cookie_wait</c></tag>
+ <item></item>
+ <tag><c>sctp_state_cookie_echoed</c></tag>
+ <item></item>
+ <tag><c>sctp_state_established</c></tag>
+ <item></item>
+ <tag><c>sctp_state_shutdown_pending</c></tag>
+ <item></item>
+ <tag><c>sctp_state_shutdown_sent</c></tag>
+ <item></item>
+ <tag><c>sctp_state_shutdown_received</c></tag>
+ <item></item>
+ <tag><c>sctp_state_shutdown_ack_sent</c></tag>
+ <item></item>
+ </taglist>
+ <p>Semantics of the other fields:</p>
+ <taglist>
+ <tag><c>sstat_rwnd</c></tag>
+ <item>Current receiver window size of the association</item>
+ <tag><c>sstat_unackdata</c></tag>
+ <item>Number of unacked data chunks</item>
+ <tag><c>sstat_penddata</c></tag>
+ <item>Number of data chunks pending receipt</item>
+ <tag><c>sstat_instrms</c></tag>
+ <item>Number of inbound streams</item>
+ <tag><c>sstat_outstrms</c></tag>
+ <item>Number of outbound streams</item>
+ <tag><c>sstat_fragmentation_point</c></tag>
+ <item>Message size at which SCTP fragmentation occurs</item>
+ <tag><c>sstat_primary</c></tag>
+ <item>Information on the current primary peer address (see below for
+ the format of <c>#sctp_paddrinfo{}</c>)</item>
+ </taglist>
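+ <p>For illustration, a minimal sketch of reading this option with
+ <c>inet:getopts/2</c>, assuming an open SCTP socket <c>Socket</c>,
+ an existing association ID <c>AssocId</c>, and that
+ <c>kernel/include/inet_sctp.hrl</c> is included for the record
+ definitions:</p>
+ <pre>
+{ok, [{sctp_status, Status}]} =
+    inet:getopts(Socket, [{sctp_status, #sctp_status{assoc_id = AssocId}}]),
+io:format("state: ~p, inbound streams: ~p~n",
+          [Status#sctp_status.state, Status#sctp_status.instrms]).</pre>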
<marker id="option-sctp_get_peer_addr_info"></marker>
</item>
<tag><c>{sctp_get_peer_addr_info, #sctp_paddrinfo{}}</c></tag>
<item>
<marker id="record-sctp_paddrinfo"></marker>
-<pre> #sctp_paddrinfo{
- assoc_id = assoc_id(),
- address = {IP, Port},
- state = inactive | active | unconfirmed,
- cwnd = integer(),
- srtt = integer(),
- rto = integer(),
- mtu = integer()
- }
- IP = ip_address()
- Port = port_number() </pre>
+ <pre>
+#sctp_paddrinfo{
+ assoc_id = assoc_id(),
+ address = {IP, Port},
+ state = inactive | active | unconfirmed,
+ cwnd = integer(),
+ srtt = integer(),
+ rto = integer(),
+ mtu = integer()
+}
+IP = ip_address()
+Port = port_number()</pre>
<p>This option is read-only. It determines the parameters specific to
- the peer's address given by <c>address</c> within the association
- given by <c>assoc_id</c>. The <c>address</c> field must be set by the
+ the peer address specified by <c>address</c> within the association
+ specified by <c>assoc_id</c>. Field <c>address</c> must be set by the
caller; all other fields are filled in on return.
If <c>assoc_id = 0</c> (default), the <c>address</c>
is automatically translated into the corresponding
- association ID. This option is rarely used; see
- <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">RFC2960</url> and
- <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets API Extensions for SCTP</url> for the semantics of all fields.</p>
+ association ID. This option is rarely used.
+ For the semantics of all fields, see
+ <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">RFC 2960</url>
+ and
+ <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets
+ API Extensions for SCTP</url>.</p>
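+ <p>As a sketch (assuming an open SCTP socket <c>Socket</c> and a known
+ peer address <c>{PeerIP, PeerPort}</c>), the option can be read with
+ <c>inet:getopts/2</c> as follows:</p>
+ <pre>
+{ok, [{sctp_get_peer_addr_info, Info}]} =
+    inet:getopts(Socket,
+                 [{sctp_get_peer_addr_info,
+                   #sctp_paddrinfo{assoc_id = 0,
+                                   address = {PeerIP, PeerPort}}}]),
+io:format("peer state: ~p, srtt: ~p~n",
+          [Info#sctp_paddrinfo.state, Info#sctp_paddrinfo.srtt]).</pre>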
</item>
</taglist>
</section>
<section>
<marker id="examples"></marker>
- <title>SCTP EXAMPLES</title>
- <list type="bulleted">
- <item>
- <p>Example of an Erlang SCTP Server which receives SCTP messages and
- prints them on the standard output:</p>
-<pre> -module(sctp_server).
-
- -export([server/0,server/1,server/2]).
- -include_lib("kernel/include/inet.hrl").
- -include_lib("kernel/include/inet_sctp.hrl").
-
- server() -&gt;
- server(any, 2006).
-
- server([Host,Port]) when is_list(Host), is_list(Port) -&gt;
- {ok, #hostent{h_addr_list = [IP|_]}} = inet:gethostbyname(Host),
- io:format("~w -&gt; ~w~n", [Host, IP]),
- server([IP, list_to_integer(Port)]).
-
- server(IP, Port) when is_tuple(IP) orelse IP == any orelse IP == loopback,
- is_integer(Port) -&gt;
- {ok,S} = gen_sctp:open(Port, [{recbuf,65536}, {ip,IP}]),
- io:format("Listening on ~w:~w. ~w~n", [IP,Port,S]),
- ok = gen_sctp:listen(S, true),
- server_loop(S).
-
- server_loop(S) -&gt;
- case gen_sctp:recv(S) of
- {error, Error} -&gt;
- io:format("SCTP RECV ERROR: ~p~n", [Error]);
- Data -&gt;
- io:format("Received: ~p~n", [Data])
- end,
- server_loop(S). </pre>
- </item>
- <item>
- <p>Example of an Erlang SCTP Client which interacts with the above Server.
- Note that in this example, the Client creates an association with
- the Server with 5 outbound streams. For this reason, sending of
- "Test 0" over Stream 0 succeeds, but sending of "Test 5"
- over Stream 5 fails. The client then <c>abort</c>s the association,
- which results in the corresponding Event being received on
- the Server side.</p>
-<pre> -module(sctp_client).
-
- -export([client/0, client/1, client/2]).
- -include_lib("kernel/include/inet.hrl").
- -include_lib("kernel/include/inet_sctp.hrl").
+ <title>SCTP Examples</title>
+ <p>Example of an Erlang SCTP server that receives SCTP messages and
+ prints them on the standard output:</p>
+ <pre>
+-module(sctp_server).
+
+-export([server/0,server/1,server/2]).
+-include_lib("kernel/include/inet.hrl").
+-include_lib("kernel/include/inet_sctp.hrl").
+
+server() -&gt;
+ server(any, 2006).
+
+server([Host,Port]) when is_list(Host), is_list(Port) -&gt;
+ {ok, #hostent{h_addr_list = [IP|_]}} = inet:gethostbyname(Host),
+ io:format("~w -&gt; ~w~n", [Host, IP]),
+ server([IP, list_to_integer(Port)]).
+
+server(IP, Port) when is_tuple(IP) orelse IP == any orelse IP == loopback,
+ is_integer(Port) -&gt;
+ {ok,S} = gen_sctp:open(Port, [{recbuf,65536}, {ip,IP}]),
+ io:format("Listening on ~w:~w. ~w~n", [IP,Port,S]),
+ ok = gen_sctp:listen(S, true),
+ server_loop(S).
+
+server_loop(S) -&gt;
+ case gen_sctp:recv(S) of
+ {error, Error} -&gt;
+ io:format("SCTP RECV ERROR: ~p~n", [Error]);
+ Data -&gt;
+ io:format("Received: ~p~n", [Data])
+ end,
+ server_loop(S).</pre>
+ <p>Example of an Erlang SCTP client interacting with the above server.
+ Notice that in this example the client creates an association with
+ the server with 5 outbound streams. Therefore, sending of
+ <c>"Test 0"</c> over stream 0 succeeds, but sending of <c>"Test 5"</c>
+ over stream 5 fails. The client then <c>abort</c>s the association,
+ which results in the corresponding event being received on
+ the server side.</p>
+ <pre>
+-module(sctp_client).
+
+-export([client/0, client/1, client/2]).
+-include_lib("kernel/include/inet.hrl").
+-include_lib("kernel/include/inet_sctp.hrl").
+
+client() -&gt;
+ client([localhost]).
+
+client([Host]) -&gt;
+ client(Host, 2006);
- client() -&gt;
- client([localhost]).
-
- client([Host]) -&gt;
- client(Host, 2006);
-
- client([Host, Port]) when is_list(Host), is_list(Port) -&gt;
- client(Host,list_to_integer(Port)),
- init:stop().
-
- client(Host, Port) when is_integer(Port) -&gt;
- {ok,S} = gen_sctp:open(),
- {ok,Assoc} = gen_sctp:connect
- (S, Host, Port, [{sctp_initmsg,#sctp_initmsg{num_ostreams=5}}]),
- io:format("Connection Successful, Assoc=~p~n", [Assoc]),
-
- io:write(gen_sctp:send(S, Assoc, 0, &lt;&lt;"Test 0"&gt;&gt;)),
- io:nl(),
- timer:sleep(10000),
- io:write(gen_sctp:send(S, Assoc, 5, &lt;&lt;"Test 5"&gt;&gt;)),
- io:nl(),
- timer:sleep(10000),
- io:write(gen_sctp:abort(S, Assoc)),
- io:nl(),
-
- timer:sleep(1000),
- gen_sctp:close(S). </pre>
- </item>
- <item>
- <p>A very simple Erlang SCTP Client which uses the
- connect_init API.</p>
-<pre>-module(ex3).
+client([Host, Port]) when is_list(Host), is_list(Port) -&gt;
+ client(Host,list_to_integer(Port)),
+ init:stop().
+
+client(Host, Port) when is_integer(Port) -&gt;
+ {ok,S} = gen_sctp:open(),
+ {ok,Assoc} = gen_sctp:connect
+ (S, Host, Port, [{sctp_initmsg,#sctp_initmsg{num_ostreams=5}}]),
+ io:format("Connection Successful, Assoc=~p~n", [Assoc]),
+
+ io:write(gen_sctp:send(S, Assoc, 0, &lt;&lt;"Test 0"&gt;&gt;)),
+ io:nl(),
+ timer:sleep(10000),
+ io:write(gen_sctp:send(S, Assoc, 5, &lt;&lt;"Test 5"&gt;&gt;)),
+ io:nl(),
+ timer:sleep(10000),
+ io:write(gen_sctp:abort(S, Assoc)),
+ io:nl(),
+
+ timer:sleep(1000),
+ gen_sctp:close(S).</pre>
+ <p>A simple Erlang SCTP client that uses the <c>connect_init</c> API:</p>
+ <pre>
+-module(ex3).
-export([client/4]).
-include_lib("kernel/include/inet.hrl").
@@ -1099,7 +1159,7 @@ client_loop(S, Peer1, Port1, AssocId1, Peer2, Port2, AssocId2) -&gt;
io:format("Association 2 connect result: ~p. AssocId: ~p~n",
[SAC#sctp_assoc_change.state, SAC#sctp_assoc_change.assoc_id]),
client_loop(S, Peer1, Port1, AssocId1, Peer2, Port2,
- SAC#sctp_assoc_change.assoc_id);
+ SAC#sctp_assoc_change.assoc_id);
{sctp, S, Peer1, Port1, Data} -&gt;
io:format("Association 1: received ~p~n", [Data]),
@@ -1118,20 +1178,19 @@ client_loop(S, Peer1, Port1, AssocId1, Peer2, Port2, AssocId2) -&gt;
after 5000 -&gt;
ok
- end.
-</pre>
- </item>
- </list>
+ end.</pre>
</section>
<section>
<marker id="seealso"></marker>
- <title>SEE ALSO</title>
- <p><seealso marker="inet">inet(3)</seealso>,
- <seealso marker="gen_tcp">gen_tcp(3)</seealso>,
- <seealso marker="gen_udp">gen_udp(3)</seealso>,
- <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">RFC2960</url> (Stream Control Transmission Protocol),
- <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets API Extensions for SCTP.</url></p>
+ <title>See Also</title>
+ <p><seealso marker="gen_tcp"><c>gen_tcp(3)</c></seealso>,
+ <seealso marker="gen_udp"><c>gen_udp(3)</c></seealso>,
+ <seealso marker="inet"><c>inet(3)</c></seealso>,
+ <url href="http://www.rfc-archive.org/getrfc.php?rfc=2960">RFC 2960</url>
+ (Stream Control Transmission Protocol),
+ <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">Sockets
+ API Extensions for SCTP</url></p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/gen_tcp.xml b/lib/kernel/doc/src/gen_tcp.xml
index 6a19e76c4f..24d63693fd 100644
--- a/lib/kernel/doc/src/gen_tcp.xml
+++ b/lib/kernel/doc/src/gen_tcp.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1997</year><year>2013</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -21,7 +21,6 @@
limitations under the License.
</legalnotice>
-
<title>gen_tcp</title>
<prepared>[email protected]</prepared>
<docno></docno>
@@ -29,13 +28,13 @@
<rev>A</rev>
</header>
<module>gen_tcp</module>
- <modulesummary>Interface to TCP/IP sockets</modulesummary>
+ <modulesummary>Interface to TCP/IP sockets.</modulesummary>
<description>
- <p>The <c>gen_tcp</c> module provides functions for communicating
+ <p>This module provides functions for communicating
with sockets using the TCP/IP protocol.</p>
- <p>The following code fragment provides a simple example of
+ <p>The following code fragment is a simple example of
a client connecting to a server at port 5678, transferring a
- binary and closing the connection:</p>
+ binary, and closing the connection:</p>
<code type="none">
client() ->
SomeHostInNet = "localhost", % to make it runnable on one machine
@@ -43,8 +42,8 @@ client() ->
[binary, {packet, 0}]),
ok = gen_tcp:send(Sock, "Some Data"),
ok = gen_tcp:close(Sock).</code>
- <p>At the other end a server is listening on port 5678, accepts
- the connection and receives the binary:</p>
+ <p>At the other end, a server is listening on port 5678, accepts
+ the connection, and receives the binary:</p>
<code type="none">
server() ->
{ok, LSock} = gen_tcp:listen(5678, [binary, {packet, 0},
@@ -52,6 +51,7 @@ server() ->
{ok, Sock} = gen_tcp:accept(LSock),
{ok, Bin} = do_recv(Sock, []),
ok = gen_tcp:close(Sock),
+ ok = gen_tcp:close(LSock),
Bin.
do_recv(Sock, Bs) ->
@@ -61,7 +61,8 @@ do_recv(Sock, Bs) ->
{error, closed} ->
{ok, list_to_binary(Bs)}
end.</code>
- <p>For more examples, see the <seealso marker="#examples">examples</seealso> section.</p>
+ <p>For more examples, see section
+ <seealso marker="#examples">Examples</seealso>.</p>
</description>
<datatypes>
@@ -69,6 +70,32 @@ do_recv(Sock, Bs) ->
<name name="option"/>
</datatype>
<datatype>
+ <name name="pktoptions_value"/>
+ <desc>
+ <p>
+ If the platform implements the IPv4 option
+ <c>IP_PKTOPTIONS</c>, or the IPv6 option
+ <c>IPV6_PKTOPTIONS</c> or <c>IPV6_2292PKTOPTIONS</c> for the socket,
+ this value is returned from
+ <seealso marker="inet#getopts/2"><c>inet:getopts/2</c></seealso>
+ when called with the option name
+ <seealso marker="#type-option_name"><c>pktoptions</c></seealso>.
+ </p>
+ <note>
+ <p>
+ This option appears to be very Linux-specific,
+ and its presence in future Linux kernel versions
+ is uncertain, as the option is part of RFC 2292,
+ which was obsoleted long ago (2003) by RFC 3542
+ that <em>explicitly</em> removes this possibility to get
+ packet information from a stream socket.
+ For comparison: it has existed in FreeBSD but is now removed,
+ at least since FreeBSD 10.
+ </p>
+ </note>
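+ <p>For illustration, a sketch of fetching the value on a platform that
+ supports it (<c>Sock</c> is assumed to be a connected socket; the
+ contents of <c>Opts</c> are platform dependent):</p>
+ <code type="none">
+case inet:getopts(Sock, [pktoptions]) of
+    {ok, [{pktoptions, Opts}]} ->
+        io:format("packet options: ~p~n", [Opts]);
+    Other ->
+        io:format("no packet options: ~p~n", [Other])
+end.</code>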
+ </desc>
+ </datatype>
+ <datatype>
<name name="option_name"/>
</datatype>
<datatype>
@@ -79,9 +106,9 @@ do_recv(Sock, Bs) ->
</datatype>
<datatype>
<name>socket()</name>
- <desc>
- <p><marker id="type-socket"/>
- As returned by accept/1,2 and connect/3,4.</p>
+ <desc><p>As returned by
+ <seealso marker="#accept/1"><c>accept/1,2</c></seealso> and
+ <seealso marker="#connect/3"><c>connect/3,4</c></seealso>.</p>
<marker id="connect"></marker>
</desc>
</datatype>
@@ -89,285 +116,294 @@ do_recv(Sock, Bs) ->
<funcs>
<func>
+ <name name="accept" arity="1"/>
+ <name name="accept" arity="2"/>
+ <fsummary>Accept an incoming connection request on a listening socket.</fsummary>
+ <type_desc variable="ListenSocket">Returned by
+ <seealso marker="#listen/2"><c>listen/2</c></seealso>.
+ </type_desc>
+ <desc>
+ <p>Accepts an incoming connection request on a listening socket.
+ <c><anno>Socket</anno></c> must be a socket returned from
+ <seealso marker="#listen/2"><c>listen/2</c></seealso>.
+ <c><anno>Timeout</anno></c> specifies a time-out value in
+ milliseconds. Defaults to <c>infinity</c>.</p>
+ <p>Returns:</p>
+ <list type="bulleted">
+ <item><p><c>{ok, <anno>Socket</anno>}</c> if a connection is
+ established</p></item>
+ <item><p><c>{error, closed}</c> if <c><anno>ListenSocket</anno></c>
+ is closed</p></item>
+ <item><p><c>{error, timeout}</c> if no connection is established
+ within the specified time</p></item>
+ <item><p><c>{error, system_limit}</c> if all available ports in the
+ Erlang emulator are in use</p></item>
+ <item><p>A POSIX error value if something else goes wrong, see
+ <seealso marker="inet"><c>inet(3)</c></seealso> for possible
+ error values</p></item>
+ </list>
+ <p>Packets can be sent to the returned socket <c><anno>Socket</anno></c>
+ using
+ <seealso marker="#send/2"><c>send/2</c></seealso>.
+ Packets sent from the peer are delivered as messages (unless
+ <c>{active, false}</c> is specified in the option list for the
+ listening socket, in which case packets are retrieved by calling
+ <seealso marker="#recv/2"><c>recv/2</c></seealso>):</p>
+ <code type="none">
+{tcp, Socket, Data}</code>
+ <note>
+ <p>The <c>accept</c> call does
+ <em>not</em> have to be issued from the socket owner
+ process. Using version 5.5.3 and higher of the emulator,
+ multiple simultaneous accept calls can be issued from
+ different processes, which allows for a pool of acceptor
+ processes handling incoming connections.</p>
+ </note>
+ </desc>
+ </func>
+
+ <func>
+ <name name="close" arity="1"/>
+ <fsummary>Close a TCP socket.</fsummary>
+ <desc>
+ <p>Closes a TCP socket.</p>
+ <p>Note that in most implementations of TCP, doing a <c>close</c> does
+ not guarantee that any data sent is delivered to the recipient before
+ the close is detected at the remote side. If you want to guarantee
+ delivery of the data to the recipient, there are two common ways to
+ achieve this.</p>
+ <list type="ordered">
+ <item><p>Use <seealso marker="#shutdown/2">
+ <c>gen_tcp:shutdown(Sock, write)</c></seealso> to signal that
+ no more data is to be sent and wait for the read side of the
+ socket to be closed.</p>
+ </item>
+ <item><p>Use the socket option <seealso marker="inet#packet">
+ <c>{packet, N}</c></seealso> (or something similar) to make
+ it possible for the receiver to close the connection when it
+ knows it has received all the data.</p>
+ </item>
+ </list>
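+ <p>A minimal sketch of the first alternative, assuming <c>Sock</c> is a
+ connected socket in passive mode and the peer closes its end when it
+ has read all data:</p>
+ <code type="none">
+Data = "last chunk of data",
+ok = gen_tcp:send(Sock, Data),
+ok = gen_tcp:shutdown(Sock, write),
+%% Wait for the peer to close its side of the connection.
+{error, closed} = gen_tcp:recv(Sock, 0),
+ok = gen_tcp:close(Sock).</code>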
+ </desc>
+ </func>
+
+ <func>
<name name="connect" arity="3"/>
<name name="connect" arity="4"/>
- <fsummary>Connect to a TCP port</fsummary>
+ <fsummary>Connect to a TCP port.</fsummary>
<desc>
<p>Connects to a server on TCP port <c><anno>Port</anno></c> on the host
- with IP address <c><anno>Address</anno></c>. The <c><anno>Address</anno></c> argument
- can be either a hostname, or an IP address.</p>
- <p>The available options are:</p>
+ with IP address <c><anno>Address</anno></c>. Argument
+ <c><anno>Address</anno></c> can be a hostname or an IP address.</p>
+ <p>The following options are available:</p>
<taglist>
- <tag><c>{ip, ip_address()}</c></tag>
- <item>
- <p>If the host has several network interfaces, this option
- specifies which one to use.</p>
+ <tag><c>{ip, Address}</c></tag>
+ <item><p>If the host has many network interfaces, this option
+ specifies which one to use.</p></item>
+ <tag><c>{ifaddr, Address}</c></tag>
+ <item><p>Same as <c>{ip, Address}</c>. If the host has many
+ network interfaces, this option specifies which one to use.</p>
</item>
-
- <tag><c>{ifaddr, ip_address()}</c></tag>
- <item>
- <p>Same as <c>{ip, ip_address()}</c>. If the host has several network interfaces, this option
- specifies which one to use.</p>
- </item>
-
<tag><c>{fd, integer() >= 0}</c></tag>
- <item>
- <p>If a socket has somehow been connected without using
- <c>gen_tcp</c>, use this option to pass the file
- descriptor for it. If <c>{ip, ip_address()}</c>
- and/or <c>{port, port_number()}</c> is combined with
- this option the fd will be bound to the given interface
- and port before connecting. If these options are not given
- it is assumed that the fd is already bound appropriately.
- </p>
- </item>
-
+ <item><p>If a socket has somehow been connected without using
+ <c>gen_tcp</c>, use this option to pass the file descriptor
+ for it. If <c>{ip, Address}</c> and/or
+ <c>{port, port_number()}</c> is combined with this option, the
+ <c>fd</c> is bound to the specified interface and port before
+ connecting. If these options are not specified, it is assumed that
+ the <c>fd</c> is already bound appropriately.</p></item>
<tag><c>inet</c></tag>
- <item>
- <p>Set up the socket for IPv4.</p>
- </item>
-
- <tag><c>inet6</c></tag>
- <item>
- <p>Set up the socket for IPv6.</p>
+ <item><p>Sets up the socket for IPv4.</p></item>
+ <tag><c>inet6</c></tag>
+ <item><p>Sets up the socket for IPv6.</p></item>
+ <tag><c>local</c></tag>
+ <item>
+ <p>
+ Sets up a Unix Domain Socket. See
+ <seealso marker="inet#type-local_address">
+ <c>inet:local_address()</c>
+ </seealso>
+ </p>
</item>
-
<tag><c>{port, Port}</c></tag>
- <item>
- <p>Specify which local port number to use.</p>
- </item>
-
- <tag><c>{tcp_module, module()}</c></tag>
- <item> <p>
- Override which callback module is used. Defaults to
- <c>inet_tcp</c> for IPv4 and <c>inet6_tcp</c> for IPv6.
- </p>
- </item>
-
+ <item><p>Specifies which local port number to use.</p></item>
+ <tag><c>{tcp_module, module()}</c></tag>
+ <item><p>Overrides which callback module is used. Defaults to
+ <c>inet_tcp</c> for IPv4 and <c>inet6_tcp</c> for IPv6.</p></item>
<tag><c>Opt</c></tag>
- <item>
- <p>See
- <seealso marker="inet#setopts/2">inet:setopts/2</seealso>.</p>
+ <item><p>See
+ <seealso marker="inet#setopts/2"><c>inet:setopts/2</c></seealso>.</p>
</item>
</taglist>
<p>Packets can be sent to the returned socket <c><anno>Socket</anno></c>
- using <c>send/2</c>. Packets sent from the peer are delivered
- as messages:</p>
+ using <seealso marker="#send/2"><c>send/2</c></seealso>.
+ Packets sent from the peer are delivered as messages:</p>
<code type="none">
{tcp, Socket, Data}</code>
- <p>If the socket is in <c>{active, N}</c> mode (see <seealso marker="inet#setopts/2">
- inet:setopts/2</seealso> for details) and its message counter
- drops to 0, the following message is delivered to indicate that the
+ <p>If the socket is in <c>{active, N}</c> mode (see
+ <seealso marker="inet#setopts/2"><c>inet:setopts/2</c></seealso>
+ for details) and its message counter drops to <c>0</c>, the following
+ message is delivered to indicate that the
socket has transitioned to passive (<c>{active, false}</c>) mode:</p>
<code type="none">
{tcp_passive, Socket}</code>
<p>If the socket is closed, the following message is delivered:</p>
<code type="none">
{tcp_closed, Socket}</code>
- <p>If an error occurs on the socket, the following message is
- delivered:</p>
+ <p>If an error occurs on the socket, the following message is delivered
+ (unless <c>{active, false}</c> is specified in the option list for
+ the socket, in which case packets are retrieved by calling
+ <seealso marker="#recv/2"><c>recv/2</c></seealso>):</p>
<code type="none">
{tcp_error, Socket, Reason}</code>
- <p>unless <c>{active, false}</c> is specified in the option list
- for the socket, in which case packets are retrieved by
- calling <c>recv/2</c>.</p>
- <p>The optional <c><anno>Timeout</anno></c> parameter specifies a timeout in
- milliseconds. The default value is <c>infinity</c>.</p>
+ <p>The optional <c><anno>Timeout</anno></c> parameter specifies a
+ time-out in milliseconds. Defaults to <c>infinity</c>.</p>
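+ <p>As a sketch, connecting in <c>{active, once}</c> mode and handling
+ the messages described above (host name and port number are
+ placeholders):</p>
+ <code type="none">
+{ok, Sock} = gen_tcp:connect("localhost", 5678, [binary, {active, once}]),
+receive
+    {tcp, Sock, Data} ->
+        io:format("received ~p~n", [Data]);
+    {tcp_closed, Sock} ->
+        io:format("socket closed~n", []);
+    {tcp_error, Sock, Reason} ->
+        io:format("socket error: ~p~n", [Reason])
+end.</code>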
<note>
- <p>The default values for options given to <c>connect</c> can
+ <p>The default values for options specified to <c>connect</c> can
be affected by the Kernel configuration parameter
- <c>inet_default_connect_options</c>. See
- <seealso marker="inet">inet(3)</seealso> for details.</p>
+ <c>inet_default_connect_options</c>. For details, see
+ <seealso marker="inet"><c>inet(3)</c></seealso>.</p>
</note>
</desc>
</func>
+
+ <func>
+ <name name="controlling_process" arity="2"/>
+ <fsummary>Change controlling process of a socket.</fsummary>
+ <desc>
+ <p>Assigns a new controlling process <c><anno>Pid</anno></c> to
+ <c><anno>Socket</anno></c>. The controlling process is the process
+ that receives messages from the socket. If called by any other
+ process than the current controlling process,
+ <c>{error, not_owner}</c> is returned. If the process identified
+ by <c><anno>Pid</anno></c> is not an existing local pid,
+ <c>{error, badarg}</c> is returned. <c>{error, badarg}</c> may also
+ be returned in some cases when <c><anno>Socket</anno></c> is closed
+ during the execution of this function.</p>
+ <p>If the socket is set in active mode, this function
+ will transfer any messages in the mailbox of the caller
+ to the new controlling process.
+ If any other process is interacting with the socket while
+ the transfer is happening, the transfer may not work correctly
+ and messages may remain in the caller's mailbox. For instance,
+ changing the socket's active mode before the transfer is complete
+ may cause this.</p>
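+ <p>A sketch of handing over an active-mode socket to a newly spawned
+ owner process (the caller is assumed to be the current controlling
+ process, for example the one that called <c>connect</c> or
+ <c>accept</c>):</p>
+ <code type="none">
+start_owner(Sock) ->
+    Pid = spawn(fun() -> owner_loop(Sock) end),
+    ok = gen_tcp:controlling_process(Sock, Pid),
+    Pid.
+
+owner_loop(Sock) ->
+    receive
+        {tcp, Sock, Data} ->
+            io:format("owner received ~p~n", [Data]),
+            owner_loop(Sock);
+        {tcp_closed, Sock} ->
+            ok
+    end.</code>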
+ </desc>
+ </func>
+
<func>
<name name="listen" arity="2"/>
- <fsummary>Set up a socket to listen on a port</fsummary>
+ <fsummary>Set up a socket to listen on a port.</fsummary>
<desc>
- <p>Sets up a socket to listen on the port <c><anno>Port</anno></c> on
+ <p>Sets up a socket to listen on port <c><anno>Port</anno></c> on
the local host.</p>
- <p>If <c><anno>Port</anno> == 0</c>, the underlying OS assigns an available
- port number, use <c>inet:port/1</c> to retrieve it.</p>
- <p>The available options are:</p>
+ <p>If <c><anno>Port</anno> == 0</c>, the underlying OS assigns an
+ available port number, use
+ <seealso marker="inet#port/1"><c>inet:port/1</c></seealso>
+ to retrieve it.</p>
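+ <p>For example, a minimal sketch of listening on an ephemeral port and
+ retrieving the assigned port number:</p>
+ <code type="none">
+{ok, LSock} = gen_tcp:listen(0, [binary, {active, false}]),
+{ok, Port} = inet:port(LSock),
+io:format("listening on port ~p~n", [Port]).</code>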
+ <p>The following options are available:</p>
<taglist>
<tag><c>list</c></tag>
- <item>
- <p>Received <c>Packet</c> is delivered as a list.</p>
- </item>
+ <item><p>Received <c>Packet</c> is delivered as a list.</p></item>
<tag><c>binary</c></tag>
- <item>
- <p>Received <c>Packet</c> is delivered as a binary.</p>
- </item>
+ <item><p>Received <c>Packet</c> is delivered as a binary.</p></item>
<tag><c>{backlog, B}</c></tag>
- <item>
- <p><c>B</c> is an integer &gt;= 0. The backlog value defaults
- to 5. The backlog value defines the maximum length that
- the queue of pending connections may grow to.</p>
- </item>
- <tag><c>{ip, ip_address()}</c></tag>
- <item>
- <p>If the host has several network interfaces, this option
- specifies which one to listen on.</p>
- </item>
+ <item><p><c>B</c> is an integer &gt;= <c>0</c>. The backlog value
+ defines the maximum length that the queue of pending connections
+ can grow to. Defaults to <c>5</c>.</p></item>
+ <tag><c>{ip, Address}</c></tag>
+ <item><p>If the host has many network interfaces, this option
+ specifies which one to listen on.</p></item>
<tag><c>{port, Port}</c></tag>
- <item>
- <p>Specify which local port number to use.</p>
- </item>
+ <item><p>Specifies which local port number to use.</p></item>
<tag><c>{fd, Fd}</c></tag>
- <item>
- <p>If a socket has somehow been connected without using
- <c>gen_tcp</c>, use this option to pass the file
- descriptor for it.</p>
+ <item><p>If a socket has somehow been connected without using
+ <c>gen_tcp</c>, use this option to pass the file
+ descriptor for it.</p></item>
+ <tag><c>{ifaddr, Address}</c></tag>
+ <item><p>Same as <c>{ip, Address}</c>. If the host has many
+ network interfaces, this option specifies which one to use.</p>
</item>
-
- <tag><c>{ifaddr, ip_address()}</c></tag>
- <item>
- <p>Same as <c>{ip, ip_address()}</c>. If the host has several network interfaces, this option
- specifies which one to use.</p>
- </item>
-
<tag><c>inet6</c></tag>
- <item>
- <p>Set up the socket for IPv6.</p>
- </item>
+ <item><p>Sets up the socket for IPv6.</p></item>
<tag><c>inet</c></tag>
- <item>
- <p>Set up the socket for IPv4.</p>
- </item>
-
- <tag><c>{tcp_module, module()}</c></tag>
- <item> <p>
- Override which callback module is used. Defaults to
- <c>inet_tcp</c> for IPv4 and <c>inet6_tcp</c> for IPv6.
- </p>
- </item>
-
+ <item><p>Sets up the socket for IPv4.</p></item>
+ <tag><c>{tcp_module, module()}</c></tag>
+ <item><p>Overrides which callback module is used. Defaults to
+ <c>inet_tcp</c> for IPv4 and <c>inet6_tcp</c> for IPv6.</p></item>
<tag><c>Opt</c></tag>
- <item>
- <p>See
- <seealso marker="inet#setopts/2">inet:setopts/2</seealso>.</p>
- </item>
+ <item><p>See
+ <seealso marker="inet#setopts/2"><c>inet:setopts/2</c></seealso>.
+ </p></item>
</taglist>
- <p>The returned socket <c><anno>ListenSocket</anno></c> can only be used in
- calls to <c>accept/1,2</c>.</p>
+ <p>The returned socket <c><anno>ListenSocket</anno></c> should be used
+ in calls to <seealso marker="#accept/1"><c>accept/1,2</c></seealso> to
+ accept incoming connection requests.</p>
<note>
- <p>The default values for options given to <c>listen</c> can
+ <p>The default values for options specified to <c>listen</c> can
be affected by the Kernel configuration parameter
- <c>inet_default_listen_options</c>. See
- <seealso marker="inet">inet(3)</seealso> for details.</p>
+ <c>inet_default_listen_options</c>. For details, see
+ <seealso marker="inet"><c>inet(3)</c></seealso>.</p>
</note>
</desc>
</func>
- <func>
- <name name="accept" arity="1"/>
- <name name="accept" arity="2"/>
- <fsummary>Accept an incoming connection request on a listen socket</fsummary>
- <type_desc variable="ListenSocket">Returned by <c>listen/2</c>.
- </type_desc>
- <desc>
- <p>Accepts an incoming connection request on a listen socket.
- <c><anno>Socket</anno></c> must be a socket returned from <c>listen/2</c>.
- <c><anno>Timeout</anno></c> specifies a timeout value in ms, defaults to
- <c>infinity</c>.</p>
- <p>Returns <c>{ok, <anno>Socket</anno>}</c> if a connection is established,
- or <c>{error, closed}</c> if <c><anno>ListenSocket</anno></c> is closed,
- or <c>{error, timeout}</c> if no connection is established
- within the specified time,
- or <c>{error, system_limit}</c> if all available ports in the
- Erlang emulator are in use. May also return a POSIX error
- value if something else goes wrong, see inet(3) for possible
- error values.</p>
- <p>Packets can be sent to the returned socket <c><anno>Socket</anno></c>
- using <c>send/2</c>. Packets sent from the peer are delivered
- as messages:</p>
- <code type="none">
-{tcp, Socket, Data}</code>
- <p>unless <c>{active, false}</c> was specified in the option
- list for the listen socket, in which case packets are
- retrieved by calling <c>recv/2</c>.</p>
- <note>
- <p>It is worth noting that the <c>accept</c> call does
- <em>not</em> have to be issued from the socket owner
- process. Using version 5.5.3 and higher of the emulator,
- multiple simultaneous accept calls can be issued from
- different processes, which allows for a pool of acceptor
- processes handling incoming connections.</p>
- </note>
- </desc>
- </func>
- <func>
- <name name="send" arity="2"/>
- <fsummary>Send a packet</fsummary>
- <desc>
- <p>Sends a packet on a socket. </p>
- <p>There is no <c>send</c> call with timeout option, you use the
- <c>send_timeout</c> socket option if timeouts are
- desired. See the <seealso marker="#examples">examples</seealso> section.</p>
- </desc>
- </func>
+
<func>
<name name="recv" arity="2"/>
<name name="recv" arity="3"/>
- <fsummary>Receive a packet from a passive socket</fsummary>
+ <fsummary>Receive a packet from a passive socket.</fsummary>
<type_desc variable="HttpPacket">See the description of
- <c>HttpPacket</c> in <seealso marker="erts:erlang#decode_packet/3">
- erlang:decode_packet/3</seealso>.
+ <c>HttpPacket</c> in
+ <seealso marker="erts:erlang#decode_packet/3"><c>erlang:decode_packet/3</c></seealso>
+ in ERTS.
</type_desc>
<desc>
- <p>This function receives a packet from a socket in passive
- mode. A closed socket is indicated by a return value
+ <p>Receives a packet from a socket in passive
+ mode. A closed socket is indicated by return value
<c>{error, closed}</c>.</p>
- <p>The <c><anno>Length</anno></c> argument is only meaningful when
+ <p>Argument <c><anno>Length</anno></c> is only meaningful when
the socket is in <c>raw</c> mode and denotes the number of
- bytes to read. If <c><anno>Length</anno></c> = 0, all available bytes are
- returned. If <c><anno>Length</anno></c> &gt; 0, exactly <c><anno>Length</anno></c>
- bytes are returned, or an error; possibly discarding less
- than <c><anno>Length</anno></c> bytes of data when the socket gets closed
- from the other side.</p>
- <p>The optional <c><anno>Timeout</anno></c> parameter specifies a timeout in
- milliseconds. The default value is <c>infinity</c>.</p>
- </desc>
- </func>
- <func>
- <name name="controlling_process" arity="2"/>
- <fsummary>Change controlling process of a socket</fsummary>
- <desc>
- <p>Assigns a new controlling process <c><anno>Pid</anno></c> to
- <c><anno>Socket</anno></c>. The controlling process is the process which
- receives messages from the socket. If called by any other
- process than the current controlling process,
- <c>{error, not_owner}</c> is returned.</p>
+ bytes to read. If <c><anno>Length</anno></c> is <c>0</c>, all
+ available bytes are returned.
+ If <c><anno>Length</anno></c> &gt; <c>0</c>, exactly
+ <c><anno>Length</anno></c> bytes are returned, or an error;
+ possibly discarding less than <c><anno>Length</anno></c> bytes of
+ data when the socket is closed from the other side.</p>
+ <p>The optional <c><anno>Timeout</anno></c> parameter specifies a
+ time-out in milliseconds. Defaults to <c>infinity</c>.</p>
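+ <p>A sketch of a passive-mode receive with a 5-second time-out,
+ assuming <c>Sock</c> is a connected socket opened with
+ <c>{active, false}</c>:</p>
+ <code type="none">
+case gen_tcp:recv(Sock, 0, 5000) of
+    {ok, Packet} ->
+        io:format("received ~p~n", [Packet]);
+    {error, timeout} ->
+        io:format("no data within 5 seconds~n", []);
+    {error, closed} ->
+        io:format("socket closed~n", [])
+end.</code>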
</desc>
</func>
+
<func>
- <name name="close" arity="1"/>
- <fsummary>Close a TCP socket</fsummary>
+ <name name="send" arity="2"/>
+ <fsummary>Send a packet.</fsummary>
<desc>
- <p>Closes a TCP socket.</p>
+ <p>Sends a packet on a socket.</p>
+ <p>There is no <c>send</c> call with a time-out option, use socket
+ option <c>send_timeout</c> if time-outs are desired. See section
+ <seealso marker="#examples">Examples</seealso>.</p>
</desc>
</func>
+
<func>
<name name="shutdown" arity="2"/>
- <fsummary>Asynchronously close a socket</fsummary>
+ <fsummary>Asynchronously close a socket.</fsummary>
<desc>
- <p>Close a socket in one or two directions.</p>
- <p><c><anno>How</anno> == write</c> means closing the socket for writing,
- reading from it is still possible.</p>
- <p>If <c><anno>How</anno> == read</c>, or there is no outgoing
+ <p>Closes a socket in one or two directions.</p>
+ <p><c><anno>How</anno> == write</c> means closing the socket for
+ writing, reading from it is still possible.</p>
+ <p>If <c><anno>How</anno> == read</c> or there is no outgoing
data buffered in the <c><anno>Socket</anno></c> port,
- then the socket is shutdown immediately and any error encountered
+ the socket is shut down immediately and any error encountered
is returned in <c><anno>Reason</anno></c>.</p>
- <p>If there is data buffered in the socket port, then the attempt
+ <p>If there is data buffered in the socket port, the attempt
to shutdown the socket is postponed until that data is written to the
- kernel socket send buffer. Any errors encountered will result
- in the socket being closed and <c>{error, closed}</c> being returned
- on the next
- <seealso marker="gen_tcp#recv/2">recv/2</seealso> or
- <seealso marker="gen_tcp#send/2">send/2</seealso>.</p>
- <p>To be able to handle that the peer has done a shutdown on
- the write side, the <c>{exit_on_close, false}</c> option
- is useful.</p>
+ kernel socket send buffer. If any errors are encountered, the socket
+ is closed and <c>{error, closed}</c> is returned on the next
+ <seealso marker="#recv/2"><c>recv/2</c></seealso> or
+ <seealso marker="#send/2"><c>send/2</c></seealso>.</p>
+ <p>Option <c>{exit_on_close, false}</c> is useful if the peer has done
+ a shutdown on the write side.</p>
</desc>
</func>
</funcs>
@@ -375,14 +411,14 @@ do_recv(Sock, Bs) ->
<section>
<title>Examples</title>
<marker id="examples"></marker>
- <p>The following example illustrates usage of the {active,once}
- option and multiple accepts by implementing a server as a
- number of worker processes doing accept on one single listen
- socket. The start/2 function takes the number of worker
- processes as well as a port number to listen for incoming
- connections on. If <c>LPort</c> is specified as <c>0</c>, an
- ephemeral portnumber is used, why the start function returns
- the actual portnumber allocated:</p>
+ <p>The following example illustrates use of option
+ <c>{active,once}</c> and multiple accepts by implementing a server
+ as a number of worker processes doing accept on a single listening
+ socket. Function <c>start/2</c> takes the number of worker
+ processes and the port number on which to listen for incoming
+ connections. If <c>LPort</c> is specified as <c>0</c>, an
+ ephemeral port number is used, which is why the start function
+ returns the actual port number allocated:</p>
<code type="none">
start(Num,LPort) ->
case gen_tcp:listen(LPort,[{active, false},{packet,2}]) of
@@ -421,7 +457,7 @@ loop(S) ->
io:format("Socket ~w closed [~w]~n",[S,self()]),
ok
end.</code>
- <p>A simple client could look like this:</p>
+ <p>Example of a simple client:</p>
<code type="none">
client(PortNo,Message) ->
{ok,Sock} = gen_tcp:connect("localhost",PortNo,[{active,false},
@@ -430,30 +466,29 @@ client(PortNo,Message) ->
A = gen_tcp:recv(Sock,0),
gen_tcp:close(Sock),
A.</code>
- <p>The fact that the <c>send</c> call does not accept a timeout
- option, is because timeouts on send is handled through the socket
+ <p>The <c>send</c> call does not accept a time-out
+ option because time-outs on send are handled through socket
option <c>send_timeout</c>. The behavior of a send operation with
- no receiver is in a very high degree defined by the underlying TCP
- stack, as well as the network infrastructure. If one wants to write
- code that handles a hanging receiver that might eventually cause
- the sender to hang on a <c>send</c> call, one writes code like
- the following.</p>
- <p>Consider a process that receives data from a client process that
- is to be forwarded to a server on the network. The process has
- connected to the server via TCP/IP and does not get any acknowledge
- for each message it sends, but has to rely on the send timeout
- option to detect that the other end is unresponsive. We could use
- the <c>send_timeout</c> option when connecting:</p>
+ no receiver is mainly defined by the underlying TCP
+ stack and the network infrastructure. To handle
+ a hanging receiver that can eventually cause
+ the sender to hang on a <c>send</c> call, write code as in the following example.</p>
+ <p>Consider a process that receives data from a client process
+ to be forwarded to a server on the network. The process is
+ connected to the server through TCP/IP and does not get any acknowledgement
+ for each message it sends, but has to rely on the send time-out
+ option to detect that the other end is unresponsive. Option
+ <c>send_timeout</c> can be used when connecting:</p>
<code type="none">
- ...
- {ok,Sock} = gen_tcp:connect(HostAddress, Port,
- [{active,false},
- {send_timeout, 5000},
- {packet,2}]),
- loop(Sock), % See below
- ... </code>
- <p>In the loop where requests are handled, we can now detect send
- timeouts:</p>
+...
+{ok,Sock} = gen_tcp:connect(HostAddress, Port,
+ [{active,false},
+ {send_timeout, 5000},
+ {packet,2}]),
+ loop(Sock), % See below
+...</code>
+ <p>In the loop where requests are handled, send time-outs can now be
+ detected:</p>
<code type="none">
loop(Sock) ->
receive
@@ -477,11 +512,11 @@ loop(Sock) ->
Client ! {self(), data_sent},
loop(Sock)
end
- end. </code>
- <p>Usually it would suffice to detect timeouts on receive, as most
+ end.</code>
+ <p>Usually it suffices to detect time-outs on receive, as most
protocols include some sort of acknowledgment from the server,
- but if the protocol is strictly one way, the <c>send_timeout</c>
- option comes in handy!</p>
+ but if the protocol is strictly one way, option <c>send_timeout</c>
+ comes in handy.</p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/gen_udp.xml b/lib/kernel/doc/src/gen_udp.xml
index 79cd87dcef..840ca3c188 100644
--- a/lib/kernel/doc/src/gen_udp.xml
+++ b/lib/kernel/doc/src/gen_udp.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1997</year><year>2013</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -29,9 +29,9 @@
<rev>A</rev>
</header>
<module>gen_udp</module>
- <modulesummary>Interface to UDP sockets</modulesummary>
+ <modulesummary>Interface to UDP sockets.</modulesummary>
<description>
- <p>The <c>gen_udp</c> module provides functions for communicating
+ <p>This module provides functions for communicating
with sockets using the UDP protocol.</p>
</description>
@@ -45,175 +45,181 @@
<datatype>
<name>socket()</name>
<desc>
- <p><marker id="type-socket"/>As returned by open/1,2.</p>
+ <p>As returned by
+ <seealso marker="#open/1"><c>open/1,2</c></seealso>.</p>
</desc>
</datatype>
</datatypes>
<funcs>
<func>
+ <name name="close" arity="1"/>
+ <fsummary>Close a UDP socket.</fsummary>
+ <desc>
+ <p>Closes a UDP socket.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="controlling_process" arity="2"/>
+ <fsummary>Change controlling process of a socket.</fsummary>
+ <desc>
+ <p>Assigns a new controlling process <c><anno>Pid</anno></c> to
+ <c><anno>Socket</anno></c>. The controlling process is the process
+ that receives messages from the socket. If called by any other
+ process than the current controlling process,
+ <c>{error, not_owner}</c> is returned. If the process identified
+ by <c><anno>Pid</anno></c> is not an existing local pid,
+ <c>{error, badarg}</c> is returned. <c>{error, badarg}</c> may also
+ be returned in some cases when <c><anno>Socket</anno></c> is closed
+ during the execution of this function.</p>
+ </desc>
+ </func>
+
+ <func>
<name name="open" arity="1"/>
<name name="open" arity="2"/>
- <fsummary>Associate a UDP port number with the process calling it</fsummary>
+ <fsummary>Associate a UDP port number with the process calling it.</fsummary>
<desc>
- <p>Associates a UDP port number (<c><anno>Port</anno></c>) with the calling
- process.</p>
- <p>The available options are:</p>
+ <p>Associates a UDP port number (<c><anno>Port</anno></c>) with the
+ calling process.</p>
+ <p>The following options are available:</p>
<taglist>
<tag><c>list</c></tag>
- <item>
- <p>Received <c>Packet</c> is delivered as a list.</p>
- </item>
+ <item><p>Received <c>Packet</c> is delivered as a list.</p></item>
<tag><c>binary</c></tag>
- <item>
- <p>Received <c>Packet</c> is delivered as a binary.</p>
- </item>
- <tag><c>{ip, ip_address()}</c></tag>
- <item>
- <p>If the host has several network interfaces, this option
- specifies which one to use.</p>
- </item>
-
- <tag><c>{ifaddr, ip_address()}</c></tag>
- <item>
- <p>Same as <c>{ip, ip_address()}</c>. If the host has several network interfaces, this option
- specifies which one to use.</p>
- </item>
-
-
+ <item><p>Received <c>Packet</c> is delivered as a binary.</p></item>
+ <tag><c>{ip, Address}</c></tag>
+ <item><p>If the host has many network interfaces, this option
+ specifies which one to use.</p></item>
+ <tag><c>{ifaddr, Address}</c></tag>
+ <item><p>Same as <c>{ip, Address}</c>. If the host has many
+ network interfaces, this option specifies which one to
+ use.</p></item>
<tag><c>{fd, integer() >= 0}</c></tag>
- <item>
- <p>If a socket has somehow been opened without using
- <c>gen_udp</c>, use this option to pass the file
- descriptor for it. If <c><anno>Port</anno></c> is not set to 0
- and/or <c>{ip, ip_address()}</c> is combined with this option
- the fd will be bound to the given interface and port after being
- opened. If these options are not given it is assumed that the fd
- is already bound appropriately.
- </p>
- </item>
+ <item><p>If a socket has somehow been opened without using
+ <c>gen_udp</c>, use this option to pass the file descriptor
+ for it. If <c><anno>Port</anno></c> is not set to <c>0</c> and/or
+ <c>{ip, ip_address()}</c> is combined with this option, the
+ <c>fd</c> is bound to the specified interface and port after it has
+ been opened. If these options are not specified, it is assumed that
+ the <c>fd</c> is already bound appropriately.</p></item>
<tag><c>inet6</c></tag>
- <item>
- <p>Set up the socket for IPv6.</p>
- </item>
+ <item><p>Sets up the socket for IPv6.</p></item>
<tag><c>inet</c></tag>
- <item>
- <p>Set up the socket for IPv4.</p>
+ <item><p>Sets up the socket for IPv4.</p></item>
+ <tag><c>local</c></tag>
+ <item>
+ <p>
+ Sets up a Unix Domain Socket. See
+ <seealso marker="inet#type-local_address">
+ <c>inet:local_address()</c>
+ </seealso>
+ </p>
</item>
-
- <tag><c>{udp_module, module()}</c></tag>
- <item> <p>
- Override which callback module is used. Defaults to
- <c>inet_udp</c> for IPv4 and <c>inet6_udp</c> for IPv6.
- </p>
- </item>
-
+ <tag><c>{udp_module, module()}</c></tag>
+ <item><p>Overrides which callback module is used. Defaults to
+ <c>inet_udp</c> for IPv4 and <c>inet6_udp</c> for IPv6.</p></item>
<tag><c>{multicast_if, Address}</c></tag>
- <item>
- <p>Set the local device for a multicast socket.</p>
- </item>
-
+ <item><p>Sets the local device for a multicast socket.</p></item>
<tag><c>{multicast_loop, true | false}</c></tag>
- <item>
- <p>
- When <c>true</c> sent multicast packets will be looped back to the local
- sockets.
- </p>
- </item>
-
+ <item><p>When <c>true</c>, sent multicast packets are looped back to
+ the local sockets.</p></item>
<tag><c>{multicast_ttl, Integer}</c></tag>
- <item>
- <p>
- The <c>multicast_ttl</c> option changes the time-to-live (TTL) for
- outgoing multicast datagrams in order to control the scope of the
- multicasts.
- </p>
- <p>
- Datagrams with a TTL of 1 are not forwarded beyond the local
- network.
- <br />Default: 1
- </p>
- </item>
-
- <tag><c>{add_membership, {MultiAddress, InterfaceAddress}}</c></tag>
- <item>
- <p>Join a multicast group. </p>
- </item>
-
- <tag><c>{drop_membership, {MultiAddress, InterfaceAddress}}</c></tag>
- <item>
- <p>Leave multicast group.</p>
- </item>
-
+ <item><p>Option <c>multicast_ttl</c> changes the time-to-live (TTL)
+ for outgoing multicast datagrams to control the scope of the
+ multicasts.</p>
+ <p>Datagrams with a TTL of 1 are not forwarded beyond the local
+ network. Defaults to <c>1</c>.</p></item>
+ <tag><c>{add_membership, {MultiAddress, InterfaceAddress}}</c></tag>
+ <item><p>Joins a multicast group.</p></item>
+ <tag><c>{drop_membership, {MultiAddress, InterfaceAddress}}</c></tag>
+ <item><p>Leaves a multicast group.</p></item>
<tag><c>Opt</c></tag>
- <item>
- <p>See
- <seealso marker="inet#setopts/2">inet:setopts/2</seealso>.</p>
- </item>
+ <item><p>See
+ <seealso marker="inet#setopts/2"><c>inet:setopts/2</c></seealso>.
+ </p></item>
</taglist>
<p>The returned socket <c><anno>Socket</anno></c> is used to send
- packets from this port with <c>send/4</c>. When UDP packets arrive
- at the opened port, if the socket is in an active mode the packets
+ packets from this port with
+ <seealso marker="#send/4"><c>send/4</c></seealso>.
+ When UDP packets arrive
+ at the opened port, if the socket is in an active mode, the packets
are delivered as messages to the controlling process:</p>
<code type="none">
-{udp, Socket, IP, InPortNo, Packet}</code>
+{udp, Socket, IP, InPortNo, Packet} % Without ancillary data
+{udp, Socket, IP, InPortNo, AncData, Packet} % With ancillary data
+ </code>
+ <p>
+ The message contains an <c>AncData</c> field
+ if any of the socket
+ <seealso marker="#type-option">options</seealso>
+ <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>,
+ <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso>
+ or
+ <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso>
+ are active, otherwise it does not.
+ </p>
<p>If the socket is not in an active mode, data can be
- retrieved via the <seealso marker="#recv/2">recv/2,3</seealso> calls.
- Note that arriving UDP packets that are longer than
- the receive buffer option specifies, might be truncated
+ retrieved through the
+ <seealso marker="#recv/2"><c>recv/2,3</c></seealso> calls.
+ Notice that arriving UDP packets that are longer than
+ the receive buffer option specifies can be truncated
without warning.</p>
- <p>When a socket in <c>{active, N}</c> mode (see <seealso marker="inet#setopts/2">
- inet:setopts/2</seealso> for details) transitions to passive
- (<c>{active, false}</c>) mode, the controlling process is notified by a
- message of the following form:</p>
+ <p>When a socket in <c>{active, N}</c> mode (see
+ <seealso marker="inet#setopts/2"><c>inet:setopts/2</c></seealso>
+ for details), transitions to passive (<c>{active, false}</c>) mode,
+ the controlling process is notified by a message of the following
+ form:</p>
<code type="none">
{udp_passive, Socket}</code>
<p><c>IP</c> and <c>InPortNo</c> define the address from which
- <c>Packet</c> came. <c>Packet</c> is a list of bytes if
- the option <c>list</c> was specified. <c>Packet</c> is a
- binary if the option <c>binary</c> was specified.</p>
+ <c>Packet</c> comes. <c>Packet</c> is a list of bytes if
+ option <c>list</c> is specified. <c>Packet</c> is a
+ binary if option <c>binary</c> is specified.</p>
<p>Default value for the receive buffer option is
<c>{recbuf, 8192}</c>.</p>
- <p>If <c><anno>Port</anno> == 0</c>, the underlying OS assigns a free UDP
- port, use <c>inet:port/1</c> to retrieve it.</p>
- </desc>
- </func>
- <func>
- <name name="send" arity="4"/>
- <fsummary>Send a packet</fsummary>
- <desc>
- <p>Sends a packet to the specified address and port.
- The <c><anno>Address</anno></c> argument can be either a hostname, or an
- IP address.</p>
+ <p>If <c><anno>Port</anno> == 0</c>, the underlying OS assigns a free
+ UDP port, use
+ <seealso marker="inet#port/1"><c>inet:port/1</c></seealso>
+ to retrieve it.</p>
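+ <p>A minimal sketch of a local round trip between two UDP sockets in
+ passive mode (the loopback address is assumed to be available):</p>
+ <code type="none">
+{ok, Receiver} = gen_udp:open(0, [{active, false}]),
+{ok, RPort} = inet:port(Receiver),
+{ok, Sender} = gen_udp:open(0),
+ok = gen_udp:send(Sender, {127,0,0,1}, RPort, "ping"),
+{ok, {_Addr, _Port, "ping"}} = gen_udp:recv(Receiver, 0, 5000),
+ok = gen_udp:close(Sender),
+ok = gen_udp:close(Receiver).</code>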
</desc>
</func>
+
<func>
<name name="recv" arity="2"/>
<name name="recv" arity="3"/>
- <fsummary>Receive a packet from a passive socket</fsummary>
+ <fsummary>Receive a packet from a passive socket.</fsummary>
<desc>
- <p>This function receives a packet from a socket in passive
- mode.</p>
- <p>The optional <c><anno>Timeout</anno></c> parameter specifies a timeout in
- milliseconds. The default value is <c>infinity</c>.</p>
- </desc>
- </func>
- <func>
- <name name="controlling_process" arity="2"/>
- <fsummary>Change controlling process of a socket</fsummary>
- <desc>
- <p>Assigns a new controlling process <c><anno>Pid</anno></c> to
- <c><anno>Socket</anno></c>. The controlling process is the process which
- receives messages from the socket. If called by any other
- process than the current controlling process,
- <c>{error, not_owner}</c> is returned.</p>
+ <p>
+ Receives a packet from a socket in passive mode. Optional parameter
+ <c><anno>Timeout</anno></c> specifies a time-out in milliseconds.
+ Defaults to <c>infinity</c>.
+ </p>
+ <p>
+ If any of the socket
+ <seealso marker="#type-option">options</seealso>
+ <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>,
+ <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso>
+ or
+ <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso>
+ are active, the <c><anno>RecvData</anno></c> tuple contains an
+ <c><anno>AncData</anno></c> field,
+ otherwise it does not.
+ </p>
</desc>
</func>
+
<func>
- <name name="close" arity="1"/>
- <fsummary>Close a UDP socket</fsummary>
+ <name name="send" arity="4"/>
+ <fsummary>Send a packet.</fsummary>
<desc>
- <p>Closes a UDP socket.</p>
+ <p>
+ Sends a packet to the specified address and port. Argument
+ <c><anno>Address</anno></c> can be a hostname or a socket address.
+ </p>
</desc>
</func>
</funcs>
diff --git a/lib/kernel/doc/src/global.xml b/lib/kernel/doc/src/global.xml
index bd75945115..4442741f54 100644
--- a/lib/kernel/doc/src/global.xml
+++ b/lib/kernel/doc/src/global.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2013</year>
+ <year>1996</year><year>2016</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -29,82 +29,67 @@
<rev></rev>
</header>
<module>global</module>
- <modulesummary>A Global Name Registration Facility</modulesummary>
+ <modulesummary>A global name registration facility.</modulesummary>
<description>
- <p>This documentation describes the Global module which consists
- of the following functionalities:</p>
-
+ <p>This module consists of the following services:</p>
<list type="bulleted">
- <item>registration of global names;</item>
- <item>global locks;</item>
- <item>maintenance of the fully connected network.</item>
+ <item>Registration of global names</item>
+ <item>Global locks</item>
+ <item>Maintenance of the fully connected network</item>
</list>
-
- <p>These services are controlled via the process
- <c>global_name_server</c> which exists on every node. The global
- name server is started automatically when a node is started.
+ <p>These services are controlled through the process
+ <c>global_name_server</c> that exists on every node. The global
+ name server starts automatically when a node is started.
In this context, <em>global</em> means over a system consisting
- of several Erlang nodes.</p>
-
+ of many Erlang nodes.</p>
<p>The ability to globally register names is a central concept in
the programming of distributed Erlang systems. In this module,
the equivalent of the <c>register/2</c> and <c>whereis/1</c>
- BIFs (for local name registration) are implemented, but for a
+ BIFs (for local name registration) are provided, but for a
network of Erlang nodes. A registered name is an alias for a
process identifier (pid). The global name server monitors
- globally registered pids. If a process terminates, the name will
- also be globally unregistered.</p>
-
+ globally registered pids. If a process terminates, the name is
+ also globally unregistered.</p>
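+ <p>A small sketch of registering and looking up a global name (run on
+ any node in the system; <c>my_service</c> is an arbitrary name):</p>
+ <code type="none">
+Pid = spawn(fun() -> receive stop -> ok end end),
+yes = global:register_name(my_service, Pid),
+Pid = global:whereis_name(my_service),
+global:send(my_service, stop).</code>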
<p>The registered names are stored in replica global name tables on
every node. There is no central storage point. Thus,
the translation of a name to a pid is fast, as it is always done
- locally. When any action in taken which results in a change to
- the global name table, all tables on other nodes are automatically
- updated.</p>
-
+ locally. For any action resulting in a change to the global name table,
+ all tables on other nodes are automatically updated.</p>
<p>Global locks have lock identities and are set on a specific
- resource. For instance, the specified resource could be a pid.
+ resource. For example, the specified resource can be a pid.
When a global lock is set, access to the locked resource is
- denied for all other resources other than the lock requester.</p>
-
- <p>Both the registration and lock functionalities are atomic. All
- nodes involved in these actions will have the same view of
+ denied for all resources other than the lock requester.</p>
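+ <p>A sketch of protecting a resource with a global lock, using
+ <c>global:set_lock/1</c> and <c>global:del_lock/1</c> (the resource
+ name <c>my_resource</c> is arbitrary):</p>
+ <code type="none">
+LockId = {my_resource, self()},
+true = global:set_lock(LockId),
+%% ... code that needs exclusive access to my_resource ...
+true = global:del_lock(LockId).</code>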
+ <p>Both the registration and lock services are atomic.
+ All nodes involved in these actions have the same view of
the information.</p>
-
<p>The global name server also performs the critical task of
- continuously monitoring changes in node configuration: if a node
- which runs a globally registered process goes down, the name
- will be globally unregistered. To this end the global name
+ continuously monitoring changes in node configuration. If a node
+ that runs a globally registered process goes down, the name
+ is globally unregistered. To this end, the global name
server subscribes to <c>nodeup</c> and <c>nodedown</c> messages
- sent from the <c>net_kernel</c> module. Relevant Kernel
+ sent from module <c>net_kernel</c>. Relevant Kernel
application variables in this context are <c>net_setuptime</c>,
<c>net_ticktime</c>, and <c>dist_auto_connect</c>. See also
- <seealso marker="kernel_app#net_setuptime">kernel(6)</seealso>.</p>
-
- <p>The name server will also maintain a fully connected network. For
+ <seealso marker="kernel_app#net_setuptime"><c>kernel(6)</c></seealso>.</p>
+ <p>The name server also maintains a fully connected network. For
example, if node <c>N1</c> connects to node <c>N2</c> (which is
already connected to <c>N3</c>), the global name servers on the
- nodes <c>N1</c> and <c>N3</c> will make sure that also <c>N1</c>
- and <c>N3</c> are connected. If this is not desired, the command
- line flag <c>-connect_all false</c> can be used (see also
- <seealso marker="erts:erl#connect_all">erl(1)</seealso>). In
- this case the name registration facility cannot be used, but the
- lock mechanism will still work.</p>
-
+ nodes <c>N1</c> and <c>N3</c> ensure that also <c>N1</c>
+ and <c>N3</c> are connected. If this is not desired,
+ command-line flag <c>-connect_all false</c> can be used (see also
+ <seealso marker="erts:erl#connect_all"><c>erl(1)</c></seealso>).
+ In this case, the name registration service cannot be used, but the
+ lock mechanism still works.</p>
<p>If the global name server fails to connect nodes (<c>N1</c> and
- <c>N3</c> in the example above) a warning event is sent to the
+ <c>N3</c> in the example), a warning event is sent to the
error logger. The presence of such an event does not exclude the
- possibility that the nodes will later connect--one can for
- example try the command <c>rpc:call(N1, net_adm, ping, [N2])</c> in
- the Erlang shell--but it indicates some kind of problem with
- the network.</p>
-
+ possibility that the nodes connect later (you can, for
+ example, try command <c>rpc:call(N1, net_adm, ping, [N2])</c> in
+ the Erlang shell), but it indicates a network problem.</p>
<note>
- <p>If the fully connected network is not set up properly, the
- first thing to try is to increase the value of
- <c>net_setuptime</c>.</p>
+ <p>If the fully connected network is not set up properly, try
+ first to increase the value of <c>net_setuptime</c>.</p>
</note>
-
</description>
<datatypes>
@@ -117,7 +102,7 @@
<func>
<name name="del_lock" arity="1"/>
<name name="del_lock" arity="2"/>
- <fsummary>Delete a lock</fsummary>
+ <fsummary>Delete a lock.</fsummary>
<desc>
<p>Deletes the lock <c><anno>Id</anno></c> synchronously.</p>
</desc>
@@ -125,11 +110,13 @@
<func>
<name name="notify_all_name" arity="3"/>
- <fsummary>Name resolving function that notifies both pids</fsummary>
+ <fsummary>Name resolving function that notifies both pids.</fsummary>
<desc>
- <p>This function can be used as a name resolving function for
- <c>register_name/3</c> and <c>re_register_name/3</c>. It
- unregisters both pids, and sends the message
+ <p>Can be used as a name resolving function for
+ <seealso marker="#register_name/3"><c>register_name/3</c></seealso>
+ and
+ <seealso marker="#re_register_name/3"><c>re_register_name/3</c></seealso>.</p>
+ <p>The function unregisters both pids and sends the message
<c>{global_name_conflict, <anno>Name</anno>, OtherPid}</c> to both
processes.</p>
</desc>
@@ -137,85 +124,97 @@
<func>
<name name="random_exit_name" arity="3"/>
- <fsummary>Name resolving function that kills one pid</fsummary>
+ <fsummary>Name resolving function that kills one pid.</fsummary>
<desc>
- <p>This function can be used as a name resolving function for
- <c>register_name/3</c> and <c>re_register_name/3</c>. It
- randomly chooses one of the pids for registration and kills
- the other one.</p>
+ <p>Can be used as a name resolving function for
+ <seealso marker="#register_name/3"><c>register_name/3</c></seealso>
+ and
+ <seealso marker="#re_register_name/3"><c>re_register_name/3</c></seealso>.</p>
+ <p>The function randomly selects one of the pids for registration and
+ kills the other one.</p>
</desc>
</func>
<func>
<name name="random_notify_name" arity="3"/>
- <fsummary>Name resolving function that notifies one pid</fsummary>
+ <fsummary>Name resolving function that notifies one pid.</fsummary>
<desc>
- <p>This function can be used as a name resolving function for
- <c>register_name/3</c> and <c>re_register_name/3</c>. It
- randomly chooses one of the pids for registration, and sends
- the message <c>{global_name_conflict, <anno>Name</anno>}</c> to the other
- pid.</p>
+ <p>Can be used as a name resolving function for
+ <seealso marker="#register_name/3"><c>register_name/3</c></seealso>
+ and
+ <seealso marker="#re_register_name/3"><c>re_register_name/3</c></seealso>.</p>
+ <p>The function randomly selects one of the pids for registration, and
+ sends the message <c>{global_name_conflict, <anno>Name</anno>}</c> to
+ the other pid.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="re_register_name" arity="2"/>
+ <name name="re_register_name" arity="3"/>
+ <fsummary>Atomically re-register a name.</fsummary>
+ <type name="method"/>
+ <type_desc name="method">{<c>Module</c>, <c>Function</c>}
+ is also allowed.
+ </type_desc>
+ <desc>
+ <p>Atomically changes the registered name <c><anno>Name</anno></c> on
+ all nodes to refer to <c><anno>Pid</anno></c>.</p>
+ <p>Function <c><anno>Resolve</anno></c> has the same behavior as in
+ <seealso marker="#register_name/2"><c>register_name/2,3</c></seealso>.
+ </p>
</desc>
</func>
<func>
<name name="register_name" arity="2"/>
<name name="register_name" arity="3"/>
- <fsummary>Globally register a name for a pid</fsummary>
+ <fsummary>Globally register a name for a pid.</fsummary>
<type name="method"/>
- <type_desc name="method">{<c>Module</c>, <c>Function</c>}
- is currently also allowed for backward compatibility, but its use is
- deprecated
+ <type_desc name="method">{<c>Module</c>, <c>Function</c>} is also
+ allowed for backward compatibility, but its use is deprecated.
</type_desc>
<desc>
- <p>Globally associates the name <c><anno>Name</anno></c> with a pid, that is,
- Globally notifies all nodes of a new global name in a network
+ <p>Globally associates name <c><anno>Name</anno></c> with a pid, that
+ is, globally notifies all nodes of a new global name in a network
of Erlang nodes.</p>
-
<p>When new nodes are added to the network, they are informed
of the globally registered names that already exist.
The network is also informed of any global names in newly
connected nodes. If any name clashes are discovered,
- the <c><anno>Resolve</anno></c> function is called. Its purpose is to
+ function <c><anno>Resolve</anno></c> is called. Its purpose is to
decide which pid is correct. If the function crashes, or
returns anything other than one of the pids, the name is
unregistered. This function is called once for each name
clash.</p>
-
<warning>
<p>If you plan to change code without restarting your system,
you must use an external fun (<c>fun Module:Function/Arity</c>)
- as the <c><anno>Resolve</anno></c> function; if you use a
- local fun you can never replace the code for the module that
- the fun belongs to.
- </p>
+ as function <c><anno>Resolve</anno></c>. If you use a
+ local fun, you can never replace the code for the module that
+ the fun belongs to.</p>
</warning>
-
- <p>There are three pre-defined resolve functions:
+ <p>Three predefined resolve functions exist:
<c>random_exit_name/3</c>, <c>random_notify_name/3</c>, and
- <c>notify_all_name/3</c>. If no <c><anno>Resolve</anno></c> function is
- defined, <c>random_exit_name</c> is used. This means that one
- of the two registered processes will be selected as correct
+ <c>notify_all_name/3</c>. If no <c><anno>Resolve</anno></c> function
+ is defined, <c>random_exit_name</c> is used. This means that one
+ of the two registered processes is selected as correct
while the other is killed.</p>
-
- <p>This function is completely synchronous. This means that
+ <p>This function is completely synchronous, that is,
when this function returns, the name is either registered on
all nodes or none.</p>
-
<p>The function returns <c>yes</c> if successful, <c>no</c> if
it fails. For example, <c>no</c> is returned if an attempt
is made to register an already registered process or to
register a process with a name that is already in use.</p>
-
<note>
- <p>Releases up to and including OTP R10 did not check if the
- process was already registered. As a consequence the
- global name table could become inconsistent. The old
+ <p>Releases up to and including Erlang/OTP R10 did not check if the
+ process was already registered. The global name table could
+ therefore become inconsistent. The old
(buggy) behavior can be chosen by giving the Kernel
application variable <c>global_multi_name_action</c> the
value <c>allow</c>.</p>
</note>
-
<p>If a process with a registered name dies, or the node goes
down, the name is unregistered on all nodes.</p>
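+        <p>The following is a brief sketch of registering a process
+          globally with an explicit resolve function (the name
+          <c>my_service</c> is chosen only for illustration):</p>
+        <code type="none"><![CDATA[
+start() ->
+    Pid = spawn(fun loop/0),
+    %% notify_all_name/3 is one of the predefined resolve functions
+    yes = global:register_name(my_service, Pid, fun global:notify_all_name/3),
+    Pid.
+
+loop() ->
+    receive
+        Msg ->
+            io:format("~p got ~p~n", [self(), Msg]),
+            loop()
+    end.]]></code>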
</desc>
@@ -223,38 +222,20 @@
<func>
<name name="registered_names" arity="0"/>
- <fsummary>All globally registered names</fsummary>
- <desc>
- <p>Returns a lists of all globally registered names.</p>
- </desc>
- </func>
-
- <func>
- <name name="re_register_name" arity="2"/>
- <name name="re_register_name" arity="3"/>
- <fsummary>Atomically re-register a name</fsummary>
- <type name="method"/>
- <type_desc name="method">{<c>Module</c>, <c>Function</c>}
- is also allowed
- </type_desc>
+ <fsummary>All globally registered names.</fsummary>
<desc>
- <p>Atomically changes the registered name <c><anno>Name</anno></c> on all
- nodes to refer to <c><anno>Pid</anno></c>.</p>
-
- <p>The <c><anno>Resolve</anno></c> function has the same behavior as in
- <c>register_name/2,3</c>.</p>
+ <p>Returns a list of all globally registered names.</p>
</desc>
</func>
<func>
<name name="send" arity="2"/>
- <fsummary>Send a message to a globally registered pid</fsummary>
+ <fsummary>Send a message to a globally registered pid.</fsummary>
<desc>
- <p>Sends the message <c><anno>Msg</anno></c> to the pid globally registered
+ <p>Sends message <c><anno>Msg</anno></c> to the pid globally registered
as <c><anno>Name</anno></c>.</p>
-
- <p>Failure: If <c><anno>Name</anno></c> is not a globally registered
- name, the calling function will exit with reason
+ <p>If <c><anno>Name</anno></c> is not a globally registered
+ name, the calling function exits with reason
<c>{badarg, {<anno>Name</anno>, <anno>Msg</anno>}}</c>.</p>
</desc>
</func>
@@ -263,7 +244,7 @@
<name name="set_lock" arity="1"/>
<name name="set_lock" arity="2"/>
<name name="set_lock" arity="3"/>
- <fsummary>Set a lock on the specified nodes</fsummary>
+ <fsummary>Set a lock on the specified nodes.</fsummary>
<type name="id"/>
<type name="retries"/>
<desc>
@@ -271,50 +252,48 @@
are specified) on <c><anno>ResourceId</anno></c> for
<c><anno>LockRequesterId</anno></c>. If a lock already exists on
<c><anno>ResourceId</anno></c> for another requester than
- <c><anno>LockRequesterId</anno></c>, and <c><anno>Retries</anno></c> is not equal to 0,
- the process sleeps for a while and will try to execute
- the action later. When <c><anno>Retries</anno></c> attempts have been made,
- <c>false</c> is returned, otherwise <c>true</c>. If
- <c><anno>Retries</anno></c> is <c>infinity</c>, <c>true</c> is eventually
- returned (unless the lock is never released).</p>
-
- <p>If no value for <c><anno>Retries</anno></c> is given, <c>infinity</c> is
- used.</p>
-
+ <c><anno>LockRequesterId</anno></c>, and <c><anno>Retries</anno></c>
+ is not equal to <c>0</c>, the process sleeps for a while and tries
+ to execute the action later. When <c><anno>Retries</anno></c>
+ attempts have been made, <c>false</c> is returned, otherwise
+ <c>true</c>. If <c><anno>Retries</anno></c> is <c>infinity</c>,
+ <c>true</c> is eventually returned (unless the lock is never
+ released).</p>
+ <p>If no value for <c><anno>Retries</anno></c> is specified,
+ <c>infinity</c> is used.</p>
<p>This function is completely synchronous.</p>
-
- <p>If a process which holds a lock dies, or the node goes
+ <p>If a process that holds a lock dies, or the node goes
down, the locks held by the process are deleted.</p>
-
<p>The global name server keeps track of all processes sharing
the same lock, that is, if two processes set the same lock,
both processes must delete the lock.</p>
-
<p>This function does not address the problem of a deadlock. A
deadlock can never occur as long as processes only lock one
- resource at a time. But if some processes try to lock two or
- more resources, a deadlock may occur. It is up to the
+ resource at a time. A deadlock can occur if some processes
+ try to lock two or more resources. It is up to the
application to detect and rectify a deadlock.</p>
-
<note>
- <p>Some values of <c><anno>ResourceId</anno></c> should be avoided or
- Erlang/OTP will not work properly. A list of resources to
- avoid: <c>global</c>, <c>dist_ac</c>,
- <c>mnesia_table_lock</c>, <c>mnesia_adjust_log_writes</c>,
- <c>pg2</c>.</p>
+ <p>Avoid the following values of <c><anno>ResourceId</anno></c>,
+ otherwise Erlang/OTP does not work properly:</p>
+ <list type="bulleted">
+ <item><c>dist_ac</c></item>
+ <item><c>global</c></item>
+ <item><c>mnesia_adjust_log_writes</c></item>
+ <item><c>mnesia_table_lock</c></item>
+ <item><c>pg2</c></item>
+ </list>
</note>
-
</desc>
</func>
<func>
<name name="sync" arity="0"/>
- <fsummary>Synchronize the global name server</fsummary>
+ <fsummary>Synchronize the global name server.</fsummary>
<desc>
<p>Synchronizes the global name server with all nodes known to
- this node. These are the nodes which are returned from
+ this node. These are the nodes that are returned from
<c>erlang:nodes()</c>. When this function returns,
- the global name server will receive global information from
+ the global name server receives global information from
all nodes. This function can be called when new nodes are
added to the network.</p>
<p>The only possible error reason <c>Reason</c> is
@@ -326,24 +305,25 @@
<name name="trans" arity="2"/>
<name name="trans" arity="3"/>
<name name="trans" arity="4"/>
- <fsummary>Micro transaction facility</fsummary>
+ <fsummary>Micro transaction facility.</fsummary>
<type name="retries"/>
<type name="trans_fun"/>
<desc>
- <p>Sets a lock on <c><anno>Id</anno></c> (using <c>set_lock/3</c>). If this
- succeeds, <c><anno>Fun</anno>()</c> is evaluated and the result <c><anno>Res</anno></c>
- is returned. Returns <c>aborted</c> if the lock attempt
- failed. If <c><anno>Retries</anno></c> is set to <c>infinity</c>,
- the transaction will not abort.</p>
-
- <p><c>infinity</c> is the default setting and will be used if
- no value is given for <c><anno>Retries</anno></c>.</p>
+ <p>Sets a lock on <c><anno>Id</anno></c> (using
+ <seealso marker="#set_lock/3"><c>set_lock/3</c></seealso>).
+ If this succeeds, <c><anno>Fun</anno>()</c> is evaluated and the
+ result <c><anno>Res</anno></c>
+ is returned. Returns <c>aborted</c> if the lock attempt fails.
+ If <c><anno>Retries</anno></c> is set to <c>infinity</c>,
+ the transaction does not abort.</p>
+ <p><c>infinity</c> is the default setting and is used if
+ no value is specified for <c><anno>Retries</anno></c>.</p>
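+          <p>For example (a sketch; <c>my_resource</c> is an arbitrary
+            resource name and <c>do_update/0</c> a hypothetical helper):</p>
+          <code type="none"><![CDATA[
+update() ->
+    case global:trans({my_resource, self()}, fun() -> do_update() end) of
+        aborted -> {error, lock_not_acquired};
+        Res     -> {ok, Res}
+    end.]]></code>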
</desc>
</func>
<func>
<name name="unregister_name" arity="1"/>
- <fsummary>Remove a globally registered name for a pid</fsummary>
+ <fsummary>Remove a globally registered name for a pid.</fsummary>
<desc>
<p>Removes the globally registered name <c><anno>Name</anno></c> from
the network of Erlang nodes.</p>
@@ -352,7 +332,7 @@
<func>
<name name="whereis_name" arity="1"/>
- <fsummary>Get the pid with a given globally registered name</fsummary>
+ <fsummary>Get the pid with a specified globally registered name.</fsummary>
<desc>
<p>Returns the pid with the globally registered name
<c><anno>Name</anno></c>. Returns <c>undefined</c> if the name is not
@@ -363,8 +343,8 @@
<section>
<title>See Also</title>
- <p><seealso marker="global_group">global_group(3)</seealso>,
- <seealso marker="net_kernel">net_kernel(3)</seealso></p>
+ <p><seealso marker="global_group"><c>global_group(3)</c></seealso>,
+ <seealso marker="net_kernel"><c>net_kernel(3)</c></seealso></p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/global_group.xml b/lib/kernel/doc/src/global_group.xml
index 4b21b0a14a..8f947b9adf 100644
--- a/lib/kernel/doc/src/global_group.xml
+++ b/lib/kernel/doc/src/global_group.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1998</year><year>2013</year>
+ <year>1998</year><year>2016</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -26,22 +26,22 @@
<prepared>Esko Vierum&auml;ki</prepared>
<docno></docno>
<date>1998-12-18</date>
- <rev>b</rev>
+ <rev>B</rev>
</header>
<module>global_group</module>
- <modulesummary>Grouping Nodes to Global Name Registration Groups</modulesummary>
+  <modulesummary>Grouping nodes into global name registration groups.</modulesummary>
<description>
- <p>The global group function makes it possible to group the nodes
- in a system into partitions, each partition having its own global
- name space, refer to <c>global(3)</c>. These partitions are
- called global groups.</p>
- <p>The main advantage of dividing systems to global groups is that
+ <p>This module makes it possible to partition the nodes of a
+ system into <em>global groups</em>. Each global group has its own
+ global namespace, see <seealso marker="global">
+ <c>global(3)</c></seealso>.</p>
+ <p>The main advantage of dividing systems into global groups is that
the background load decreases while the number of nodes to be
updated is reduced when manipulating globally registered names.</p>
<p>The Kernel configuration parameter <c>global_groups</c> defines
the global groups (see also
- <seealso marker="kernel_app">kernel(6)</seealso>,
- <seealso marker="config">config(4)</seealso>:</p>
+ <seealso marker="kernel_app#global_groups"><c>kernel(6)</c></seealso>
+ and <seealso marker="config"><c>config(4)</c></seealso>):</p>
<code type="none">
{global_groups, [GroupTuple :: group_tuple()]}</code>
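+    <p>For example (a configuration sketch; the group and node names are
+      chosen only for illustration), two global groups, one of them hidden,
+      can be defined as follows:</p>
+    <code type="none">
+{global_groups, [{group1, [node1@host1, node2@host1]},
+                 {group2, hidden, [node3@host2]}]}</code>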
<p>For the processes and nodes to run smoothly using the global
@@ -54,22 +54,24 @@
</item>
<item>
<p>All involved nodes must agree on the global group definition,
- or the behavior of the system is undefined.</p>
+ otherwise the behavior of the system is undefined.</p>
</item>
<item>
- <p><em>All</em> nodes in the system should belong to exactly
+ <p><em>All</em> nodes in the system must belong to exactly
one global group.</p>
</item>
</list>
- <p>In the following description, a <em>group node</em> is a node
+ <p>In the following descriptions, a <em>group node</em> is a node
belonging to the same global group as the local node.</p>
</description>
- <datatypes>
+
+ <datatypes>
<datatype>
<name name="group_tuple"/>
<desc>
<p>A <c>GroupTuple</c> without <c>PublishType</c> is the same as a
- <c>GroupTuple</c> with <c>PublishType == normal</c>.</p>
+ <c>GroupTuple</c> with <c>PublishType</c> equal to <c>normal</c>.
+ </p>
</desc>
</datatype>
<datatype>
@@ -78,52 +80,57 @@
<datatype>
<name name="publish_type"/>
<desc>
- <p>A node started with the command line flag <c>-hidden</c>, see
- <seealso marker="erts:erl">erl(1)</seealso>, is said to be a
- <em>hidden</em> node. A hidden node will establish hidden
+ <p>A node started with command-line flag <c>-hidden</c> (see
+ <seealso marker="erts:erl"><c>erl(1)</c></seealso>) is said
+ to be a <em>hidden</em> node. A hidden node establishes hidden
connections to nodes not part of the same global group, but
normal (visible) connections to nodes part of the same global
group.</p>
- <p>A global group defined with <c>PublishType == hidden</c>, is
- said to be a hidden global group. All nodes in a hidden global
- group are hidden nodes, regardless if they are started with
- the <c>-hidden</c> command line flag or not.</p>
+ <p>A global group defined with <c>PublishType</c> equal to
+ <c>hidden</c> is said to be a hidden global group.
+ All nodes in a hidden global
+ group are hidden nodes, whether they are started with
+ command-line flag <c>-hidden</c> or not.</p>
</desc>
</datatype>
<datatype>
<name name="name"/>
<desc><p>A registered name.</p></desc>
</datatype>
+
<datatype>
<name name="where"/>
</datatype>
</datatypes>
+
<funcs>
<func>
<name name="global_groups" arity="0"/>
- <fsummary>Return the global group names</fsummary>
+ <fsummary>Return the global group names.</fsummary>
<desc>
- <p>Returns a tuple containing the name of the global group
+ <p>Returns a tuple containing the name of the global group that
the local node belongs to, and the list of all other known
group names. Returns <c>undefined</c> if no global groups are
defined.</p>
</desc>
</func>
+
<func>
<name name="info" arity="0"/>
- <fsummary>Information about global groups</fsummary>
+ <fsummary>Information about global groups.</fsummary>
<type name="info_item"/>
<type name="sync_state"/>
<desc>
<p>Returns a list containing information about the global
- groups. Each element of the list is a tuple. The order of
- the tuples is not defined.</p>
+ groups. Each list element is a tuple. The order of
+ the tuples is undefined.</p>
<taglist>
<tag><c>{state, <anno>State</anno>}</c></tag>
<item>
<p>If the local node is part of a global group,
- <c><anno>State</anno> == synced</c>. If no global groups are defined,
- <c><anno>State</anno> == no_conf</c>.</p>
+ <c><anno>State</anno></c> is equal to <c>synced</c>.
+ If no global groups are defined,
+ <c><anno>State</anno></c> is equal to <c>no_conf</c>.</p>
</item>
<tag><c>{own_group_name, <anno>GroupName</anno>}</c></tag>
<item>
@@ -152,117 +159,131 @@
<tag><c>{other_groups, <anno>Groups</anno>}</c></tag>
<item>
<p><c><anno>Groups</anno></c> is a list of tuples
- <c>{<anno>GroupName</anno>, <anno>Nodes</anno>}</c>, specifying the name and nodes
+ <c>{<anno>GroupName</anno>, <anno>Nodes</anno>}</c>,
+ specifying the name and nodes
of the other global groups.</p>
</item>
<tag><c>{monitoring, <anno>Pids</anno>}</c></tag>
<item>
- <p>A list of pids, specifying the processes which have
+ <p>A list of pids, specifying the processes that have
subscribed to <c>nodeup</c> and <c>nodedown</c> messages.</p>
</item>
</taglist>
</desc>
</func>
+
<func>
<name name="monitor_nodes" arity="1"/>
- <fsummary>Subscribe to node status changes</fsummary>
+ <fsummary>Subscribe to node status changes.</fsummary>
<desc>
- <p>Depending on <c><anno>Flag</anno></c>, the calling process starts
- subscribing (<c><anno>Flag</anno> == true</c>) or stops subscribing
- (<c><anno>Flag</anno> == false</c>) to node status change messages.</p>
- <p>A process which has subscribed will receive the messages
+ <p>Depending on <c><anno>Flag</anno></c>, the calling process
+ starts subscribing (<c><anno>Flag</anno></c> equal to
+ <c>true</c>) or stops subscribing (<c><anno>Flag</anno></c>
+ equal to <c>false</c>) to node status change messages.</p>
+ <p>A process that has subscribed receives the messages
<c>{nodeup, Node}</c> and <c>{nodedown, Node}</c> when a
group node connects or disconnects, respectively.</p>
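+        <p>A sketch of a subscriber process (for illustration only):</p>
+        <code type="none"><![CDATA[
+start_group_monitor() ->
+    spawn(fun() ->
+                  ok = global_group:monitor_nodes(true),
+                  group_loop()
+          end).
+
+group_loop() ->
+    receive
+        {nodeup, Node} ->
+            io:format("group node up: ~p~n", [Node]),
+            group_loop();
+        {nodedown, Node} ->
+            io:format("group node down: ~p~n", [Node]),
+            group_loop()
+    end.]]></code>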
</desc>
</func>
+
<func>
<name name="own_nodes" arity="0"/>
- <fsummary>Return the group nodes</fsummary>
+ <fsummary>Return the group nodes.</fsummary>
<desc>
<p>Returns the names of all group nodes, regardless of their
current status.</p>
</desc>
</func>
+
<func>
<name name="registered_names" arity="1"/>
- <fsummary>Return globally registered names</fsummary>
+ <fsummary>Return globally registered names.</fsummary>
<desc>
- <p>Returns a list of all names which are globally registered
+ <p>Returns a list of all names that are globally registered
on the specified node or in the specified global group.</p>
</desc>
</func>
+
<func>
<name name="send" arity="2"/>
<name name="send" arity="3"/>
- <fsummary>Send a message to a globally registered pid</fsummary>
+ <fsummary>Send a message to a globally registered pid.</fsummary>
<desc>
<p>Searches for <c><anno>Name</anno></c>, globally registered on
- the specified node or in the specified global group, or --
- if the <c><anno>Where</anno></c> argument is not provided -- in any global
- group. The global groups are searched in the order in which
- they appear in the value of the <c>global_groups</c>
- configuration parameter.</p>
- <p>If <c><anno>Name</anno></c> is found, the message <c><anno>Msg</anno></c> is sent to
+ the specified node or in the specified global group, or
+ (if argument <c><anno>Where</anno></c> is not provided) in any
+ global group. The global groups are searched in the order that
+ they appear in the value of configuration parameter
+ <c>global_groups</c>.</p>
+ <p>If <c><anno>Name</anno></c> is found, message
+ <c><anno>Msg</anno></c> is sent to
the corresponding pid. The pid is also the return value of
the function. If the name is not found, the function returns
<c>{badarg, {<anno>Name</anno>, <anno>Msg</anno>}}</c>.</p>
</desc>
</func>
+
<func>
<name name="sync" arity="0"/>
- <fsummary>Synchronize the group nodes</fsummary>
+ <fsummary>Synchronize the group nodes.</fsummary>
<desc>
<p>Synchronizes the group nodes, that is, the global name
- servers on the group nodes. Also check the names globally
+ servers on the group nodes. Also checks the names globally
registered in the current global group and unregisters them
on any known node not part of the group.</p>
<p>If synchronization is not possible, an error report is sent
- to the error logger (see also <c>error_logger(3)</c>).</p>
- <p>Failure:
- <c>{error, {'invalid global_groups definition', Bad}}</c> if
- the <c>global_groups</c> configuration parameter has an
+ to the error logger (see also
+          <seealso marker="error_logger"><c>error_logger(3)</c></seealso>).
+          </p>
+ <p>Returns <c>{error, {'invalid global_groups definition', Bad}}</c>
+ if configuration parameter <c>global_groups</c> has an
invalid value <c>Bad</c>.</p>
</desc>
</func>
+
<func>
<name name="whereis_name" arity="1"/>
<name name="whereis_name" arity="2"/>
- <fsummary>Get the pid with a given globally registered name</fsummary>
+ <fsummary>Get the pid with a specified globally registered name.</fsummary>
<desc>
<p>Searches for <c><anno>Name</anno></c>, globally registered on
- the specified node or in the specified global group, or -- if
- the <c><anno>Where</anno></c> argument is not provided -- in any global
- group. The global groups are searched in the order in which
- they appear in the value of the <c>global_groups</c>
- configuration parameter.</p>
- <p>If <c><anno>Name</anno></c> is found, the corresponding pid is returned.
- If the name is not found, the function returns
+ the specified node or in the specified global group, or
+ (if argument <c><anno>Where</anno></c> is not provided) in any global
+ group. The global groups are searched in the order that
+ they appear in the value of configuration parameter
+ <c>global_groups</c>.</p>
+ <p>If <c><anno>Name</anno></c> is found, the corresponding pid is
+ returned. If the name is not found, the function returns
<c>undefined</c>.</p>
</desc>
</func>
</funcs>
<section>
- <title>NOTE</title>
- <p>In the situation where a node has lost its connections to other
- nodes in its global group, but has connections to nodes in other
- global groups, a request from another global group may produce an
- incorrect or misleading result. For example, the isolated node may
- not have accurate information about registered names in its
- global group.</p>
- <p>Note also that the <c>send/2,3</c> function is not secure.</p>
- <p>Distribution of applications is highly dependent of the global
- group definitions. It is not recommended that an application is
- distributed over several global groups of the obvious reason that
- the registered names may be moved to another global group at
- failover/takeover. There is nothing preventing doing this, but
- the application code must in such case handle the situation.</p>
+ <title>Notes</title>
+ <list type="bulleted">
+ <item><p>In the situation where a node has lost its connections to other
+ nodes in its global group, but has connections to nodes in other
+ global groups, a request from another global group can produce an
+ incorrect or misleading result. For example, the isolated node can
+ have inaccurate information about registered names in its
+ global group.</p></item>
+ <item><p>Function
+ <seealso marker="#send/2"><c>send/2,3</c></seealso>
+ is not secure.</p></item>
+    <item><p>Distribution of applications is highly dependent on the global
+      group definitions. Distributing an application over many global
+      groups is not recommended, as
+      the registered names can be moved to another global group at
+      failover/takeover. Nothing prevents this from being done, but
+      the application code must then handle the situation.</p></item>
+ </list>
</section>
<section>
- <title>SEE ALSO</title>
- <p><seealso marker="erts:erl">erl(1)</seealso>,
- <seealso marker="global">global(3)</seealso></p>
+ <title>See Also</title>
+ <p><seealso marker="global"><c>global(3)</c></seealso>,
+ <seealso marker="erts:erl"><c>erl(1)</c></seealso></p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/heart.xml b/lib/kernel/doc/src/heart.xml
index 9da4773f2d..ad1a2ffeb9 100644
--- a/lib/kernel/doc/src/heart.xml
+++ b/lib/kernel/doc/src/heart.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2013</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -29,93 +29,85 @@
<rev>A</rev>
</header>
<module>heart</module>
- <modulesummary>Heartbeat Monitoring of an Erlang Runtime System</modulesummary>
+ <modulesummary>Heartbeat monitoring of an Erlang runtime system.</modulesummary>
<description>
<p>This modules contains the interface to the <c>heart</c> process.
<c>heart</c> sends periodic heartbeats to an external port
program, which is also named <c>heart</c>. The purpose of
- the heart port program is to check that the Erlang runtime system
+ the <c>heart</c> port program is to check that the Erlang runtime system
it is supervising is still running. If the port program has not
received any heartbeats within <c>HEART_BEAT_TIMEOUT</c> seconds
- (default is 60 seconds), the system can be rebooted. Also, if
- the system is equipped with a hardware watchdog timer and is
- running Solaris, the watchdog can be used to supervise the entire
- system.</p>
- <p>An Erlang runtime system to be monitored by a heart program,
- should be started with the command line flag <c>-heart</c> (see
- also <seealso marker="erts:erl">erl(1)</seealso>). The <c>heart</c>
- process is then started automatically:</p>
+ (defaults to 60 seconds), the system can be rebooted.</p>
+ <p>An Erlang runtime system to be monitored by a heart program
+ is to be started with command-line flag <c>-heart</c> (see
+ also <seealso marker="erts:erl"><c>erl(1)</c></seealso>).
+ The <c>heart</c> process is then started automatically:</p>
<pre>
% <input>erl -heart ...</input></pre>
- <p>If the system should be rebooted because of missing heart-beats,
- or a terminated Erlang runtime system, the environment variable
- <c>HEART_COMMAND</c> has to be set before the system is started.
- If this variable is not set, a warning text will be printed but
- the system will not reboot. However, if the hardware watchdog is
- used, it will trigger a reboot <c>HEART_BEAT_BOOT_DELAY</c>
- seconds later nevertheless (default is 60).</p>
- <p>To reboot on the WINDOWS platform <c>HEART_COMMAND</c> can be
+ <p>If the system is to be rebooted because of missing heartbeats,
+ or a terminated Erlang runtime system, environment variable
+ <c>HEART_COMMAND</c> must be set before the system is started.
+ If this variable is not set, a warning text is printed but
+ the system does not reboot.</p>
+ <p>To reboot on Windows, <c>HEART_COMMAND</c> can be
set to <c>heart -shutdown</c> (included in the Erlang delivery)
- or of course to any other suitable program which can activate a
- reboot.</p>
- <p>The hardware watchdog will not be started under Solaris if
- the environment variable <c>HW_WD_DISABLE</c> is set.</p>
- <p>The <c>HEART_BEAT_TIMEOUT</c> and <c>HEART_BEAT_BOOT_DELAY</c>
- environment variables can be used to configure the heart timeouts,
- they can be set in the operating system shell before Erlang is
- started or be specified at the command line:</p>
+ or to any other suitable program that can activate a reboot.</p>
+ <p>The environment variable <c>HEART_BEAT_TIMEOUT</c>
+ can be used to configure the heart
+ time-outs; it can be set in the operating system shell before Erlang
+ is started or be specified at the command line:</p>
<pre>
% <input>erl -heart -env HEART_BEAT_TIMEOUT 30 ...</input></pre>
<p>The value (in seconds) must be in the range 10 &lt; X &lt;= 65535.</p>
- <p>It should be noted that if the system clock is adjusted with
- more than <c>HEART_BEAT_TIMEOUT</c> seconds, <c>heart</c> will
- timeout and try to reboot the system. This can happen, for
- example, if the system clock is adjusted automatically by use of
- NTP (Network Time Protocol).</p>
-
- <p> If a crash occurs, an <c><![CDATA[erl_crash.dump]]></c> will <em>not</em> be written
- unless the environment variable <c><![CDATA[ERL_CRASH_DUMP_SECONDS]]></c> is set.
- </p>
-
+ <p>When running on OSs lacking support for monotonic time,
+ <c>heart</c> is susceptible to system clock adjustments of more than
+ <c>HEART_BEAT_TIMEOUT</c> seconds. When this happens, <c>heart</c>
+ times out and tries to reboot the system. This can occur, for
+ example, if the system clock is adjusted automatically by use of the
+ Network Time Protocol (NTP).</p>
+ <p>If a crash occurs, an <c><![CDATA[erl_crash.dump]]></c> is <em>not</em>
+ written unless environment variable
+ <c><![CDATA[ERL_CRASH_DUMP_SECONDS]]></c> is set:</p>
<pre>
% <input>erl -heart -env ERL_CRASH_DUMP_SECONDS 10 ...</input></pre>
-
- <p> If a regular core dump is wanted, let heart know by setting the kill signal to abort
- using the environment variable <c><![CDATA[HEART_KILL_SIGNAL=SIGABRT]]></c>.
- If unset, or not set to <c><![CDATA[SIGABRT]]></c>, the default behaviour will be a kill
- signal using <c><![CDATA[SIGKILL]]></c>.
- </p>
-
+ <p>If a regular core dump is wanted, let <c>heart</c> know by setting
+ the kill signal to abort using environment variable
+ <c><![CDATA[HEART_KILL_SIGNAL=SIGABRT]]></c>. If unset, or not set to
+ <c><![CDATA[SIGABRT]]></c>, the default behavior is a kill signal using
+ <c><![CDATA[SIGKILL]]></c>:</p>
<pre>
% <input>erl -heart -env HEART_KILL_SIGNAL SIGABRT ...</input></pre>
-
- <p>
- Furthermore, <c><![CDATA[ERL_CRASH_DUMP_SECONDS]]></c> has the following behaviour on
- <c>heart</c>:
+    <p>If <c>heart</c> is <em>not</em> to kill the Erlang runtime system,
+      this can be indicated using environment variable
+      <c><![CDATA[HEART_NO_KILL=TRUE]]></c>.
+      This can be useful if the command executed by <c>heart</c> takes care
+      of the kill, for example as part of a specific cleanup sequence.
+      If unset, or not set to <c><![CDATA[TRUE]]></c>, the default behavior
+      is to kill as described above:
+    </p>
- <taglist>
- <tag><c><![CDATA[ERL_CRASH_DUMP_SECONDS=0]]></c></tag>
- <item><p>
- Suppresses the writing a crash dump file entirely,
- thus rebooting the runtime system immediately.
- This is the same as not setting the environment variable.
- </p>
- </item>
- <tag><c><![CDATA[ERL_CRASH_DUMP_SECONDS=-1]]></c></tag>
- <item><p> Setting the environment variable to a negative value will not reboot
- the runtime system until the crash dump file has been completly written.
- </p>
- </item>
- <tag><c><![CDATA[ERL_CRASH_DUMP_SECONDS=S]]></c></tag>
- <item><p>
- Heart will wait for <c>S</c> seconds to let the crash dump file be written.
- After <c>S</c> seconds <c>heart</c> will reboot the runtime system regardless of
- the crash dump file has been written or not.
- </p>
- </item>
- </taglist>
- <p>In the following descriptions, all function fails with reason
+ <pre>
+% <input>erl -heart -env HEART_NO_KILL 1 ...</input></pre>
+
+ <p>Furthermore, <c><![CDATA[ERL_CRASH_DUMP_SECONDS]]></c> has the
+ following behavior on <c>heart</c>:</p>
+ <taglist>
+ <tag><c><![CDATA[ERL_CRASH_DUMP_SECONDS=0]]></c></tag>
+ <item><p>Suppresses the writing of a crash dump file entirely,
+ thus rebooting the runtime system immediately.
+ This is the same as not setting the environment variable.</p>
+ </item>
+ <tag><c><![CDATA[ERL_CRASH_DUMP_SECONDS=-1]]></c></tag>
+ <item><p>Setting the environment variable to a negative value does not
+        reboot the runtime system until the crash dump file is completely
+ written.</p>
+ </item>
+ <tag><c><![CDATA[ERL_CRASH_DUMP_SECONDS=S]]></c></tag>
+ <item><p><c>heart</c> waits for <c>S</c> seconds to let the crash dump
+ file be written. After <c>S</c> seconds, <c>heart</c> reboots the
+ runtime system, whether the crash dump file is written or not.</p>
+ </item>
+ </taglist>
+ <p>In the following descriptions, all functions fail with reason
<c>badarg</c> if <c>heart</c> is not started.</p>
</description>
@@ -128,37 +120,36 @@
<funcs>
<func>
<name name="set_cmd" arity="1"/>
- <fsummary>Set a temporary reboot command</fsummary>
+ <fsummary>Set a temporary reboot command.</fsummary>
<desc>
- <p>Sets a temporary reboot command. This command is used if
+ <p>Sets a temporary reboot command. This command is used if
a <c>HEART_COMMAND</c> other than the one specified with
- the environment variable should be used in order to reboot
- the system. The new Erlang runtime system will (if it
- misbehaves) use the environment variable
- <c>HEART_COMMAND</c> to reboot.</p>
-
- <p>Limitations: The <c><anno>Cmd</anno></c> command string
- will be sent to the heart program as a ISO-latin-1 or UTF-8
- encoded binary depending on the file name encoding mode of the
- emulator (see
- <seealso marker="kernel:file#native_name_encoding/0"><c>file:native_name_encoding/0</c></seealso>).
- The size of the encoded binary must be less than 2047 bytes.</p>
+ the environment variable is to be used to reboot
+ the system. The new Erlang runtime system uses (if it misbehaves)
+ environment variable <c>HEART_COMMAND</c> to reboot.</p>
+ <p>Limitations: Command string <c><anno>Cmd</anno></c> is sent to the
+ <c>heart</c> program as an ISO Latin-1 or UTF-8 encoded binary,
+ depending on the filename encoding mode of the emulator (see
+ <seealso marker="kernel:file#native_name_encoding/0"><c>file:native_name_encoding/0</c></seealso>).
+ The size of the encoded binary must be less than 2047 bytes.</p>
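+          <p>Example (the reboot command shown is only an illustration):</p>
+          <pre>
+1> <input>heart:set_cmd("/usr/local/bin/restart_node").</input>
+ok</pre>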
</desc>
</func>
+
<func>
<name name="clear_cmd" arity="0"/>
- <fsummary>Clear the temporary boot command</fsummary>
+ <fsummary>Clear the temporary boot command.</fsummary>
<desc>
<p>Clears the temporary boot command. If the system terminates,
the normal <c>HEART_COMMAND</c> is used to reboot.</p>
</desc>
</func>
+
<func>
<name name="get_cmd" arity="0"/>
- <fsummary>Get the temporary reboot command</fsummary>
+ <fsummary>Get the temporary reboot command.</fsummary>
<desc>
- <p>Get the temporary reboot command. If the command is cleared,
- the empty string will be returned.</p>
+ <p>Gets the temporary reboot command. If the command is cleared,
+ the empty string is returned.</p>
</desc>
</func>
@@ -166,12 +157,12 @@
<name name="set_callback" arity="2"/>
<fsummary>Set a validation callback</fsummary>
<desc>
- <p> This validation callback will be executed before any heartbeat sent
- to the port program. For the validation to succeed it needs to return
- with the value <c>ok</c>.
+        <p>This validation callback is executed before any
+          heartbeat is sent to the port program. For the validation to
+          succeed, it must return the value <c>ok</c>.
</p>
- <p> An exception within the callback will be treated as a validation failure. </p>
- <p> The callback will be removed if the system reboots. </p>
+        <p>An exception within the callback is treated as a validation failure.</p>
+        <p>The callback is removed if the system reboots.</p>
</desc>
</func>
<func>
diff --git a/lib/kernel/doc/src/inet.xml b/lib/kernel/doc/src/inet.xml
index 088d78c1d6..87b08e4e36 100644
--- a/lib/kernel/doc/src/inet.xml
+++ b/lib/kernel/doc/src/inet.xml
@@ -4,14 +4,14 @@
<erlref>
<header>
<copyright>
- <year>1997</year><year>2015</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-
+
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
@@ -29,69 +29,78 @@
<rev>A</rev>
</header>
<module>inet</module>
- <modulesummary>Access to TCP/IP Protocols</modulesummary>
+ <modulesummary>Access to TCP/IP protocols.</modulesummary>
<description>
- <p>Provides access to TCP/IP protocols.</p>
- <p>See also <em>ERTS User's Guide, Inet configuration</em> for more
- information on how to configure an Erlang runtime system for IP
- communication.</p>
- <p>Two Kernel configuration parameters affect the behaviour of all
- sockets opened on an Erlang node:
- <c>inet_default_connect_options</c> can contain a list of default
- options used for all sockets returned when doing <c>connect</c>,
- and <c>inet_default_listen_options</c> can contain a list of
- default options used when issuing a <c>listen</c> call. When
- <c>accept</c> is issued, the values of the listensocket options
- are inherited, why no such application variable is needed for
+ <p>This module provides access to TCP/IP protocols.</p>
+ <p>See also
+ <seealso marker="erts:inet_cfg">ERTS User's Guide:
+ Inet Configuration</seealso> for more information about how to
+ configure an Erlang runtime system for IP communication.</p>
+ <p>The following two Kernel configuration parameters affect the
+ behavior of all sockets opened on an Erlang node:</p>
+ <list type="bulleted">
+ <item><p><c>inet_default_connect_options</c> can contain a list of
+ default options used for all sockets returned when doing
+ <c>connect</c>.</p></item>
+ <item><p><c>inet_default_listen_options</c> can contain a list of
+ default options used when issuing a <c>listen</c> call.</p></item>
+ </list>
+ <p>When <c>accept</c> is issued, the values of the listening socket options
+ are inherited. No such application variable is therefore needed for
<c>accept</c>.</p>
- <p>Using the Kernel configuration parameters mentioned above, one
- can set default options for all TCP sockets on a node. This should
- be used with care, but options like <c>{delay_send,true}</c>
- might be specified in this way. An example of starting an Erlang
- node with all sockets using delayed send could look like this:</p>
+ <p>Using the Kernel configuration parameters above, one
+ can set default options for all TCP sockets on a node, but use this
+ with care. Options such as <c>{delay_send,true}</c> can be
+ specified in this way. The following is an example of starting an Erlang
+ node with all sockets using delayed send:</p>
<pre>
$ <input>erl -sname test -kernel \</input>
<input>inet_default_connect_options '[{delay_send,true}]' \</input>
<input>inet_default_listen_options '[{delay_send,true}]'</input></pre>
- <p>Note that the default option <c>{active, true}</c> currently
+ <p>Notice that default option <c>{active, true}</c>
cannot be changed, for internal reasons.</p>
<p>Addresses as inputs to functions can be either a string or a
- tuple. For instance, the IP address 150.236.20.73 can be passed to
- <c>gethostbyaddr/1</c> either as the string "150.236.20.73"
- or as the tuple <c>{150, 236, 20, 73}</c>.</p>
- <p>IPv4 address examples:</p>
+ tuple. For example, the IP address 150.236.20.73 can be passed to
+ <c>gethostbyaddr/1</c>, either as string <c>"150.236.20.73"</c>
+ or as tuple <c>{150, 236, 20, 73}</c>.</p>
+ <p><em>IPv4 address examples:</em></p>
<code type="none">
Address ip_address()
------- ------------
127.0.0.1 {127,0,0,1}
192.168.42.2 {192,168,42,2}</code>
- <p>IPv6 address examples:</p>
+ <p><em>IPv6 address examples:</em></p>
<code type="none">
Address ip_address()
------- ------------
::1 {0,0,0,0,0,0,0,1}
::192.168.42.2 {0,0,0,0,0,0,(192 bsl 8) bor 168,(42 bsl 8) bor 2}
-FFFF::192.168.42.2
- {16#FFFF,0,0,0,0,0,(192 bsl 8) bor 168,(42 bsl 8) bor 2}
+::FFFF:192.168.42.2
+ {0,0,0,0,0,16#FFFF,(192 bsl 8) bor 168,(42 bsl 8) bor 2}
3ffe:b80:1f8d:2:204:acff:fe17:bf38
{16#3ffe,16#b80,16#1f8d,16#2,16#204,16#acff,16#fe17,16#bf38}
fe80::204:acff:fe17:bf38
{16#fe80,0,0,0,0,16#204,16#acff,16#fe17,16#bf38}</code>
- <p>A function that may be useful is <seealso marker="#parse_address/1">parse_address/1</seealso>:</p>
+ <p>Function
+ <seealso marker="#parse_address/1"><c>parse_address/1</c></seealso>
+ can be useful:</p>
<pre>
1> <input>inet:parse_address("192.168.42.2").</input>
{ok,{192,168,42,2}}
-2> <input>inet:parse_address("FFFF::192.168.42.2").</input>
-{ok,{65535,0,0,0,0,0,49320,10754}}</pre>
+2> <input>inet:parse_address("::FFFF:192.168.42.2").</input>
+{ok,{0,0,0,0,0,65535,49320,10754}}</pre>
</description>
<datatypes>
<datatype>
<name name="hostent"/>
<desc>
- <p>The record is defined in the Kernel include file "inet.hrl".
- Add the following directive to the module:</p>
-<code>-include_lib("kernel/include/inet.hrl").</code></desc>
+ <p>The record is defined in the Kernel include file
+ <c>"inet.hrl"</c>.</p>
+ <p>Add the following directive to the module:</p>
+ <code>
+-include_lib("kernel/include/inet.hrl").</code>
+ </desc>
</datatype>
<datatype>
<name name="hostname"/>
@@ -109,465 +118,700 @@ fe80::204:acff:fe17:bf38
<name name="port_number"/>
</datatype>
<datatype>
+ <name name="local_address"/>
+ <desc>
+ <p>
+ This address family only works on Unix-like systems.
+ </p>
+ <p>
+ <c><anno>File</anno></c> is normally a file pathname
+ in a local filesystem. It is limited in length by the
+ operating system, traditionally to 108 bytes.
+ </p>
+ <p>
+ A <c>binary()</c> is passed as is to the operating system,
+ but a <c>string()</c> is encoded according to the
+ <seealso marker="file#native_name_encoding/0">
+ system filename encoding mode.
+ </seealso>
+ </p>
+ <p>
+ Other addresses are possible, for example Linux implements
+ "Abstract Addresses". See the documentation for
+ Unix Domain Sockets on your system,
+ normally <c>unix</c> in manual section 7.
+ </p>
+ <p>
+ In most API functions where you can use
+          this address family, the port number must be <c>0</c>.
+ </p>
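+        <p>
+          A brief sketch (the socket path is only an example) of using the
+          <c>local</c> address family with <c>gen_tcp</c>; notice that the
+          port number is <c>0</c>:
+        </p>
+        <code type="none"><![CDATA[
+{ok, Listen} = gen_tcp:listen(0, [{ifaddr, {local, "/tmp/example.sock"}},
+                                  binary, {active, false}]),
+{ok, Socket} = gen_tcp:accept(Listen).]]></code>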
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="socket_address"/>
+ </datatype>
+ <datatype>
+ <name name="socket_getopt"/>
+ </datatype>
+ <datatype>
+ <name name="socket_setopt"/>
+ </datatype>
+ <datatype>
+ <name name="returned_non_ip_address"/>
+ <desc>
+ <p>
+          Addresses other than
+          <seealso marker="#type-ip_address">
+            <c>ip_address()</c>
+          </seealso>
+          that can be returned from socket API functions.
+ See in particular
+ <seealso marker="#type-local_address">
+ <c>local_address()</c>.
+ </seealso>
+ The <c>unspec</c> family corresponds to AF_UNSPEC and can
+ occur if the other side has no socket address.
+ The <c>undefined</c> family can only occur in the unlikely
+ event of an address family that the VM does not recognize.
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="ancillary_data"/>
+ <desc>
+ <p>
+ Ancillary data received with the data packet
+ or read with the socket option
+ <seealso marker="gen_tcp#type-pktoptions_value">
+ <c>pktoptions</c>
+ </seealso>
+ from a TCP socket.
+ </p>
+ <p>
+ The value(s) correspond to the currently active socket
+ <seealso marker="#type-socket_setopt">options</seealso>
+ <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>,
+ <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso>
+ and
+ <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso>.
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="getifaddrs_ifopts"/>
+ <desc>
+ <p>
+ Interface address description list returned from
+ <seealso marker="#getifaddrs/0"><c>getifaddrs/0,1</c></seealso>
+ for a named interface, translated from the returned
+ data of the POSIX API function <c>getaddrinfo()</c>.
+ </p>
+ <p>
+ <c><anno>Hwaddr</anno></c> is hardware dependent,
+ for example, on Ethernet interfaces it is
+ the 6-byte Ethernet address (MAC address (EUI-48 address)).
+ </p>
+ <p>
+ The tuples <c>{addr,<anno>Addr</anno>}</c>,
+ <c>{netmask,<anno>Netmask</anno>}</c>, and possibly
+ <c>{broadaddr,<anno>Broadaddr</anno>}</c> or
+ <c>{dstaddr,<anno>Dstaddr</anno>}</c>
+ are repeated in the list
+          if the interface has multiple addresses.
+          An interface can have multiple <c>{flag,_}</c> tuples,
+          for example, if it has different flags
+          for different address families.
+          Little definite can be said about multiple
+          <c>{hwaddr,<anno>Hwaddr</anno>}</c> tuples, though.
+          The tuple <c>{flag,<anno>Flags</anno>}</c> is mandatory;
+          all others are optional.
+          </p>
+ <p>
+ Do not rely too much on the order
+ of <c><anno>Flags</anno></c> atoms
+ or the <c><anno>Ifopt</anno></c> tuples.
+          There are, however, some rules:
+ </p>
+ <list type="bulleted">
+ <item><p>
+ A <c>{flag,_}</c> tuple applies to all other tuples that follow.
+ </p></item>
+ <item><p>
+ Immediately after <c>{addr,_}</c> follows <c>{netmask,_}</c>.
+ </p></item>
+ <item><p>
+          Immediately thereafter, <c>{broadaddr,_}</c> can follow
+          if <c>broadcast</c> is a member of <c><anno>Flags</anno></c>,
+          or <c>{dstaddr,_}</c> if <c>pointtopoint</c>
+          is a member of <c><anno>Flags</anno></c>.
+          <c>{dstaddr,_}</c> and <c>{broadaddr,_}</c>
+          never occur for the same <c>{addr,_}</c>.
+          </p></item>
+ <item><p>
+ Any <c>{netmask,_}</c>, <c>{broadaddr,_}</c>, or
+ <c>{dstaddr,_}</c> tuples that follow an
+ <c>{addr,<anno>Addr</anno>}</c>
+ tuple concerns the address <c><anno>Addr</anno></c>.
+ </p></item>
+ </list>
+ <p>
+ The tuple <c>{hwaddr,_}</c> is not returned on Solaris, as the
+ hardware address historically belongs to the link layer
+ and it is not returned by the Solaris API function
+ <c>getaddrinfo()</c>.
+ </p>
+ <warning>
+ <p>
+ On Windows, the data is fetched from different
+ OS API functions, so the <c><anno>Netmask</anno></c>
+ and <c><anno>Broadaddr</anno></c> values may be calculated,
+ just as some <c><anno>Flags</anno></c> values.
+ </p>
+ </warning>
+ </desc>
+ </datatype>
+ <datatype>
<name name="posix"/>
- <desc><p>An atom which is named from the Posix error codes
- used in Unix, and in the runtime libraries of most
- C compilers. See
+ <desc>
+ <p>An atom that is named from the POSIX error codes used in Unix,
+ and in the runtime libraries of most C compilers. See section
<seealso marker="#error_codes">POSIX Error Codes</seealso>.</p>
</desc>
</datatype>
<datatype>
<name>socket()</name>
- <desc><p><marker id="type-socket"></marker>
- See <seealso marker="gen_tcp#type-socket">gen_tcp(3)</seealso>
- and <seealso marker="gen_udp#type-socket">gen_udp(3)</seealso>.</p>
+ <desc>
+ <p>See
+          <seealso marker="gen_tcp#type-socket"><c>gen_tcp:socket()</c></seealso>
+          and
+          <seealso marker="gen_udp#type-socket"><c>gen_udp:socket()</c></seealso>.
+ </p>
</desc>
</datatype>
<datatype>
<name name="address_family"/>
</datatype>
+ <datatype>
+ <name name="socket_protocol"/>
+ </datatype>
</datatypes>
<funcs>
<func>
<name name="close" arity="1"/>
- <fsummary>Close a socket of any type</fsummary>
+ <fsummary>Close a socket of any type.</fsummary>
<desc>
<p>Closes a socket of any type.</p>
</desc>
</func>
+
<func>
- <name name="get_rc" arity="0"/>
- <fsummary>Return a list of IP configuration parameters</fsummary>
+ <name name="format_error" arity="1"/>
+ <fsummary>Return a descriptive string for an error reason.</fsummary>
<desc>
- <p>Returns the state of the Inet configuration database in
- form of a list of recorded configuration parameters. (See the
- ERTS User's Guide, Inet configuration, for more information).
- Only parameters with other than default values are returned.</p>
+ <p>Returns a diagnostic error string. For possible POSIX values and
+ corresponding strings, see section
+ <seealso marker="#error_codes">POSIX Error Codes</seealso>.</p>
</desc>
</func>
+
<func>
- <name name="format_error" arity="1"/>
- <fsummary>Return a descriptive string for an error reason</fsummary>
+ <name name="get_rc" arity="0"/>
+ <fsummary>Return a list of IP configuration parameters.</fsummary>
<desc>
- <p>Returns a diagnostic error string. See the section below
- for possible Posix values and the corresponding
- strings.</p>
+ <p>
+ Returns the state of the <c>Inet</c> configuration database in
+ form of a list of recorded configuration parameters. For more
+ information, see <seealso marker="erts:inet_cfg">ERTS User's Guide:
+ Inet Configuration</seealso>.
+ </p>
+ <p>
+          Only actual parameters with values other than the defaults
+          are returned; for example, directives that specify
+          other sources for configuration parameters, or directives
+          that clear parameters, are not returned.
+ </p>
</desc>
</func>
+
<func>
<name name="getaddr" arity="2"/>
- <fsummary>Return the IP-address for a host</fsummary>
+ <fsummary>Return the IP address for a host.</fsummary>
<desc>
- <p>Returns the IP-address for <c><anno>Host</anno></c> as a tuple of
- integers. <c><anno>Host</anno></c> can be an IP-address, a single hostname
- or a fully qualified hostname.</p>
+ <p>Returns the IP address for <c><anno>Host</anno></c> as a tuple of
+ integers. <c><anno>Host</anno></c> can be an IP address, a single
+ hostname, or a fully qualified hostname.</p>
</desc>
</func>
+
<func>
<name name="getaddrs" arity="2"/>
- <fsummary>Return the IP-addresses for a host</fsummary>
+ <fsummary>Return the IP addresses for a host.</fsummary>
<desc>
- <p>Returns a list of all IP-addresses for <c><anno>Host</anno></c>.
- <c><anno>Host</anno></c> can be an IP-address, a single hostname or a fully
- qualified hostname.</p>
+ <p>Returns a list of all IP addresses for <c><anno>Host</anno></c>.
+ <c><anno>Host</anno></c> can be an IP address, a single hostname, or
+ a fully qualified hostname.</p>
</desc>
</func>
+
<func>
<name name="gethostbyaddr" arity="1"/>
- <fsummary>Return a hostent record for the host with the given address</fsummary>
+ <fsummary>Return a hostent record for the host with the specified
+ address.</fsummary>
<desc>
- <p>Returns a <c>hostent</c> record given an address.</p>
- </desc>
+ <p>Returns a <c>hostent</c> record for the host with the specified
+ address.</p></desc>
</func>
+
<func>
<name name="gethostbyname" arity="1"/>
- <fsummary>Return a hostent record for the host with the given name</fsummary>
+ <fsummary>Return a hostent record for the host with the specified name.
+ </fsummary>
<desc>
- <p>Returns a <c>hostent</c> record given a hostname.</p>
+ <p>Returns a <c>hostent</c> record for the host with the specified
+ hostname.</p>
+ <p>If resolver option <c>inet6</c> is <c>true</c>,
+ an IPv6 address is looked up.</p>
</desc>
</func>
+
<func>
<name name="gethostbyname" arity="2"/>
- <fsummary>Return a hostent record for the host with the given name</fsummary>
+ <fsummary>Return a hostent record for the host with the specified name.
+ </fsummary>
<desc>
- <p>Returns a <c>hostent</c> record given a hostname, restricted
- to the given address family.</p>
+ <p>Returns a <c>hostent</c> record for the host with the specified
+ name, restricted to the specified address family.</p>
</desc>
</func>
+
<func>
<name name="gethostname" arity="0"/>
- <fsummary>Return the local hostname</fsummary>
+ <fsummary>Return the local hostname.</fsummary>
<desc>
- <p>Returns the local hostname. Will never fail.</p>
+ <p>Returns the local hostname. Never fails.</p>
</desc>
</func>
<func>
<name name="getifaddrs" arity="0"/>
- <fsummary>Return a list of interfaces and their addresses</fsummary>
- <desc>
- <p>
- Returns a list of 2-tuples containing interface names and the
- interface's addresses. <c><anno>Ifname</anno></c> is a Unicode string.
- <c><anno>Hwaddr</anno></c> is hardware dependent, e.g on Ethernet interfaces
- it is the 6-byte Ethernet address (MAC address (EUI-48 address)).
- </p>
- <p>
- The <c>{addr,<anno>Addr</anno>}</c>, <c>{netmask,_}</c> and <c>{broadaddr,_}</c>
- tuples are repeated in the result list iff the interface has multiple
- addresses. If you come across an interface that has
- multiple <c>{flag,_}</c> or <c>{hwaddr,_}</c> tuples you have
- a really strange interface or possibly a bug in this function.
- The <c>{flag,_}</c> tuple is mandatory, all other optional.
- </p>
- <p>
- Do not rely too much on the order of <c><anno>Flag</anno></c> atoms or
- <c><anno>Ifopt</anno></c> tuples. There are some rules, though:</p>
- <list>
- <item>
- Immediately after <c>{addr,_}</c> follows <c>{netmask,_}</c>
- </item>
- <item>
- Immediately thereafter follows <c>{broadaddr,_}</c> if
- the <c>broadcast</c> flag is <em>not</em> set and the
- <c>pointtopoint</c> flag <em>is</em> set.
- </item>
- <item>
- Any <c>{netmask,_}</c>, <c>{broadaddr,_}</c> or
- <c>{dstaddr,_}</c> tuples that follow an <c>{addr,_}</c>
- tuple concerns that address.
- </item>
- </list>
- <p>
- The <c>{hwaddr,_}</c> tuple is not returned on Solaris since the
- hardware address historically belongs to the link layer and only
- the superuser can read such addresses.
- </p>
- <p>
- On Windows, the data is fetched from quite different OS API
- functions, so the <c><anno>Netmask</anno></c> and <c><anno>Broadaddr</anno></c>
- values may be calculated, just as some <c><anno>Flag</anno></c> values.
- You have been warned. Report flagrant bugs.
- </p>
- </desc>
+ <fsummary>Return a list of interfaces and their addresses.</fsummary>
+ <desc>
+ <p>
+ Returns a list of 2-tuples containing interface names and
+ the interfaces' addresses. <c><anno>Ifname</anno></c>
+ is a Unicode string and
+ <c><anno>Ifopts</anno></c> is a list of
+ interface address description tuples.
+ </p>
+ <p>
+ The interface address description tuples
+ are documented under the type of the
+ <seealso marker="#type-getifaddrs_ifopts">
+ <c><anno>Ifopts</anno></c>
+ </seealso>
+ value.
+ </p>
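+        <p>
+          A small usage sketch (interface names, flags, and addresses are
+          system dependent) that collects the addresses of all interfaces
+          that are up:
+        </p>
+        <code type="none"><![CDATA[
+up_addrs() ->
+    {ok, Ifaddrs} = inet:getifaddrs(),
+    [{Ifname, proplists:get_all_values(addr, Ifopts)}
+     || {Ifname, Ifopts} <- Ifaddrs,
+        lists:member(up, proplists:get_value(flags, Ifopts, []))].]]></code>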
+ </desc>
+ </func>
+
+ <func>
+ <name>getifaddrs(Opts) ->
+ {ok, [{Ifname, Ifopts}]} | {error, Posix}
+ </name>
+ <fsummary>Return a list of interfaces and their addresses.</fsummary>
+ <type>
+ <v>
+ Opts = [{netns, Namespace}]
+ </v>
+ <v>
+ Namespace =
+ <seealso marker="file#type-filename_all">
+ file:filename_all()
+ </seealso>
+ </v>
+ <v>Ifname = string()</v>
+ <v>
+ Ifopts =
+ <seealso marker="#type-getifaddrs_ifopts">
+ getifaddrs_ifopts()
+ </seealso>
+ </v>
+ <v>Posix = <seealso marker="#type-posix">posix()</seealso></v>
+ </type>
+ <desc>
+ <p>
+ The same as
+ <seealso marker="#getifaddrs/0"><c>getifaddrs/0</c></seealso>
+          but option
+          <c>{netns, Namespace}</c> sets a network namespace
+          for the OS call, on platforms that support that feature.
+ </p>
+ <p>
+ See the socket option
+ <seealso marker="#option-netns">
+ <c>{netns, Namespace}</c>
+ </seealso>
+ under
+ <seealso marker="#setopts/2"><c>setopts/2</c></seealso>.
+ </p>
+ </desc>
</func>
<func>
<name name="getopts" arity="2"/>
- <fsummary>Get one or more options for a socket</fsummary>
- <type name="socket_getopt"/>
- <type name="socket_setopt"/>
+ <fsummary>Get one or more options for a socket.</fsummary>
<desc>
- <p>Gets one or more options for a socket.
- See <seealso marker="#setopts/2">setopts/2</seealso>
- for a list of available options.</p>
- <p>The number of elements in the returned <c><anno>OptionValues</anno></c>
+ <p>Gets one or more options for a socket. For a list of available
+ options, see
+ <seealso marker="#setopts/2"><c>setopts/2</c></seealso>.
+ See also the description for the type
+ <seealso marker="gen_tcp#type-pktoptions_value">
+ <c>gen_tcp:pktoptions_value()</c>
+ </seealso>.</p>
+ <p>The number of elements in the returned
+ <c><anno>OptionValues</anno></c>
list does not necessarily correspond to the number of options
asked for. If the operating system fails to support an option,
- it is simply left out in the returned list. An error tuple is only
- returned when getting options for the socket is impossible
- (i.e. the socket is closed or the buffer size in a raw request
+ it is left out in the returned list. An error tuple is returned
+ only when getting options for the socket is impossible (that is,
+ the socket is closed or the buffer size in a raw request
is too large). This behavior is kept for backward
compatibility reasons.</p>
- <p>A raw option request <c>RawOptReq = {raw, Protocol, OptionNum, ValueSpec}</c> can be used to get information about
+ <p>A raw option request
+ <c>RawOptReq = {raw, Protocol, OptionNum, ValueSpec}</c>
+ can be used to get information about
socket options not (explicitly) supported by the emulator. The
- use of raw socket options makes the code non portable, but
+ use of raw socket options makes the code non-portable, but
allows the Erlang programmer to take advantage of unusual features
- present on the current platform.</p>
- <p>The <c>RawOptReq</c> consists of the tag <c>raw</c> followed
- by the protocol level, the option number and either a binary
+ present on a particular platform.</p>
+ <p><c>RawOptReq</c> consists of tag <c>raw</c> followed
+ by the protocol level, the option number, and either a binary
or the size, in bytes, of the
- buffer in which the option value is to be stored. A binary
- should be used when the underlying <c>getsockopt</c> requires
- <em>input</em>
- in the argument field, in which case the size of the binary
- should correspond to the required buffer
+ buffer in which the option value is to be stored. A binary is to be
+ used when the underlying <c>getsockopt</c> requires <em>input</em>
+ in the argument field. In this case, the binary size
+ is to correspond to the required buffer
size of the return value. The supplied values in a <c>RawOptReq</c>
- correspond to the second, third and fourth/fifth parameters to the
+ correspond to the second, third, and fourth/fifth parameters to the
<c>getsockopt</c> call in the C socket API. The value stored
- in the buffer is returned as a binary <c>ValueBin</c>
+ in the buffer is returned as a binary <c>ValueBin</c>,
where all values are coded in the native endianess.</p>
- <p>Asking for and inspecting raw socket options require low
- level information about the current operating system and TCP
- stack.</p>
- <p>As an example, consider a Linux machine where the
- <c>TCP_INFO</c> option could be used to collect TCP statistics
- for a socket. Lets say we're interested in the
- <c>tcpi_sacked</c> field of the <c>struct tcp_info</c>
- filled in when asking for <c>TCP_INFO</c>. To
- be able to access this information, we need to know both the
- numeric value of the protocol level <c>IPPROTO_TCP</c>, the
- numeric value of the option <c>TCP_INFO</c>, the size of the
- <c>struct tcp_info</c> and the size and offset of
- the specific field. By inspecting the headers or writing a small C
- program, we found <c>IPPROTO_TCP</c> to be 6,
- <c>TCP_INFO</c> to be 11, the structure size to be 92 (bytes),
- the offset of <c>tcpi_sacked</c> to be 28 bytes and the actual
- value to be a 32 bit integer. We could use the following
- code to retrieve the value:</p>
+ <p>Asking for and inspecting raw socket options require low-level
+ information about the current operating system and TCP stack.</p>
+ <p><em>Example:</em></p>
+ <p>Consider a Linux machine where option
+ <c>TCP_INFO</c> can be used to collect TCP statistics
+ for a socket. Assume you are interested in field
+ <c>tcpi_sacked</c> of <c>struct tcp_info</c>
+ filled in when asking for <c>TCP_INFO</c>. To be able to access
+ this information, you need to know the following:</p>
+ <list type="bulleted">
+ <item>The numeric value of protocol level <c>IPPROTO_TCP</c></item>
+ <item>The numeric value of option <c>TCP_INFO</c></item>
+ <item>The size of <c>struct tcp_info</c></item>
+ <item>The size and offset of the specific field</item>
+ </list>
+ <p>By inspecting the headers or writing a small C program, it is found
+ that <c>IPPROTO_TCP</c> is 6, <c>TCP_INFO</c> is 11, the structure
+ size is 92 (bytes), the offset of <c>tcpi_sacked</c> is 28 bytes,
+ and the value is a 32-bit integer. The following code can be used
+ to retrieve the value:</p>
<code type="none"><![CDATA[
- get_tcpi_sacked(Sock) ->
- {ok,[{raw,_,_,Info}]} = inet:getopts(Sock,[{raw,6,11,92}]),
- <<_:28/binary,TcpiSacked:32/native,_/binary>> = Info,
- TcpiSacked.]]></code>
- <p>Preferably, you would check the machine type, the OS
- and the kernel version prior to executing anything similar to the
- code above.</p>
+get_tcpi_sacked(Sock) ->
+ {ok,[{raw,_,_,Info}]} = inet:getopts(Sock,[{raw,6,11,92}]),
+ <<_:28/binary,TcpiSacked:32/native,_/binary>> = Info,
+ TcpiSacked.]]></code>
+ <p>Preferably, you would check the machine type, the operating system,
+ and the Kernel version before executing anything similar to
+ this code.</p>
</desc>
</func>
<func>
<name name="getstat" arity="1"/>
<name name="getstat" arity="2"/>
- <fsummary>Get one or more statistic options for a socket</fsummary>
+ <fsummary>Get one or more statistic options for a socket.</fsummary>
<type name="stat_option"/>
<desc>
<p>Gets one or more statistic options for a socket.</p>
-
- <p><c>getstat(<anno>Socket</anno>)</c> is equivalent to
- <c>getstat(<anno>Socket</anno>, [recv_avg, recv_cnt, recv_dvi,
- recv_max, recv_oct, send_avg, send_cnt, send_dvi, send_max,
- send_oct])</c>.</p>
- <p>The following options are available:</p>
+ <p><c>getstat(<anno>Socket</anno>)</c> is equivalent to
+ <c>getstat(<anno>Socket</anno>, [recv_avg, recv_cnt, recv_dvi,
+ recv_max, recv_oct, send_avg, send_cnt, send_dvi, send_max,
+ send_oct])</c>.</p>
+ <p>The following options are available:</p>
<taglist>
- <tag><c>recv_avg</c></tag>
- <item>
- <p>Average size of packets in bytes received by the socket.</p>
- </item>
- <tag><c>recv_cnt</c></tag>
- <item>
+ <tag><c>recv_avg</c></tag>
+ <item>
+ <p>Average size of packets, in bytes, received by the socket.</p>
+ </item>
+ <tag><c>recv_cnt</c></tag>
+ <item>
<p>Number of packets received by the socket.</p>
- </item>
- <tag><c>recv_dvi</c></tag>
- <item>
- <p>Average packet size deviation in bytes received by the socket.</p>
- </item>
- <tag><c>recv_max</c></tag>
- <item>
- <p>The size of the largest packet in bytes received by the socket.</p>
- </item>
- <tag><c>recv_oct</c></tag>
- <item>
+ </item>
+ <tag><c>recv_dvi</c></tag>
+ <item>
+ <p>Average packet size deviation, in bytes, received by the socket.</p>
+ </item>
+ <tag><c>recv_max</c></tag>
+ <item>
+ <p>Size of the largest packet, in bytes, received by the socket.</p>
+ </item>
+ <tag><c>recv_oct</c></tag>
+ <item>
<p>Number of bytes received by the socket.</p>
- </item>
-
- <tag><c>send_avg</c></tag>
- <item>
- <p>Average size of packets in bytes sent from the socket.</p>
- </item>
- <tag><c>send_cnt</c></tag>
- <item>
+ </item>
+ <tag><c>send_avg</c></tag>
+ <item>
+ <p>Average size of packets, in bytes, sent from the socket.</p>
+ </item>
+ <tag><c>send_cnt</c></tag>
+ <item>
<p>Number of packets sent from the socket.</p>
- </item>
- <tag><c>send_dvi</c></tag>
- <item>
- <p>Average packet size deviation in bytes sent from the socket.</p>
- </item>
- <tag><c>send_max</c></tag>
- <item>
- <p>The size of the largest packet in bytes sent from the socket.</p>
- </item>
- <tag><c>send_oct</c></tag>
- <item>
+ </item>
+ <tag><c>send_dvi</c></tag>
+ <item>
+ <p>Average packet size deviation, in bytes, sent from the socket.</p>
+ </item>
+ <tag><c>send_max</c></tag>
+ <item>
+ <p>Size of the largest packet, in bytes, sent from the socket.</p>
+ </item>
+ <tag><c>send_oct</c></tag>
+ <item>
<p>Number of bytes sent from the socket.</p>
- </item>
+ </item>
+ </taglist>
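+        <p>For example, a minimal sketch that reads the byte counters of a
+        socket (<c>Socket</c> is assumed to be an already opened socket):</p>
+        <code type="none"><![CDATA[
+byte_counters(Socket) ->
+    {ok, Stats} = inet:getstat(Socket, [recv_oct, send_oct]),
+    {proplists:get_value(recv_oct, Stats),
+     proplists:get_value(send_oct, Stats)}.]]></code>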
+ </desc>
+ </func>
+
+ <func>
+ <name name="i" arity="0" />
+ <name name="i" arity="1" />
+ <name name="i" arity="2" />
+      <fsummary>Display information and statistics about sockets on the terminal.</fsummary>
+ <desc>
+ <p>
+        Lists all TCP, UDP, and SCTP sockets, including those that the
+        Erlang runtime system uses as well as those created by the
+        application.
+ </p>
+ <p>
+ The following options are available:
+ </p>
+
+ <taglist>
+ <tag><c>port</c></tag>
+ <item>
+ <p>The internal index of the port.</p>
+ </item>
+ <tag><c>module</c></tag>
+ <item>
+ <p>The callback module of the socket.</p>
+ </item>
+ <tag><c>recv</c></tag>
+ <item>
+ <p>Number of bytes received by the socket.</p>
+ </item>
+ <tag><c>sent</c></tag>
+ <item>
+ <p>Number of bytes sent from the socket.</p>
+ </item>
+ <tag><c>owner</c></tag>
+ <item>
+ <p>The socket owner process.</p>
+ </item>
+ <tag><c>local_address</c></tag>
+ <item>
+ <p>The local address of the socket.</p>
+ </item>
+ <tag><c>foreign_address</c></tag>
+ <item>
+ <p>The address and port of the other end of the connection.</p>
+ </item>
+ <tag><c>state</c></tag>
+ <item>
+ <p>The connection state.</p>
+ </item>
+ <tag><c>type</c></tag>
+ <item>
+            <p>STREAM, DGRAM, or SEQPACKET.</p>
+ </item>
</taglist>
</desc>
</func>
+
<func>
<name name="ntoa" arity="1" />
- <fsummary>Convert IPv6 / IPV4 adress to ascii</fsummary>
+      <fsummary>Convert IPv6/IPv4 address to ASCII.</fsummary>
<desc>
- <p>Parses an <seealso marker="#type-ip_address">ip_address()</seealso> and returns an IPv4 or IPv6 address string.</p>
+ <p>Parses an
+ <seealso marker="#type-ip_address"><c>ip_address()</c></seealso>
+ and returns an IPv4 or IPv6 address string.</p>
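+      <p>For example, a quick sketch:</p>
+      <code type="none"><![CDATA[
+"127.0.0.1" = inet:ntoa({127,0,0,1}),
+Ipv6String  = inet:ntoa({0,0,0,0,0,0,0,1}).   % typically "::1"]]></code>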
</desc>
</func>
+
+ <func>
+ <name name="parse_address" arity="1" />
+ <fsummary>Parse an IPv4 or IPv6 address.</fsummary>
+ <desc>
+ <p>Parses an IPv4 or IPv6 address string and returns an
+ <seealso marker="#type-ip4_address"><c>ip4_address()</c></seealso> or
+ <seealso marker="#type-ip6_address"><c>ip6_address()</c></seealso>.
+ Accepts a shortened IPv4 address string.</p>
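+      <p>For example:</p>
+      <code type="none"><![CDATA[
+{ok, {192,168,42,2}}    = inet:parse_address("192.168.42.2"),
+{ok, {0,0,0,0,0,0,0,1}} = inet:parse_address("::1"),
+{error, einval}         = inet:parse_address("not an address").]]></code>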
+ </desc>
+ </func>
+
<func>
<name name="parse_ipv4_address" arity="1" />
- <fsummary>Parse an IPv4 address</fsummary>
+ <fsummary>Parse an IPv4 address.</fsummary>
<desc>
- <p>Parses an IPv4 address string and returns an <seealso marker="#type-ip4_address">ip4_address()</seealso>.
- Accepts a shortened IPv4 shortened address string.</p>
+ <p>Parses an IPv4 address string and returns an
+ <seealso marker="#type-ip4_address"><c>ip4_address()</c></seealso>.
+ Accepts a shortened IPv4 address string.</p>
</desc>
</func>
+
<func>
<name name="parse_ipv4strict_address" arity="1" />
<fsummary>Parse an IPv4 address strict.</fsummary>
<desc>
- <p>Parses an IPv4 address string containing four fields, i.e <em>not</em> shortened, and returns an <seealso marker="#type-ip4_address">ip4_address()</seealso>.</p>
+ <p>Parses an IPv4 address string containing four fields, that is,
+ <em>not</em> shortened, and returns an
+ <seealso marker="#type-ip4_address"><c>ip4_address()</c></seealso>.
+ </p>
</desc>
</func>
+
<func>
<name name="parse_ipv6_address" arity="1" />
- <fsummary>Parse an IPv6 address</fsummary>
+ <fsummary>Parse an IPv6 address.</fsummary>
<desc>
- <p>Parses an IPv6 address string and returns an <seealso marker="#type-ip6_address">ip6_address()</seealso>.
- If an IPv4 address string is passed, an IPv4-mapped IPv6 address is returned.</p>
+ <p>Parses an IPv6 address string and returns an
+ <seealso marker="#type-ip6_address"><c>ip6_address()</c></seealso>.
+ If an IPv4 address string is specified, an IPv4-mapped IPv6 address
+ is returned.</p>
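+      <p>For example (a sketch; the tuples show the expected word values):</p>
+      <code type="none"><![CDATA[
+{ok, {16#fe80,0,0,0,0,0,0,1}} = inet:parse_ipv6_address("fe80::1"),
+%% An IPv4 address string gives an IPv4-mapped IPv6 address:
+{ok, {0,0,0,0,0,16#ffff,16#7f00,16#0001}} =
+    inet:parse_ipv6_address("127.0.0.1").]]></code>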
</desc>
</func>
+
<func>
<name name="parse_ipv6strict_address" arity="1" />
<fsummary>Parse an IPv6 address strict.</fsummary>
<desc>
- <p>Parses an IPv6 address string and returns an <seealso marker="#type-ip6_address">ip6_address()</seealso>.
- Does <em>not</em> accept IPv4 adresses.</p>
+ <p>Parses an IPv6 address string and returns an
+ <seealso marker="#type-ip6_address"><c>ip6_address()</c></seealso>.
+ Does <em>not</em> accept IPv4 addresses.</p>
</desc>
</func>
+
<func>
- <name name="parse_address" arity="1" />
- <fsummary>Parse an IPv4 or IPv6 address.</fsummary>
+ <name name="ipv4_mapped_ipv6_address" arity="1" />
+ <fsummary>Convert to and from IPv4-mapped IPv6 address.</fsummary>
<desc>
- <p>Parses an IPv4 or IPv6 address string and returns an <seealso marker="#type-ip4_address">ip4_address()</seealso> or <seealso marker="#type-ip6_address">ip6_address()</seealso>. Accepts a shortened IPv4 address string.</p>
+        <p>
+          Converts an IPv4 address to an IPv4-mapped IPv6 address,
+          or the reverse. When converting from an IPv6 address,
+          all but the two low words are ignored, so this function also
+          works on some other types of IPv6 addresses than IPv4-mapped
+          ones.
+        </p>
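+        <p>For example, a sketch of both directions, assuming the converted
+          address tuple is returned directly:</p>
+        <code type="none"><![CDATA[
+{0,0,0,0,0,16#ffff,16#c000,16#0201} =
+    inet:ipv4_mapped_ipv6_address({192,0,2,1}),
+{192,0,2,1} =
+    inet:ipv4_mapped_ipv6_address({0,0,0,0,0,16#ffff,16#c000,16#0201}).]]></code>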
</desc>
</func>
+
<func>
<name name="parse_strict_address" arity="1" />
<fsummary>Parse an IPv4 or IPv6 address strict.</fsummary>
<desc>
- <p>Parses an IPv4 or IPv6 address string and returns an <seealso marker="#type-ip4_address">ip4_address()</seealso> or <seealso marker="#type-ip6_address">ip6_address()</seealso>. Does <em>not</em> accept a shortened IPv4 address string.</p>
+ <p>Parses an IPv4 or IPv6 address string and returns an
+ <seealso marker="#type-ip4_address"><c>ip4_address()</c></seealso> or
+ <seealso marker="#type-ip6_address"><c>ip6_address()</c></seealso>.
+ Does <em>not</em> accept a shortened IPv4 address string.</p>
</desc>
</func>
+
<func>
<name name="peername" arity="1"/>
- <fsummary>Return the address and port for the other end of a connection</fsummary>
+ <fsummary>Return the address and port for the other end of a connection.
+ </fsummary>
<desc>
- <p>
- Returns the address and port for the other end of a
- connection.
- </p>
- <p>
- Note that for SCTP sockets this function only returns
- one of the socket's peer addresses. The function
- <seealso marker="#peernames/1">peernames/1,2</seealso>
- returns all.
- </p>
+ <p>Returns the address and port for the other end of a connection.</p>
+ <p>Notice that for SCTP sockets, this function returns only
+ one of the peer addresses of the socket. Function
+ <seealso marker="#peernames/1"><c>peernames/1,2</c></seealso>
+ returns all.</p>
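+      <p>For example, a sketch that logs the peer of a connected socket
+        (the function name is only an illustration):</p>
+      <code type="none"><![CDATA[
+log_peer(Socket) ->
+    {ok, {Addr, Port}} = inet:peername(Socket),
+    io:format("peer: ~s:~p~n", [inet:ntoa(Addr), Port]).]]></code>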
</desc>
</func>
+
<func>
<name name="peernames" arity="1"/>
- <fsummary>
- Return all address/port numbers for the other end of a connection
- </fsummary>
+ <fsummary>Return all address/port numbers for the other end of a
+ connection.</fsummary>
<desc>
- <p>
- Equivalent to
+ <p>Equivalent to
<seealso marker="#peernames/2"><c>peernames(<anno>Socket</anno>, 0)</c></seealso>.
- Note that this function's behaviour for an SCTP
+ </p>
+ <p>Notice that the behavior of this function for an SCTP
one-to-many style socket is not defined by the
- <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">SCTP Sockets API Extensions</url>.
- </p>
+ <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">SCTP Sockets API Extensions</url>.</p>
</desc>
</func>
+
<func>
<name name="peernames" arity="2"/>
- <fsummary>
- Return all address/port numbers for the other end of a connection
- </fsummary>
+ <fsummary>Return all address/port numbers for the other end of a
+ connection.</fsummary>
<desc>
- <p>
- Returns a list of all address/port number pairs for the other end
- of a socket's association <c><anno>Assoc</anno></c>.
- </p>
- <p>
- This function can return multiple addresses for multihomed
- sockets such as SCTP sockets. For other sockets it
- returns a one element list.
- </p>
- <p>
- Note that the <c><anno>Assoc</anno></c> parameter is by the
+ <p>Returns a list of all address/port number pairs for the other end
+ of an association <c><anno>Assoc</anno></c> of a socket.</p>
+ <p>This function can return multiple addresses for multihomed
+ sockets, such as SCTP sockets. For other sockets it
+ returns a one-element list.</p>
+ <p>Notice that parameter <c><anno>Assoc</anno></c> is by the
<url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">SCTP Sockets API Extensions</url>
defined to be ignored for
- one-to-one style sockets. What the special value <c>0</c>
- means hence its behaviour for one-to-many style sockets
- is unfortunately not defined.
- </p>
+ one-to-one style sockets. What the special value <c>0</c>
+        means, and hence its behavior for one-to-many style sockets,
+ is unfortunately undefined.</p>
</desc>
</func>
+
<func>
<name name="port" arity="1"/>
- <fsummary>Return the local port number for a socket</fsummary>
+ <fsummary>Return the local port number for a socket.</fsummary>
<desc>
<p>Returns the local port number for a socket.</p>
</desc>
</func>
- <func>
- <name name="sockname" arity="1"/>
- <fsummary>Return the local address and port number for a socket</fsummary>
- <desc>
- <p>Returns the local address and port number for a socket.</p>
- <p>
- Note that for SCTP sockets this function only returns
- one of the socket addresses. The function
- <seealso marker="#socknames/1">socknames/1,2</seealso>
- returns all.
- </p>
- </desc>
- </func>
- <func>
- <name name="socknames" arity="1"/>
- <fsummary>Return all local address/port numbers for a socket</fsummary>
- <desc>
- <p>
- Equivalent to
- <seealso marker="#socknames/2"><c>socknames(<anno>Socket</anno>, 0)</c></seealso>.
- </p>
- </desc>
- </func>
- <func>
- <name name="socknames" arity="2"/>
- <fsummary>Return all local address/port numbers for a socket</fsummary>
- <desc>
- <p>
- Returns a list of all local address/port number pairs for a socket
- for the given association <c><anno>Assoc</anno></c>.
- </p>
- <p>
- This function can return multiple addresses for multihomed
- sockets such as SCTP sockets. For other sockets it
- returns a one element list.
- </p>
- <p>
- Note that the <c><anno>Assoc</anno></c> parameter is by the
- <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">SCTP Sockets API Extensions</url>
- defined to be ignored for one-to-one style sockets.
- For one-to-many style sockets the special value <c>0</c>
- is defined to mean that the returned addresses shall be
- without regard to any particular association.
- How different SCTP implementations interprets this varies somewhat.
- </p>
- </desc>
- </func>
+
<func>
<name name="setopts" arity="2"/>
- <fsummary>Set one or more options for a socket</fsummary>
- <type name="socket_setopt"/>
+ <fsummary>Set one or more options for a socket.</fsummary>
<desc>
- <p>Sets one or more options for a socket. The following options
- are available:</p>
+ <p>Sets one or more options for a socket.</p>
+ <p>The following options are available:</p>
<taglist>
<tag><c>{active, true | false | once | N}</c></tag>
<item>
<p>If the value is <c>true</c>, which is the default,
- everything received from the socket will be sent as
- messages to the receiving process. If the value is
- <c>false</c> (passive mode), the process must explicitly
- receive incoming data by calling
+ everything received from the socket is sent as
+ messages to the receiving process.</p>
+ <p>If the value is <c>false</c> (passive mode), the process must
+ explicitly receive incoming data by calling
<seealso marker="gen_tcp#recv/2"><c>gen_tcp:recv/2,3</c></seealso>,
- <seealso marker="gen_udp#recv/2"><c>gen_udp:recv/2,3</c></seealso>
+ <seealso marker="gen_udp#recv/2"><c>gen_udp:recv/2,3</c></seealso>,
or <seealso marker="gen_sctp#recv/1"><c>gen_sctp:recv/1,2</c></seealso>
(depending on the type of socket).</p>
<p>If the value is <c>once</c> (<c>{active, once}</c>),
- <em>one</em> data message from the socket will be sent
- to the process. To receive one more message,
- <c>setopts/2</c> must be called again with the
- <c>{active, once}</c> option.</p>
+ <em>one</em> data message from the socket is sent
+ to the process. To receive one more message,
+ <c>setopts/2</c> must be called again with option
+ <c>{active, once}</c>.</p>
<p>If the value is an integer <c>N</c> in the range -32768 to 32767
(inclusive), the value is added to the socket's count of data
messages sent to the controlling process. A socket's default
- message count is 0. If a negative value is specified and its
- magnitude is equal to or greater than the socket's current
- message count, the socket's message count is set to 0. Once
- the socket's message count reaches 0, either due to sending
+ message count is <c>0</c>. If a negative value is specified, and
+ its magnitude is equal to or greater than the socket's current
+ message count, the socket's message count is set to <c>0</c>.
+ Once the socket's message count reaches <c>0</c>, either because
+ of sending
received data messages to the process or by being explicitly set,
the process is then notified by a special message, specific to
the type of socket, that the socket has entered passive
@@ -575,339 +819,350 @@ fe80::204:acff:fe17:bf38
messages <c>setopts/2</c> must be called again to set the
socket back into an active mode.</p>
<p>When using <c>{active, once}</c> or <c>{active, N}</c>, the
- socket changes behaviour automatically when data is received.
- This can sometimes be confusing in combination with
- connection-oriented sockets (i.e. <c>gen_tcp</c>) as a socket
- with <c>{active, false}</c> behaviour reports closing
+ socket changes behavior automatically when data is received.
+ This can be confusing in combination with connection-oriented
+ sockets (that is, <c>gen_tcp</c>), as a socket
+ with <c>{active, false}</c> behavior reports closing
differently than a socket with <c>{active, true}</c>
- behaviour. To make programming easier, a socket where
- the peer closed and this was detected while in
- <c>{active, false}</c> mode, will still generate the
- message
+ behavior. To simplify programming, a socket where
+ the peer closed, and this is detected while in
+ <c>{active, false}</c> mode, still generates message
<c>{tcp_closed,Socket}</c> when set to <c>{active, once}</c>,
- <c>{active, true}</c> or <c>{active, N}</c> mode. It is therefore
- safe to assume that the message
- <c>{tcp_closed,Socket}</c>, possibly followed by socket
- port termination (depending on the <c>exit_on_close</c>
- option) will eventually appear when a socket changes
+ <c>{active, true}</c>, or <c>{active, N}</c> mode.
+ It is therefore safe to assume that message
+ <c>{tcp_closed,Socket}</c>, possibly followed by socket port
+ termination (depending on option <c>exit_on_close</c>)
+ eventually appears when a socket changes
back and forth between <c>{active, true}</c> and
<c>{active, false}</c> mode. However,
- <em>when</em> peer closing is detected is all up to the
+ <em>when</em> peer closing is detected it is all up to the
underlying TCP/IP stack and protocol.</p>
- <p>Note that <c>{active, true}</c> mode provides no flow
- control; a fast sender could easily overflow the
- receiver with incoming messages. The same is true of
- <c>{active, N}</c> mode while the message count is greater
- than zero. Use active mode only if
+ <p>Notice that <c>{active, true}</c> mode provides no flow
+ control; a fast sender can easily overflow the
+ receiver with incoming messages. The same is true for
+ <c>{active, N}</c> mode, while the message count is greater
+ than zero.</p>
+ <p>Use active mode only if
your high-level protocol provides its own flow control
- (for instance, acknowledging received messages) or the
+ (for example, acknowledging received messages) or the
amount of data exchanged is small. <c>{active, false}</c>
- mode, use of the <c>{active, once}</c> mode or <c>{active, N}</c>
+ mode, use of the <c>{active, once}</c> mode, or <c>{active, N}</c>
mode with values of <c>N</c> appropriate for the application
- provides flow control; the other side will not be able send
+ provides flow control. The other side cannot send
faster than the receiver can read.</p>
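+            <p>A minimal receive loop using <c>{active, once}</c> for flow
+            control (a sketch; <c>Socket</c> is assumed to be a connected
+            <c>gen_tcp</c> socket and <c>handle_data/1</c> is a hypothetical
+            callback):</p>
+            <code type="none"><![CDATA[
+loop(Socket) ->
+    ok = inet:setopts(Socket, [{active, once}]),
+    receive
+        {tcp, Socket, Data} ->
+            handle_data(Data),
+            loop(Socket);
+        {tcp_closed, Socket} ->
+            closed;
+        {tcp_error, Socket, Reason} ->
+            {error, Reason}
+    end.]]></code>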
</item>
-
- <tag><c>{broadcast, Boolean}</c>(UDP sockets)</tag>
+ <tag><c>{broadcast, Boolean}</c> (UDP sockets)</tag>
<item>
- <p>Enable/disable permission to send broadcasts.</p>
- <marker id="option-buffer"></marker>
+ <p>Enables/disables permission to send broadcasts.</p>
+ <marker id="option-buffer"></marker>
+ </item>
+ <tag><c>{buffer, Size}</c></tag>
+ <item>
+ <p>The size of the user-level buffer used by
+ the driver. Not to be confused with options <c>sndbuf</c>
+ and <c>recbuf</c>, which correspond to the
+ Kernel socket buffers. For TCP it is recommended
+ to have <c>val(buffer) &gt;= val(recbuf)</c> to
+ avoid performance issues because of unnecessary copying.
+            For UDP the same recommendation applies, but the maximum should
+            not be larger than the MTU of the network path.
+            <c>val(buffer)</c> is automatically set to the above
+            maximum when <c>recbuf</c> is set.
+            However, as the size set for <c>recbuf</c>
+            usually becomes larger, you are encouraged to use
+            <seealso marker="#getopts/2"><c>getopts/2</c></seealso>
+            to analyze the behavior of your operating system.</p>
+            <p>Note that this is also the maximum amount of data that can be
+            received from a single <c>recv</c> call. If you are using an MTU
+            higher than normal, consider setting <c>buffer</c> higher.</p>
</item>
-
- <tag><c>{buffer, Size}</c></tag>
- <item>
- <p>The size of the user-level software buffer used by
- the driver. Not to be confused with <c>sndbuf</c>
- and <c>recbuf</c> options which correspond to
- the kernel socket buffers. It is recommended
- to have <c>val(buffer) &gt;= max(val(sndbuf),val(recbuf))</c> to
- avoid performance issues due to unnecessary copying.
- In fact, the <c>val(buffer)</c> is automatically set to
- the above maximum when <c>sndbuf</c> or <c>recbuf</c> values are set.
- However, since the actual sizes set for <c>sndbuf</c> and <c>recbuf</c>
- usually becomes larger, you are encouraged to use
- <seealso marker="inet#getopts/2"><c>inet:getopts/2</c></seealso>
- to analyze the behavior of your operating system.</p>
- </item>
-
<tag><c>{delay_send, Boolean}</c></tag>
<item>
<p>Normally, when an Erlang process sends to a socket,
- the driver will try to immediately send the data. If that
- fails, the driver will use any means available to queue
+ the driver tries to send the data immediately. If that
+ fails, the driver uses any means available to queue
up the message to be sent whenever the operating system
says it can handle it. Setting <c>{delay_send, true}</c>
- will make <em>all</em> messages queue up. This makes
- the messages actually sent onto the network be larger but
- fewer. The option actually affects the scheduling of send
+ makes <em>all</em> messages queue up. The messages sent
+ to the network are then larger but fewer.
+ The option affects the scheduling of send
requests versus Erlang processes instead of changing any
- real property of the socket. Needless to say it is an
- implementation specific option. Default is <c>false</c>.</p>
+ real property of the socket. The option is
+ implementation-specific. Defaults to <c>false</c>.</p>
+ </item>
+ <tag><c>{deliver, port | term}</c></tag>
+ <item>
+          <p>When <c>{active, true}</c>, data is delivered in the form
+ <c>port</c> : <c>{S, {data, [H1,..Hsz | Data]}}</c> or
+ <c>term</c> : <c>{tcp, S, [H1..Hsz | Data]}</c>.</p>
</item>
-
- <tag><c>{deliver, port | term}</c></tag>
- <item> <p> When <c>{active, true}</c> delivers data on the forms
- <c>port</c> : <c>{S, {data, [H1,..Hsz | Data]}}</c> or
- <c>term</c> : <c>{tcp, S, [H1..Hsz | Data]}</c>.
- </p>
- </item>
-
<tag><c>{dontroute, Boolean}</c></tag>
<item>
- <p>Enable/disable routing bypass for outgoing messages.</p>
+ <p>Enables/disables routing bypass for outgoing messages.</p>
</item>
-
<tag><c>{exit_on_close, Boolean}</c></tag>
<item>
- <p>By default this option is set to <c>true</c>.</p>
+ <p>This option is set to <c>true</c> by default.</p>
<p>The only reason to set it to <c>false</c> is if you want
- to continue sending data to the socket after a close has
- been detected, for instance if the peer has used
- <seealso marker="gen_tcp#shutdown/2">gen_tcp:shutdown/2</seealso>
- to shutdown the write side.</p>
+ to continue sending data to the socket after a close is
+ detected, for example, if the peer uses
+ <seealso marker="gen_tcp#shutdown/2"><c>gen_tcp:shutdown/2</c></seealso>
+ to shut down the write side.</p>
</item>
-
<tag><c>{header, Size}</c></tag>
<item>
- <p>This option is only meaningful if the <c>binary</c>
- option was specified when the socket was created. If
- the <c>header</c> option is specified, the first
+ <p>This option is only meaningful if option <c>binary</c>
+ was specified when the socket was created. If option
+ <c>header</c> is specified, the first
<c>Size</c> number bytes of data received from the socket
- will be elements of a list, and the rest of the data will
- be a binary given as the tail of the same list. If for
- example <c>Size == 2</c>, the data received will match
+ are elements of a list, and the remaining data is
+ a binary specified as the tail of the same list. For example,
+ if <c>Size == 2</c>, the data received matches
<c>[Byte1,Byte2|Binary]</c>.</p>
</item>
-
<tag><c>{high_msgq_watermark, Size}</c></tag>
<item>
- <p>The socket message queue will be set into a busy
- state when the amount of data queued on the message
- queue reaches this limit. Note that this limit only
- concerns data that have not yet reached the ERTS internal
- socket implementation. Default value used is 8 kB.</p>
- <p>Senders of data to the socket will be suspended if
- either the socket message queue is busy, or the socket
- itself is busy.</p>
- <p>For more information see the <c>low_msgq_watermark</c>,
- <c>high_watermark</c>, and <c>low_watermark</c> options.</p>
- <p>Note that distribution sockets will disable the use of
- <c>high_msgq_watermark</c> and <c>low_msgq_watermark</c>,
- and will instead use the
- <seealso marker="erts:erlang#system_info_dist_buf_busy_limit">distribution
- buffer busy limit</seealso> which is a similar feature.</p>
+ <p>The socket message queue is set to a busy
+ state when the amount of data on the message
+ queue reaches this limit. Notice that this limit only
+ concerns data that has not yet reached the ERTS internal
+ socket implementation. Defaults to 8 kB.</p>
+ <p>Senders of data to the socket are suspended if
+ either the socket message queue is busy or the socket
+ itself is busy.</p>
+ <p>For more information, see options <c>low_msgq_watermark</c>,
+ <c>high_watermark</c>, and <c>low_watermark</c>.</p>
+ <p>Notice that distribution sockets disable the use of
+ <c>high_msgq_watermark</c> and <c>low_msgq_watermark</c>.
+            Instead they use the
+ <seealso marker="erts:erlang#system_info_dist_buf_busy_limit">distribution buffer busy limit</seealso>,
+ which is a similar feature.</p>
</item>
-
<tag><c>{high_watermark, Size}</c> (TCP/IP sockets)</tag>
<item>
- <p>The socket will be set into a busy state when the amount
- of data queued internally by the ERTS socket implementation
- reaches this limit. Default value used is 8 kB.</p>
- <p>Senders of data to the socket will be suspended if
- either the socket message queue is busy, or the socket
- itself is busy.</p>
- <p>For more information see the <c>low_watermark</c>,
- <c>high_msgq_watermark</c>, and <c>low_msqg_watermark</c>
- options.</p>
+ <p>The socket is set to a busy state when the amount
+ of data queued internally by the ERTS socket implementation
+ reaches this limit. Defaults to 8 kB.</p>
+ <p>Senders of data to the socket are suspended if
+ either the socket message queue is busy or the socket
+ itself is busy.</p>
+ <p>For more information, see options <c>low_watermark</c>,
+ <c>high_msgq_watermark</c>, and <c>low_msqg_watermark</c>.</p>
</item>
-
<tag><c>{ipv6_v6only, Boolean}</c></tag>
<item>
- <p>
- Restricts the socket to only use IPv6, prohibiting any
+ <p>Restricts the socket to use only IPv6, prohibiting any
IPv4 connections. This is only applicable for
- IPv6 sockets (option <c>inet6</c>).
- </p>
- <p>
- On most platforms this option has to be set on the socket
- before associating it to an address. Therefore it is only
- reasonable to give it when creating the socket and not
- to use it when calling the function
- (<seealso marker="#setopts/2">setopts/2</seealso>)
- containing this description.
- </p>
- <p>
- The behaviour of a socket with this socket option set to
- <c>true</c> is becoming the only portable one. The original
+ IPv6 sockets (option <c>inet6</c>).</p>
+ <p>On most platforms this option must be set on the socket
+ before associating it to an address. It is therefore only
+ reasonable to specify it when creating the socket and not
+            to use it when calling the function
+ (<seealso marker="#setopts/2"><c>setopts/2</c></seealso>)
+ containing this description.</p>
+ <p>The behavior of a socket with this option set to
+ <c>true</c> is the only portable one. The original
idea when IPv6 was new of using IPv6 for all traffic
is now not recommended by FreeBSD (you can use
<c>{ipv6_v6only,false}</c> to override the recommended
system default value),
- forbidden by OpenBSD (the supported GENERIC kernel)
- and impossible on Windows (that has separate
+ forbidden by OpenBSD (the supported GENERIC kernel),
+ and impossible on Windows (which has separate
IPv4 and IPv6 protocol stacks). Most Linux distros
still have a system default value of <c>false</c>.
- This policy shift among operating systems towards
- separating IPv6 from IPv4 traffic has evolved since
+ This policy shift among operating systems to
+ separate IPv6 from IPv4 traffic has evolved, as
it gradually proved hard and complicated to get
- a dual stack implementation correct and secure.
- </p>
- <p>
- On some platforms the only allowed value for this option
- is <c>true</c>, e.g. OpenBSD and Windows. Trying to set
- this option to <c>false</c> when creating the socket
- will in this case fail.
- </p>
- <p>
- Setting this option on platforms where it does not exist
- is ignored and getting this option with
- <seealso marker="#getopts/2">getopts/2</seealso>
- returns no value i.e the returned list will not contain an
- <c>{ipv6_v6only,_}</c> tuple. On Windows the option acually
- does not exist, but it is emulated as being a
- read-only option with the value <c>true</c>.
- </p>
- <p>
- So it boils down to that setting this option to <c>true</c>
- when creating a socket will never fail except possibly
- (at the time of this writing) on a platform where you
+ a dual stack implementation correct and secure.</p>
+ <p>On some platforms, the only allowed value for this option
+ is <c>true</c>, for example, OpenBSD and Windows. Trying to set
+ this option to <c>false</c>, when creating the socket, fails
+ in this case.</p>
+ <p>Setting this option on platforms where it does not exist
+ is ignored. Getting this option with
+ <seealso marker="#getopts/2"><c>getopts/2</c></seealso>
+ returns no value, that is, the returned list does not contain an
+ <c>{ipv6_v6only,_}</c> tuple. On Windows, the option
+ does not exist, but it is emulated as a
+ read-only option with value <c>true</c>.</p>
+ <p>Therefore, setting this option to <c>true</c>
+ when creating a socket never fails, except possibly on a
+ platform where you
have customized the kernel to only allow <c>false</c>,
- which might be doable (but weird) on e.g. OpenBSD.
- </p>
- <p>
- If you read back the option value using
- <seealso marker="#getopts/2">getopts/2</seealso>
- and get no value the option does not exist in the host OS
- and all bets are off regarding the behaviour of both
- an IPv6 and an IPv4 socket listening on the same port
- as well as for an IPv6 socket getting IPv4 traffic.
- </p>
+ which can be doable (but awkward) on, for example, OpenBSD.</p>
+ <p>If you read back the option value using
+ <seealso marker="#getopts/2"><c>getopts/2</c></seealso>
+ and get no value, the option does not exist in the host
+ operating system. The behavior of both an IPv6 and an IPv4
+ socket listening on the same port, and for an IPv6 socket
+ getting IPv4 traffic is then no longer predictable.</p>
</item>
-
<tag><c>{keepalive, Boolean}</c>(TCP/IP sockets)</tag>
<item>
<p>Enables/disables periodic transmission on a connected
- socket, when no other data is being exchanged. If
+ socket when no other data is exchanged. If
the other end does not respond, the connection is
- considered broken and an error message will be sent to
- the controlling process. Default disabled.</p>
- <marker id="option-linger"></marker>
+ considered broken and an error message is sent to
+ the controlling process. Defaults to <c>disabled</c>.</p>
+ <marker id="option-linger"></marker>
+ </item>
+ <tag><c>{linger, {true|false, Seconds}}</c></tag>
+ <item>
+ <p>Determines the time-out, in seconds, for flushing unsent data
+ in the <c>close/1</c> socket call. If the first component of
+ the value tuple is <c>false</c>, the second is ignored. This
+ means that <c>close/1</c> returns immediately, not waiting
+ for data to be flushed. Otherwise, the second component is
+ the flushing time-out, in seconds.</p>
</item>
-
- <tag><c>{linger, {true|false, Seconds}}</c></tag>
- <item>
- <p>Determines the timeout in seconds for flushing unsent data in the
- <c>close/1</c> socket call. If the 1st component of the value
- tuple is <c>false</c>, the 2nd one is ignored, which means that
- <c>close/1</c> returns immediately not waiting
- for data to be flushed. Otherwise, the 2nd component is
- the flushing time-out in seconds.</p>
- </item>
-
<tag><c>{low_msgq_watermark, Size}</c></tag>
<item>
<p>If the socket message queue is in a busy state, the
- socket message queue will be set in a not busy state when
- the amount of data queued in the message queue falls
- below this limit. Note that this limit only concerns data
- that have not yet reached the ERTS internal socket
- implementation. Default value used is 4 kB.</p>
- <p>Senders that have been suspended due to either a
- busy message queue or a busy socket, will be resumed
- when neither the socket message queue, nor the socket
- are busy.</p>
- <p>For more information see the <c>high_msgq_watermark</c>,
- <c>high_watermark</c>, and <c>low_watermark</c> options.</p>
- <p>Note that distribution sockets will disable the use of
- <c>high_msgq_watermark</c> and <c>low_msgq_watermark</c>,
- and will instead use the
- <seealso marker="erts:erlang#system_info_dist_buf_busy_limit">distribution
- buffer busy limit</seealso> which is a similar feature.</p>
+ socket message queue is set in a not busy state when
+ the amount of data queued in the message queue falls
+ below this limit. Notice that this limit only concerns data
+ that has not yet reached the ERTS internal socket
+ implementation. Defaults to 4 kB.</p>
+ <p>Senders that are suspended because of either a
+ busy message queue or a busy socket are resumed
+ when the socket message queue and the socket
+ are not busy.</p>
+ <p>For more information, see options <c>high_msgq_watermark</c>,
+ <c>high_watermark</c>, and <c>low_watermark</c>.</p>
+ <p>Notice that distribution sockets disable the use of
+ <c>high_msgq_watermark</c> and <c>low_msgq_watermark</c>.
+ Instead they use the
+ <seealso marker="erts:erlang#system_info_dist_buf_busy_limit">distribution
+ buffer busy limit</seealso>, which is a similar feature.</p>
</item>
-
<tag><c>{low_watermark, Size}</c> (TCP/IP sockets)</tag>
<item>
- <p>If the socket is in a busy state, the socket will
- be set in a not busy state when the amount of data
- queued internally by the ERTS socket implementation
- falls below this limit. Default value used is 4 kB.</p>
- <p>Senders that have been suspended due to either a
- busy message queue or a busy socket, will be resumed
- when neither the socket message queue, nor the socket
- are busy.</p>
- <p>For more information see the <c>high_watermark</c>,
- <c>high_msgq_watermark</c>, and <c>low_msgq_watermark</c>
- options.</p>
+ <p>If the socket is in a busy state, the socket is
+ set in a not busy state when the amount of data
+ queued internally by the ERTS socket implementation
+ falls below this limit. Defaults to 4 kB.</p>
+ <p>Senders that are suspended because of a
+ busy message queue or a busy socket are resumed
+ when the socket message queue and the socket are not busy.</p>
+ <p>For more information, see options <c>high_watermark</c>,
+ <c>high_msgq_watermark</c>, and <c>low_msgq_watermark</c>.</p>
</item>
-
- <tag><c>{mode, Mode :: binary | list}</c></tag>
+ <tag><c>{mode, Mode :: binary | list}</c></tag>
<item>
- <p>Received <c>Packet</c> is delivered as defined by Mode.</p>
- </item>
-
- <tag><c>{netns, Namespace :: file:filename_all()}</c></tag>
+ <p>
+ Received <c>Packet</c> is delivered as defined by <c>Mode</c>.
+ </p>
+ </item>
+ <tag>
+ <marker id="option-netns"/>
+ <c>{netns, Namespace :: file:filename_all()}</c>
+ </tag>
<item>
- <p>Set a network namespace for the socket. The <c>Namespace</c>
- parameter is a filename defining the namespace for example
- <c>"/var/run/netns/example"</c> typically created by the command
- <c>ip netns add example</c>. This option must be used in a
- function call that creates a socket i.e
- <seealso marker="gen_tcp#connect/3">
- gen_tcp:connect/3,4</seealso>,
- <seealso marker="gen_tcp#listen/2">
- gen_tcp:listen/2</seealso>,
- <seealso marker="gen_udp#open/1">
- gen_udp:open/1,2</seealso> or
- <seealso marker="gen_sctp#open/0">
- gen_sctp:open/0-2</seealso>.
- </p>
- <p>This option uses the Linux specific syscall
- <c>setns()</c> such as in Linux kernel 3.0 or later
- and therefore only exists when the runtime system
- has been compiled for such an operating system.
- </p>
<p>
- The virtual machine also needs elevated privileges either
- running as superuser or (for Linux) having the capability
- <c>CAP_SYS_ADMIN</c> according to the documentation for setns(2).
- However, during testing also <c>CAP_SYS_PTRACE</c>
- and <c>CAP_DAC_READ_SEARCH</c> has proven to be necessary.
- Example:</p><code>
-setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp
-</code>
- <p>Note also that the filesystem containing the virtual machine
- executable (<c>beam.smp</c> in the example above) has to be local,
- mounted without the <c>nosetuid</c> flag,
- support extended attributes and that
- the kernel has to support file capabilities.
- All this runs out of the box on at least Ubuntu 12.04 LTS,
- except that SCTP sockets appears to not support
- network namespaces.
+ Sets a network namespace for the socket. Parameter
+ <c>Namespace</c> is a filename defining the namespace, for
+ example, <c>"/var/run/netns/example"</c>, typically created by
+ command <c>ip netns add example</c>. This option must be used in
+ a function call that creates a socket, that is,
+ <seealso marker="gen_tcp#connect/3"><c>gen_tcp:connect/3,4</c></seealso>,
+ <seealso marker="gen_tcp#listen/2"><c>gen_tcp:listen/2</c></seealso>,
+ <seealso marker="gen_udp#open/1"><c>gen_udp:open/1,2</c></seealso>
+ or
+ <seealso marker="gen_sctp#open/0"><c>gen_sctp:open/0,1,2</c></seealso>,
+ and also
+ <seealso marker="#getifaddrs/1"><c>getifaddrs/1</c></seealso>.
</p>
- <p>The <c>Namespace</c> is a file name and is encoded
- and decoded as discussed in
- <seealso marker="file">file</seealso>
- except that the emulator flag <c>+fnu</c> is ignored and
- <seealso marker="#getopts/2">getopts/2</seealso>
- for this option will return a binary for the filename
- if the stored filename can not be decoded,
- which should only happen if you set the option using a binary
- that can not be decoded with the emulator's filename encoding:
- <seealso marker="file#native_name_encoding/0">
- file:native_name_encoding/0</seealso>.
+ <p>This option uses the Linux-specific syscall
+ <c>setns()</c>, such as in Linux kernel 3.0 or later,
+ and therefore only exists when the runtime system
+ is compiled for such an operating system.</p>
+ <p>The virtual machine also needs elevated privileges, either
+ running as superuser or (for Linux) having capability
+ <c>CAP_SYS_ADMIN</c> according to the documentation for
+ <c>setns(2)</c>.
+ However, during testing also <c>CAP_SYS_PTRACE</c>
+ and <c>CAP_DAC_READ_SEARCH</c> have proven to be necessary.</p>
+ <p><em>Example:</em></p>
+ <code>
+setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp</code>
+ <p>Notice that the filesystem containing the virtual machine
+ executable (<c>beam.smp</c> in the example) must be local,
+ mounted without flag <c>nosetuid</c>,
+ support extended attributes, and
+ the kernel must support file capabilities.
+ All this runs out of the box on at least Ubuntu 12.04 LTS,
+ except that SCTP sockets appear to not support
+ network namespaces.</p>
+ <p><c>Namespace</c> is a filename and is encoded
+ and decoded as discussed in module
+ <seealso marker="file">file</seealso>, with the
+ following exceptions:</p>
+ <list type="bulleted">
+ <item><p>Emulator flag <c>+fnu</c> is ignored.</p></item>
+ <item><p><seealso marker="#getopts/2"><c>getopts/2</c></seealso>
+ for this option returns a binary for the filename if the stored
+ filename cannot be decoded. This is only to occur if you set the
+ option using a binary that cannot be decoded with the emulator's
+ filename encoding:
+ <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding/0</c></seealso>.</p></item>
+ </list>
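+            <p>For example, a sketch of opening a UDP socket inside a
+            namespace (the namespace file name and the function name are
+            only illustrations):</p>
+            <code type="none"><![CDATA[
+open_in_netns(NsFile) ->
+    %% NsFile is, for example, "/var/run/netns/example"
+    {ok, Socket} = gen_udp:open(0, [binary, {active, false}, {netns, NsFile}]),
+    Socket.]]></code>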
+ </item>
+ <tag><c>{bind_to_device, Ifname :: binary()}</c></tag>
+ <item>
+ <p>Binds a socket to a specific network interface. This option
+ must be used in a function call that creates a socket, that is,
+ <seealso marker="gen_tcp#connect/3"><c>gen_tcp:connect/3,4</c></seealso>,
+ <seealso marker="gen_tcp#listen/2"><c>gen_tcp:listen/2</c></seealso>,
+ <seealso marker="gen_udp#open/1"><c>gen_udp:open/1,2</c></seealso>, or
+ <seealso marker="gen_sctp#open/0"><c>gen_sctp:open/0,1,2</c></seealso>.</p>
+          <p>Unlike <seealso marker="#getifaddrs/0"><c>getifaddrs/0</c></seealso>,
+            <c>Ifname</c> is encoded as a binary. In the unlikely case that a
+            system uses non-7-bit-ASCII characters in network device names,
+            special care has to be taken when encoding this argument.</p>
+ <p>This option uses the Linux-specific socket option
+ <c>SO_BINDTODEVICE</c>, such as in Linux kernel 2.0.30 or later,
+ and therefore only exists when the runtime system
+ is compiled for such an operating system.</p>
+          <p>Before Linux 3.8, this socket option could be set, but could not be retrieved
+ with <seealso marker="#getopts/2"><c>getopts/2</c></seealso>. Since Linux 3.8,
+ it is readable.</p>
+ <p>The virtual machine also needs elevated privileges, either
+ running as superuser or (for Linux) having capability
+ <c>CAP_NET_RAW</c>.</p>
+ <p>The primary use case for this option is to bind sockets into
+ <url href="http://www.kernel.org/doc/Documentation/networking/vrf.txt">Linux VRF instances</url>.
</p>
</item>
-
- <tag><c>list</c></tag>
+ <tag><c>list</c></tag>
<item>
- <p>Received <c>Packet</c> is delivered as a list.</p>
- </item>
-
- <tag><c>binary</c></tag>
+ <p>Received <c>Packet</c> is delivered as a list.</p>
+ </item>
+ <tag><c>binary</c></tag>
<item>
- <p>Received <c>Packet</c> is delivered as a binary.</p>
- </item>
-
+ <p>Received <c>Packet</c> is delivered as a binary.</p>
+ </item>
<tag><c>{nodelay, Boolean}</c>(TCP/IP sockets)</tag>
<item>
- <p>If <c>Boolean == true</c>, the <c>TCP_NODELAY</c> option
- is turned on for the socket, which means that even small
- amounts of data will be sent immediately.</p>
+ <p>If <c>Boolean == true</c>, option <c>TCP_NODELAY</c>
+            is turned on for the socket, which means that even small
+ amounts of data are sent immediately.</p>
+ </item>
+ <tag><c>{nopush, Boolean}</c>(TCP/IP sockets)</tag>
+ <item>
+ <p>This translates to <c>TCP_NOPUSH</c> on BSD and
+ to <c>TCP_CORK</c> on Linux.</p>
+ <p>If <c>Boolean == true</c>, the corresponding option
+ is turned on for the socket, which means that small
+ amounts of data are accumulated until a full MSS-worth
+ of data is available or this option is turned off.</p>
+            <p>Note that while the <c>TCP_NOPUSH</c> socket option is available
+              on OSX, its semantics are very different (for example, unsetting it
+              does not cause an immediate send of accumulated data). Hence, the
+              <c>nopush</c> option is intentionally ignored on OSX.</p>
</item>
<tag><c>{packet, PacketType}</c>(TCP/IP sockets)</tag>
<item>
- <p>Defines the type of packets to use for a socket.
- The following values are valid:</p>
+ <p><marker id="packet"/>Defines the type of packets to use for a socket.
+ Possible values:</p>
<taglist>
<tag><c>raw | 0</c></tag>
<item>
@@ -917,104 +1172,188 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp
<item>
<p>Packets consist of a header specifying the number of
bytes in the packet, followed by that number of bytes.
- The length of header can be one, two, or four bytes;
+                The header length can be one, two, or four bytes,
containing an unsigned integer in big-endian byte order.
- Each send operation will generate the header, and the header
- will be stripped off on each receive operation.</p>
- <p>In current implementation the 4-byte header is limited to 2Gb.</p>
+ Each send operation generates the header, and the header
+ is stripped off on each receive operation.</p>
+            <p>The 4-byte header is limited to 2 GB.</p>
</item>
<tag><c>asn1 | cdr | sunrm | fcgi | tpkt | line</c></tag>
<item>
<p>These packet types only have effect on receiving.
When sending a packet, it is the responsibility of
the application to supply a correct header. On
- receiving, however, there will be one message sent to
+ receiving, however, one message is sent to
the controlling process for each complete packet
received, and, similarly, each call to
<c>gen_tcp:recv/2,3</c> returns one complete packet.
The header is <em>not</em> stripped off.</p>
- <p>The meanings of the packet types are as follows:
- <br></br>
-<c>asn1</c> - ASN.1 BER,
- <br></br>
-<c>sunrm</c> - Sun's RPC encoding,
- <br></br>
-<c>cdr</c> - CORBA (GIOP 1.1),
- <br></br>
-<c>fcgi</c> - Fast CGI,
- <br></br>
-<c>tpkt</c> - TPKT format [RFC1006],
- <br></br>
-<c>line</c> - Line mode, a packet is a line
- terminated with newline, lines longer than
- the receive buffer are truncated.</p>
- </item>
+ <p>The meanings of the packet types are as follows:</p>
+ <list type="bulleted">
+ <item><c>asn1</c> - ASN.1 BER</item>
+ <item><c>sunrm</c> - Sun's RPC encoding</item>
+ <item><c>cdr</c> - CORBA (GIOP 1.1)</item>
+ <item><c>fcgi</c> - Fast CGI</item>
+ <item><c>tpkt</c> - TPKT format [RFC1006]</item>
+                <item><c>line</c> - Line mode, a packet is a line terminated
+                  with newline; lines longer than the receive buffer are
+                  truncated</item>
+ </list>
+ </item>
<tag><c>http | http_bin</c></tag>
<item>
<p>The Hypertext Transfer Protocol. The packets
are returned with the format according to <c>HttpPacket</c>
- described in <seealso marker="erts:erlang#decode_packet/3">
- erlang:decode_packet/3</seealso>. A socket in passive
- mode will return <c>{ok, HttpPacket}</c> from <c>gen_tcp:recv</c>
- while an active socket will send messages like <c>{http,
- Socket, HttpPacket}</c>.</p>
+ described in
+ <seealso marker="erts:erlang#decode_packet/3">
+ <c>erlang:decode_packet/3</c></seealso> in ERTS.
+ A socket in passive
+ mode returns <c>{ok, HttpPacket}</c> from <c>gen_tcp:recv</c>
+ while an active socket sends messages like
+ <c>{http, Socket, HttpPacket}</c>.</p>
</item>
<tag><c>httph | httph_bin</c></tag>
<item>
- <p>These two types are often not needed as the socket will
- automatically switch from <c>http</c>/<c>http_bin</c> to
+ <p>These two types are often not needed, as the socket
+ automatically switches from <c>http</c>/<c>http_bin</c> to
<c>httph</c>/<c>httph_bin</c> internally after the first line
- has been read. There might be occasions however when they are
+ is read. However, there can be occasions when they are
useful, such as parsing trailers from chunked encoding.</p>
</item>
</taglist>
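+              <p>For example, with <c>{packet, 4}</c> each send operation
+              transparently prepends a 4-byte big-endian length header, and
+              each receive operation delivers exactly one such packet with
+              the header stripped. A sketch (the port number is arbitrary
+              and both ends must agree on the framing):</p>
+              <code type="none"><![CDATA[
+{ok, L} = gen_tcp:listen(5678, [binary, {packet, 4}, {active, false}]),
+{ok, C} = gen_tcp:connect("localhost", 5678, [binary, {packet, 4}, {active, false}]),
+{ok, S} = gen_tcp:accept(L),
+ok = gen_tcp:send(C, <<"hello">>),        % length header added automatically
+{ok, <<"hello">>} = gen_tcp:recv(S, 0).   % one whole packet, header stripped]]></code>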
</item>
<tag><c>{packet_size, Integer}</c>(TCP/IP sockets)</tag>
<item>
- <p>Sets the max allowed length of the packet body. If
+ <p>Sets the maximum allowed length of the packet body. If
the packet header indicates that the length of the packet
- is longer than the max allowed length, the packet is
- considered invalid. The same happens if the packet header
- is too big for the socket receive buffer.</p>
- <p>For line oriented protocols (<c>line</c>,<c>http*</c>),
- option <c>packet_size</c> also guarantees that lines up to the
- indicated length are accepted and not considered invalid due
- to internal buffer limitations.</p>
+ is longer than the maximum allowed length, the packet is
+ considered invalid. The same occurs if the packet header
+ is too large for the socket receive buffer.</p>
+ <p>For line-oriented protocols (<c>line</c>, <c>http*</c>),
+ option <c>packet_size</c> also guarantees that lines up to the
+ indicated length are accepted and not considered invalid
+ because of internal buffer limitations.</p>
</item>
<tag><c>{line_delimiter, Char}</c>(TCP/IP sockets)</tag>
<item>
- <p>Sets the line delimiting character for line oriented protocols
- (<c>line</c>). Default value is <c>$\n</c>.</p>
+ <p>Sets the line delimiting character for line-oriented protocols
+ (<c>line</c>). Defaults to <c>$\n</c>.</p>
+ </item>
+ <tag><c>{raw, Protocol, OptionNum, ValueBin}</c></tag>
+ <item>
+ <p>See below.</p>
</item>
-
- <tag><c>{priority, Priority}</c></tag>
- <item> <p>Set the protocol-defined priority for all packets to be sent
- on this socket.</p>
- </item>
-
- <tag><c>{raw, Protocol, OptionNum, ValueBin}</c></tag>
- <item> <p>See below.</p>
- </item>
-
<tag><c>{read_packets, Integer}</c>(UDP sockets)</tag>
<item>
- <p>Sets the max number of UDP packets to read without
+ <p>Sets the maximum number of UDP packets to read without
intervention from the socket when data is available.
When this many packets have been read and delivered
to the destination process, new packets are not read
until a new notification of available data has arrived.
- The default is 5, and if this parameter is set too
- high the system can become unresponsive due to
+ Defaults to <c>5</c>. If this parameter is set too
+ high, the system can become unresponsive because of
UDP packet flooding.</p>
</item>
<tag><c>{recbuf, Size}</c></tag>
<item>
<p>The minimum size of the receive buffer to use for
the socket. You are encouraged to use
- <seealso marker="inet#getopts/2"><c>inet:getopts/2</c></seealso>,
- to retrieve the actual size set by your operating system.
-
- </p>
+ <seealso marker="#getopts/2"><c>getopts/2</c></seealso>
+ to retrieve the size set by your operating system.</p>
+ <marker id="option-recvtclass"></marker>
+ </item>
+ <tag><c>{recvtclass, Boolean}</c></tag>
+ <item>
+ <p>
+            If set to <c>true</c>, activates returning the received
+            <c>TCLASS</c> value on platforms that implement
+            the protocol <c>IPPROTO_IPV6</c>
+            option <c>IPV6_RECVTCLASS</c> or <c>IPV6_2292RECVTCLASS</c>
+            for the socket.
+            The value is returned as a <c>{tclass,TCLASS}</c> tuple
+            regardless of whether the platform returns an <c>IPV6_TCLASS</c>
+            or an <c>IPV6_RECVTCLASS</c> CMSG value.
+ </p>
+ <p>
+            For packet-oriented sockets that support receiving
+ ancillary data with the payload data
+ (<c>gen_udp</c> and <c>gen_sctp</c>),
+ the <c>TCLASS</c> value is returned
+ in an extended return tuple contained in an
+ <seealso marker="inet#type-ancillary_data">
+ ancillary data
+ </seealso>
+ list.
+            For stream-oriented sockets (<c>gen_tcp</c>),
+ the only way to get the <c>TCLASS</c>
+ value is if the platform supports the
+ <seealso marker="gen_tcp#type-pktoptions_value">
+ <c>pktoptions</c>
+ </seealso>
+ option.
+ </p>
+ <marker id="option-recvtos"></marker>
+ </item>
+ <tag><c>{recvtos, Boolean}</c></tag>
+ <item>
+ <p>
+            If set to <c>true</c>, activates returning the received
+            <c>TOS</c> value on platforms that implement
+            the protocol <c>IPPROTO_IP</c> option <c>IP_RECVTOS</c>
+            for the socket.
+            The value is returned as a <c>{tos,TOS}</c> tuple
+            regardless of whether the platform returns an <c>IP_TOS</c>
+            or an <c>IP_RECVTOS</c> CMSG value.
+ </p>
+ <p>
+            For packet-oriented sockets that support receiving
+ ancillary data with the payload data
+ (<c>gen_udp</c> and <c>gen_sctp</c>),
+ the <c>TOS</c> value is returned
+ in an extended return tuple contained in an
+ <seealso marker="inet#type-ancillary_data">
+ ancillary data
+ </seealso>
+ list.
+            For stream-oriented sockets (<c>gen_tcp</c>),
+ the only way to get the <c>TOS</c>
+ value is if the platform supports the
+ <seealso marker="gen_tcp#type-pktoptions_value">
+ <c>pktoptions</c>
+ </seealso>
+ option.
+ </p>
+ <marker id="option-recvttl"></marker>
+ </item>
+ <tag><c>{recvttl, Boolean}</c></tag>
+ <item>
+ <p>
+            If set to <c>true</c>, activates returning the received
+            <c>TTL</c> value on platforms that implement
+            the protocol <c>IPPROTO_IP</c> option <c>IP_RECVTTL</c>
+            for the socket.
+            The value is returned as a <c>{ttl,TTL}</c> tuple
+            regardless of whether the platform returns an <c>IP_TTL</c>
+            or an <c>IP_RECVTTL</c> CMSG value.
+ </p>
+ <p>
+            For packet-oriented sockets that support receiving
+ ancillary data with the payload data
+ (<c>gen_udp</c> and <c>gen_sctp</c>),
+ the <c>TTL</c> value is returned
+ in an extended return tuple contained in an
+ <seealso marker="inet#type-ancillary_data">
+ ancillary data
+ </seealso>
+ list.
+            For stream-oriented sockets (<c>gen_tcp</c>),
+ the only way to get the <c>TTL</c>
+ value is if the platform supports the
+ <seealso marker="gen_tcp#type-pktoptions_value">
+ <c>pktoptions</c>
+ </seealso>
+ option.
+ </p>
</item>
<tag><c>{reuseaddr, Boolean}</c></tag>
<item>
@@ -1023,118 +1362,169 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp
</item>
<tag><c>{send_timeout, Integer}</c></tag>
<item>
- <p>Only allowed for connection oriented sockets.</p>
+ <p>Only allowed for connection-oriented sockets.</p>
<p>Specifies a longest time to wait for a send operation to
be accepted by the underlying TCP stack. When the limit is
- exceeded, the send operation will return
- <c>{error,timeout}</c>. How much of a packet that actually
- got sent is unknown, why the socket should be closed
- whenever a timeout has occurred (see <c>send_timeout_close</c>).
- Default is <c>infinity</c>.</p>
+ exceeded, the send operation returns
+            <c>{error,timeout}</c>. How much of a packet was
+            actually sent is unknown; the socket is therefore to be closed
+ whenever a time-out has occurred (see <c>send_timeout_close</c>
+ below). Defaults to <c>infinity</c>.</p>
</item>
<tag><c>{send_timeout_close, Boolean}</c></tag>
<item>
- <p>Only allowed for connection oriented sockets.</p>
+ <p>Only allowed for connection-oriented sockets.</p>
<p>Used together with <c>send_timeout</c> to specify whether
- the socket will be automatically closed when the send operation
+ the socket is to be automatically closed when the send operation
returns <c>{error,timeout}</c>. The recommended setting is
- <c>true</c> which will automatically close the socket.
- Default is <c>false</c> due to backward compatibility.</p>
- <marker id="option-sndbuf"></marker>
+ <c>true</c>, which automatically closes the socket.
+ Defaults to <c>false</c> because of backward compatibility.</p>
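+          <p>
+            As an illustration only (host, port, and the 5000 ms
+            time-out are arbitrary examples), the two options are
+            typically combined:
+          </p>
+          <code type="none"><![CDATA[
+{ok, S} = gen_tcp:connect("example.com", 80,
+                          [binary,
+                           {send_timeout, 5000},
+                           {send_timeout_close, true}]),
+%% If the TCP stack does not accept the data within 5 seconds,
+%% send/2 returns {error,timeout} and the socket is closed
+%% automatically because send_timeout_close is true.
+Result = gen_tcp:send(S, <<"payload">>),]]></code>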
+ <marker id="option-sndbuf"></marker>
</item>
-
<tag><c>{show_econnreset, Boolean}</c>(TCP/IP sockets)</tag>
<item>
- <p>When this option is set to <c>false</c>, as it is by
- default, an RST that is received from the TCP peer is treated
- as a normal close (as though a FIN was sent). A caller
- to <seealso marker="gen_tcp#recv/2">gen_tcp:recv/2</seealso>
- will get <c>{error, closed}</c>. In active
- mode the controlling process will receive a
- <c>{tcp_close, Socket}</c> message, indicating that the
+          <p>When this option is set to <c>false</c>, which is the
+          default, an RST received from the TCP peer is treated
+          as a normal close (as though a FIN was sent). A caller to
+ <seealso marker="gen_tcp#recv/2"><c>gen_tcp:recv/2</c></seealso>
+ gets <c>{error, closed}</c>. In active
+ mode, the controlling process receives a
+ <c>{tcp_closed, Socket}</c> message, indicating that the
peer has closed the connection.</p>
- <p>Setting this option to <c>true</c> will allow you to
+ <p>Setting this option to <c>true</c> allows you to
distinguish between a connection that was closed normally,
- and one which was aborted (intentionally or unintentionally)
+ and one that was aborted (intentionally or unintentionally)
by the TCP peer. A call to
- <seealso marker="gen_tcp#recv/2">gen_tcp:recv/2</seealso>
- will return <c>{error, econnreset}</c>. In
- active mode, the controlling process will receive a
+ <seealso marker="gen_tcp#recv/2"><c>gen_tcp:recv/2</c></seealso>
+ returns <c>{error, econnreset}</c>. In
+ active mode, the controlling process receives a
<c>{tcp_error, Socket, econnreset}</c> message
before the usual <c>{tcp_closed, Socket}</c>, as is
the case for any other socket error. Calls to
- <seealso marker="gen_tcp#send/2">gen_tcp:send/2</seealso>
- will also return <c>{error, econnreset}</c> when it
+ <seealso marker="gen_tcp#send/2"><c>gen_tcp:send/2</c></seealso>
+          also return <c>{error, econnreset}</c> when it
is detected that a TCP peer has sent an RST.</p>
<p>A connected socket returned from
- <seealso marker="gen_tcp#accept/1">gen_tcp:accept/1</seealso>
- will inherit the <c>show_econnreset</c> setting from the
+ <seealso marker="gen_tcp#accept/1"><c>gen_tcp:accept/1</c></seealso>
+ inherits the <c>show_econnreset</c> setting from the
listening socket.</p>
- <marker id="option-show_econnreset"></marker>
+ <marker id="option-show_econnreset"></marker>
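+          <p>
+            A sketch of detecting a reset connection in passive mode
+            (the listen port is arbitrary and <c>handle/1</c> is a
+            placeholder for application code):
+          </p>
+          <code type="none"><![CDATA[
+{ok, L} = gen_tcp:listen(5678, [binary, {active, false},
+                                {show_econnreset, true}]),
+{ok, S} = gen_tcp:accept(L),
+%% The accepted socket inherits show_econnreset, so an RST from the
+%% peer is reported as {error,econnreset} instead of {error,closed}.
+case gen_tcp:recv(S, 0) of
+    {ok, Packet}        -> handle(Packet);
+    {error, econnreset} -> aborted_by_peer;
+    {error, closed}     -> closed_by_peer
+end,]]></code>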
</item>
-
<tag><c>{sndbuf, Size}</c></tag>
<item>
<p>The minimum size of the send buffer to use for the socket.
- You are encouraged to use
- <seealso marker="inet#getopts/2"><c>inet:getopts/2</c></seealso>,
- to retrieve the actual size set by your operating system.
- </p>
+ You are encouraged to use
+            <seealso marker="#getopts/2"><c>getopts/2</c></seealso>
+ to retrieve the size set by your operating system.</p>
</item>
<tag><c>{priority, Integer}</c></tag>
<item>
- <p>Sets the SO_PRIORITY socket level option on platforms where
- this is implemented. The behaviour and allowed range varies on
- different systems. The option is ignored on platforms where the
- option is not implemented. Use with caution.</p>
+ <p>Sets the <c>SO_PRIORITY</c> socket level option on platforms
+ where this is implemented. The behavior and allowed range varies
+ between different systems.
+ The option is ignored on platforms where it
+ is not implemented. Use with caution.</p>
</item>
<tag><c>{tos, Integer}</c></tag>
<item>
- <p>Sets IP_TOS IP level options on platforms where this is
- implemented. The behaviour and allowed range varies on different
- systems. The option is ignored on platforms where the option is
- not implemented. Use with caution.</p>
+          <p>Sets <c>IP_TOS</c> IP level options on platforms where this is
+ implemented. The behavior and allowed range varies between
+ different systems.
+ The option is ignored on platforms where it is not
+ implemented. Use with caution.</p>
+ </item>
+ <tag><c>{tclass, Integer}</c></tag>
+ <item>
+ <p>
+            Sets <c>IPV6_TCLASS</c> IPv6 level options on platforms
+ where this is implemented. The behavior and allowed range
+ varies between different systems.
+ The option is ignored on platforms where it is not
+ implemented. Use with caution.</p>
</item>
</taglist>
-
- <p>In addition to the options mentioned above, <em>raw</em>
+ <p>In addition to these options, <em>raw</em>
option specifications can be used. The raw options are
- specified as a tuple of arity four, beginning with the tag
- <c>raw</c>, followed by the protocol level, the option number
- and the actual option value specified as a binary. This
- corresponds to the second, third and fourth argument to the
+ specified as a tuple of arity four, beginning with tag
+ <c>raw</c>, followed by the protocol level, the option number,
+ and the option value specified as a binary. This
+ corresponds to the second, third, and fourth arguments to the
<c>setsockopt</c> call in the C socket API. The option value
- needs to be coded in the native endianess of the platform and,
- if a structure is required, needs to follow the struct
+          must be coded in the native endianness of the platform and,
+ if a structure is required, must follow the structure
alignment conventions on the specific platform.</p>
- <p>Using raw socket options require detailed knowledge about
+ <p>Using raw socket options requires detailed knowledge about
the current operating system and TCP stack.</p>
- <p>As an example of the usage of raw options, consider a Linux
- system where you want to set the <c>TCP_LINGER2</c> option on
- the <c>IPPROTO_TCP</c> protocol level in the stack. You know
+ <p><em>Example:</em></p>
+ <p>This example concerns the use of raw options. Consider a Linux
+ system where you want to set option <c>TCP_LINGER2</c> on
+ protocol level <c>IPPROTO_TCP</c> in the stack. You know
that on this particular system it defaults to 60 (seconds),
- but you would like to lower it to 30 for a particular
- socket. The <c>TCP_LINGER2</c> option is not explicitly
- supported by inet, but you know that the protocol level
- translates to the number 6, the option number to the number 8
- and the value is to be given as a 32 bit integer. You can use
- this line of code to set the option for the socket named
+ but you want to lower it to 30 for a particular
+ socket. Option <c>TCP_LINGER2</c> is not explicitly
+ supported by <c>inet</c>, but you know that the protocol level
+ translates to number 6, the option number to number 8,
+ and the value is to be specified as a 32-bit integer. You can use
+ this code line to set the option for the socket named
<c>Sock</c>:</p>
<code type="none"><![CDATA[
- inet:setopts(Sock,[{raw,6,8,<<30:32/native>>}]),]]></code>
+inet:setopts(Sock,[{raw,6,8,<<30:32/native>>}]),]]></code>
<p>As many options are silently discarded by the stack if they
- are given out of range, it could be a good idea to check that
- a raw option really got accepted. This code places the value
- in the variable TcpLinger2:</p>
+          are specified out of range, it can be a good idea to check that
+ a raw option is accepted. The following code places the value
+          in variable <c>TcpLinger2</c>:</p>
<code type="none"><![CDATA[
- {ok,[{raw,6,8,<<TcpLinger2:32/native>>}]}=inet:getopts(Sock,[{raw,6,8,4}]),]]></code>
- <p>Code such as the examples above is inherently non portable,
- even different versions of the same OS on the same platform
- may respond differently to this kind of option
+{ok,[{raw,6,8,<<TcpLinger2:32/native>>}]}=inet:getopts(Sock,[{raw,6,8,4}]),]]></code>
+    <p>Code such as these examples is inherently non-portable;
+ even different versions of the same OS on the same platform
+ can respond differently to this kind of option
manipulation. Use with care.</p>
- <p>Note that the default options for TCP/IP sockets can be
+ <p>Notice that the default options for TCP/IP sockets can be
changed with the Kernel configuration parameters mentioned in
- the beginning of this document.</p>
+ the beginning of this manual page.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="sockname" arity="1"/>
+ <fsummary>Return the local address and port number for a socket.
+ </fsummary>
+ <desc>
+ <p>Returns the local address and port number for a socket.</p>
+ <p>Notice that for SCTP sockets this function returns only
+ one of the socket addresses. Function
+ <seealso marker="#socknames/1"><c>socknames/1,2</c></seealso>
+      returns all of them.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="socknames" arity="1"/>
+ <fsummary>Return all local address/port numbers for a socket.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#socknames/2"><c>socknames(<anno>Socket</anno>, 0)</c></seealso>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="socknames" arity="2"/>
+ <fsummary>Return all local address/port numbers for a socket.</fsummary>
+ <desc>
+ <p>Returns a list of all local address/port number pairs for a socket
+ for the specified association <c><anno>Assoc</anno></c>.</p>
+ <p>This function can return multiple addresses for multihomed
+ sockets, such as SCTP sockets. For other sockets it
+ returns a one-element list.</p>
+      <p>Notice that the
+      <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">SCTP Sockets API Extensions</url>
+      defines parameter <c><anno>Assoc</anno></c> to be ignored for
+      one-to-one style sockets.
+      For one-to-many style sockets, the special value <c>0</c>
+      is defined to mean that the returned addresses must be
+      without any particular association.
+      How different SCTP implementations interpret this varies somewhat.
+      </p>
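+      <p>
+        A small usage sketch (on a system with SCTP support; the socket
+        is opened on a port chosen by the system):
+      </p>
+      <code type="none"><![CDATA[
+{ok, S} = gen_sctp:open(),
+%% For a multihomed SCTP socket the list can contain several
+%% address/port pairs; association 0 requests the addresses that
+%% are not tied to any particular association.
+{ok, AddrPorts} = inet:socknames(S, 0),]]></code>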
</desc>
</func>
</funcs>
@@ -1143,149 +1533,147 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp
<marker id="error_codes"></marker>
<title>POSIX Error Codes</title>
<list type="bulleted">
- <item><c>e2big</c> - argument list too long</item>
- <item><c>eacces</c> - permission denied</item>
- <item><c>eaddrinuse</c> - address already in use</item>
- <item><c>eaddrnotavail</c> - cannot assign requested address</item>
- <item><c>eadv</c> - advertise error</item>
- <item><c>eafnosupport</c> - address family not supported by
- protocol family</item>
- <item><c>eagain</c> - resource temporarily unavailable</item>
+      <item><c>e2big</c> - Argument list too long</item>
+ <item><c>eacces</c> - Permission denied</item>
+ <item><c>eaddrinuse</c> - Address already in use</item>
+ <item><c>eaddrnotavail</c> - Cannot assign requested address</item>
+ <item><c>eadv</c> - Advertise error</item>
+ <item><c>eafnosupport</c> - Address family not supported by
+ protocol family</item>
+ <item><c>eagain</c> - Resource temporarily unavailable</item>
<item><c>ealign</c> - EALIGN</item>
- <item><c>ealready</c> - operation already in progress</item>
- <item><c>ebade</c> - bad exchange descriptor</item>
- <item><c>ebadf</c> - bad file number</item>
- <item><c>ebadfd</c> - file descriptor in bad state</item>
- <item><c>ebadmsg</c> - not a data message</item>
- <item><c>ebadr</c> - bad request descriptor</item>
- <item><c>ebadrpc</c> - RPC structure is bad</item>
- <item><c>ebadrqc</c> - bad request code</item>
- <item><c>ebadslt</c> - invalid slot</item>
- <item><c>ebfont</c> - bad font file format</item>
- <item><c>ebusy</c> - file busy</item>
- <item><c>echild</c> - no children</item>
- <item><c>echrng</c> - channel number out of range</item>
- <item><c>ecomm</c> - communication error on send</item>
- <item><c>econnaborted</c> - software caused connection abort</item>
- <item><c>econnrefused</c> - connection refused</item>
- <item><c>econnreset</c> - connection reset by peer</item>
- <item><c>edeadlk</c> - resource deadlock avoided</item>
- <item><c>edeadlock</c> - resource deadlock avoided</item>
- <item><c>edestaddrreq</c> - destination address required</item>
- <item><c>edirty</c> - mounting a dirty fs w/o force</item>
- <item><c>edom</c> - math argument out of range</item>
- <item><c>edotdot</c> - cross mount point</item>
- <item><c>edquot</c> - disk quota exceeded</item>
- <item><c>eduppkg</c> - duplicate package name</item>
- <item><c>eexist</c> - file already exists</item>
- <item><c>efault</c> - bad address in system call argument</item>
- <item><c>efbig</c> - file too large</item>
- <item><c>ehostdown</c> - host is down</item>
- <item><c>ehostunreach</c> - host is unreachable</item>
- <item><c>eidrm</c> - identifier removed</item>
- <item><c>einit</c> - initialization error</item>
- <item><c>einprogress</c> - operation now in progress</item>
- <item><c>eintr</c> - interrupted system call</item>
- <item><c>einval</c> - invalid argument</item>
+ <item><c>ealready</c> - Operation already in progress</item>
+ <item><c>ebade</c> - Bad exchange descriptor</item>
+ <item><c>ebadf</c> - Bad file number</item>
+ <item><c>ebadfd</c> - File descriptor in bad state</item>
+ <item><c>ebadmsg</c> - Not a data message</item>
+ <item><c>ebadr</c> - Bad request descriptor</item>
+ <item><c>ebadrpc</c> - Bad RPC structure</item>
+ <item><c>ebadrqc</c> - Bad request code</item>
+ <item><c>ebadslt</c> - Invalid slot</item>
+ <item><c>ebfont</c> - Bad font file format</item>
+ <item><c>ebusy</c> - File busy</item>
+ <item><c>echild</c> - No children</item>
+ <item><c>echrng</c> - Channel number out of range</item>
+ <item><c>ecomm</c> - Communication error on send</item>
+ <item><c>econnaborted</c> - Software caused connection abort</item>
+ <item><c>econnrefused</c> - Connection refused</item>
+ <item><c>econnreset</c> - Connection reset by peer</item>
+ <item><c>edeadlk</c> - Resource deadlock avoided</item>
+ <item><c>edeadlock</c> - Resource deadlock avoided</item>
+ <item><c>edestaddrreq</c> - Destination address required</item>
+ <item><c>edirty</c> - Mounting a dirty fs without force</item>
+ <item><c>edom</c> - Math argument out of range</item>
+ <item><c>edotdot</c> - Cross mount point</item>
+ <item><c>edquot</c> - Disk quota exceeded</item>
+ <item><c>eduppkg</c> - Duplicate package name</item>
+ <item><c>eexist</c> - File already exists</item>
+ <item><c>efault</c> - Bad address in system call argument</item>
+ <item><c>efbig</c> - File too large</item>
+ <item><c>ehostdown</c> - Host is down</item>
+ <item><c>ehostunreach</c> - Host is unreachable</item>
+ <item><c>eidrm</c> - Identifier removed</item>
+ <item><c>einit</c> - Initialization error</item>
+ <item><c>einprogress</c> - Operation now in progress</item>
+ <item><c>eintr</c> - Interrupted system call</item>
+ <item><c>einval</c> - Invalid argument</item>
<item><c>eio</c> - I/O error</item>
- <item><c>eisconn</c> - socket is already connected</item>
- <item><c>eisdir</c> - illegal operation on a directory</item>
- <item><c>eisnam</c> - is a named file</item>
- <item><c>el2hlt</c> - level 2 halted</item>
- <item><c>el2nsync</c> - level 2 not synchronized</item>
- <item><c>el3hlt</c> - level 3 halted</item>
- <item><c>el3rst</c> - level 3 reset</item>
+ <item><c>eisconn</c> - Socket is already connected</item>
+ <item><c>eisdir</c> - Illegal operation on a directory</item>
+ <item><c>eisnam</c> - Is a named file</item>
+ <item><c>el2hlt</c> - Level 2 halted</item>
+ <item><c>el2nsync</c> - Level 2 not synchronized</item>
+ <item><c>el3hlt</c> - Level 3 halted</item>
+ <item><c>el3rst</c> - Level 3 reset</item>
<item><c>elbin</c> - ELBIN</item>
- <item><c>elibacc</c> - cannot access a needed shared library</item>
- <item><c>elibbad</c> - accessing a corrupted shared library</item>
- <item><c>elibexec</c> - cannot exec a shared library directly</item>
- <item><c>elibmax</c> - attempting to link in more shared
- libraries than system limit</item>
- <item><c>elibscn</c> - .lib section in a.out corrupted</item>
- <item><c>elnrng</c> - link number out of range</item>
- <item><c>eloop</c> - too many levels of symbolic links</item>
- <item><c>emfile</c> - too many open files</item>
- <item><c>emlink</c> - too many links</item>
- <item><c>emsgsize</c> - message too long</item>
- <item><c>emultihop</c> - multihop attempted</item>
- <item><c>enametoolong</c> - file name too long</item>
- <item><c>enavail</c> - not available</item>
+ <item><c>elibacc</c> - Cannot access a needed shared library</item>
+ <item><c>elibbad</c> - Accessing a corrupted shared library</item>
+ <item><c>elibexec</c> - Cannot exec a shared library directly</item>
+ <item><c>elibmax</c> - Attempting to link in more shared
+ libraries than system limit</item>
+ <item><c>elibscn</c> - <c>.lib</c> section in <c>a.out</c>
+ corrupted</item>
+ <item><c>elnrng</c> - Link number out of range</item>
+ <item><c>eloop</c> - Too many levels of symbolic links</item>
+ <item><c>emfile</c> - Too many open files</item>
+ <item><c>emlink</c> - Too many links</item>
+ <item><c>emsgsize</c> - Message too long</item>
+ <item><c>emultihop</c> - Multihop attempted</item>
+ <item><c>enametoolong</c> - Filename too long</item>
+ <item><c>enavail</c> - Unavailable</item>
<item><c>enet</c> - ENET</item>
- <item><c>enetdown</c> - network is down</item>
- <item><c>enetreset</c> - network dropped connection on reset</item>
- <item><c>enetunreach</c> - network is unreachable</item>
- <item><c>enfile</c> - file table overflow</item>
- <item><c>enoano</c> - anode table overflow</item>
- <item><c>enobufs</c> - no buffer space available</item>
- <item><c>enocsi</c> - no CSI structure available</item>
- <item><c>enodata</c> - no data available</item>
- <item><c>enodev</c> - no such device</item>
- <item><c>enoent</c> - no such file or directory</item>
- <item><c>enoexec</c> - exec format error</item>
- <item><c>enolck</c> - no locks available</item>
- <item><c>enolink</c> - link has be severed</item>
- <item><c>enomem</c> - not enough memory</item>
- <item><c>enomsg</c> - no message of desired type</item>
- <item><c>enonet</c> - machine is not on the network</item>
- <item><c>enopkg</c> - package not installed</item>
- <item><c>enoprotoopt</c> - bad protocol option</item>
- <item><c>enospc</c> - no space left on device</item>
- <item><c>enosr</c> - out of stream resources or not a stream
- device</item>
- <item><c>enosym</c> - unresolved symbol name</item>
- <item><c>enosys</c> - function not implemented</item>
- <item><c>enotblk</c> - block device required</item>
- <item><c>enotconn</c> - socket is not connected</item>
- <item><c>enotdir</c> - not a directory</item>
- <item><c>enotempty</c> - directory not empty</item>
- <item><c>enotnam</c> - not a named file</item>
- <item><c>enotsock</c> - socket operation on non-socket</item>
- <item><c>enotsup</c> - operation not supported</item>
- <item><c>enotty</c> - inappropriate device for ioctl</item>
- <item><c>enotuniq</c> - name not unique on network</item>
- <item><c>enxio</c> - no such device or address</item>
- <item><c>eopnotsupp</c> - operation not supported on socket</item>
- <item><c>eperm</c> - not owner</item>
- <item><c>epfnosupport</c> - protocol family not supported</item>
- <item><c>epipe</c> - broken pipe</item>
- <item><c>eproclim</c> - too many processes</item>
- <item><c>eprocunavail</c> - bad procedure for program</item>
- <item><c>eprogmismatch</c> - program version wrong</item>
- <item><c>eprogunavail</c> - RPC program not available</item>
- <item><c>eproto</c> - protocol error</item>
- <item><c>eprotonosupport</c> - protocol not supported</item>
- <item><c>eprototype</c> - protocol wrong type for socket</item>
- <item><c>erange</c> - math result unrepresentable</item>
+ <item><c>enetdown</c> - Network is down</item>
+ <item><c>enetreset</c> - Network dropped connection on reset</item>
+ <item><c>enetunreach</c> - Network is unreachable</item>
+ <item><c>enfile</c> - File table overflow</item>
+ <item><c>enoano</c> - Anode table overflow</item>
+ <item><c>enobufs</c> - No buffer space available</item>
+ <item><c>enocsi</c> - No CSI structure available</item>
+ <item><c>enodata</c> - No data available</item>
+ <item><c>enodev</c> - No such device</item>
+ <item><c>enoent</c> - No such file or directory</item>
+ <item><c>enoexec</c> - Exec format error</item>
+ <item><c>enolck</c> - No locks available</item>
+ <item><c>enolink</c> - Link has been severed</item>
+ <item><c>enomem</c> - Not enough memory</item>
+ <item><c>enomsg</c> - No message of desired type</item>
+ <item><c>enonet</c> - Machine is not on the network</item>
+ <item><c>enopkg</c> - Package not installed</item>
+ <item><c>enoprotoopt</c> - Bad protocol option</item>
+ <item><c>enospc</c> - No space left on device</item>
+ <item><c>enosr</c> - Out of stream resources or not a stream device</item>
+ <item><c>enosym</c> - Unresolved symbol name</item>
+ <item><c>enosys</c> - Function not implemented</item>
+ <item><c>enotblk</c> - Block device required</item>
+ <item><c>enotconn</c> - Socket is not connected</item>
+ <item><c>enotdir</c> - Not a directory</item>
+ <item><c>enotempty</c> - Directory not empty</item>
+ <item><c>enotnam</c> - Not a named file</item>
+ <item><c>enotsock</c> - Socket operation on non-socket</item>
+ <item><c>enotsup</c> - Operation not supported</item>
+ <item><c>enotty</c> - Inappropriate device for <c>ioctl</c></item>
+ <item><c>enotuniq</c> - Name not unique on network</item>
+ <item><c>enxio</c> - No such device or address</item>
+ <item><c>eopnotsupp</c> - Operation not supported on socket</item>
+ <item><c>eperm</c> - Not owner</item>
+ <item><c>epfnosupport</c> - Protocol family not supported</item>
+ <item><c>epipe</c> - Broken pipe</item>
+ <item><c>eproclim</c> - Too many processes</item>
+ <item><c>eprocunavail</c> - Bad procedure for program</item>
+ <item><c>eprogmismatch</c> - Wrong program version</item>
+ <item><c>eprogunavail</c> - RPC program unavailable</item>
+ <item><c>eproto</c> - Protocol error</item>
+ <item><c>eprotonosupport</c> - Protocol not supported</item>
+ <item><c>eprototype</c> - Wrong protocol type for socket</item>
+ <item><c>erange</c> - Math result unrepresentable</item>
<item><c>erefused</c> - EREFUSED</item>
- <item><c>eremchg</c> - remote address changed</item>
- <item><c>eremdev</c> - remote device</item>
- <item><c>eremote</c> - pathname hit remote file system</item>
- <item><c>eremoteio</c> - remote i/o error</item>
+ <item><c>eremchg</c> - Remote address changed</item>
+ <item><c>eremdev</c> - Remote device</item>
+ <item><c>eremote</c> - Pathname hit remote filesystem</item>
+ <item><c>eremoteio</c> - Remote I/O error</item>
<item><c>eremoterelease</c> - EREMOTERELEASE</item>
- <item><c>erofs</c> - read-only file system</item>
- <item><c>erpcmismatch</c> - RPC version is wrong</item>
- <item><c>erremote</c> - object is remote</item>
- <item><c>eshutdown</c> - cannot send after socket shutdown</item>
- <item><c>esocktnosupport</c> - socket type not supported</item>
- <item><c>espipe</c> - invalid seek</item>
- <item><c>esrch</c> - no such process</item>
- <item><c>esrmnt</c> - srmount error</item>
- <item><c>estale</c> - stale remote file handle</item>
+ <item><c>erofs</c> - Read-only filesystem</item>
+ <item><c>erpcmismatch</c> - Wrong RPC version</item>
+ <item><c>erremote</c> - Object is remote</item>
+ <item><c>eshutdown</c> - Cannot send after socket shutdown</item>
+ <item><c>esocktnosupport</c> - Socket type not supported</item>
+ <item><c>espipe</c> - Invalid seek</item>
+ <item><c>esrch</c> - No such process</item>
+ <item><c>esrmnt</c> - Srmount error</item>
+ <item><c>estale</c> - Stale remote file handle</item>
<item><c>esuccess</c> - Error 0</item>
- <item><c>etime</c> - timer expired</item>
- <item><c>etimedout</c> - connection timed out</item>
- <item><c>etoomanyrefs</c> - too many references</item>
- <item><c>etxtbsy</c> - text file or pseudo-device busy</item>
- <item><c>euclean</c> - structure needs cleaning</item>
- <item><c>eunatch</c> - protocol driver not attached</item>
- <item><c>eusers</c> - too many users</item>
- <item><c>eversion</c> - version mismatch</item>
- <item><c>ewouldblock</c> - operation would block</item>
- <item><c>exdev</c> - cross-domain link</item>
- <item><c>exfull</c> - message tables full</item>
- <item><c>nxdomain</c> - the hostname or domain name could not be
- found</item>
+ <item><c>etime</c> - Timer expired</item>
+ <item><c>etimedout</c> - Connection timed out</item>
+ <item><c>etoomanyrefs</c> - Too many references</item>
+ <item><c>etxtbsy</c> - Text file or pseudo-device busy</item>
+ <item><c>euclean</c> - Structure needs cleaning</item>
+ <item><c>eunatch</c> - Protocol driver not attached</item>
+ <item><c>eusers</c> - Too many users</item>
+ <item><c>eversion</c> - Version mismatch</item>
+ <item><c>ewouldblock</c> - Operation would block</item>
+ <item><c>exdev</c> - Cross-domain link</item>
+ <item><c>exfull</c> - Message tables full</item>
+ <item><c>nxdomain</c> - Hostname or domain name cannot be found</item>
</list>
</section>
</erlref>
-
diff --git a/lib/kernel/doc/src/inet_res.xml b/lib/kernel/doc/src/inet_res.xml
index 851a36aba9..351d86a93a 100644
--- a/lib/kernel/doc/src/inet_res.xml
+++ b/lib/kernel/doc/src/inet_res.xml
@@ -4,14 +4,14 @@
<erlref>
<header>
<copyright>
- <year>2009</year><year>2015</year>
+ <year>2009</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-
+
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
@@ -19,7 +19,7 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
+
</legalnotice>
<title>inet_res</title>
@@ -29,52 +29,50 @@
<rev>A</rev>
</header>
<module>inet_res</module>
- <modulesummary>A Rudimentary DNS Client</modulesummary>
+ <modulesummary>A rudimentary DNS client.</modulesummary>
<description>
- <p>Performs DNS name resolving towards recursive name servers</p>
- <p>See also
- <seealso marker="erts:inet_cfg">
- ERTS User's Guide: Inet configuration
- </seealso> for more
- information on how to configure an Erlang runtime system for IP
- communication and how to enable this DNS client by defining
- <c><![CDATA['dns']]></c> as a lookup method. It then acts
- as a backend for the resolving functions in
- <seealso marker="kernel:inet">inet</seealso>.</p>
+ <p>This module performs DNS name resolving to recursive name servers.</p>
+ <p>See also
+ <seealso marker="erts:inet_cfg">ERTS User's Guide: Inet Configuration</seealso>
+ for more information about how to configure an Erlang runtime system
+ for IP communication, and how to enable this DNS client by defining
+ <c><![CDATA['dns']]></c> as a lookup method.
+ The DNS client then acts as a backend for the resolving functions in
+ <seealso marker="kernel:inet"><c>inet</c></seealso>.</p>
<p>This DNS client can resolve DNS records even if it
is not used for normal name resolving in the node.</p>
- <p>This is not a full-fledged resolver. It is just a
- DNS client that relies on asking trusted recursive nameservers.</p>
+ <p>This is not a full-fledged resolver, only a
+ DNS client that relies on asking trusted recursive name servers.</p>
</description>
<section>
<title>Name Resolving</title>
<p>UDP queries are used unless resolver option
<c>usevc</c> is <c>true</c>, which forces TCP queries.
- If the query is to large for UDP, TCP is used instead.
- For regular DNS queries 512 bytes is the size limit.
- When EDNS is enabled (resolver option
- <c>edns</c> is set to the EDNS version i.e <c>0</c>
+ If the query is too large for UDP, TCP is used instead.
+ For regular DNS queries, 512 bytes is the size limit.</p>
+ <p>When EDNS is enabled (resolver option
+ <c>edns</c> is set to the EDNS version (that is, <c>0</c>
instead of <c>false</c>), resolver option
- <c>udp_payload_size</c> sets the limit. If a nameserver
- replies with the TC bit set (truncation), indicating
+ <c>udp_payload_size</c> sets the limit. If a name server
+ replies with the TC bit set (truncation), indicating that
the answer is incomplete, the query is retried
- to that nameserver using TCP. The resolver option
+ to that name server using TCP. Resolver option
<c>udp_payload_size</c> also sets the advertised
- size for the max allowed reply size, if EDNS is
- enabled, otherwise the nameserver uses the limit
- 512 byte. If the reply is larger it gets truncated,
- forcing a TCP re-query.</p>
- <p>For UDP queries, the resolver options <c>timeout</c>
+ size for the maximum allowed reply size, if EDNS is
+      enabled; otherwise, the name server uses the limit of
+ 512 bytes. If the reply is larger, it gets truncated,
+ forcing a TCP requery.</p>
+ <p>For UDP queries, resolver options <c>timeout</c>
and <c>retry</c> control retransmission.
- Each nameserver in the <c>nameservers</c> list is
- tried with a timeout of <c>timeout</c> / <c>retry</c>.
- Then all nameservers are tried again doubling the
- timeout, for a total of <c>retry</c> times.</p>
- <p>For queries that not use the <c>search</c> list,
+ Each name server in the <c>nameservers</c> list is
+ tried with a time-out of <c>timeout</c>/<c>retry</c>.
+ Then all name servers are tried again, doubling the
+ time-out, for a total of <c>retry</c> times.</p>
+ <p>For queries not using the <c>search</c> list,
if the query to all <c>nameservers</c> results in
- <c>{error,nxdomain}</c>or an empty answer, the same
- query is tried for the <c>alt_nameservers</c>.</p>
+ <c>{error,nxdomain}</c> or an empty answer, the same
+ query is tried for <c>alt_nameservers</c>.</p>
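+    <p>
+      As an illustration only (the name and the time-out/retry values
+      are arbitrary examples), the retransmission parameters can be
+      overridden per query through the options of
+      <seealso marker="#resolve/3"><c>resolve/*</c></seealso>:
+    </p>
+    <code type="none">
+inet_res:resolve("erlang.org", in, a,
+                 [{timeout, 2000}, {retry, 3}, {usevc, false}]).</code>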
</section>
<section>
@@ -92,11 +90,13 @@
<name name="res_error"/>
</datatype>
</datatypes>
+
<section>
<title>DNS Types</title>
<p><marker id="dns_types"/>
The following data types concern the DNS client:</p>
</section>
+
<datatypes>
<datatype>
<name name="dns_name"/>
@@ -112,10 +112,10 @@
<name name="dns_msg"/>
<desc>
<p>This is the start of a hiearchy of opaque data structures
- that can be examined with access functions in inet_dns that
- return lists of {Field,Value} tuples. The arity 2 functions
- just return the value for a given field.</p>
-<pre>
+ that can be examined with access functions in <c>inet_dns</c>, which
+ return lists of <c>{Field,Value}</c> tuples. The arity 2 functions
+ only return the value for a specified field.</p>
+ <pre>
dns_msg() = DnsMsg
inet_dns:msg(DnsMsg) ->
[ {header, dns_header()}
@@ -130,7 +130,7 @@ dns_header() = DnsHeader
inet_dns:header(DnsHeader) ->
[ {id, integer()}
| {qr, boolean()}
- | {opcode, 'query' | iquery | status | integer()}
+ | {opcode, query | iquery | status | integer()}
| {aa, boolean()}
| {tc, boolean()}
| {rd, boolean()}
@@ -163,63 +163,55 @@ dns_rr() = DnsRr
| {z, integer()}
| {data, dns_data()} ]
inet_dns:rr(DnsRr, Field) -> Value</pre>
-
-<p>There is an info function for the types above:</p>
-
-<pre>
+ <p>There is an information function for the types above:</p>
+ <pre>
inet_dns:record_type(dns_msg()) -> msg;
inet_dns:record_type(dns_header()) -> header;
inet_dns:record_type(dns_query()) -> dns_query;
inet_dns:record_type(dns_rr()) -> rr;
inet_dns:record_type(_) -> undefined.</pre>
-
-<p>So; inet_dns:(inet_dns:record_type(X))(X) will convert
-any of these data structures into a {Field,Value} list.</p>
+ <p>So, <c>inet_dns:(inet_dns:record_type(X))(X)</c> converts
+ any of these data structures into a <c>{Field,Value}</c> list.</p>
</desc>
</datatype>
<datatype>
<name name="dns_data"/>
- <desc><p><c><anno>Regexp</anno></c> is a string with characters encoded in the
- UTF-8 coding standard.</p>
+ <desc>
+ <p><c><anno>Regexp</anno></c> is a string with characters encoded
+ in the UTF-8 coding standard.</p>
</desc>
</datatype>
</datatypes>
-
<funcs>
-
<func>
<name name="getbyname" arity="2"/>
<name name="getbyname" arity="3"/>
- <fsummary>Resolve a DNS record of the given type for the given host
- </fsummary>
+ <fsummary>Resolve a DNS record of the specified type for the specified
+ host.</fsummary>
<desc>
- <p>Resolve a DNS record of the given type for the given host,
- of class <c>in</c>. On success returns a <c>hostent()</c> record with
- <c>dns_data()</c> elements in the address list field.
- </p><p>
- This function uses the resolver option <c>search</c> that
+ <p>Resolves a DNS record of the specified type for the specified host,
+ of class <c>in</c>. Returns, on success, a <c>hostent()</c> record
+ with <c>dns_data()</c> elements in the address list field.</p>
+ <p>This function uses resolver option <c>search</c> that
is a list of domain names. If the name to resolve contains
no dots, it is prepended to each domain name in the
search list, and they are tried in order. If the name
contains dots, it is first tried as an absolute name
- and if that fails the search list is used. If the name
- has a trailing dot it is simply supposed to be
- an absolute name and the search list is not used.
- </p>
+ and if that fails, the search list is used. If the name
+ has a trailing dot, it is supposed to be
+ an absolute name and the search list is not used.</p>
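+      <p>A usage sketch (the host name is only an example):</p>
+      <code type="none">
+{ok, Hostent} = inet_res:getbyname("www.erlang.org", a),
+%% Hostent is a hostent() record (defined in kernel/include/inet.hrl)
+%% whose address list field contains dns_data() elements,
+%% here IPv4 address tuples.</code>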
</desc>
</func>
<func>
<name name="gethostbyaddr" arity="1"/>
<name name="gethostbyaddr" arity="2"/>
- <fsummary>Return a hostent record for the host with the given address
- </fsummary>
+ <fsummary>Return a hostent record for the host with the specified
+ address.</fsummary>
<desc>
<p>Backend functions used by
- <seealso marker="kernel:inet#gethostbyaddr/1">
- inet:gethostbyaddr/1
- </seealso>.
+ <seealso marker="kernel:inet#gethostbyaddr/1"><c>inet:gethostbyaddr/1</c></seealso>.
</p>
</desc>
</func>
@@ -228,22 +220,17 @@ any of these data structures into a {Field,Value} list.</p>
<name name="gethostbyname" arity="1"/>
<name name="gethostbyname" arity="2"/>
<name name="gethostbyname" arity="3"/>
- <fsummary>Return a hostent record for the host with the given name
+ <fsummary>Return a hostent record for the host with the specified name.
</fsummary>
<desc>
<p>Backend functions used by
- <seealso marker="kernel:inet#gethostbyname/1">
- inet:gethostbyname/1,2
- </seealso>.
- </p><p>
- This function uses the resolver option <c>search</c> just like
- <seealso marker="#getbyname/2">getbyname/2,3</seealso>.
- </p><p>
- If the resolver option <c>inet6</c> is <c>true</c>,
- an IPv6 address is looked up, and if that fails
- the IPv4 address is looked up and returned on
- IPv6 mapped IPv4 format.
+ <seealso marker="kernel:inet#gethostbyname/1"><c>inet:gethostbyname/1,2</c></seealso>.
+ </p>
+ <p>This function uses resolver option <c>search</c> just like
+ <seealso marker="#getbyname/2"><c>getbyname/2,3</c></seealso>.
</p>
+ <p>If resolver option <c>inet6</c> is <c>true</c>,
+ an IPv6 address is looked up.</p>
</desc>
</func>
@@ -251,22 +238,21 @@ any of these data structures into a {Field,Value} list.</p>
<name name="lookup" arity="3"/>
<name name="lookup" arity="4"/>
<name name="lookup" arity="5"/>
- <fsummary>Resolve the DNS data for the record of the given type and class
- for the given name
- </fsummary>
+ <fsummary>Resolve the DNS data for the record of the specified type
+ and class for the specified name.</fsummary>
<desc>
- <p>Resolve the DNS data for the record of the given type and class
- for the given name. On success filters out the answer records
- with the correct <c><anno>Class</anno></c> and <c><anno>Type</anno></c> and returns
- a list of their data fields. So a lookup for type <c>any</c>
- will give an empty answer since the answer records have
+ <p>Resolves the DNS data for the record of the specified type and class
+ for the specified name. On success, filters out the answer records
+ with the correct <c><anno>Class</anno></c> and
+ <c><anno>Type</anno></c>, and returns
+ a list of their data fields. So, a lookup for type <c>any</c>
+ gives an empty answer, as the answer records have
specific types that are not <c>any</c>. An empty answer
- as well as a failed lookup returns an empty list.
- </p><p>
- Calls <seealso marker="#resolve/3">resolve/2..4</seealso>
+ or a failed lookup returns an empty list.</p>
+ <p>Calls
+ <seealso marker="#resolve/3"><c>resolve/*</c></seealso>
with the same arguments and filters the result, so
- <c><anno>Opts</anno></c> is explained there.
- </p>
+ <c><anno>Opts</anno></c> is described for those functions.</p>
</desc>
</func>
@@ -274,90 +260,77 @@ any of these data structures into a {Field,Value} list.</p>
<name name="resolve" arity="3"/>
<name name="resolve" arity="4"/>
<name name="resolve" arity="5"/>
- <fsummary>Resolve a DNS record of the given type and class
- for the given name
- </fsummary>
+ <fsummary>Resolve a DNS record of the specified type and class
+ for the specified name.</fsummary>
<desc>
- <p>Resolve a DNS record of the given type and class for the given name.
- The returned <c>dns_msg()</c> can be examined using
- access functions in <c>inet_db</c> as described
- in <seealso marker="#dns_types">DNS Types</seealso>.
- </p><p>
- If <c><anno>Name</anno></c> is an <c>ip_address()</c>, the domain name
- to query for is generated as the standard reverse
- ".IN-ADDR.ARPA." name for an IPv4 address, or the
- ".IP6.ARPA." name for an IPv6 address.
- In this case you most probably want to use
- <c><anno>Class</anno> = in</c> and <c><anno>Type</anno> = ptr</c> but it
- is not done automatically.
- </p><p>
- <c><anno>Opts</anno></c> override the corresponding resolver options.
- If the option <c>nameservers</c> is given, it is
- also assumed that it is the complete list of nameserves,
- so the resolver option <c>alt_nameserves</c> is ignored.
- Of course, if that option is also given to this function,
- it is used.
- </p><p>
- The <c>verbose</c> option (or rather <c>{verbose,true}</c>),
+ <p>Resolves a DNS record of the specified type and class for the
+ specified name. The returned <c>dns_msg()</c> can be examined using
+ access functions in <c>inet_db</c>, as described in section
+        <seealso marker="#dns_types">DNS Types</seealso>.</p>
+ <p>If <c><anno>Name</anno></c> is an <c>ip_address()</c>, the domain
+ name to query for is generated as the standard reverse
+ <c>".IN-ADDR.ARPA."</c> name for an IPv4 address, or the
+ <c>".IP6.ARPA."</c> name for an IPv6 address.
+ In this case, you most probably want to use
+ <c><anno>Class</anno> = in</c> and <c><anno>Type</anno> = ptr</c>,
+ but it is not done automatically.</p>
+ <p><c><anno>Opts</anno></c> overrides the corresponding resolver
+ options. If option <c>nameservers</c> is specified, it is
+        assumed that it is the complete list of name servers,
+        so resolver option <c>alt_nameservers</c> is ignored.
+        However, if option <c>alt_nameservers</c> is also specified to this
+ function, it is used.</p>
+ <p>Option <c>verbose</c> (or rather <c>{verbose,true}</c>)
causes diagnostics printout through
- <seealso marker="stdlib:io#format/3">io:format/2</seealso>
- of queries, replies retransmissions, etc, similar
- to from utilities like <c>dig</c>, <c>nslookup</c> et.al.
- </p><p>
- If <c><anno>Opt</anno></c> is an arbitrary atom it is interpreted
+ <seealso marker="stdlib:io#format/3"><c>io:format/2</c></seealso>
+        of queries, replies, retransmissions, and so on, similar
+        to utilities such as <c>dig</c> and <c>nslookup</c>.</p>
+ <p>If <c><anno>Opt</anno></c> is any atom, it is interpreted
as <c>{<anno>Opt</anno>,true}</c> unless the atom string starts with
- <c>"no"</c> making the interpretation <c>{<anno>Opt</anno>,false}</c>.
- For example: <c>usevc</c> is an alias for <c>{usevc,true}</c>,
- and <c>nousevc</c> an alias for <c>{usevc,false}</c>.
- </p><p>
- The <c>inet6</c> option currently has no effect on this function.
- You probably want to use <c><anno>Type</anno> = a | aaaa</c> instead.
- </p>
+ <c>"no"</c>, making the
+ interpretation <c>{<anno>Opt</anno>,false}</c>.
+ For example, <c>usevc</c> is an alias for <c>{usevc,true}</c>
+ and <c>nousevc</c> is an alias for <c>{usevc,false}</c>.</p>
+ <p>Option <c>inet6</c> has no effect on this function. You
+ probably want to use <c><anno>Type</anno> = a | aaaa</c> instead.</p>
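+      <p>For example, a reverse lookup sketch (the address is the
+        documentation example address 192.0.2.1):</p>
+      <code type="none">
+%% Class and Type are not chosen automatically for an ip_address(),
+%% so they are specified explicitly for a PTR lookup:
+inet_res:resolve({192,0,2,1}, in, ptr).</code>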
</desc>
</func>
-
</funcs>
-
-
<section>
- <title>Examples</title>
- <p>Access functions example: how
- <seealso marker="#lookup/3">lookup/3</seealso>
- could have been implemented using
- <seealso marker="#resolve/3">resolve/3</seealso>
- from outside the module.
- </p><code type="none">
- example_lookup(Name, Class, Type) ->
- case inet_res:resolve(Name, Class, Type) of
- {ok,Msg} ->
- [inet_dns:rr(RR, data)
- || RR &lt;- inet_dns:msg(Msg, anlist),
- inet_dns:rr(RR, type) =:= Type,
- inet_dns:rr(RR, class) =:= Class];
- {error,_} ->
- []
- end.</code>
+ <title>Example</title>
+ <p>This access functions example shows how
+ <seealso marker="#lookup/3"><c>lookup/3</c></seealso>
+ can be implemented using
+ <seealso marker="#resolve/3"><c>resolve/3</c></seealso>
+ from outside the module:</p>
+ <code type="none">
+example_lookup(Name, Class, Type) ->
+ case inet_res:resolve(Name, Class, Type) of
+ {ok,Msg} ->
+ [inet_dns:rr(RR, data)
+ || RR &lt;- inet_dns:msg(Msg, anlist),
+ inet_dns:rr(RR, type) =:= Type,
+ inet_dns:rr(RR, class) =:= Class];
+ {error,_} ->
+ []
+ end.</code>
</section>
-
-
<section>
<title>Legacy Functions</title>
- <p>These have been deprecated due to the annoying double
- meaning of the nameservers/timeout argument, and
- because they had no decent place for a resolver options list.</p>
+    <p>These are deprecated because of the annoying double
+ meaning of the name servers/time-out argument, and
+ because they have no decent place for a resolver options list.</p>
</section>
<funcs>
-
<func>
<name name="nslookup" arity="3"/>
<name name="nslookup" arity="4" clause_i="1"/>
<name name="nslookup" arity="4" clause_i="2"/>
- <fsummary>Resolve a DNS record of the given type and class
- for the given name
- </fsummary>
+ <fsummary>Resolve a DNS record of the specified type and class for the
+ specified name.</fsummary>
<type variable="Name"/>
<type variable="Class"/>
<type variable="Type"/>
@@ -365,23 +338,20 @@ any of these data structures into a {Field,Value} list.</p>
<type variable="Nameservers"/>
<type variable="Reason"/>
<desc>
- <p>Resolve a DNS record of the given type and class for the given name.
- </p>
+ <p>Resolves a DNS record of the specified type and class for the
+ specified name.</p>
</desc>
</func>
<func>
<name name="nnslookup" arity="4"/>
<name name="nnslookup" arity="5"/>
- <fsummary>Resolve a DNS record of the given type and class
- for the given name
- </fsummary>
+ <fsummary>Resolve a DNS record of the specified type and class
+ for the specified name.</fsummary>
<desc>
- <p>Resolve a DNS record of the given type and class for the given name.
- </p>
+ <p>Resolves a DNS record of the specified type and class for the
+ specified name.</p>
</desc>
</func>
-
</funcs>
-
</erlref>
diff --git a/lib/kernel/doc/src/init_stub.xml b/lib/kernel/doc/src/init_stub.xml
index eae2cbea95..1297c8264d 100644
--- a/lib/kernel/doc/src/init_stub.xml
+++ b/lib/kernel/doc/src/init_stub.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>1997</year>
- <year>2013</year>
+ <year>2016</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -31,13 +31,9 @@
<rev>A</rev>
</header>
<module>init</module>
- <modulesummary>Coordination of System Startup</modulesummary>
- <description><p>
-
- The module init is moved to the runtime system
- application. Please see <seealso
- marker="erts:init">init(3)</seealso> in the
- erts reference manual instead.
-
- </p></description>
+ <modulesummary>Coordination of system startup.</modulesummary>
+ <description>
+ <p>This module is moved to the
+ <seealso marker="erts:init">ERTS</seealso> application.</p>
+ </description>
</erlref>
diff --git a/lib/kernel/doc/src/introduction_chapter.xml b/lib/kernel/doc/src/introduction_chapter.xml
new file mode 100644
index 0000000000..d02b1a2ee5
--- /dev/null
+++ b/lib/kernel/doc/src/introduction_chapter.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Introduction</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev></rev>
+ <file>introduction.xml</file>
+ </header>
+
+ <section>
+ <title>Scope</title>
+ <p>The Kernel application has all the code necessary to run
+ the Erlang runtime system: file servers, code servers,
+ and so on.</p>
+ <p>The Kernel application is the first application started. It is
+ mandatory in the sense that the minimal system based on
+ Erlang/OTP consists of Kernel and STDLIB. Kernel
+ contains the following functional areas:</p>
+ <list type="bulleted">
+ <item>Start, stop, supervision, configuration, and distribution of applications</item>
+ <item>Code loading</item>
+ <item>Logging</item>
+ <item>Global name service</item>
+ <item>Supervision of Erlang/OTP</item>
+ <item>Communication with sockets</item>
+ <item>Operating system interface</item>
+ </list>
+ </section>
+
+ <section>
+ <title>Prerequisites</title>
+ <p>It is assumed that the reader is familiar with the Erlang programming
+ language.</p>
+ </section>
+</chapter>
+
+
diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml
index 956c57f7c1..15dbdb47dc 100644
--- a/lib/kernel/doc/src/kernel_app.xml
+++ b/lib/kernel/doc/src/kernel_app.xml
@@ -4,14 +4,14 @@
<appref>
<header>
<copyright>
- <year>1996</year><year>2015</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-
+
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
@@ -19,7 +19,7 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
+
</legalnotice>
<title>kernel</title>
@@ -29,66 +29,102 @@
<rev></rev>
</header>
<app>kernel</app>
- <appsummary>The Kernel Application</appsummary>
+ <appsummary>The Kernel application.</appsummary>
<description>
+ <p>The Kernel application has all the code necessary to run
+ the Erlang runtime system: file servers, code servers,
+ and so on.</p>
<p>The Kernel application is the first application started. It is
mandatory in the sense that the minimal system based on
- Erlang/OTP consists of Kernel and STDLIB. The Kernel application
- contains the following services:</p>
+ Erlang/OTP consists of Kernel and STDLIB. Kernel
+ contains the following functional areas:</p>
<list type="bulleted">
- <item>application controller, see <seealso marker="application">application(3)</seealso></item>
- <item><c>code</c></item>
- <item><c>disk_log</c></item>
- <item><c>dist_ac</c>, distributed application controller</item>
- <item><c>erl_boot_server</c></item>
- <item><c>erl_ddll</c></item>
- <item><c>error_logger</c></item>
- <item><c>error_logger_format_depth</c></item>
- <item><c>file</c></item>
- <item><c>global</c></item>
- <item><c>global_group</c></item>
- <item><c>heart</c></item>
- <item><c>inet</c></item>
- <item><c>net_kernel</c></item>
- <item><c>os</c></item>
- <item><c>pg2</c></item>
- <item><c>rpc</c></item>
- <item><c>seq_trace</c></item>
- <item><c>user</c></item>
+ <item>Start, stop, supervision, configuration, and distribution of applications</item>
+ <item>Code loading</item>
+ <item>Logging</item>
+ <item>Global name service</item>
+ <item>Supervision of Erlang/OTP</item>
+ <item>Communication with sockets</item>
+ <item>Operating system interface</item>
</list>
</description>
<section>
- <title>Error Logger Event Handlers</title>
- <p>Two standard error logger event handlers are defined in
- the Kernel application. These are described in
- <seealso marker="error_logger">error_logger(3)</seealso>.</p>
+ <title>Logger Handlers</title>
+ <p>Two standard logger handlers are defined in
+ the Kernel application. These are described in the
+ <seealso marker="logger_chapter">Kernel User's Guide</seealso>,
+ and in the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c>
+ </seealso> manual pages.</p>
</section>
<section>
+ <marker id="erl_signal_server"/>
+ <title>OS Signal Event Handler</title>
+    <p>Asynchronous OS signals may be subscribed to via the Kernel application's event manager
+ (see <seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso> and
+ <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>) registered as <c>erl_signal_server</c>.
+    A default signal handler is installed, which handles the following signals:</p>
+ <taglist>
+ <tag><c>sigusr1</c></tag>
+ <item><p>The default handler will halt Erlang and produce a crashdump
+ with slogan "Received SIGUSR1".
+ This is equivalent to calling <c>erlang:halt("Received SIGUSR1")</c>.
+ </p></item>
+
+ <tag><c>sigquit</c></tag>
+ <item><p>The default handler will halt Erlang immediately.
+ This is equivalent to calling <c>erlang:halt()</c>.
+ </p></item>
+
+ <tag><c>sigterm</c></tag>
+ <item><p>The default handler will terminate Erlang normally.
+ This is equivalent to calling <c>init:stop()</c>.
+ </p></item>
+ </taglist>
+
+ <section>
+ <title>Events</title>
+ <p>Any event handler added to <c>erl_signal_server</c> must handle the following events.</p>
+ <taglist>
+ <tag><c>sighup</c></tag>
+ <item><p>Hangup detected on controlling terminal or death of controlling process</p></item>
+ <tag><c>sigquit</c></tag>
+ <item><p>Quit from keyboard</p></item>
+ <tag><c>sigabrt</c></tag>
+ <item><p>Abort signal from abort</p></item>
+ <tag><c>sigalrm</c></tag>
+ <item><p>Timer signal from alarm</p></item>
+ <tag><c>sigterm</c></tag>
+ <item><p>Termination signal</p></item>
+ <tag><c>sigusr1</c></tag>
+ <item><p>User-defined signal 1</p></item>
+ <tag><c>sigusr2</c></tag>
+ <item><p>User-defined signal 2</p></item>
+ <tag><c>sigchld</c></tag>
+ <item><p>Child process stopped or terminated</p></item>
+ <tag><c>sigstop</c></tag>
+ <item><p>Stop process</p></item>
+ <tag><c>sigtstp</c></tag>
+ <item><p>Stop typed at terminal</p></item>
+ </taglist>
+
+    <p>Setting OS signals is described in <seealso marker="os#set_signal/2"><c>os:set_signal/2</c></seealso>.</p>
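+    <p>
+      A sketch of replacing the default handler (assuming
+      <c>my_signal_handler</c> is a user-supplied module implementing
+      the <c>gen_event</c> behaviour, and that the default handler is
+      the one installed by Kernel):
+    </p>
+    <code type="none">
+ok = gen_event:swap_sup_handler(
+       erl_signal_server,
+       {erl_signal_handler, []},
+       {my_signal_handler, []}).</code>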
+ </section>
+ </section>
+
+ <section>
+ <marker id="configuration"/>
<title>Configuration</title>
<p>The following configuration parameters are defined for the Kernel
- application. See <seealso marker="app">app(4)</seealso> for more
- information about configuration parameters.</p>
+ application. For more information about configuration parameters,
+ see file <seealso marker="app"><c>app(4)</c></seealso>.</p>
<taglist>
- <tag><c>browser_cmd = string() | {M,F,A}</c></tag>
- <item>
- <p>When pressing the Help button in a tool such as Debugger or
- TV, the help text (an HTML file <c>File</c>) is by default
- displayed in a Netscape browser which is required to be up and
- running. This parameter can be used to change the command for
- how to display the help text if another browser than Netscape
- is preferred, or another platform than Unix or Windows is
- used.</p>
- <p>If set to a string <c>Command</c>, the command
- <c>"Command File"</c> will be evaluated using <c>os:cmd/1</c>.</p>
- <p>If set to a module-function-args tuple <c>{M,F,A}</c>,
- the call <c>apply(M,F,[File|A])</c> will be evaluated.</p>
- </item>
<tag><c>distributed = [Distrib]</c></tag>
<item>
- <p>Specifies which applications are distributed and on which
- nodes they may execute. In this parameter:</p>
+        <p>Specifies which applications are distributed and on which
+ nodes they are allowed to execute. In this parameter:</p>
<list type="bulleted">
<item><c>Distrib = {App,Nodes} | {App,Time,Nodes}</c></item>
<item><c>App = atom()</c></item>
@@ -96,25 +132,24 @@
<item><c>Nodes = [node() | {node(),...,node()}]</c></item>
</list>
<p>The parameter is described in
- <seealso marker="application">application(3)</seealso>, function
- <c>load/2</c>.</p>
+ <seealso marker="application#load/2"><c>application:load/2</c></seealso>.</p>
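+        <p>An example entry in a <c>sys.config</c> file (the
+          application and node names are placeholders):</p>
+        <code type="none">
+[{kernel,
+  [{distributed,
+    [{my_app, 5000, ['cp1@cave', {'cp2@cave', 'cp3@cave'}]}]}]}].</code>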
</item>
<tag><c>dist_auto_connect = Value</c></tag>
<item>
- <p>Specifies when nodes will be automatically connected. If
+ <p>Specifies when nodes are automatically connected. If
this parameter is not specified, a node is always
- automatically connected, e.g when a message is to be sent to
+ automatically connected, for example, when a message is to be sent to
that node. <c>Value</c> is one of:</p>
<taglist>
<tag><c>never</c></tag>
- <item>Connections are never automatically established, they
+ <item><p>Connections are never automatically established, they
must be explicitly connected. See
- <seealso marker="net_kernel">net_kernel(3)</seealso>.</item>
+ <seealso marker="net_kernel"><c>net_kernel(3)</c></seealso>.</p></item>
<tag><c>once</c></tag>
- <item>Connections will be established automatically, but only
+ <item><p>Connections are established automatically, but only
once per node. If a node goes down, it must thereafter be
explicitly connected. See
- <seealso marker="net_kernel">net_kernel(3)</seealso>.</item>
+ <seealso marker="net_kernel"><c>net_kernel(3)</c></seealso>.</p></item>
</taglist>
</item>
<tag><c>permissions = [Perm]</c></tag>
@@ -127,186 +162,200 @@
<item><c>Bool = boolean()</c></item>
</list>
<p>Permissions are described in
- <seealso marker="application">application(3)</seealso>, function
- <c>permit/2</c>.</p>
+ <seealso marker="application#permit/2"><c>application:permit/2</c></seealso>.</p>
</item>
- <tag><c>error_logger = Value</c></tag>
+ <tag><marker id="logger"/><c>logger = [Config]</c></tag>
<item>
- <p><c>Value</c> is one of:</p>
- <taglist>
- <tag><c>tty</c></tag>
- <item>Installs the standard event handler which prints error
- reports to <c>stdio</c>. This is the default option.</item>
- <tag><c>{file, FileName}</c></tag>
- <item>Installs the standard event handler which prints error
- reports to the file <c>FileName</c>, where <c>FileName</c>
- is a string.</item>
- <tag><c>false</c></tag>
- <item>
- <p>No standard event handler is installed, but
- the initial, primitive event handler is kept, printing
- raw event messages to tty.</p>
- </item>
- <tag><c>silent</c></tag>
- <item>
- <p>Error logging is turned off.</p>
- </item>
- </taglist>
+ <p>Specifies the configuration
+ for <seealso marker="logger">Logger</seealso>, except the
+ primary log level, which is specified
+ with <seealso marker="#logger_level"><c>logger_level</c></seealso>,
+ and the compatibility
+ with <seealso marker="sasl:error_logging">SASL Error
+ Logging</seealso>, which is specified
+ with <seealso marker="#logger_sasl_compatible">
+ <c>logger_sasl_compatible</c></seealso>.</p>
+      <p>The <c>logger</c> parameter is described in
+ section <seealso marker="logger_chapter#logger_parameter">
+ Logging</seealso> in the Kernel User's Guide.</p>
</item>
- <tag><c>error_logger_format_depth = Depth</c></tag>
+ <tag><marker id="logger_level"/><c>logger_level = Level</c></tag>
<item>
- <marker id="error_logger_format_depth"></marker>
- <p>This parameter can be used to limit the size of the
- formatted output from the error logger event handlers.</p>
-
- <note><p>This configuration parameter was introduced in OTP 18.1.
- It is currently experimental. Based on user feedback it
- may be changed or improved in future releases, for example
- to gain better control over how to limit the size of the
- formatted output. We have no plans to entirely remove this
- new feature, unless it turns out to be completely
- useless. In OTP 19, the default may be changed to limit the
- formatted output.</p></note>
-
- <p><c>Depth</c> is a positive integer that is the maximum
- depth to which terms are printed by the error logger event
- handlers included in OTP. Specifically, the two event handlers
- defined by the <c>Kernel</c> application and the two event
- handlers in the <c>SASL</c> application will use this
- configuration parameter. (If you have implemented you own
- error handlers, this configuration parameter will have no
- effect on them.)</p>
-
- <p>The way <c>Depth</c> is used, is that format strings
- string passed to the event handlers will be rewritten.
- The "~p" and "~w" format controls will be replaced with
- "~P" and "~W", respectively, and <c>Depth</c> will be
- used as the depth parameter. See
- <seealso marker="stdlib:io#format/2">io:format/2</seealso>.</p>
-
- <note><p>A reasonable starting value for <c>Depth</c> is
- <c>30</c>. You should test crashing various processes in your
- application and examine the logs from the crashes, and then
- either increase or decrease the value.</p></note>
- </item>
+ <p>Specifies the primary log level for Logger. Log events with
+ the same, or a more severe level, pass through the primary
+ log level check. See
+ section <seealso marker="logger_chapter">Logging</seealso>
+ in the Kernel User's Guide for more information about Logger
+ and log levels.</p>
+ <p><c>Level = emergency | alert | critical | error | warning |
+ notice | info | debug | all | none</c></p>
+ <p>To change the primary log level at runtime, use
+ <seealso marker="logger#set_primary_config/2">
+ <c>logger:set_primary_config(level, Level)</c></seealso>.</p>
+ <p>Defaults to <c>notice</c>.</p>
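+          <p>For example, to raise the primary log level to <c>error</c>
+            when starting a node (a minimal sketch):</p>
+          <code type="none">
+erl -kernel logger_level error</code>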
+ </item>
+ <tag><marker id="logger_sasl_compatible"/>
+ <c>logger_sasl_compatible = true | false</c></tag>
+ <item>
+          <p>Specifies whether Logger behaves backwards compatibly with the
+ SASL error logging functionality from releases prior to
+ Erlang/OTP 21.0.</p>
+ <p>If this parameter is set to <c>true</c>, the default Logger
+ handler does not log any progress-, crash-, or supervisor
+ reports. If the SASL application is then started, it adds a
+ Logger handler named <c>sasl</c>, which logs these events
+          according to the values of the SASL configuration
+          parameters <c>sasl_error_logger</c>
+          and <c>sasl_errlog_type</c>.</p>
+ <p>See section
+ <seealso marker="sasl:sasl_app#deprecated_error_logger_config">
+ Deprecated Error Logger Event Handlers and
+ Configuration</seealso> in the sasl(6) manual page for
+ information about the SASL configuration parameters.</p>
+ <p>See section <seealso marker="sasl:error_logging">SASL Error
+ Logging</seealso> in the SASL User's Guide, and
+ section <seealso marker="logger_chapter#compatibility">Backwards
+ Compatibility with error_logger</seealso> in the Kernel
+ User's Guide for information about the SASL error logging
+ functionality, and how Logger can be backwards compatible
+ with this.</p>
+ <p>Defaults to <c>false</c>.</p>
+ <note>
+ <p>If this parameter is set to <c>true</c>,
+ <c>sasl_errlog_type</c> indicates that progress reports
+ shall be logged, and the configured primary log level
+ is <c>notice</c> or more severe, then SASL automatically
+ sets the primary log level to <c>info</c>. That is, this
+ setting can potentially overwrite the value of the Kernel
+ configuration parameter <c>logger_level</c>. This is to
+ allow progress reports, which have log level <c>info</c>,
+ to be forwarded to the handlers.</p>
+ </note>
+ </item>
<tag><c>global_groups = [GroupTuple]</c></tag>
<item>
+ <marker id="global_groups"></marker>
<p>Defines global groups, see
- <seealso marker="global_group">global_group(3)</seealso>.</p>
+ <seealso marker="global_group"><c>global_group(3)</c></seealso>.
+ In this parameter:</p>
<list type="bulleted">
- <item><c>GroupTuple = {GroupName, [Node]} | {GroupName, PublishType, [Node]}</c></item>
- <item><c>GroupName = atom()</c></item>
- <item><c>PublishType = normal | hidden</c></item>
- <item><c>Node = node()</c></item>
+ <item><p><c>GroupTuple = {GroupName, [Node]} | {GroupName, PublishType, [Node]}</c></p></item>
+ <item><p><c>GroupName = atom()</c></p></item>
+ <item><p><c>PublishType = normal | hidden</c></p></item>
+ <item><p><c>Node = node()</c></p></item>
</list>
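+          <p>A minimal sketch of a <c>global_groups</c> setting (the group
+            and node names are only for illustration):</p>
+          <code type="none">
+{global_groups, [{group1, ['node1@host1', 'node2@host2']},
+                 {group2, hidden, ['node3@host3']}]}</code>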
</item>
<tag><c>inet_default_connect_options = [{Opt, Val}]</c></tag>
<item>
<p>Specifies default options for <c>connect</c> sockets,
- see <seealso marker="inet">inet(3)</seealso>.</p>
+ see <seealso marker="inet"><c>inet(3)</c></seealso>.</p>
</item>
<tag><c>inet_default_listen_options = [{Opt, Val}]</c></tag>
<item>
<p>Specifies default options for <c>listen</c> (and
- <c>accept</c>) sockets, see <seealso marker="inet">inet(3)</seealso>.</p>
+ <c>accept</c>) sockets, see <seealso marker="inet"><c>inet(3)</c></seealso>.</p>
</item>
<tag><c>{inet_dist_use_interface, ip_address()}</c></tag>
<item>
- <p>If the host of an Erlang node has several network interfaces,
- this parameter specifies which one to listen on. See
- <seealso marker="inet">inet(3)</seealso> for the type definition
- of <c>ip_address()</c>.</p>
+ <p>If the host of an Erlang node has many network interfaces,
+ this parameter specifies which one to listen on. For the type definition
+ of <c>ip_address()</c>,
+ see <seealso marker="inet"><c>inet(3)</c></seealso>.</p>
</item>
- <tag><c>{inet_dist_listen_min, First}</c></tag>
+ <tag><c>{inet_dist_listen_min, First}</c> and <c>{inet_dist_listen_max, Last}</c></tag>
<item>
- <p>See below.</p>
- </item>
- <tag><c>{inet_dist_listen_max, Last}</c></tag>
- <item>
- <p>Define the <c>First..Last</c> port range for the listener
+ <p>Defines the <c>First..Last</c> port range for the listener
socket of a distributed Erlang node.</p>
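+          <p>For example, to restrict the distribution listener to the
+            port range 9100..9105 (the range is only an example):</p>
+          <code type="none">
+erl -kernel inet_dist_listen_min 9100 -kernel inet_dist_listen_max 9105</code>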
</item>
<tag><c>{inet_dist_listen_options, Opts}</c></tag>
<item>
- <p>Define a list of extra socket options to be used when opening the
+ <marker id="inet_dist_listen_options"></marker>
+ <p>Defines a list of extra socket options to be used when opening the
listening socket for a distributed Erlang node.
- See <seealso marker="gen_tcp#listen/2">gen_tcp:listen/2</seealso></p>
+ See <seealso marker="gen_tcp#listen/2"><c>gen_tcp:listen/2</c></seealso>.</p>
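+          <p>A sketch of a <c>sys.config</c> entry that adds the
+            <c>nodelay</c> socket option to the listening socket:</p>
+          <code type="none">
+[{kernel, [{inet_dist_listen_options, [{nodelay, true}]}]}].</code>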
</item>
<tag><c>{inet_dist_connect_options, Opts}</c></tag>
<item>
- <p>Define a list of extra socket options to be used when connecting to
+ <marker id="inet_dist_connect_options"></marker>
+ <p>Defines a list of extra socket options to be used when connecting to
other distributed Erlang nodes.
- See <seealso marker="gen_tcp#connect/4">gen_tcp:connect/4</seealso></p>
+ See <seealso marker="gen_tcp#connect/4"><c>gen_tcp:connect/4</c></seealso>.</p>
</item>
<tag><c>inet_parse_error_log = silent</c></tag>
<item>
- <p>If this configuration parameter is set, no
- <c>error_logger</c> messages are generated when erroneous
- lines are found and skipped in the various Inet configuration
+ <p>If set, no log events are issued when erroneous lines are
+ found and skipped in the various Inet configuration
files.</p>
</item>
<tag><c>inetrc = Filename</c></tag>
<item>
- <p>The name (string) of an Inet user configuration file. See
- ERTS User's Guide, Inet configuration.</p>
+ <p>The name (string) of an Inet user configuration file. For details,
+ see section
+ <seealso marker="erts:inet_cfg"><c>Inet Configuration</c></seealso>
+ in the ERTS User's Guide.</p>
</item>
<tag><c>net_setuptime = SetupTime</c></tag>
<item>
<marker id="net_setuptime"></marker>
<p><c>SetupTime</c> must be a positive integer or floating point
- number, and will be interpreted as the maximally allowed time
+ number, and is interpreted as the maximum allowed time
for each network operation during connection setup to another
- Erlang node. The maximum allowed value is 120; if higher values
- are given, 120 will be used. The default value if the variable
- is not given, or if the value is incorrect (e.g. not a number),
- is 7 seconds.</p>
- <p>Note that this value does not limit the total connection
+ Erlang node. The maximum allowed value is <c>120</c>. If higher values
+ are specified, <c>120</c> is used. Default is 7 seconds if the variable
+ is not specified, or if the value is incorrect (for example, not a number).</p>
+ <p>Notice that this value does not limit the total connection
setup time, but rather each individual network operation during
the connection setup and handshake.</p>
</item>
<tag><c>net_ticktime = TickTime</c></tag>
<item>
<marker id="net_ticktime"></marker>
- <p>Specifies the <c>net_kernel</c> tick time. <c>TickTime</c>
- is given in seconds. Once every <c>TickTime/4</c> second, all
- connected nodes are ticked (if anything else has been written
- to a node) and if nothing has been received from another node
- within the last four (4) tick times that node is considered
- to be down. This ensures that nodes which are not responding,
- for reasons such as hardware errors, are considered to be
- down.</p>
- <p>The time <c>T</c>, in which a node that is not responding is
- detected, is calculated as: <c><![CDATA[MinT < T < MaxT]]></c> where:</p>
+ <p>Specifies the <c>net_kernel</c> tick time in seconds. This is the
+ approximate time a connected node may be unresponsive until it is
+ considered down and thereby disconnected.</p>
+ <p>Once every <c>TickTime/4</c> seconds, each connected node is ticked
+ if nothing has been sent to it during that last <c>TickTime/4</c>
+ interval. A tick is a small package sent on the connection. A connected
+ node is considered to be down if no ticks or payload packages have been
+ received during the last four <c>TickTime/4</c> intervals. This ensures
+ that nodes that are not responding, for reasons such as hardware errors,
+ are considered to be down.</p>
+ <p>As the availability is only checked every <c>TickTime/4</c> seconds,
+          the actual time <c>T</c> that a node has been unresponsive when
+ detected may vary between <c>MinT</c> and <c>MaxT</c>,
+ where:</p>
<code type="none">
MinT = TickTime - TickTime / 4
MaxT = TickTime + TickTime / 4</code>
- <p><c>TickTime</c> is by default 60 (seconds). Thus,
- <c><![CDATA[45 < T < 75]]></c> seconds.</p>
- <p><em>Note:</em> All communicating nodes should have the same
- <c>TickTime</c> value specified.</p>
- <p><em>Note:</em> Normally, a terminating node is detected
- immediately.</p>
+ <p><c>TickTime</c> defaults to <c>60</c> seconds. Thus,
+ <c><![CDATA[45 < T < 75]]></c> seconds.</p>
+ <p>Notice that <em>all</em> communicating nodes are to have the
+ <em>same</em> <c>TickTime</c> value specified, as it determines both the
+          frequency of outgoing ticks and the expected frequency of incoming
+          ticks.</p>
+ <p>Normally, a terminating node is detected immediately by the transport
+ protocol (like TCP/IP).</p>
</item>
<tag><c>shutdown_timeout = integer() | infinity</c></tag>
<item>
- <p>Specifies the time <c>application_controller</c> will wait
+ <p>Specifies the time <c>application_controller</c> waits
for an application to terminate during node shutdown. If the
- timer expires, <c>application_controller</c> will brutally
- kill <c>application_master</c> of the hanging
+ timer expires, <c>application_controller</c> brutally
+ kills <c>application_master</c> of the hanging
application. If this parameter is undefined, it defaults
to <c>infinity</c>.</p>
</item>
<tag><c>sync_nodes_mandatory = [NodeName]</c></tag>
<item>
- <p>Specifies which other nodes <em>must</em> be alive in order
+          <p>Specifies which other nodes <em>must</em> be alive
for this node to start properly. If some node in the list
- does not start within the specified time, this node will not
+ does not start within the specified time, this node does not
start either. If this parameter is undefined, it defaults to
- [].</p>
+ <c>[]</c>.</p>
</item>
<tag><c>sync_nodes_optional = [NodeName]</c></tag>
<item>
- <p>Specifies which other nodes <em>can</em> be alive in order
+          <p>Specifies which other nodes <em>can</em> be alive
for this node to start properly. If some node in this list
does not start within the specified time, this node starts
anyway. If this parameter is undefined, it defaults to
@@ -314,66 +363,88 @@ MaxT = TickTime + TickTime / 4</code>
</item>
<tag><c>sync_nodes_timeout = integer() | infinity</c></tag>
<item>
- <p>Specifies the amount of time (in milliseconds) this node
- will wait for the mandatory and optional nodes to start. If
+ <p>Specifies the time (in milliseconds) that this node
+ waits for the mandatory and optional nodes to start. If
this parameter is undefined, no node synchronization is
- performed. This option also makes sure that <c>global</c> is
+ performed. This option ensures that <c>global</c> is
synchronized.</p>
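+          <p>A minimal <c>sys.config</c> sketch combining the three
+            <c>sync_nodes_*</c> parameters (node names are only for
+            illustration):</p>
+          <code type="none">
+[{kernel,
+  [{sync_nodes_mandatory, ['b@host']},
+   {sync_nodes_optional, ['c@host', 'd@host']},
+   {sync_nodes_timeout, 10000}]}].</code>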
</item>
<tag><c>start_dist_ac = true | false</c></tag>
<item>
<p>Starts the <c>dist_ac</c> server if the parameter is
- <c>true</c>. This parameter should be set to <c>true</c> for
- systems that use distributed applications.</p>
- <p>The default value is <c>false</c>. If this parameter is
- undefined, the server is started if the parameter
+ <c>true</c>. This parameter is to be set to <c>true</c> for
+ systems using distributed applications.</p>
+ <p>Defaults to <c>false</c>. If this parameter is
+ undefined, the server is started if parameter
<c>distributed</c> is set.</p>
</item>
<tag><c>start_boot_server = true | false</c></tag>
<item>
<p>Starts the <c>boot_server</c> if the parameter is <c>true</c>
- (see <seealso marker="erl_boot_server">erl_boot_server(3)</seealso>).
- This parameter should be
- set to <c>true</c> in an embedded system which uses this
- service.</p>
- <p>The default value is <c>false</c>.</p>
+ (see <seealso marker="erl_boot_server"><c>erl_boot_server(3)</c></seealso>).
+ This parameter is to be set to <c>true</c> in an embedded system
+ using this service.</p>
+ <p>Defaults to <c>false</c>.</p>
</item>
<tag><c>boot_server_slaves = [SlaveIP]</c></tag>
<item>
- <p>If the <c>start_boot_server</c> configuration parameter is
+ <p>If configuration parameter <c>start_boot_server</c> is
<c>true</c>, this parameter can be used to initialize
- <c>boot_server</c> with a list of slave IP addresses.
- <c>SlaveIP = string() | atom | {integer(),integer(),integer(),integer()}</c></p>
+ <c>boot_server</c> with a list of slave IP addresses:</p>
+ <p>
+ <c>SlaveIP = string() | atom | {integer(),integer(),integer(),integer()}</c>,</p>
<p>where <c><![CDATA[0 <= integer() <=255]]></c>.</p>
- <p>Examples of <c>SlaveIP</c> in atom, string and tuple form
- are: <br></br>
-<c>'150.236.16.70', "150,236,16,70", {150,236,16,70}</c>.</p>
- <p>The default value is <c>[]</c>.</p>
+ <p>Examples of <c>SlaveIP</c> in atom, string, and tuple form:</p>
+ <p><c>'150.236.16.70', "150,236,16,70", {150,236,16,70}</c>.</p>
+ <p>Defaults to <c>[]</c>.</p>
</item>
<tag><c>start_disk_log = true | false</c></tag>
<item>
<p>Starts the <c>disk_log_server</c> if the parameter is
- <c>true</c> (see <seealso marker="disk_log">disk_log(3)</seealso>).
- This parameter should be
- set to true in an embedded system which uses this service.</p>
- <p>The default value is <c>false</c>.</p>
+ <c>true</c> (see <seealso marker="disk_log"><c>disk_log(3)</c></seealso>).
+ This parameter is to be set to <c>true</c> in an embedded system
+ using this service.</p>
+ <p>Defaults to <c>false</c>.</p>
</item>
<tag><c>start_pg2 = true | false</c></tag>
<item>
+ <marker id="start_pg2"></marker>
<p>Starts the <c>pg2</c> server (see
- <seealso marker="pg2">pg2(3)</seealso>) if
- the parameter is <c>true</c>. This parameter should be set to
- <c>true</c> in an embedded system which uses this service.</p>
- <p>The default value is <c>false</c>.</p>
+ <seealso marker="pg2"><c>pg2(3)</c></seealso>) if
+ the parameter is <c>true</c>. This parameter is to be set to
+ <c>true</c> in an embedded system that uses this service.</p>
+ <p>Defaults to <c>false</c>.</p>
</item>
<tag><c>start_timer = true | false</c></tag>
<item>
<p>Starts the <c>timer_server</c> if the parameter is
- <c>true</c> (see <seealso marker="stdlib:timer">timer(3)</seealso>).
- This parameter should be
- set to <c>true</c> in an embedded system which uses this
- service.</p>
- <p>The default value is <c>false</c>.</p>
+ <c>true</c> (see <seealso marker="stdlib:timer"><c>timer(3)</c></seealso>).
+ This parameter is to be set to <c>true</c> in an embedded system
+ using this service.</p>
+ <p>Defaults to <c>false</c>.</p>
+ </item>
+        <tag><c>shell_history = enabled | disabled</c></tag>
+ <item>
+ <p>Specifies whether shell history should be logged to disk
+ between usages of <c>erl</c>.</p>
+ </item>
+ <tag><c>shell_history_drop = [string()]</c></tag>
+ <item>
+          <p>Specifies a list of log lines that are not to be persisted.
+            For example, <c>["q().", "init:stop()."]</c> makes it possible
+            to ignore commands that shut the node down. Defaults to
+            <c>[]</c>.</p>
+ </item>
+ <tag><c>shell_history_file_bytes = integer()</c></tag>
+ <item>
+          <p>Specifies how many bytes the shell should remember. Defaults
+            to 512 kb; the minimum value is 50 kb.</p>
+ </item>
+ <tag><c>shell_history_path = string()</c></tag>
+ <item>
+          <p>Specifies where the shell history files are stored.
+            Defaults to the user's cache directory as returned by
+            <c>filename:basedir(user_cache, "erlang-history")</c>.</p>
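+          <p>A sketch of a <c>sys.config</c> entry that enables shell
+            history with a custom location and size (the path and size
+            are only examples):</p>
+          <code type="none">
+[{kernel,
+  [{shell_history, enabled},
+   {shell_history_path, "/var/tmp/erlang-history"},
+   {shell_history_file_bytes, 1024000}]}].</code>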
</item>
<tag><c>shutdown_func = {Mod, Func}</c></tag>
<item>
@@ -383,36 +454,102 @@ MaxT = TickTime + TickTime / 4</code>
<item><c>Func = atom()</c></item>
</list>
<p>Sets a function that <c>application_controller</c> calls
- when it starts to terminate. The function is called as:
+ when it starts to terminate. The function is called as
<c>Mod:Func(Reason)</c>, where <c>Reason</c> is the terminate
reason for <c>application_controller</c>, and it must
return as soon as possible for <c>application_controller</c>
to terminate properly.</p>
</item>
+ <tag><c>source_search_rules = [DirRule] | [SuffixRule] </c></tag>
+ <item>
+ <marker id="source_search_rules"></marker>
+ <p>Where:</p>
+ <list type="bulleted">
+ <item><c>DirRule = {ObjDirSuffix,SrcDirSuffix}</c></item>
+ <item><c>SuffixRule = {ObjSuffix,SrcSuffix,[DirRule]}</c></item>
+ <item><c>ObjDirSuffix = string()</c></item>
+ <item><c>SrcDirSuffix = string()</c></item>
+ <item><c>ObjSuffix = string()</c></item>
+ <item><c>SrcSuffix = string()</c></item>
+ </list>
+        <p>Specifies a list of rules for use by
+          <seealso marker="stdlib:filelib#find_file/2">
+          <c>filelib:find_file/2</c></seealso> and
+          <seealso marker="stdlib:filelib#find_source/2">
+          <c>filelib:find_source/2</c></seealso>.
+          If this is set to a value other
+          than the empty list, it replaces the default rules. Rules can be
+          simple pairs of directory suffixes, such as <c>{"ebin",
+          "src"}</c>, which are used by <c>filelib:find_file/2</c>, or
+          triples specifying separate directory suffix rules depending on
+          file name extensions, for example <c>{".beam", ".erl", [{"ebin",
+          "src"}]}</c>, which are used by <c>filelib:find_source/2</c>. Both
+          kinds of rules can be mixed in the list.</p>
+ <p>The interpretation of <c>ObjDirSuffix</c> and <c>SrcDirSuffix</c>
+ is as follows: if the end of the directory name where an
+ object is located matches <c>ObjDirSuffix</c>, then the
+ name created by replacing <c>ObjDirSuffix</c> with
+ <c>SrcDirSuffix</c> is expanded by calling
+ <seealso marker="stdlib:filelib#wildcard/1">
+ <c>filelib:wildcard/1</c></seealso>, and the first regular
+ file found among the matches is the source file.
+ </p>
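+        <p>For example, the following sketch replaces the default rules
+          with one directory-pair rule and one suffix rule (the suffixes
+          shown are only illustrative):</p>
+        <code type="none">
+[{kernel,
+  [{source_search_rules,
+    [{"ebin", "src"},
+     {".beam", ".erl", [{"ebin", "src"}]}]}]}].</code>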
+
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <title>Deprecated Configuration Parameters</title>
+ <p>In Erlang/OTP 21.0, a new API for logging was added. The
+ old <c>error_logger</c> event manager, and event handlers
+ running on this manager, still work, but they are no longer used
+ by default.</p>
+ <p>The following application configuration parameters can still be
+ set, but they are only used if the corresponding configuration
+ parameters for Logger are not set.</p>
+ <taglist>
+ <tag><c>error_logger</c></tag>
+ <item>Replaced by setting the type of the default
+ <seealso marker="logger_std_h#type"><c>logger_std_h</c></seealso>
+ to the same value. Example:
+ <code type="none">
+erl -kernel logger '[{handler,default,logger_std_h,#{config=>#{type=>{file,"/tmp/erlang.log"}}}}]'
+ </code>
+ </item>
+ <tag><c>error_logger_format_depth</c></tag>
+ <item>Replaced by setting the <seealso marker="logger_formatter#depth"><c>depth</c></seealso>
+    parameter of the default handler's formatter. Example:
+ <code type="none">
+erl -kernel logger '[{handler,default,logger_std_h,#{formatter=>{logger_formatter,#{legacy_header=>true,template=>[{logger_formatter,header},"\n",msg,"\n"],depth=>10}}}}]'
+ </code>
+ </item>
</taglist>
+ <p>See <seealso marker="logger_chapter#compatibility">Backwards
+ compatibility with error_logger</seealso> for more
+ information.</p>
</section>
<section>
<title>See Also</title>
- <p><seealso marker="app">app(4)</seealso>,
- <seealso marker="application">application(3)</seealso>,
- <seealso marker="code">code(3)</seealso>,
- <seealso marker="disk_log">disk_log(3)</seealso>,
- <seealso marker="erl_boot_server">erl_boot_server(3)</seealso>,
- <seealso marker="erl_ddll">erl_ddll(3)</seealso>,
- <seealso marker="error_logger">error_logger(3)</seealso>,
- <seealso marker="file">file(3)</seealso>,
- <seealso marker="global">global(3)</seealso>,
- <seealso marker="global_group">global_group(3)</seealso>,
- <seealso marker="heart">heart(3)</seealso>,
- <seealso marker="inet">inet(3)</seealso>,
- <seealso marker="net_kernel">net_kernel(3)</seealso>,
- <seealso marker="os">os(3)</seealso>,
- <seealso marker="pg2">pg2(3)</seealso>,
- <seealso marker="rpc">rpc(3)</seealso>,
- <seealso marker="seq_trace">seq_trace(3)</seealso>,
- <seealso marker="stdlib:timer">timer(3)</seealso>,
- <seealso marker="user">user(3)</seealso></p>
+ <p><seealso marker="app"><c>app(4)</c></seealso>,
+ <seealso marker="application"><c>application(3)</c></seealso>,
+ <seealso marker="code"><c>code(3)</c></seealso>,
+ <seealso marker="disk_log"><c>disk_log(3)</c></seealso>,
+ <seealso marker="erl_boot_server"><c>erl_boot_server(3)</c></seealso>,
+ <seealso marker="erl_ddll"><c>erl_ddll(3)</c></seealso>,
+ <seealso marker="file"><c>file(3)</c></seealso>,
+ <seealso marker="global"><c>global(3)</c></seealso>,
+ <seealso marker="global_group"><c>global_group(3)</c></seealso>,
+ <seealso marker="heart"><c>heart(3)</c></seealso>,
+ <seealso marker="inet"><c>inet(3)</c></seealso>,
+ <seealso marker="logger"><c>logger(3)</c></seealso>,
+ <seealso marker="net_kernel"><c>net_kernel(3)</c></seealso>,
+ <seealso marker="os"><c>os(3)</c></seealso>,
+ <seealso marker="pg2"><c>pg2(3)</c></seealso>,
+ <seealso marker="rpc"><c>rpc(3)</c></seealso>,
+ <seealso marker="seq_trace"><c>seq_trace(3)</c></seealso>,
+ <seealso marker="user"><c>user(3)</c></seealso>,
+ <seealso marker="stdlib:timer"><c>timer(3)</c></seealso></p>
</section>
</appref>
-
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
new file mode 100644
index 0000000000..2bcf137299
--- /dev/null
+++ b/lib/kernel/doc/src/logger.xml
@@ -0,0 +1,1268 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger.xml</file>
+ </header>
+ <module>logger</module>
+ <modulesummary>API module for Logger, the standard logging facility
+ in Erlang/OTP.</modulesummary>
+
+ <description>
+ <p>This module implements the main API for logging in
+ Erlang/OTP. To create a log event, use the
+ <seealso marker="#logging_API">API functions</seealso> or the
+ log
+ <seealso marker="#macros">macros</seealso>, for example:</p>
+ <code>
+?LOG_ERROR("error happened because: ~p", [Reason]). % With macro
+logger:error("error happened because: ~p", [Reason]). % Without macro
+ </code>
+ <p>To configure the Logger backend,
+ use <seealso marker="kernel_app#logger">Kernel configuration
+ parameters</seealso>
+ or <seealso marker="#configuration_API">configuration
+ functions</seealso> in the Logger API.</p>
+
+ <p>By default, the Kernel application installs one log handler at
+ system start. This handler is named <c>default</c>. It receives
+ and processes standard log events produced by the Erlang runtime
+ system, standard behaviours and different Erlang/OTP
+ applications. The log events are by default printed to the
+ terminal.</p>
+    <p>If you want your system's logs to be printed to a file instead,
+ you must configure the default handler to do so. The simplest
+ way is to include the following in
+ your <seealso marker="config"><c>sys.config</c></seealso>:</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{config => #{type => {file, "path/to/file.log"}}}}]}]}].
+ </code>
+ <p>
+ For more information about:
+ </p>
+ <list type="bulleted">
+ <item>the Logger facility in general, see
+ the <seealso marker="logger_chapter">User's
+ Guide</seealso>.</item>
+ <item>how to configure Logger, see
+ the <seealso marker="logger_chapter#configuration">Configuration</seealso>
+ section in the User's Guide.</item>
+ <item>the built-in handlers,
+ see <seealso marker="logger_std_h">logger_std_h</seealso> and
+ <seealso marker="logger_disk_log_h">logger_disk_log_h</seealso>.</item>
+ <item>the built-in formatter,
+ see <seealso marker="logger_formatter">logger_formatter</seealso>.</item>
+ <item>built-in filters,
+ see <seealso marker="logger_filters">logger_filters</seealso>.</item>
+ </list>
+
+ <note>
+      <p>Since Logger is new in Erlang/OTP 21.0, we reserve the right
+ to introduce changes to the Logger API and functionality in
+ patches following this release. These changes might or might not
+ be backwards compatible with the initial version.</p>
+ </note>
+
+ </description>
+
+ <datatypes>
+ <datatype>
+ <name name="filter"/>
+ <desc>
+ <p>A filter which can be installed as a handler filter, or as
+ a primary filter in Logger.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_arg"/>
+ <desc>
+ <p>The second argument to the filter fun.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_id"/>
+ <desc>
+ <p>A unique identifier for a filter.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_return"/>
+ <desc>
+ <p>The return value from the filter fun.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="formatter_config"/>
+ <desc>
+ <p>Configuration data for the
+ formatter. See <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso>
+ for an example of a formatter implementation.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="handler_config"/>
+ <desc>
+ <p>Handler configuration data for Logger. The following
+ default values apply:</p>
+ <list>
+ <item><c>level => all</c></item>
+ <item><c>filter_default => log</c></item>
+ <item><c>filters => []</c></item>
+	<item><c>formatter => {logger_formatter, DefaultFormatterConfig}</c></item>
+ </list>
+ <p>In addition to these, the following fields are
+	automatically inserted by Logger, with values taken from the
+	first two parameters
+ to <seealso marker="#add_handler-3"><c>add_handler/3</c></seealso>:</p>
+ <list>
+ <item><c>id => HandlerId</c></item>
+ <item><c>module => Module</c></item>
+ </list>
+      <p>These are read-only and cannot be changed at runtime.</p>
+      <p>Handler-specific configuration data is inserted by the
+	handler callback itself, in a substructure associated with
+	the field named <c>config</c>. See
+	the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>
+	and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso>
+	manual pages for information about the specific configuration
+	for these handlers.</p>
+ <p>See the <seealso marker="logger_formatter#type-config">
+ <c>logger_formatter(3)</c></seealso> manual page for
+ information about the default configuration for this
+ formatter.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="handler_id"/>
+ <desc>
+ <p>A unique identifier for a handler instance.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="level"/>
+ <desc>
+ <p>The severity level for the message to be logged.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="log_event"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="metadata"/>
+ <desc>
+ <p>Metadata for the log event.</p>
+ <p>Logger adds the following metadata to each log event:</p>
+ <list>
+ <item><c>pid => self()</c></item>
+ <item><c>gl => group_leader()</c></item>
+ <item><c>time => erlang:system_time(microsecond)</c></item>
+ </list>
+ <p>When a log macro is used, Logger also inserts location
+ information:</p>
+ <list>
+ <item><c>mfa => {?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY}</c></item>
+ <item><c>file => ?FILE</c></item>
+ <item><c>line => ?LINE</c></item>
+ </list>
+ <p>You can add custom metadata, either by specifying a map as
+ the last parameter to any of the log macros or the API
+ functions, or by setting process metadata
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso>
+ or <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ <p>Logger merges all the metadata maps before forwarding the
+ log event to the handlers. If the same keys occur, values
+ from the log call overwrite process metadata, which in turn
+ overwrite values set by Logger.</p>
+ <p>The following custom metadata keys have special meaning:</p>
+ <taglist>
+ <tag><c>domain</c></tag>
+ <item>
+ <p>The value associated with this key is used by filters
+ for grouping log events originating from, for example,
+ specific functional
+ areas. See <seealso marker="logger_filters#domain-2">
+ <c>logger_filters:domain/2</c></seealso>
+ for a description of how this field can be used.</p>
+ </item>
+ <tag><c>report_cb</c></tag>
+ <item>
+ <p>If the log message is specified as
+ a <seealso marker="#type-report"><c>report()</c></seealso>,
+ the <c>report_cb</c> key can be associated with a fun
+ (report callback) that converts the report to a format
+ string and arguments, or directly to a string. See the
+ type definition
+ of <seealso marker="#type-report_cb"><c>report_cb()</c></seealso>,
+ and
+ section <seealso marker="logger_chapter#log_message">Log
+ Message</seealso> in the User's Guide for more
+ information about report callbacks.</p>
+ </item>
+ </taglist>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="msg_fun"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="primary_config"/>
+ <desc>
+ <p>Primary configuration data for Logger. The following
+ default values apply:</p>
+ <list>
+ <item><c>level => info</c></item>
+ <item><c>filter_default => log</c></item>
+ <item><c>filters => []</c></item>
+ </list>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="report"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="report_cb"/>
+ <desc>
+ <p>A fun which converts a <seealso marker="#type-report"><c>report()</c>
+ </seealso> to a format string and arguments, or directly to a string.
+ See section <seealso marker="logger_chapter#log_message">Log
+ Message</seealso> in the User's Guide for more
+ information.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="report_cb_config"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="timestamp"/>
+ <desc>
+ <p>A timestamp produced
+ with <seealso marker="erts:erlang#system_time-1">
+ <c>erlang:system_time(microsecond)</c></seealso>.</p>
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <section>
+ <title>Macros</title>
+ <p>The following macros are defined:</p>
+
+ <list>
+ <item><c>?LOG_EMERGENCY(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_EMERGENCY(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_ALERT(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_ALERT(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_CRITICAL(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_CRITICAL(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_ERROR(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_ERROR(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_WARNING(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_WARNING(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_NOTICE(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_NOTICE(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_INFO(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_INFO(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_DEBUG(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_DEBUG(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG(Level,StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG(Level,FunOrFormat,Args[,Metadata])</c></item>
+ </list>
+
+ <p>All macros expand to a call to Logger, where <c>Level</c> is
+ taken from the macro name, or from the first argument in the
+ case of the <c>?LOG</c> macro. Location data is added to the
+ metadata as described under
+ the <seealso marker="#type-metadata"><c>metadata()</c></seealso>
+ type definition.</p>
+
+    <p>The call is wrapped in a case statement and is evaluated
+      only if <c>Level</c> is equal to, or more severe than, the
+      configured log level.</p>
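+    <p>For example, logging an error with a format string, arguments,
+      and custom metadata (the domain value is only an example; the
+      variable <c>Reason</c> is assumed to be bound):</p>
+    <code>
+?LOG_ERROR("connection failed: ~p", [Reason], #{domain => [my_app, net]}).
+    </code>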
+ </section>
+
+ <section>
+ <marker id="logging_API"/>
+ <title>Logging API functions</title>
+ </section>
+ <funcs>
+ <func>
+ <name>emergency(StringOrReport[,Metadata])</name>
+ <name>emergency(Format,Args[,Metadata])</name>
+ <name>emergency(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>emergency</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(emergency,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>alert(StringOrReport[,Metadata])</name>
+ <name>alert(Format,Args[,Metadata])</name>
+ <name>alert(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>alert</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(alert,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>critical(StringOrReport[,Metadata])</name>
+ <name>critical(Format,Args[,Metadata])</name>
+ <name>critical(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>critical</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(critical,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>error(StringOrReport[,Metadata])</name>
+ <name>error(Format,Args[,Metadata])</name>
+ <name>error(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>error</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(error,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>warning(StringOrReport[,Metadata])</name>
+ <name>warning(Format,Args[,Metadata])</name>
+ <name>warning(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>warning</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(warning,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>notice(StringOrReport[,Metadata])</name>
+ <name>notice(Format,Args[,Metadata])</name>
+ <name>notice(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>notice</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(notice,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>info(StringOrReport[,Metadata])</name>
+ <name>info(Format,Args[,Metadata])</name>
+ <name>info(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>info</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(info,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>debug(StringOrReport[,Metadata])</name>
+ <name>debug(Format,Args[,Metadata])</name>
+ <name>debug(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>debug</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(debug,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="log" arity="2"/>
+ <name name="log" arity="3" clause_i="1"/>
+ <name name="log" arity="3" clause_i="2"/>
+ <name name="log" arity="3" clause_i="3"/>
+ <name name="log" arity="4" clause_i="1"/>
+ <name name="log" arity="4" clause_i="2"/>
+ <fsummary>Logs the given message.</fsummary>
+ <type variable="Level"/>
+ <type variable="StringOrReport" name_i="1"/>
+ <type variable="Format" name_i="3"/>
+ <type variable="Args" name_i="3"/>
+ <type variable="Fun" name_i="4"/>
+ <type variable="FunArgs" name_i="4"/>
+ <type variable="Metadata"/>
+ <desc>
+ <p>Log the given message.</p>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <marker id="configuration_API"/>
+ <title>Configuration API functions</title>
+ </section>
+ <funcs>
+ <func>
+ <name name="add_handler" arity="3"/>
+ <fsummary>Add a handler with the given configuration.</fsummary>
+ <desc>
+ <p>Add a handler with the given configuration.</p>
+ <p><c><anno>HandlerId</anno></c> is a unique identifier which
+ must be used in all subsequent calls referring to this
+ handler.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_handler_filter" arity="3"/>
+ <fsummary>Add a filter to the specified handler.</fsummary>
+ <desc>
+ <p>Add a filter to the specified handler.</p>
+ <p>The filter fun is called with the log event as the first
+ parameter, and the specified <c>filter_args()</c> as the
+ second parameter.</p>
+ <p>The return value of the fun specifies if a log event is to
+ be discarded or forwarded to the handler callback:</p>
+ <taglist>
+ <tag><c>log_event()</c></tag>
+ <item>
+ <p>The filter <em>passed</em>. The next handler filter, if
+ any, is applied. If no more filters exist for this
+ handler, the log event is forwarded to the handler
+ callback.</p>
+ </item>
+ <tag><c>stop</c></tag>
+ <item>
+ <p>The filter <em>did not pass</em>, and the log event is
+ immediately discarded.</p>
+ </item>
+ <tag><c>ignore</c></tag>
+ <item>
+ <p>The filter has no knowledge of the log event. The next
+ handler filter, if any, is applied. If no more filters
+ exist for this handler, the value of
+ the <c>filter_default</c> configuration parameter for
+ the handler specifies if the log event shall be
+ discarded or forwarded to the handler callback.</p>
+ </item>
+ </taglist>
+ <p>See
+ section <seealso marker="logger_chapter#filters">Filters</seealso>
+ in the User's Guide for more information about filters.</p>
+ <p>Some built-in filters exist. These are defined in
+ <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p>
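+      <p>A minimal sketch that discards all <c>debug</c> events for the
+        handler <c>default</c> (the filter id <c>stop_debug</c> is only
+        an example):</p>
+      <code>
+logger:add_handler_filter(default, stop_debug,
+    {fun(#{level := debug}, _Extra) -> stop;
+        (_LogEvent, _Extra) -> ignore
+     end, []}).
+      </code>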
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_handlers" arity="1" clause_i="1"/>
+ <fsummary>Set up log handlers from the application's
+ configuration parameters.</fsummary>
+ <desc>
+ <p>Reads the application configuration parameter <c>logger</c> and
+ calls <c>add_handlers/1</c> with its contents.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_handlers" arity="1" clause_i="2"/>
+ <fsummary>Setup logger handlers.</fsummary>
+ <type name="config_handler"/>
+ <desc>
+ <p>This function should be used by custom Logger handlers to make
+ configuration consistent no matter which handler the system uses.
+ Normal usage is to add a call to <c>logger:add_handlers/1</c>
+ just after the processes that the handler needs are started,
+ and pass the application's <c>logger</c> configuration as the argument.
+ For example:</p>
+ <code>
+-behaviour(application).
+start(_, []) ->
+ case supervisor:start_link({local, my_sup}, my_sup, []) of
+ {ok, Pid} ->
+ ok = logger:add_handlers(my_app),
+ {ok, Pid, []};
+ Error -> Error
+ end.</code>
+ <p>This reads the <c>logger</c> configuration parameter from
+ the <c>my_app</c> application and starts the configured
+ handlers. The contents of the configuration use the same
+ rules as the
+ <seealso marker="logger_chapter#handler-configuration">logger handler configuration</seealso>.
+ </p>
+ <p>If the handler is meant to replace the default handler, the Kernel's
+      default handler has to be disabled before the new handler is added.
+ A <c>sys.config</c> file that disables the Kernel handler and adds
+ a custom handler could look like this:</p>
+ <code>
+[{kernel,
+ [{logger,
+ %% Disable the default Kernel handler
+ [{handler, default, undefined}]}]},
+ {my_app,
+ [{logger,
+ %% Enable this handler as the default
+ [{handler, default, my_handler, #{}}]}]}].
+ </code>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_primary_filter" arity="2"/>
+ <fsummary>Add a primary filter to Logger.</fsummary>
+ <desc>
+ <p>Add a primary filter to Logger.</p>
+ <p>The filter fun is called with the log event as the first
+ parameter, and the specified <c>filter_args()</c> as the
+ second parameter.</p>
+ <p>The return value of the fun specifies if a log event is to
+ be discarded or forwarded to the handlers:</p>
+ <taglist>
+ <tag><c>log_event()</c></tag>
+ <item>
+ <p>The filter <em>passed</em>. The next primary filter, if
+ any, is applied. If no more primary filters exist, the
+ log event is forwarded to the handler part of Logger,
+ where handler filters are applied.</p>
+ </item>
+ <tag><c>stop</c></tag>
+ <item>
+ <p>The filter <em>did not pass</em>, and the log event is
+ immediately discarded.</p>
+ </item>
+ <tag><c>ignore</c></tag>
+ <item>
+ <p>The filter has no knowledge of the log event. The next
+ primary filter, if any, is applied. If no more primary
+ filters exist, the value of the
+ primary <c>filter_default</c> configuration parameter
+ specifies if the log event shall be discarded or
+ forwarded to the handler part.</p>
+ </item>
+ </taglist>
+ <p>See section <seealso marker="logger_chapter#filters">
+ Filters</seealso> in the User's Guide for more information
+ about filters.</p>
+ <p>Some built-in filters exist. These are defined
+ in <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p>
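+      <p>For example, a primary filter built on
+        <c>logger_filters:domain/2</c> that stops all events in a given
+        domain (the domain and filter id are only for illustration):</p>
+      <code>
+logger:add_primary_filter(stop_noisy_domain,
+    {fun logger_filters:domain/2, {stop, sub, [my_app, noisy]}}).
+      </code>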
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_config" arity="0"/>
+      <fsummary>Look up the current Logger configuration.</fsummary>
+ <desc>
+ <p>Look up all current Logger configuration, including primary
+ and handler configuration, and module level settings.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_handler_config" arity="0"/>
+ <fsummary>Look up the current configuration for all handlers.</fsummary>
+ <desc>
+ <p>Look up the current configuration for all handlers.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_handler_config" arity="1"/>
+ <fsummary>Look up the current configuration for the given
+ handler.</fsummary>
+ <desc>
+ <p>Look up the current configuration for the given handler.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_handler_ids" arity="0"/>
+ <fsummary>Look up the identities for all installed handlers.</fsummary>
+ <desc>
+ <p>Look up the identities for all installed handlers.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_primary_config" arity="0"/>
+ <fsummary>Look up the current primary configuration for Logger.</fsummary>
+ <desc>
+ <p>Look up the current primary configuration for Logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_module_level" arity="0"/>
+ <fsummary>Look up all current module levels.</fsummary>
+ <desc>
+ <p>Look up all current module levels. Returns a list
+ containing one <c>{Module,Level}</c> element for each module
+ for which the module level was previously set
+ with <seealso marker="#set_module_level-2">
+ <c>set_module_level/2</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_module_level" arity="1"/>
+ <fsummary>Look up the current level for the given modules.</fsummary>
+ <desc>
+ <p>Look up the current level for the given modules. Returns a
+ list containing one <c>{Module,Level}</c> element for each
+ of the given modules for which the module level was
+ previously set with <seealso marker="#set_module_level-2">
+ <c>set_module_level/2</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_process_metadata" arity="0"/>
+ <fsummary>Retrieve data set with set_process_metadata/1.</fsummary>
+ <desc>
+ <p>Retrieve data set
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso> or
+ <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_handler" arity="1"/>
+ <fsummary>Remove the handler with the specified identity.</fsummary>
+ <desc>
+ <p>Remove the handler identified by <c><anno>HandlerId</anno></c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_handler_filter" arity="2"/>
+ <fsummary>Remove a filter from the specified handler.</fsummary>
+ <desc>
+ <p>Remove the filter identified
+ by <c><anno>FilterId</anno></c> from the handler identified
+ by <c><anno>HandlerId</anno></c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_primary_filter" arity="1"/>
+ <fsummary>Remove a primary filter from Logger.</fsummary>
+ <desc>
+ <p>Remove the primary filter identified
+ by <c><anno>FilterId</anno></c> from Logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_application_level" arity="2"/>
+ <fsummary>Set the log level for all modules in the specified application.</fsummary>
+ <desc>
+ <p>Set the log level for all the modules of the specified application.</p>
+ <p>This function is a convenience function that calls
+ <seealso marker="#set_module_level/2">logger:set_module_level/2</seealso>
+ for each module associated with an application.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_handler_config" arity="2"/>
+ <fsummary>Set configuration data for the specified handler.</fsummary>
+ <desc>
+ <p>Set configuration data for the specified handler. This
+ overwrites the current handler configuration.</p>
+ <p>To modify the existing configuration,
+ use <seealso marker="#update_handler_config-2">
+ <c>update_handler_config/2</c></seealso>, or, if a more
+ complex merge is needed, read the current configuration
+ with <seealso marker="#get_handler_config-1"><c>get_handler_config/1</c>
+ </seealso>, then do the merge before writing the new
+ configuration back with this function.</p>
+ <p>If a key is removed compared to the current configuration,
+ and the key is known by Logger, the default value is used. If
+ it is a custom key, then it is up to the handler
+ implementation if the value is removed or a default value is
+ inserted.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_handler_config" arity="3" clause_i="1"/>
+ <name name="set_handler_config" arity="3" clause_i="2"/>
+ <name name="set_handler_config" arity="3" clause_i="3"/>
+ <name name="set_handler_config" arity="3" clause_i="4"/>
+ <name name="set_handler_config" arity="3" clause_i="5"/>
+ <fsummary>Add or update configuration data for the specified
+ handler.</fsummary>
+ <type variable="HandlerId"/>
+ <type variable="Level" name_i="1"/>
+ <type variable="FilterDefault" name_i="2"/>
+ <type variable="Filters" name_i="3"/>
+ <type variable="Formatter" name_i="4"/>
+ <type variable="Config" name_i="5"/>
+ <type variable="Return"/>
+ <desc>
+ <p>Add or update configuration data for the specified
+ handler. If the given <c><anno>Key</anno></c> already
+ exists, its associated value will be changed
+ to the given value. If it does not exist, it will
+ be added.</p>
+ <p>If the value is incomplete, which for example can be the
+ case for the <c>config</c> key, it is up to the handler
+ implementation how the unspecified parts are set. For all
+ handlers in the Kernel application, unspecified data for
+ the <c>config</c> key is set to default values. To update
+ only specified data, and keep the existing configuration for
+ the rest, use <seealso marker="#update_handler_config-3">
+ <c>update_handler_config/3</c></seealso>.</p>
+ <p>See the definition of
+ the <seealso marker="#type-handler_config">
+ <c>handler_config()</c></seealso> type for more
+ information about the different parameters.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_primary_config" arity="1"/>
+ <fsummary>Set primary configuration data for Logger.</fsummary>
+ <desc>
+ <p>Set primary configuration data for Logger. This
+ overwrites the current configuration.</p>
+ <p>To modify the existing configuration,
+ use <seealso marker="#update_primary_config-1">
+ <c>update_primary_config/1</c></seealso>, or, if a more
+ complex merge is needed, read the current configuration
+ with <seealso marker="#get_primary_config-0"><c>get_primary_config/0</c>
+ </seealso>, then do the merge before writing the new
+ configuration back with this function.</p>
+ <p>If a key is removed compared to the current configuration,
+ the default value is used.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_primary_config" arity="2" clause_i="1"/>
+ <name name="set_primary_config" arity="2" clause_i="2"/>
+ <name name="set_primary_config" arity="2" clause_i="3"/>
+ <fsummary>Add or update primary configuration data for Logger.</fsummary>
+ <type variable="Level" name_i="1"/>
+ <type variable="FilterDefault" name_i="2"/>
+ <type variable="Filters" name_i="3"/>
+ <desc>
+ <p>Add or update primary configuration data for Logger. If the
+ given <c><anno>Key</anno></c> already exists, its associated
+ value will be changed to the given value. If it does not
+ exist, it will be added.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_module_level" arity="2"/>
+ <fsummary>Set the log level for the specified modules.</fsummary>
+ <desc>
+ <p>Set the log level for the specified modules.</p>
+ <p>The log level for a module overrides the primary log level
+ of Logger for log events originating from the module in
+ question. Notice, however, that it does not override the
+ level configuration for any handler.</p>
+ <p>For example: Assume that the primary log level for Logger
+ is <c>info</c>, and there is one handler, <c>h1</c>, with
+ level <c>info</c> and one handler, <c>h2</c>, with
+ level <c>debug</c>.</p>
+ <p>With this configuration, no debug messages will be logged,
+ since they are all stopped by the primary log level.</p>
+ <p>If the level for <c>mymodule</c> is now set
+ to <c>debug</c>, then debug events from this module will be
+ logged by the handler <c>h2</c>, but not by
+ handler <c>h1</c>.</p>
+ <p>Debug events from other modules are still not logged.</p>
+ <p>To change the primary log level for Logger, use
+ <seealso marker="#set_primary_config/2">
+ <c>set_primary_config(level, Level)</c></seealso>.</p>
+ <p>To change the log level for a handler, use
+ <seealso marker="#set_handler_config/3">
+ <c>set_handler_config(HandlerId, level, Level)</c>
+ </seealso>.</p>
+ <note>
+ <p>The originating module for a log event is only detected
+ if the key <c>mfa</c> exists in the metadata, and is
+ associated with <c>{Module, Function, Arity}</c>. When log
+ macros are used, this association is automatically added
+ to all log events. If an API function is called directly,
+ without using a macro, the logging client must explicitly
+ add this information if module levels shall have any
+ effect.</p>
+ </note>
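+      <p>Continuing the example above, a sketch that enables
+        <c>debug</c> events from <c>mymodule</c> only:</p>
+      <code>
+logger:set_module_level([mymodule], debug).
+      </code>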
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_process_metadata" arity="1"/>
+ <fsummary>Set metadata to use when logging from current process.</fsummary>
+ <desc>
+ <p>Set metadata which Logger shall automatically insert in
+ all log events produced on the current process.</p>
+ <p>Location data produced by the log macros, and/or metadata
+ given as argument to the log call (API function or macro),
+ are merged with the process metadata. If the same keys
+ occur, values from the metadata argument to the log call
+ overwrite values from the process metadata, which in turn
+ overwrite values from the location data.</p>
+      <p>Subsequent calls to this function overwrite previously set
+	data. To update existing data instead of overwriting it,
+ see <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
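+      <p>A brief sketch (the metadata key is only an example):</p>
+      <code>
+logger:set_process_metadata(#{transaction_id => 123}),
+logger:notice("transaction started"). % the event now carries transaction_id
+      </code>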
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_application_level" arity="1"/>
+ <fsummary>Unset the log level for all modules in the specified application.</fsummary>
+ <desc>
+ <p>Unset the log level for all the modules of the specified application.</p>
+      <p>This function is a convenience function that calls
+	<seealso marker="#unset_module_level/1">logger:unset_module_level/1</seealso>
+ for each module associated with an application.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_module_level" arity="0"/>
+ <fsummary>Remove module specific log settings for all modules.</fsummary>
+ <desc>
+ <p>Remove module specific log settings. After this, the
+ primary log level is used for all modules.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_module_level" arity="1"/>
+ <fsummary>Remove module specific log settings for the given
+ modules.</fsummary>
+ <desc>
+ <p>Remove module specific log settings. After this, the
+ primary log level is used for the specified modules.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_process_metadata" arity="0"/>
+ <fsummary>Delete data set with set_process_metadata/1.</fsummary>
+ <desc>
+ <p>Delete data set
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso> or
+ <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_formatter_config" arity="2"/>
+ <fsummary>Update the formatter configuration for the specified handler.</fsummary>
+ <desc>
+ <p>Update the formatter configuration for the specified handler.</p>
+ <p>The new configuration is merged with the existing formatter
+ configuration.</p>
+ <p>To overwrite the existing configuration without any merge,
+ use</p>
+ <pre>
+<seealso marker="#set_handler_config-3">set_handler_config(HandlerId, formatter,
+ {FormatterModule, FormatterConfig})</seealso>.</pre>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_formatter_config" arity="3"/>
+ <fsummary>Update the formatter configuration for the specified handler.</fsummary>
+ <desc>
+ <p>Update the formatter configuration for the specified handler.</p>
+ <p>This is equivalent to</p>
+ <pre>
+<seealso marker="#update_formatter_config-2">update_formatter_config(<anno>HandlerId</anno>, #{<anno>Key</anno> => <anno>Value</anno>})</seealso></pre>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_handler_config" arity="2"/>
+ <fsummary>Update configuration data for the specified handler.</fsummary>
+ <desc>
+ <p>Update configuration data for the specified handler. This function
+ behaves as if it was implemented as follows:</p>
+ <code type="erl">
+{ok, {_, Old}} = logger:get_handler_config(HandlerId),
+logger:set_handler_config(HandlerId, maps:merge(Old, Config)).
+ </code>
+ <p>To overwrite the existing configuration without any merge,
+ use <seealso marker="#set_handler_config-2"><c>set_handler_config/2</c>
+ </seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_handler_config" arity="3" clause_i="1"/>
+ <name name="update_handler_config" arity="3" clause_i="2"/>
+ <name name="update_handler_config" arity="3" clause_i="3"/>
+ <name name="update_handler_config" arity="3" clause_i="4"/>
+ <name name="update_handler_config" arity="3" clause_i="5"/>
+ <fsummary>Add or update configuration data for the specified
+ handler.</fsummary>
+ <type variable="HandlerId"/>
+ <type variable="Level" name_i="1"/>
+ <type variable="FilterDefault" name_i="2"/>
+ <type variable="Filters" name_i="3"/>
+ <type variable="Formatter" name_i="4"/>
+ <type variable="Config" name_i="5"/>
+ <type variable="Return"/>
+ <desc>
+ <p>Add or update configuration data for the specified
+ handler. If the given <c><anno>Key</anno></c> already
+ exists, its associated value will be changed
+ to the given value. If it does not exist, it will
+ be added.</p>
+ <p>If the value is incomplete, which for example can be the
+ case for the <c>config</c> key, it is up to the handler
+ implementation how the unspecified parts are set. For all
+ handlers in the Kernel application, unspecified data for
+ the <c>config</c> key is not changed. To reset unspecified
+ data to default values,
+ use <seealso marker="#set_handler_config-3">
+ <c>set_handler_config/3</c></seealso>.</p>
+ <p>See the definition of
+ the <seealso marker="#type-handler_config">
+ <c>handler_config()</c></seealso> type for more
+ information about the different parameters.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_primary_config" arity="1"/>
+ <fsummary>Update primary configuration data for Logger.</fsummary>
+ <desc>
+ <p>Update primary configuration data for Logger. This function
+ behaves as if it was implemented as follows:</p>
+ <code type="erl">
+Old = logger:get_primary_config(),
+logger:set_primary_config(maps:merge(Old, Config)).
+ </code>
+ <p>To overwrite the existing configuration without any merge,
+ use <seealso marker="#set_primary_config-1"><c>set_primary_config/1</c>
+ </seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_process_metadata" arity="1"/>
+ <fsummary>Set or update metadata to use when logging from
+ current process.</fsummary>
+ <desc>
+ <p>Set or update metadata to use when logging from the
+ current process.</p>
+ <p>If process metadata exists for the current process, this
+ function behaves as if it was implemented as follows:</p>
+ <code type="erl">
+logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
+ </code>
+ <p>If no process metadata exists, the function behaves as
+ <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c>
+ </seealso>.</p>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <marker id="misc_API"/>
+ <title>Miscellaneous API functions</title>
+ </section>
+ <funcs>
+ <func>
+ <name name="compare_levels" arity="2"/>
+ <fsummary>Compare the severity of two log levels.</fsummary>
+ <desc>
+ <p>Compare the severity of two log levels. Returns <c>gt</c>
+ if <c>Level1</c> is more severe than
+ <c>Level2</c>, <c>lt</c> if <c>Level1</c> is less severe,
+ and <c>eq</c> if the levels are equal.</p>
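+ <p>For example, <c>error</c> is more severe than <c>info</c>,
+ and <c>debug</c> is less severe than <c>warning</c>:</p>
+ <code type="erl">
+gt = logger:compare_levels(error, info),
+lt = logger:compare_levels(debug, warning).
+ </code>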
+ </desc>
+ </func>
+
+ <func>
+ <name name="format_report" arity="1"/>
+ <fsummary>Convert a log message on report form to {Format, Args}.</fsummary>
+ <desc>
+ <p>Convert a log message on report form to <c>{Format,
+ Args}</c>. This is the default report callback used
+ by <seealso marker="logger_formatter">
+ <c>logger_formatter</c></seealso> when no custom report
+ callback is found. See
+ section <seealso marker="logger_chapter#log_message">Log
+ Message</seealso> in the Kernel User's Guide for
+ information about report callbacks and valid forms of log
+ messages.</p>
+ <p>The function produces lines of <c>Key: Value</c> from
+ key-value lists. Strings are printed with <c>~ts</c> and
+ other terms with <c>~tp</c>.</p>
+ <p>If <c><anno>Report</anno></c> is a map, it is converted to
+ a key-value list before formatting as such.</p>
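+ <p>A usage sketch, where the resulting format string and arguments
+ are passed on to <c>io_lib:format/2</c> (the report keys are
+ arbitrary examples):</p>
+ <code type="erl">
+{Format, Args} = logger:format_report(#{reason => timeout, retries => 3}),
+String = io_lib:format(Format, Args).
+ </code>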
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <marker id="handler_callback_functions"/>
+ <title>Handler Callback Functions</title>
+ <p>The following functions are to be exported from a handler
+ callback module.</p>
+ </section>
+
+ <funcs>
+ <func>
+ <name>HModule:adding_handler(Config1) -> {ok, Config2} | {error,
+ Reason}</name>
+ <fsummary>An instance of this handler is about to be added.</fsummary>
+ <type>
+ <v>Config1 = Config2 =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called on a temporary process when a new
+ handler is about to be added. The purpose is to verify the
+ configuration and initiate all resources needed by the
+ handler.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config1</c>.</p>
+ <p>If everything succeeds, the callback function can add
+ possible default values or internal state values to the
+ configuration, and return the adjusted map
+ in <c>{ok,Config2}</c>.</p>
+ <p>If the configuration is faulty, or if the initiation fails,
+ the callback function must return <c>{error,Reason}</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig) -> {ok, Config} | {error, Reason}</name>
+ <fsummary>The configuration for this handler is about to change.</fsummary>
+ <type>
+ <v>SetOrUpdate = set | update</v>
+ <v>OldConfig = NewConfig = Config =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called on a temporary process when the
+ configuration for a handler is about to change. The purpose
+ is to verify and act on the new configuration.</p>
+ <p><c>OldConfig</c> is the existing configuration
+ and <c>NewConfig</c> is the new configuration.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>OldConfig</c>.</p>
+ <p><c>SetOrUpdate</c> has the value <c>set</c> if the
+ configuration change originates from a call to
+ <seealso marker="#set_handler_config-2">
+ <c>set_handler_config/2,3</c></seealso>, and <c>update</c>
+ if it originates from <seealso marker="#update_handler_config-2">
+ <c>update_handler_config/2,3</c></seealso>. The handler can
+ use this parameter to decide how to update the value of
+ the <c>config</c> field, that is, the handler specific
+ configuration data. Typically, if <c>SetOrUpdate</c>
+ equals <c>set</c>, values that are not specified must be
+ given their default values. If <c>SetOrUpdate</c>
+ equals <c>update</c>, the values found in <c>OldConfig</c>
+ must be used instead.</p>
+ <p>If everything succeeds, the callback function must return a
+ possibly adjusted configuration in <c>{ok,Config}</c>.</p>
+ <p>If the configuration is faulty, the callback function must
+ return <c>{error,Reason}</c>.</p>
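+ <p>As a sketch, a handler whose handler specific data is a map with
+ a hypothetical <c>file</c> key could act on <c>SetOrUpdate</c> as
+ follows:</p>
+ <code type="erl">
+changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+    Default = #{file => "myhandler.log"},
+    Old = maps:get(config, OldConfig, Default),
+    New = maps:get(config, NewConfig, #{}),
+    Base = case SetOrUpdate of
+               set -> Default;   %% unspecified keys are reset to defaults
+               update -> Old     %% unspecified keys keep their old values
+           end,
+    {ok, NewConfig#{config => maps:merge(Base, New)}}.
+ </code>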
+ </desc>
+ </func>
+
+ <func>
+ <name>HModule:filter_config(Config) -> FilteredConfig</name>
+ <fsummary>Remove internal data from configuration.</fsummary>
+ <type>
+ <v>Config = FilteredConfig =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called when one of the Logger API functions
+ for fetching the handler configuration is called, for
+ example
+ <seealso marker="#get_handler_config-1">
+ <c>logger:get_handler_config/1</c></seealso>.</p>
+ <p>It allows the handler to remove internal data fields from
+ its configuration data before it is returned to the
+ caller.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>HModule:log(LogEvent, Config) -> void()</name>
+ <fsummary>Log the given log event.</fsummary>
+ <type>
+ <v>LogEvent =
+ <seealso marker="#type-log_event">log_event()</seealso></v>
+ <v>Config =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is mandatory.</p>
+ <p>The function is called when all primary filters and all
+ handler filters for the handler in question have passed for
+ the given log event. It is called on the client process, that
+ is, the process that issued the log event.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config</c>.</p>
+ <p>The handler must log the event.</p>
+ <p>The return value from this function is ignored by
+ Logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>HModule:removing_handler(Config) -> ok</name>
+ <fsummary>The given handler is about to be removed.</fsummary>
+ <type>
+ <v>Config =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called on a temporary process when a
+ handler is about to be removed. The purpose is to release
+ all resources used by the handler.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config</c>.</p>
+ <p>The return value is ignored by Logger.</p>
+ </desc>
+ </func>
+
+ </funcs>
+
+ <section>
+ <marker id="formatter_callback_functions"/>
+ <title>Formatter Callback Functions</title>
+ <p>The following functions are to be exported from a formatter
+ callback module.</p>
+ </section>
+
+ <funcs>
+ <func>
+ <name>FModule:check_config(FConfig) -> ok | {error, Reason}</name>
+ <fsummary>Validate the given formatter configuration.</fsummary>
+ <type>
+ <v>FConfig =
+ <seealso marker="#type-formatter_config">formatter_config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called by a Logger when formatter
+ configuration is set or modified. The formatter must
+ validate the given configuration and return <c>ok</c> if it
+ is correct, and <c>{error,Reason}</c> if it is faulty.</p>
+ <p>The following Logger API functions can trigger this callback:</p>
+ <list>
+ <item><seealso marker="logger#add_handler-3">
+ <c>logger:add_handler/3</c></seealso></item>
+ <item><seealso marker="logger#set_handler_config-2">
+ <c>logger:set_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#update_handler_config-2">
+ <c>logger:update_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#update_formatter_config-2">
+ <c>logger:update_formatter_config/2</c></seealso></item>
+ </list>
+ <p>See <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso>
+ for an example implementation. <c>logger_formatter</c> is the
+ default formatter used by Logger.</p>
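+ <p>As a sketch, a formatter that accepts a single,
+ hypothetical <c>prefix</c> option could validate its configuration
+ as follows:</p>
+ <code type="erl">
+check_config(FConfig) when is_map(FConfig) ->
+    case maps:keys(FConfig) -- [prefix] of
+        [] -> ok;
+        Unknown -> {error, {unknown_formatter_config, Unknown}}
+    end;
+check_config(FConfig) ->
+    {error, {invalid_formatter_config, FConfig}}.
+ </code>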
+ </desc>
+ </func>
+ <func>
+ <name>FModule:format(LogEvent, FConfig) -> FormattedLogEntry</name>
+ <fsummary>Format the given log event.</fsummary>
+ <type>
+ <v>LogEvent =
+ <seealso marker="#type-log_event">log_event()</seealso></v>
+ <v>FConfig =
+ <seealso marker="#type-formatter_config">formatter_config()</seealso></v>
+ <v>FormattedLogEntry =
+ <seealso marker="unicode#type-chardata">unicode:chardata()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is mandatory.</p>
+ <p>The function can be called by a log handler to convert a
+ log event term to a printable string. The returned value
+ can, for example, be printed as a log entry to the console
+ or a file using <seealso marker="stdlib:io#put_chars-1">
+ <c>io:put_chars/1,2</c></seealso>.</p>
+ <p>See <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso>
+ for an example implementation. <c>logger_formatter</c> is the
+ default formatter used by Logger.</p>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p>
+ <seealso marker="config"><c>config(4)</c></seealso>,
+ <seealso marker="erts:erlang"><c>erlang(3)</c></seealso>,
+ <seealso marker="stdlib:io"><c>io(3)</c></seealso>,
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso>,
+ <seealso marker="logger_filters"><c>logger_filters(3)</c></seealso>,
+ <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>,
+ <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>,
+ <seealso marker="stdlib:unicode"><c>unicode(3)</c></seealso>
+ </p>
+ </section>
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_arch.dia b/lib/kernel/doc/src/logger_arch.dia
new file mode 100644
index 0000000000..97be31856e
--- /dev/null
+++ b/lib/kernel/doc/src/logger_arch.dia
Binary files differ
diff --git a/lib/kernel/doc/src/logger_arch.png b/lib/kernel/doc/src/logger_arch.png
new file mode 100644
index 0000000000..70933a5a41
--- /dev/null
+++ b/lib/kernel/doc/src/logger_arch.png
Binary files differ
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
new file mode 100644
index 0000000000..1870d2ab79
--- /dev/null
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -0,0 +1,1345 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Logging</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>logger_chapter.xml</file>
+ </header>
+
+ <p>Erlang/OTP 21.0 provides a standard API for logging
+ through <c>Logger</c>, which is part of the Kernel
+ application. Logger consists of the API for issuing log events,
+ and a customizable backend where log handlers, filters and
+ formatters can be plugged in.</p>
+ <p>By default, the Kernel application installs one log handler at
+ system start. This handler is named <c>default</c>. It receives
+ and processes standard log events produced by the Erlang runtime
+ system, standard behaviours and different Erlang/OTP
+ applications. The log events are by default written to the
+ terminal.</p>
+ <p>You can also configure the system so that the default handler
+ prints log events to a single file, or to a set of wrap logs
+ via <seealso marker="disk_log"><c>disk_log</c></seealso>.</p>
+ <p>By configuration, you can also modify or disable the default
+ handler, replace it by a custom handler, and install additional
+ handlers.</p>
+
+ <note>
+ <p>Since Logger is new in Erlang/OTP 21.0, we do reserve the right
+ to introduce changes to the Logger API and functionality in
+ patches following this release. These changes might or might not
+ be backwards compatible with the initial version.</p>
+ </note>
+
+ <section>
+ <title>Overview</title>
+ <p>A <em>log event</em> consists of a <em>log level</em>, the
+ <em>message</em> to be logged, and <em>metadata</em>.</p>
+ <p>The Logger backend forwards log events from the API, first
+ through a set of <em>primary filters</em>, then through a set of
+ secondary filters attached to each log handler. The secondary
+ filters are hereafter referred to as <em>handler filters</em>.</p>
+ <p>Each filter set consists of a <em>log level check</em>,
+ followed by zero or more <em>filter functions</em>.</p>
+ <p>The following figure shows a conceptual overview of Logger. The
+ figure shows two log handlers, but any number of handlers can be
+ installed.</p>
+
+ <!-- The image is edited with dia in logger_arch.dia file,
+ and .png file generated with make target 'png'. -->
+ <image file="logger_arch.png">
+ <icaption>Conceptual Overview</icaption>
+ </image>
+
+ <p>Log levels are expressed as atoms. Internally in Logger, the
+ atoms are mapped to integer values, and a log event passes the
+ log level check if the integer value of its log level is less
+ than or equal to the currently configured log level. That is,
+ the check passes if the event is equally or more severe than the
+ configured level. See section <seealso marker="#log_level">Log
+ Level</seealso> for a listing and description of all log
+ levels.</p>
+ <p>The primary log level can be overridden by a log level
+ configured per module. This is to, for instance, allow more
+ verbose logging from a specific part of the system.</p>
+ <p>Filter functions can be used for more sophisticated filtering
+ than the log level check provides. A filter function can stop or
+ pass a log event, based on any of the event's contents. It can
+ also modify all parts of the log event. See
+ section <seealso marker="#filters">Filters</seealso> for more
+ details.</p>
+ <p>If a log event passes through all primary filters and all
+ handler filters for a specific handler, Logger forwards the
+ event to the <em>handler callback</em>. The handler formats and
+ prints the event to its destination. See
+ section <seealso marker="#handlers">Handlers</seealso> for more
+ details.</p>
+ <p>Everything up to and including the call to the handler
+ callbacks is executed on the client process, that is, the
+ process where the log event was issued. It is up to the handler
+ implementation if other processes are involved or not.</p>
+ <p>The handlers are called in sequence, and the order is not
+ defined.</p>
+ </section>
+ <section>
+ <marker id="logger_api"/>
+ <title>Logger API</title>
+ <p>The API for logging consists of a set
+ of <seealso marker="logger#macros">macros</seealso>, and a set
+ of functions on the form <c>logger:Level/1,2,3</c>, which are
+ all shortcuts
+ for <seealso marker="logger#log-2">
+ <c>logger:log(Level,Arg1[,Arg2[,Arg3]])</c></seealso>.</p>
+ <p>The difference between using the macros and the exported
+ functions is that macros add location (originator) information
+ to the metadata, and perform lazy evaluation by wrapping the
+ logger call in a case statement, so it is only evaluated if the
+ log level of the event passes the primary log level check.</p>
+ <section>
+ <marker id="log_level"/>
+ <title>Log Level</title>
+ <p>The log level indicates the severity of an event. In
+ accordance with the Syslog protocol,
+ <url href="https://www.ietf.org/rfc/rfc5424.txt">RFC
+ 5424</url>, eight log levels can be specified. The following
+ table lists all possible log levels by name (atom), integer
+ value, and description:</p>
+
+ <table align="left">
+ <row>
+ <cell><strong>Level</strong></cell>
+ <cell align="center"><strong>Integer</strong></cell>
+ <cell><strong>Description</strong></cell>
+ </row>
+ <row>
+ <cell>emergency</cell>
+ <cell align="center">0</cell>
+ <cell>system is unusable</cell>
+ </row>
+ <row>
+ <cell>alert</cell>
+ <cell align="center">1</cell>
+ <cell>action must be taken immediately</cell>
+ </row>
+ <row>
+ <cell>critical</cell>
+ <cell align="center">2</cell>
+ <cell>critical conditions</cell>
+ </row>
+ <row>
+ <cell>error</cell>
+ <cell align="center">3</cell>
+ <cell>error conditions</cell>
+ </row>
+ <row>
+ <cell>warning</cell>
+ <cell align="center">4</cell>
+ <cell>warning conditions</cell>
+ </row>
+ <row>
+ <cell>notice</cell>
+ <cell align="center">5</cell>
+ <cell>normal but significant conditions</cell>
+ </row>
+ <row>
+ <cell>info</cell>
+ <cell align="center">6</cell>
+ <cell>informational messages</cell>
+ </row>
+ <row>
+ <cell>debug</cell>
+ <cell align="center">7</cell>
+ <cell>debug-level messages</cell>
+ </row>
+ <tcaption>Log Levels</tcaption>
+ </table>
+ <p>Notice that the integer value is only used internally in
+ Logger. In the API, you must always use the atom. To compare
+ the severity of two log levels,
+ use <seealso marker="logger#compare_levels-2">
+ <c>logger:compare_levels/2</c></seealso>.</p>
+ </section>
+ <section>
+ <marker id="log_message"/>
+ <title>Log Message</title>
+ <p>The log message contains the information to be logged. The
+ message can consist of a format string and arguments (given as
+ two separate parameters in the Logger API), a string or a
+ report. The latter, which is either a map or a key-value list,
+ can be accompanied by a <em>report callback</em> specified in
+ the log event's <seealso marker="#metadata">metadata</seealso>.
+ The report callback is a convenience function that
+ the <seealso marker="#formatters">formatter</seealso> can use
+ to convert the report to a format string and arguments, or
+ directly to a string. The
+ formatter can also use its own conversion function, if no
+ callback is provided, or if a customized formatting is
+ desired.</p>
+ <p>The report callback must be a fun with one or two
+ arguments. If it takes one argument, this is the report
+ itself, and the fun returns a format string and arguments:</p>
+ <pre>fun((<seealso marker="logger#type-report"><c>logger:report()</c></seealso>) -> {<seealso marker="stdlib:io#type-format"><c>io:format()</c></seealso>,[term()]})</pre>
+ <p>If it takes two arguments, the first is the report, and the
+ second is a map containing extra data that allows direct
+ conversion to a string:</p>
+ <pre>fun((<seealso marker="logger#type-report"><c>logger:report()</c></seealso>,<seealso marker="logger#type-report_cb_config"><c>logger:report_cb_config()</c></seealso>) -> <seealso marker="stdlib:unicode#type-chardata"><c>unicode:chardata()</c></seealso>)
+ </pre>
+ <p>The fun must obey the <c>depth</c> and <c>chars_limit</c>
+ parameters provided in the second argument, as the formatter
+ cannot apply these parameters to the returned string
+ afterwards. The extra data also contains a field named
+ <c>single_line</c>, indicating if the printed log message may
+ contain line breaks or not. This variant is used when the
+ formatting of the report depends on the size or single line
+ parameters.</p>
+ <p>Example, format string and arguments:</p>
+ <code>logger:error("The file does not exist: ~ts",[Filename])</code>
+ <p>Example, string:</p>
+ <code>logger:notice("Something strange happened!")</code>
+ <p>Example, report, and metadata with report callback:</p>
+ <code>
+logger:debug(#{got => connection_request, id => Id, state => State},
+ #{report_cb => fun(R) -> {"~p",[R]} end})</code>
+ <p>The log message can also be provided through a fun for lazy
+ evaluation. The fun is only evaluated if the primary log level
+ check passes, and is therefore recommended if it is expensive
+ to generate the message. The lazy fun must return a string, a
+ report, or a tuple with format string and arguments.</p>
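+ <p>Example, message wrapped in a fun for lazy evaluation (a
+ sketch; Logger calls the fun with the second argument of the log
+ call when the event is actually logged):</p>
+ <code>logger:debug(fun(Pid) -> {"Process info: ~tp",[process_info(Pid)]} end, self())</code>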
+ </section>
+ <section>
+ <marker id="metadata"/>
+ <title>Metadata</title>
+ <p>Metadata contains additional data associated with a log
+ message. Logger inserts some metadata fields by default, and
+ the client can add custom metadata in two different ways:</p>
+ <taglist>
+ <tag>Set process metadata</tag>
+ <item>
+ <p>Process metadata is set and updated
+ with <seealso marker="logger#set_process_metadata-1">
+ <c>logger:set_process_metadata/1</c></seealso>
+ and <seealso marker="logger#update_process_metadata-1">
+ <c>logger:update_process_metadata/1</c></seealso>,
+ respectively. This metadata applies to the process on
+ which these calls are made, and Logger adds the metadata
+ to all log events issued on that process.</p>
+ </item>
+ <tag>Add metadata to a specific log event</tag>
+ <item>
+ <p>Metadata associated with one specific log event is given
+ as the last parameter to the log macro or Logger API
+ function when the event is issued. For example:</p>
+ <code>?LOG_ERROR("Connection closed",#{context => server})</code>
+ </item>
+ </taglist>
+ <p>See the description of
+ the <seealso marker="logger#type-metadata">
+ <c>logger:metadata()</c></seealso> type for information
+ about which default keys Logger inserts, and how the different
+ metadata maps are merged.</p>
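+ <p>For example, a server process can attach an identifier to all
+ log events it issues (the metadata keys used here are arbitrary
+ examples):</p>
+ <code>
+ok = logger:set_process_metadata(#{subsystem => my_subsystem}),
+ok = logger:update_process_metadata(#{request_id => ReqId})</code>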
+ </section>
+ </section>
+ <section>
+ <marker id="filter"/>
+ <title>Filters</title>
+ <p>Filters can be primary, or attached to a specific
+ handler. Logger calls the primary filters first, and if they all
+ pass, it calls the handler filters for each handler. Logger
+ calls the handler callback only if all filters attached to the
+ handler in question also pass.</p>
+ <p>A filter is defined as:</p>
+ <pre>{FilterFun, Extra}</pre>
+ <p>where <c>FilterFun</c> is a function of arity 2,
+ and <c>Extra</c> is any term. When applying the filter, Logger
+ calls the function with the log event as the first argument,
+ and the value of <c>Extra</c> as the second
+ argument. See <seealso marker="logger#type-filter">
+ <c>logger:filter()</c></seealso> for type definitions.</p>
+ <p>The filter function can return <c>stop</c>, <c>ignore</c> or
+ the (possibly modified) log event.</p>
+ <p>If <c>stop</c> is returned, the log event is immediately
+ discarded. If the filter is primary, no handler filters or
+ callbacks are called. If it is a handler filter, the
+ corresponding handler callback is not called, but the log event
+ is forwarded to filters attached to the next handler, if
+ any.</p>
+ <p>If the log event is returned, the next filter function is
+ called with the returned value as the first argument. That is,
+ if a filter function modifies the log event, the next filter
+ function receives the modified event. The value returned from
+ the last filter function is the value that the handler callback
+ receives.</p>
+ <p>If the filter function returns <c>ignore</c>, it means that it
+ did not recognize the log event, and thus leaves it to other
+ filters to decide the event's destiny.</p>
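+ <p>As a sketch, the following adds a primary filter that stops all
+ log events issued via the Logger macros from a hypothetical
+ module <c>noisy_module</c>, and ignores all other events:</p>
+ <code>
+logger:add_primary_filter(stop_noisy,
+    {fun(#{meta := #{mfa := {noisy_module, _, _}}}, _Extra) -> stop;
+        (_LogEvent, _Extra) -> ignore
+     end, []}).</code>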
+ <p>The configuration option <c>filter_default</c> specifies the
+ behaviour if all filter functions return <c>ignore</c>, or if no
+ filters exist. <c>filter_default</c> is by default set
+ to <c>log</c>, meaning that if all existing filters ignore a log
+ event, Logger forwards the event to the handler
+ callback. If <c>filter_default</c> is set to <c>stop</c>, Logger
+ discards such events.</p>
+ <p>Primary filters are added
+ with <seealso marker="logger#add_primary_filter-2">
+ <c>logger:add_primary_filter/2</c></seealso>
+ and removed
+ with <seealso marker="logger#remove_primary_filter-1">
+ <c>logger:remove_primary_filter/1</c></seealso>. They can also
+ be added at system start via the Kernel configuration
+ parameter <seealso marker="#logger_parameter"><c>logger</c></seealso>.</p>
+ <p>Handler filters are added
+ with <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>
+ and removed
+ with <seealso marker="logger#remove_handler_filter-2">
+ <c>logger:remove_handler_filter/2</c></seealso>. They can also
+ be specified directly in the configuration when adding a handler
+ with <seealso marker="logger#add_handler/3">
+ <c>logger:add_handler/3</c></seealso>
+ or via the Kernel configuration
+ parameter <seealso marker="#logger_parameter"><c>logger</c></seealso>.</p>
+
+ <p>To see which filters are currently installed in the system,
+ use <seealso marker="logger#get_config-0">
+ <c>logger:get_config/0</c></seealso>,
+ or <seealso marker="logger#get_primary_config-0">
+ <c>logger:get_primary_config/0</c></seealso>
+ and <seealso marker="logger#get_handler_config-1">
+ <c>logger:get_handler_config/1</c></seealso>. Filters are
+ listed in the order they are applied, that is, the first
+ filter in the list is applied first, and so on.</p>
+
+ <p>For convenience, the following built-in filters exist:</p>
+
+ <taglist>
+ <tag><seealso marker="logger_filters#domain-2">
+ <c>logger_filters:domain/2</c></seealso></tag>
+ <item>
+ <p>Provides a way of filtering log events based on a
+ <c>domain</c> field in <c>Metadata</c>.</p>
+ </item>
+ <tag><seealso marker="logger_filters#level-2">
+ <c>logger_filters:level/2</c></seealso></tag>
+ <item>
+ <p>Provides a way of filtering log events based on the log
+ level.</p>
+ </item>
+ <tag><seealso marker="logger_filters#progress-2">
+ <c>logger_filters:progress/2</c></seealso></tag>
+ <item>
+ <p>Stops or allows progress reports from <c>supervisor</c>
+ and <c>application_controller</c>.</p>
+ </item>
+ <tag><seealso marker="logger_filters#remote_gl-2">
+ <c>logger_filters:remote_gl/2</c></seealso></tag>
+ <item>
+ <p>Stops or allows log events originating from a process
+ that has its group leader on a remote node.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <marker id="handlers"/>
+ <title>Handlers</title>
+ <p>A handler is defined as a module exporting at least the
+ following callback function:</p>
+
+ <pre><seealso marker="logger#HModule:log-2">log(LogEvent, Config) -> void()</seealso></pre>
+
+ <p>This function is called when a log event has passed through all
+ primary filters, and all handler filters attached to the handler
+ in question. The function call is executed on the client
+ process, and it is up to the handler implementation if other
+ processes are involved or not.</p>
+
+ <p>Logger allows adding multiple instances of a handler
+ callback. That is, if a callback module implementation allows
+ it, you can add multiple handler instances using the same
+ callback module. The different instances are identified by
+ unique handler identities.</p>
+
+ <p>In addition to the mandatory callback function <c>log/2</c>, a
+ handler module can export the optional callback
+ functions <c>adding_handler/1</c>, <c>changing_config/3</c>,
+ <c>filter_config/1</c>, and <c>removing_handler/1</c>. See
+ section <seealso marker="logger#handler_callback_functions">Handler
+ Callback Functions</seealso> in the logger(3) manual page for
+ more information about these functions.</p>
+
+ <p>The following built-in handlers exist:</p>
+
+ <taglist>
+ <tag><c>logger_std_h</c></tag>
+ <item>
+ <p>This is the default handler used by OTP. Multiple instances
+ can be started, and each instance will write log events to a
+ given destination, terminal or file.</p>
+ </item>
+
+ <tag><c>logger_disk_log_h</c></tag>
+ <item>
+ <p>This handler behaves much like <c>logger_std_h</c>, except it uses
+ <seealso marker="disk_log"><c>disk_log</c></seealso> as its
+ destination.</p>
+ </item>
+
+ <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag>
+ <item>
+ <p>This handler is provided for backwards compatibility
+ only. It is not started by default, but will be
+ automatically started the first time an <c>error_logger</c>
+ event handler is added
+ with <seealso marker="error_logger#add_report_handler-1">
+ <c>error_logger:add_report_handler/1,2</c></seealso>.</p>
+
+ <p>The old <c>error_logger</c> event handlers in STDLIB and
+ SASL still exist, but they are not added by Erlang/OTP 21.0
+ or later.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <marker id="formatters"/>
+ <title>Formatters</title>
+ <p>A formatter can be used by the handler implementation to do the
+ final formatting of a log event, before printing to the
+ handler's destination. The handler callback receives the
+ formatter information as part of the handler configuration,
+ which is passed as the second argument
+ to <seealso marker="logger#HModule:log-2">
+ <c>HModule:log/2</c></seealso>.</p>
+ <p>The formatter information consists of a formatter
+ module, <c>FModule</c>, and its
+ configuration, <c>FConfig</c>. <c>FModule</c> must export the
+ following function, which can be called by the handler:</p>
+ <pre><seealso marker="logger#FModule:format-2">format(LogEvent,FConfig)
+ -> FormattedLogEntry</seealso></pre>
+ <p>The formatter information for a handler is set as a part of its
+ configuration when the handler is added. It can also be changed
+ during runtime
+ with <seealso marker="logger#set_handler_config-3">
+ <c>logger:set_handler_config(HandlerId,formatter,{FModule,FConfig})</c>
+ </seealso>, which overwrites the current formatter information,
+ or with <seealso marker="logger#update_formatter_config-2">
+ <c>logger:update_formatter_config/2,3</c></seealso>, which
+ only modifies the formatter configuration.</p>
+ <p>If the formatter module exports the optional callback
+ function <seealso marker="logger#FModule:check_config-1">
+ <c>check_config(FConfig)</c></seealso>, Logger calls this
+ function when the formatter information is set or modified, to
+ verify the validity of the formatter configuration.</p>
+ <p>If no formatter information is specified for a handler, Logger
+ uses <c>logger_formatter</c> as default. See
+ the <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>
+ manual page for more information about this module.</p>
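+ <p>As a minimal sketch, a custom formatter that ignores its
+ configuration and prints only the level and the raw message could
+ look as follows (not suitable for production use):</p>
+ <code>
+-module(my_formatter).
+-export([format/2]).
+
+format(#{level := Level, msg := Msg, meta := _Meta}, _FConfig) ->
+    Text = case Msg of
+               {string, String} -> String;
+               {report, Report} -> io_lib:format("~tp", [Report]);
+               {Format, Args} -> io_lib:format(Format, Args)
+           end,
+    io_lib:format("[~tp] ~ts~n", [Level, Text]).</code>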
+ </section>
+
+ <section>
+ <title>Configuration</title>
+
+ <p>At system start, Logger is configured through Kernel
+ configuration parameters. The parameters that apply to Logger
+ are described in
+ section <seealso marker="#kernel_config_params">Kernel
+ Configuration Parameters</seealso>. Examples are found in
+ section <seealso marker="#config_examples">Configuration
+ Examples</seealso>.</p>
+ <p>During runtime, Logger configuration is changed via API
+ functions. See
+ section <seealso marker="logger#configuration_API">Configuration
+ API Functions</seealso> in the <c>logger(3)</c> manual page.</p>
+
+ <section>
+ <title>Primary Logger Configuration</title>
+ <p>Logger API functions that apply to the primary Logger
+ configuration are:</p>
+ <list>
+ <item><seealso marker="logger#get_primary_config-0">
+ <c>get_primary_config/0</c></seealso></item>
+ <item><seealso marker="logger#set_primary_config-1">
+ <c>set_primary_config/1,2</c></seealso></item>
+ <item><seealso marker="logger#update_primary_config-1">
+ <c>update_primary_config/1</c></seealso></item>
+ <item><seealso marker="logger#add_primary_filter-2">
+ <c>add_primary_filter/2</c></seealso></item>
+ <item><seealso marker="logger#remove_primary_filter-1">
+ <c>remove_primary_filter/1</c></seealso></item>
+ </list>
+ <p>The primary Logger configuration is a map with the following
+ keys:</p>
+ <taglist>
+ <tag><marker id="primary_level"/>
+ <c>level = </c><seealso marker="logger#type-level">
+ <c>logger:level()</c></seealso><c> | all | none</c></tag>
+ <item>
+ <p>Specifies the primary log level, that is, log events that
+ are equally or more severe than this level are forwarded
+ to the primary filters. Less severe log events are
+ immediately discarded.</p>
+ <p>See section <seealso marker="#log_level">Log
+ Level</seealso> for a listing and description of
+ possible log levels.</p>
+ <p>The initial value of this option is set by the Kernel
+ configuration parameter <seealso marker="#logger_level">
+ <c>logger_level</c></seealso>. It is changed during
+ runtime with <seealso marker="logger#set_primary_config-2">
+ <c>logger:set_primary_config(level,Level)</c></seealso>.</p>
+ <p>Defaults to <c>notice</c>.</p>
+ </item>
+ <tag><c>filters = [{FilterId,Filter}]</c></tag>
+ <item>
+ <p>Specifies the primary filters.</p>
+ <list>
+ <item><c>FilterId = </c><seealso marker="logger#type-filter_id">
+ <c>logger:filter_id()</c></seealso></item>
+ <item><c>Filter = </c><seealso marker="logger#type-filter">
+ <c>logger:filter()</c></seealso></item>
+ </list>
+ <p>The initial value of this option is set by the Kernel
+ configuration
+ parameter <seealso marker="#logger_parameter"><c>logger</c></seealso>.
+ During runtime, primary filters are added and removed with
+ <seealso marker="logger#add_primary_filter-2">
+ <c>logger:add_primary_filter/2</c></seealso> and
+ <seealso marker="logger#remove_primary_filter-1">
+ <c>logger:remove_primary_filter/1</c></seealso>,
+ respectively.</p>
+ <p>See section <seealso marker="#filters">Filters</seealso>
+ for more detailed information.</p>
+ <p>Defaults to <c>[]</c>.</p>
+ </item>
+ <tag><c>filter_default = log | stop</c></tag>
+ <item>
+ <p>Specifies what happens to a log event if all filters
+ return <c>ignore</c>, or if no filters exist.</p>
+ <p>See section <seealso marker="#filters">Filters</seealso>
+ for more information about how this option is used.</p>
+ <p>Defaults to <c>log</c>.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <marker id="handler_configuration"/>
+ <title>Handler Configuration</title>
+ <p>Logger API functions that apply to handler configuration
+ are:</p>
+ <list>
+ <item><seealso marker="logger#get_handler_config-0">
+ <c>get_handler_config/0,1</c></seealso></item>
+ <item><seealso marker="logger#set_handler_config-2">
+ <c>set_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#update_handler_config-2">
+ <c>update_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#add_handler_filter-3">
+ <c>add_handler_filter/3</c></seealso></item>
+ <item><seealso marker="logger#remove_handler_filter-2">
+ <c>remove_handler_filter/2</c></seealso></item>
+ <item><seealso marker="logger#update_formatter_config-2">
+ <c>update_formatter_config/2,3</c></seealso></item>
+ </list>
+ <p>The configuration for a handler is a map with the following keys:</p>
+ <taglist>
+ <tag><c>id = </c><seealso marker="logger#type-handler_id">
+ <c>logger:handler_id()</c></seealso></tag>
+ <item>
+ <p>Automatically inserted by Logger. The value is the same
+ as the <c>HandlerId</c> specified when adding the handler,
+ and it cannot be changed.</p>
+ </item>
+ <tag><c>module = module()</c></tag>
+ <item>
+ <p>Automatically inserted by Logger. The value is the same
+ as the <c>Module</c> specified when adding the handler,
+ and it cannot be changed.</p>
+ </item>
+ <tag><c>level = </c><seealso marker="logger#type-level">
+ <c>logger:level()</c></seealso><c> | all | none</c></tag>
+ <item>
+ <p>Specifies the log level for the handler, that is, log
+ events that are equally or more severe than this level
+ are forwarded to the handler filters for this
+ handler.</p>
+ <p>See section <seealso marker="#log_level">Log
+ Level</seealso> for a listing and description of
+ possible log levels.</p>
+ <p>The log level is specified when adding the handler, or
+ changed during runtime with, for
+ instance, <seealso marker="logger#set_handler_config/3">
+ <c>logger:set_handler_config(HandlerId,level,Level)</c></seealso>.
+ </p>
+ <p>Defaults to <c>all</c>.</p>
+ </item>
+ <tag><c>filters = [{FilterId,Filter}]</c></tag>
+ <item>
+ <p>Specifies the handler filters.</p>
+ <list>
+ <item><c>FilterId = </c><seealso marker="logger#type-filter_id">
+ <c>logger:filter_id()</c></seealso></item>
+ <item><c>Filter = </c><seealso marker="logger#type-filter">
+ <c>logger:filter()</c></seealso></item>
+ </list>
+ <p>Handler filters are specified when adding the handler,
+ or added or removed during runtime with
+ <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso> and
+ <seealso marker="logger#remove_handler_filter-2">
+ <c>logger:remove_handler_filter/2</c></seealso>,
+ respectively.</p>
+ <p>See <seealso marker="#filters">Filters</seealso> for more
+ detailed information.</p>
+ <p>Defaults to <c>[]</c>.</p>
+ </item>
+ <tag><c>filter_default = log | stop</c></tag>
+ <item>
+ <p>Specifies what happens to a log event if all filters
+ return <c>ignore</c>, or if no filters exist.</p>
+ <p>See section <seealso marker="#filters">Filters</seealso>
+ for more information about how this option is used.</p>
+ <p>Defaults to <c>log</c>.</p>
+ </item>
+ <tag><c>formatter = {FormatterModule,FormatterConfig}</c></tag>
+ <item>
+ <p>Specifies a formatter that the handler can use for
+ converting the log event term to a printable string.</p>
+ <list>
+ <item><c>FormatterModule = module()</c></item>
+ <item><c>FormatterConfig = </c>
+ <seealso marker="logger#type-formatter_config">
+ <c>logger:formatter_config()</c></seealso></item>
+ </list>
+ <p>The formatter information is specified when adding the
+ handler. The formatter configuration can be changed during
+ runtime
+ with <seealso marker="logger#update_formatter_config-2">
+ <c>logger:update_formatter_config/2,3</c></seealso>,
+ or the complete formatter information can be overwritten
+ with, for
+ instance, <seealso marker="logger#set_handler_config-3">
+ <c>logger:set_handler_config/3</c></seealso>.</p>
+ <p>See
+ section <seealso marker="#formatters">Formatters</seealso>
+ for more detailed information.</p>
+ <p>Defaults
+ to <c>{logger_formatter,DefaultFormatterConfig}</c>. See
+ the <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso> manual page for
+ information about this formatter and its default
+ configuration.</p>
+ </item>
+ <tag><c>config = term()</c></tag>
+ <item>
+ <p>Handler specific configuration, that is, configuration
+ data related to a specific handler implementation.</p>
+ <p>The configuration for the built-in handlers is described
+ in
+ the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>
+ and
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c>
+ </seealso> manual pages.</p>
+ </item>
+ </taglist>
+
+ <p>Notice that <c>level</c> and <c>filters</c> are obeyed by
+ Logger itself before forwarding the log events to each
+ handler, while <c>formatter</c> and all handler specific
+ options are left to the handler implementation.</p>
+ </section>
+
+ <section>
+ <marker id="kernel_config_params"/>
+ <title>Kernel Configuration Parameters</title>
+
+ <p>The following Kernel configuration parameters apply to
+ Logger:</p>
+ <taglist>
+ <tag><marker id="logger_parameter"/><c>logger = [Config]</c></tag>
+ <item>
+ <p>Specifies the configuration
+ for <seealso marker="logger">Logger</seealso>, except the
+ primary log level, which is specified
+ with <seealso marker="#logger_level"><c>logger_level</c></seealso>,
+ and the compatibility
+ with <seealso marker="sasl:error_logging">SASL Error
+ Logging</seealso>, which is specified
+ with <seealso marker="#logger_sasl_compatible">
+ <c>logger_sasl_compatible</c></seealso>.</p>
+ <p>With this parameter, you can modify or disable the default
+ handler, add custom handlers and primary logger filters, and
+ set log levels per module.</p>
+ <p><c>Config</c> is any (zero or more) of the following:</p>
+ <taglist>
+ <tag><c>{handler, default, undefined}</c></tag>
+ <item>
+ <p>Disables the default handler. This allows another
+ application to add its own default handler.</p>
+ <p>Only one entry of this type is allowed.</p>
+ </item>
+ <tag><c>{handler, HandlerId, Module, HandlerConfig}</c></tag>
+ <item>
+ <p>If <c>HandlerId</c> is <c>default</c>, then this entry
+ modifies the default handler, equivalent to calling</p>
+ <pre><seealso marker="logger#remove_handler-1">
+ logger:remove_handler(default)
+ </seealso></pre>
+ <p>followed by</p>
+ <pre><seealso marker="logger#add_handler-3">
+ logger:add_handler(default, Module, HandlerConfig)
+ </seealso></pre>
+ <p>For all other values of <c>HandlerId</c>, this entry
+ adds a new handler, equivalent to calling</p>
+ <pre><seealso marker="logger:add_handler/3">
+ logger:add_handler(HandlerId, Module, HandlerConfig)
+ </seealso></pre>
+ <p>Multiple entries of this type are allowed.</p></item>
+ <tag><c>{filters, FilterDefault, [Filter]}</c></tag>
+ <item>
+ <p>Adds the specified primary filters.</p>
+ <list>
+ <item><c>FilterDefault = log | stop</c></item>
+ <item><c>Filter = {FilterId, {FilterFun, FilterConfig}}</c></item>
+ </list>
+ <p>Equivalent to calling</p>
+ <pre><seealso marker="logger#add_primary_filter/2">
+ logger:add_primary_filter(FilterId, {FilterFun, FilterConfig})
+ </seealso></pre>
+ <p>for each <c>Filter</c>.</p>
+ <p><c>FilterDefault</c> specifies the behaviour if all
+ primary filters return <c>ignore</c>, see
+ section <seealso marker="#filters">Filters</seealso>.</p>
+ <p>Only one entry of this type is allowed.</p>
+ </item>
+ <tag><c>{module_level, Level, [Module]}</c></tag>
+ <item>
+ <p>Sets module log level for the given modules. Equivalent
+ to calling</p>
+ <pre><seealso marker="logger#set_module_level/2">
+ logger:set_module_level(Module, Level)</seealso></pre>
+ <p>for each <c>Module</c>.</p>
+ <p>Multiple entries of this type are allowed.</p>
+ </item>
+ </taglist>
+ <p>See
+ section <seealso marker="#config_examples">Configuration
+ Examples</seealso> for examples using the <c>logger</c>
+ parameter for system configuration.</p>
+ </item>
+ <tag><marker id="logger_level"/>
+ <c>logger_level = Level</c></tag>
+ <item>
+ <p>Specifies the primary log level. See
+ the <seealso marker="kernel_app#logger_level"><c>kernel(6)</c></seealso>
+ manual page for more information about this parameter.</p>
+ </item>
+ <tag><marker id="logger_sasl_compatible"/>
+ <c>logger_sasl_compatible = true | false</c></tag>
+ <item>
+ <p>Specifies Logger's compatibility
+ with <seealso marker="sasl:error_logging">SASL Error
+ Logging</seealso>. See
+ the <seealso marker="kernel_app#logger_sasl_compatible">
+ <c>kernel(6)</c></seealso> manual page for more
+ information about this parameter.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <marker id="config_examples"/>
+ <title>Configuration Examples</title>
+ <p>The value of the Kernel configuration parameter <c>logger</c>
+ is a list of tuples. It is possible to write the term on the
+ command line when starting an erlang node, but as the term
+ grows, a better approach is to use the system configuration
+ file. See
+ the <seealso marker="config"><c>config(4)</c></seealso> manual
+ page for more information about this file.</p>
+ <p>Each of the following examples shows a simple system
+ configuration file that configures Logger according to the
+ description.</p>
+ <p>Modify the default handler to print to a file instead of
+ <c>standard_io</c>:</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h, % {handler, HandlerId, Module,
+ #{config => #{type => {file,"log/erlang.log"}}}} % Config}
+ ]}]}].
+ </code>
+ <p>Modify the default handler to print each log event as a
+ single line:</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{formatter => {logger_formatter, #{single_line => true}}}}
+ ]}]}].
+ </code>
+ <p>Modify the default handler to print the pid of the logging
+ process for each log event:</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{formatter => {logger_formatter,
+ #{template => [time," ",pid," ",msg,"\n"]}}}}
+ ]}]}].
+ </code>
+ <p>Modify the default handler to only print errors and more
+ severe log events to "log/erlang.log", and add another handler
+ to print all log events to "log/debug.log".</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{level => error,
+ config => #{type => {file, "log/erlang.log"}}}},
+ {handler, info, logger_std_h,
+ #{level => debug,
+ config => #{type => {file, "log/debug.log"}}}}
+ ]}]}].
+ </code>
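+ <p>Set the log level to <c>debug</c> for a single module, and add a
+ primary filter that stops log events from processes that have their
+ group leader on a remote node (<c>my_module</c> is a
+ placeholder):</p>
+ <code>
+[{kernel,
+  [{logger,
+    [{module_level, debug, [my_module]},
+     {filters, log, [{remote_gl, {fun logger_filters:remote_gl/2, stop}}]}
+    ]}]}].
+ </code>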
+ </section>
+
+ </section>
+
+ <section>
+ <marker id="compatibility"/>
+ <title>Backwards Compatibility with error_logger</title>
+ <p>Logger provides backwards compatibility with
+ <c>error_logger</c> in the following ways:</p>
+
+ <taglist>
+ <tag>API for Logging</tag>
+ <item>
+ <p>The <c>error_logger</c> API still exists, but should only
+ be used by legacy code. It will be removed in a later
+ release.</p>
+ <p>Calls
+ to <seealso marker="error_logger#error_report-1">
+ <c>error_logger:error_report/1,2</c></seealso>,
+ <seealso marker="error_logger#error_msg-1">
+ <c>error_logger:error_msg/1,2</c></seealso>, and
+ corresponding functions for warning and info messages, are
+ all forwarded to Logger as calls
+ to <seealso marker="logger#log-3">
+ <c>logger:log(Level,Report,Metadata)</c></seealso>.</p>
+ <p><c>Level = error | warning | info</c> and is taken
+ from the function name. <c>Report</c> contains the actual
+ log message, and <c>Metadata</c> contains additional
+ information which can be used for creating backwards
+ compatible events for legacy <c>error_logger</c> event
+ handlers, see
+ section <seealso marker="#legacy_event_handlers">Legacy
+ Event Handlers</seealso>.</p>
+ </item>
+ <tag>Output Format</tag>
+ <item>
+ <p>To get log events on the same format as produced
+ by <c>error_logger_tty_h</c> and <c>error_logger_file_h</c>,
+ use the default formatter, <c>logger_formatter</c>, with
+ configuration parameter <c>legacy_header</c> set
+ to <c>true</c>. This is the default configuration of
+ the <c>default</c> handler started by Kernel.</p>
+ </item>
+ <tag>Default Format of Log Events from OTP</tag>
+ <item>
+ <p>By default, all log events originating from within OTP,
+ except the former so called "SASL reports", look the same as
+ before.</p>
+ </item>
+ <tag><marker id="sasl_reports"/>SASL Reports</tag>
+ <item>
+ <p>By SASL reports we mean supervisor reports, crash reports
+ and progress reports.</p>
+ <p>Prior to Erlang/OTP 21.0, these reports were only logged
+ when the SASL application was running, and they were printed
+ through SASL's own event handlers <c>sasl_report_tty_h</c>
+ and <c>sasl_report_file_h</c>.</p>
+ <p>The destination of these log events was configured by
+ <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL
+ configuration parameters</seealso>.</p>
+ <p>Due to the specific event handlers, the output format
+ slightly differed from other log events.</p>
+ <p>As of Erlang/OTP 21.0, the concept of SASL reports is
+ removed, meaning that the default behaviour is as
+ follows:</p>
+ <list>
+ <item>Supervisor reports, crash reports, and progress reports
+ are no longer connected to the SASL application.</item>
+ <item>Supervisor reports and crash reports are issued
+ as <c>error</c> level log events, and are logged through
+ the default handler started by Kernel.</item>
+ <item>Progress reports are issued as <c>info</c> level log
+ events, and since the default primary log level
+ is <c>notice</c>, these are not logged by default. To
+ enable printing of progress reports, set
+ the <seealso marker="#primary_level">primary log
+ level</seealso> to <c>info</c>.</item>
+ <item>The output format is the same for all log
+ events.</item>
+ </list>
+ <p>If the old behaviour is preferred, the Kernel configuration
+ parameter <seealso marker="kernel_app#logger_sasl_compatible">
+ <c>logger_sasl_compatible</c></seealso> can be set
+ to <c>true</c>. The
+ <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL
+ configuration parameters</seealso> can then be used as
+ before, and the SASL reports will only be printed if the
+ SASL application is running, through a second log handler
+ named <c>sasl</c>.</p>
+ <p>All SASL reports have a metadata field <c>domain</c> which
+ is set to <c>[otp,sasl]</c>. This field can be
+ used by filters to stop or allow the log events.</p>
+ <p>See section <seealso marker="sasl:error_logging">SASL User's
+ Guide</seealso> for more information about the old SASL
+ error logging functionality.</p>
+ </item>
+ <tag><marker id="legacy_event_handlers"/>Legacy Event Handlers</tag>
+ <item>
+ <p>To use event handlers written for <c>error_logger</c>, just
+ add your event handler with</p>
+ <code>
+error_logger:add_report_handler/1,2.
+ </code>
+ <p>This automatically starts the error logger event manager,
+ and adds <c>error_logger</c> as a handler to Logger, with
+ the following configuration:</p>
+<code>
+#{level => info,
+ filter_default => log,
+ filters => []}.
+</code>
+ <note>
+ <p>This handler ignores events that do not originate from
+ the <c>error_logger</c> API, or from within OTP. This
+ means that if your code uses the Logger API for logging,
+ then your log events will be discarded by this
+ handler.</p>
+ <p>The handler is not overload protected.</p>
+ </note>
+ </item>
+ </taglist>
+ </section>
+
+
+ <section>
+ <title>Error Handling</title>
+ <p>Logger does, to a certain extent, check its input data before
+ forwarding a log event to filters and handlers. It does,
+ however, not evaluate report callbacks, or check the validity of
+ format strings and arguments. This means that all filters and
+ handlers must be careful when formatting the data of a log
+ event, making sure that it does not crash due to bad input data
+ or faulty callbacks.</p>
+ <p>If a filter or handler still crashes, Logger will remove the
+ filter or handler in question from the configuration, and print
+ a short error message to the terminal. A debug event containing
+ the crash reason and other details is also issued.</p>
+ <p>See section <seealso marker="#log_message">Log
+ Message</seealso> for more information about report callbacks
+ and valid forms of log messages.</p>
+ </section>
+
+ <section>
+ <title>Example: Add a handler to log info events to file</title>
+ <p>When starting an Erlang node, the default behaviour is that all
+ log events on level <c>notice</c> or more severe are logged to
+ the terminal via the default handler. To also log info events,
+ you can either change the primary log level to <c>info</c>:</p>
+ <pre>
+1> <input>logger:set_primary_config(level, info).</input>
+ok</pre>
+ <p>or set the level for one or a few modules only:</p>
+ <pre>
+2> <input>logger:set_module_level(mymodule, info).</input>
+ok</pre>
+ <p>This allows info events to pass through to the default handler,
+ and be printed to the terminal as well. If there are many info
+ events, it can be useful to print these to a file instead.</p>
+ <p>First, set the log level of the default handler
+ to <c>notice</c>, preventing it from printing info events to the
+ terminal:</p>
+ <pre>
+3> <input>logger:set_handler_config(default, level, notice).</input>
+ok</pre>
+ <p>Then, add a new handler which prints to file. You can use the
+ handler
+ module <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>,
+ and specify type <c>{file,File}</c>:</p>
+ <pre>
+4> <input>Config = #{config => #{type => {file,"./info.log"}}, level => info}.</input>
+#{config => #{type => {file,"./info.log"}},level => info}
+5> <input>logger:add_handler(myhandler, logger_std_h, Config).</input>
+ok</pre>
+ <p>Since <c>filter_default</c> defaults to <c>log</c>, this
+ handler now receives all log events. If you want info events
+ only in the file, you must add a filter to stop all non-info
+ events. The built-in
+ filter <seealso marker="logger_filters#level-2">
+ <c>logger_filters:level/2</c></seealso>
+ can do this:</p>
+ <pre>
+6> <input>logger:add_handler_filter(myhandler, stop_non_info,
+ {fun logger_filters:level/2, {stop, neq, info}}).</input>
+ok</pre>
+ <p>See section <seealso marker="#filters">Filters</seealso> for
+ more information about the filters and the <c>filter_default</c>
+ configuration parameter.</p>
+
+ </section>
+
+ <section>
+ <title>Example: Implement a handler</title>
+ <p>Section <seealso marker="logger#handler_callback_functions">Handler
+ Callback Functions</seealso> in the logger(3) manual page
+ describes the callback functions that can be implemented for a
+ Logger handler.</p>
+ <p>A handler callback module must export:</p>
+ <list>
+ <item><c>log(Log, Config)</c></item>
+ </list>
+ <p>It can optionally also export some, or all, of the following:</p>
+ <list>
+ <item><c>adding_handler(Config)</c></item>
+ <item><c>removing_handler(Config)</c></item>
+ <item><c>changing_config(SetOrUpdate, OldConfig, NewConfig)</c></item>
+ <item><c>filter_config(Config)</c></item>
+ </list>
+ <p>When a handler is added, for example by a call
+ to <seealso marker="logger#add_handler-3">
+ <c>logger:add_handler(Id, HModule, Config)</c></seealso>,
+ Logger first calls <c>HModule:adding_handler(Config)</c>. If
+ this function returns <c>{ok,Config1}</c>, Logger
+ writes <c>Config1</c> to the configuration database, and
+ the <c>logger:add_handler/3</c> call returns. After this, the
+ handler is installed and must be ready to receive log events as
+ calls to <c>HModule:log/2</c>.</p>
+ <p>A handler can be removed by calling
+ <seealso marker="logger#remove_handler-1">
+ <c>logger:remove_handler(Id)</c></seealso>. Logger calls
+ <c>HModule:removing_handler(Config)</c>, and removes the
+ handler's configuration from the configuration database.</p>
+ <p>When <seealso marker="logger#set_handler_config-2">
+ <c>logger:set_handler_config/2,3</c></seealso>
+ or <seealso marker="logger#update_handler_config/2">
+ <c>logger:update_handler_config/2,3</c></seealso> is called,
+ Logger
+ calls <c>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig)</c>. If
+ this function returns <c>{ok,NewConfig1}</c>, Logger
+ writes <c>NewConfig1</c> to the configuration database.</p>
+ <p>When <seealso marker="logger#get_config-0">
+ <c>logger:get_config/0</c></seealso> or
+ <seealso marker="logger#get_handler_config-0">
+ <c>logger:get_handler_config/0,1</c></seealso> is called,
+ Logger calls <c>HModule:filter_config(Config)</c>. This function
+ must return the handler configuration where internal data is
+ removed.</p>
+
+ <p>A simple handler that prints to the terminal can be implemented
+ as follows:</p>
+ <code>
+-module(myhandler1).
+-export([log/2]).
+
+log(LogEvent, #{formatter := {FModule, FConfig}}) ->
+ io:put_chars(FModule:format(LogEvent, FConfig)).
+ </code>
+
+ <p>Notice that the above handler does not have any overload
+ protection, and all log events are printed directly from the
+ client process.</p>
+ <p>For information and examples of overload protection, please
+ refer to
+ section <seealso marker="#overload_protection">Protecting the
+ Handler from Overload</seealso>, and the implementation
+ of <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c>
+ </seealso>.</p>
+ <p>The following is a simpler example of a handler which logs to a
+ file through one single process:</p>
+ <code>
+-module(myhandler2).
+-export([adding_handler/1, removing_handler/1, log/2]).
+-export([init/1, handle_call/3, handle_cast/2, terminate/2]).
+
+adding_handler(Config) ->
+ MyConfig = maps:get(config,Config,#{file => "myhandler2.log"}),
+ {ok, Pid} = gen_server:start(?MODULE, MyConfig, []),
+ {ok, Config#{config => MyConfig#{pid => Pid}}}.
+
+removing_handler(#{config := #{pid := Pid}}) ->
+ gen_server:stop(Pid).
+
+log(LogEvent,#{config := #{pid := Pid}} = Config) ->
+ gen_server:cast(Pid, {log, LogEvent, Config}).
+
+init(#{file := File}) ->
+ {ok, Fd} = file:open(File, [append, {encoding, utf8}]),
+ {ok, #{file => File, fd => Fd}}.
+
+handle_call(_, _, State) ->
+ {reply, {error, bad_request}, State}.
+
+handle_cast({log, LogEvent, Config}, #{fd := Fd} = State) ->
+ do_log(Fd, LogEvent, Config),
+ {noreply, State}.
+
+terminate(_Reason, #{fd := Fd}) ->
+ _ = file:close(Fd),
+ ok.
+
+do_log(Fd, LogEvent, #{formatter := {FModule, FConfig}}) ->
+ String = FModule:format(LogEvent, FConfig),
+ io:put_chars(Fd, String).
+ </code>
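+    <p>Notice that <c>myhandler2</c> stores the <c>pid</c> of its
+      gen_server process as internal data in the handler configuration.
+      To hide this data from the user, a <c>filter_config(Config)</c>
+      callback can also be implemented and exported. The following is a
+      sketch of such a callback for the example above:</p>
+    <code>
+filter_config(#{config := MyConfig} = Config) ->
+    %% Remove the internal gen_server pid before the configuration
+    %% is returned to the user.
+    Config#{config => maps:remove(pid, MyConfig)}.
+    </code>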
+ </section>
+
+ <section>
+ <marker id="overload_protection"/>
+ <title>Protecting the Handler from Overload</title>
+ <p>The default handlers, <seealso marker="logger_std_h">
+ <c>logger_std_h</c></seealso> and <seealso marker="logger_disk_log_h">
+ <c>logger_disk_log_h</c></seealso>, feature an overload protection
+ mechanism, which makes it possible for the handlers to survive,
+ and stay responsive, during periods of high load (when huge
+ numbers of incoming log requests must be handled).
+ The mechanism works as follows:</p>
+
+ <section>
+ <title>Message Queue Length</title>
+ <p>The handler process keeps track of the length of its message
+ queue and takes some form of action when the current length exceeds a
+        configurable threshold. The purpose is to keep the handler in, or to
+        quickly bring it back to, a state where it can keep up with the
+        pace of incoming log events. The memory use of the handler must
+        not be allowed to grow without bound, since that would eventually
+        cause the handler to crash. These three thresholds, with associated
+ actions, exist:</p>
+
+ <taglist>
+ <tag><c>sync_mode_qlen</c></tag>
+ <item>
+ <p>As long as the length of the message queue is lower than this
+ value, all log events are handled asynchronously. This means that
+ the client process sending the log event, by calling a log function
+ in the <seealso marker="logger_chapter#logger_api">Logger API</seealso>,
+ does not wait for a response from the handler but continues
+ executing immediately after the event is sent. It is not affected
+ by the time it takes the handler to print the event to the log
+ device. If the message queue grows larger than this value,
+ the handler starts handling log events synchronously instead,
+ meaning that the client process sending the event must wait for a
+ response. When the handler reduces the message queue to a
+ level below the <c>sync_mode_qlen</c> threshold, asynchronous
+ operation is resumed. The switch from asynchronous to synchronous
+ mode can slow down the logging tempo of one, or a few, busy senders,
+ but cannot protect the handler sufficiently in a situation of many
+ busy concurrent senders.</p>
+ <p>Defaults to <c>10</c> messages.</p>
+ </item>
+ <tag><c>drop_mode_qlen</c></tag>
+ <item>
+ <p>When the message queue grows larger than this threshold, the
+ handler switches to a mode in which it drops all new events that
+ senders want to log. Dropping an event in this mode means that the
+ call to the log function never results in a message being sent to
+ the handler, but the function returns without taking any action.
+ The handler keeps logging the events that are already in its message
+ queue, and when the length of the message queue is reduced to a level
+ below the threshold, synchronous or asynchronous mode is resumed.
+ Notice that when the handler activates or deactivates drop mode,
+ information about it is printed in the log.</p>
+ <p>Defaults to <c>200</c> messages.</p>
+ </item>
+ <tag><c>flush_qlen</c></tag>
+ <item>
+ <p>If the length of the message queue grows larger than this threshold,
+ a flush (delete) operation takes place. To flush events, the handler
+ discards the messages in the message queue by receiving them in a
+ loop without logging. Client processes waiting for a response from a
+ synchronous log request receive a reply from the handler indicating
+ that the request is dropped. The handler process increases its
+ priority during the flush loop to make sure that no new events
+ are received during the operation. Notice that after the flush operation
+ is performed, the handler prints information in the log about how many
+ events have been deleted.</p>
+ <p>Defaults to <c>1000</c> messages.</p>
+ </item>
+ </taglist>
+
+ <p>For the overload protection algorithm to work properly, it is
+ required that:</p>
+
+ <p><c>sync_mode_qlen =&lt; drop_mode_qlen =&lt; flush_qlen</c></p>
+
+ <p>and that:</p>
+
+ <p><c>drop_mode_qlen &gt; 1</c></p>
+
+ <p>To disable certain modes, do the following:</p>
+ <list>
+ <item>If <c>sync_mode_qlen</c> is set to <c>0</c>, all log events are handled
+ synchronously. That is, asynchronous logging is disabled.</item>
+ <item>If <c>sync_mode_qlen</c> is set to the same value as
+ <c>drop_mode_qlen</c>, synchronous mode is disabled. That is, the handler
+ always runs in asynchronous mode, unless dropping or flushing is invoked.</item>
+ <item>If <c>drop_mode_qlen</c> is set to the same value as <c>flush_qlen</c>,
+ drop mode is disabled and can never occur.</item>
+ </list>
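+      <p>For example, a handler that always logs synchronously, but
+        still drops and flushes events under very high load, can be
+        configured as follows (the handler identity and queue lengths
+        are only examples):</p>
+      <code type="none">
+logger:add_handler(my_sync_h, logger_std_h,
+                   #{config => #{sync_mode_qlen => 0,
+                                 drop_mode_qlen => 1000,
+                                 flush_qlen => 2000}}).
+      </code>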
+
+ <p>During high load scenarios, the length of the handler message queue
+ rarely grows in a linear and predictable way. Instead, whenever the
+ handler process is scheduled in, it can have an almost arbitrary number
+ of messages waiting in the message queue. It is for this reason that the overload
+ protection mechanism is focused on acting quickly, and quite drastically,
+ such as immediately dropping or flushing messages, when a large queue length
+ is detected.</p>
+
+ <p>The values of the previously listed thresholds can be specified by the user.
+ This way, a handler can be configured to, for example, not drop or flush
+ messages unless the message queue length of the handler process grows extremely
+ large. Notice that large amounts of memory can be required for the node under such
+ circumstances. Another example of user configuration is when, for performance
+ reasons, the client processes must never be blocked by synchronous log requests.
+      In that case, it might still be acceptable to drop or flush events, since
+      doing so does not affect the performance of the client processes sending the log events.</p>
+
+ <p>A configuration example:</p>
+ <code type="none">
+logger:add_handler(my_standard_h, logger_std_h,
+ #{config => #{type => {file,"./system_info.log"},
+ sync_mode_qlen => 100,
+ drop_mode_qlen => 1000,
+ flush_qlen => 2000}}).
+ </code>
+ </section>
+
+ <section>
+ <title>Controlling Bursts of Log Requests</title>
+ <p>Large bursts of log events - many events received by the handler
+      within a short period of time - can potentially cause problems, such as:</p>
+ <list>
+ <item>Log files grow very large, very quickly.</item>
+ <item>Circular logs wrap too quickly so that important data is overwritten.</item>
+ <item>Write buffers grow large, which slows down file sync operations.</item>
+ </list>
+
+ <p>For this reason, both built-in handlers offer the possibility to specify the
+ maximum number of events to be handled within a certain time frame.
+ With this burst control feature enabled, the handler can avoid choking the log with
+ massive amounts of printouts. The configuration parameters are:</p>
+ <taglist>
+ <tag><c>burst_limit_enable</c></tag>
+ <item>
+ <p>Value <c>true</c> enables burst control and <c>false</c> disables it.</p>
+ <p>Defaults to <c>true</c>.</p>
+ </item>
+ <tag><c>burst_limit_max_count</c></tag>
+ <item>
+ <p>This is the maximum number of events to handle within a
+ <c>burst_limit_window_time</c> time frame. After the limit is
+ reached, successive events are dropped until the end of the time frame.</p>
+ <p>Defaults to <c>500</c> events.</p>
+ </item>
+ <tag><c>burst_limit_window_time</c></tag>
+ <item>
+ <p>See the previous description of <c>burst_limit_max_count</c>.</p>
+ <p>Defaults to <c>1000</c> milliseconds.</p>
+ </item>
+ </taglist>
+
+ <p>A configuration example:</p>
+ <code type="none">
+logger:add_handler(my_disk_log_h, logger_disk_log_h,
+ #{config => #{file => "./my_disk_log",
+ burst_limit_enable => true,
+ burst_limit_max_count => 20,
+ burst_limit_window_time => 500}}).
+ </code>
+ </section>
+
+ <section>
+ <title>Terminating an Overloaded Handler</title>
+ <p>It is possible that a handler, even if it can successfully manage peaks
+ of high load without crashing, can build up a large message queue, or use a
+ large amount of memory. The overload protection mechanism includes an
+ automatic termination and restart feature for the purpose of guaranteeing
+ that a handler does not grow out of bounds. The feature is configured
+ with the following parameters:</p>
+ <taglist>
+ <tag><c>overload_kill_enable</c></tag>
+ <item>
+ <p>Value <c>true</c> enables the feature and <c>false</c> disables it.</p>
+ <p>Defaults to <c>false</c>.</p>
+ </item>
+ <tag><c>overload_kill_qlen</c></tag>
+ <item>
+ <p>This is the maximum allowed queue length. If the message queue grows
+ larger than this, the handler process is terminated.</p>
+ <p>Defaults to <c>20000</c> messages.</p>
+ </item>
+ <tag><c>overload_kill_mem_size</c></tag>
+ <item>
+ <p>This is the maximum memory size that the handler process is allowed to use.
+ If the handler grows larger than this, the process is terminated.</p>
+ <p>Defaults to <c>3000000</c> bytes.</p>
+ </item>
+ <tag><c>overload_kill_restart_after</c></tag>
+ <item>
+ <p>If the handler is terminated, it restarts automatically after a
+ delay specified in milliseconds. The value <c>infinity</c> prevents
+ restarts.</p>
+ <p>Defaults to <c>5000</c> milliseconds.</p>
+ </item>
+ </taglist>
+ <p>If the handler process is terminated because of overload, it prints
+ information about it in the log. It also prints information about when a
+ restart has taken place, and the handler is back in action.</p>
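+      <p>The following example shows how the termination and restart
+        feature can be enabled for a handler. The handler identity and
+        the limit values are only examples:</p>
+      <code type="none">
+logger:add_handler(my_protected_h, logger_std_h,
+                   #{config => #{overload_kill_enable => true,
+                                 overload_kill_qlen => 10000,
+                                 overload_kill_mem_size => 3000000,
+                                 overload_kill_restart_after => 10000}}).
+      </code>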
+ <note>
+ <p>The sizes of the log events affect the memory needs of the handler.
+ For information about how to limit the size of log events, see the
+ <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>
+ manual page.</p>
+ </note>
+ </section>
+ </section>
+
+ <section>
+ <title>See Also</title>
+ <p>
+ <seealso marker="disk_log"><c>disk_log(3)</c></seealso>,
+ <seealso marker="error_logger"><c>error_logger(3)</c></seealso>,
+ <seealso marker="logger"><c>logger(3)</c></seealso>,
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso>,
+ <seealso marker="logger_filters"><c>logger_filters(3)</c></seealso>,
+ <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>,
+ <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>,
+ <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p>
+ </section>
+</chapter>
diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml
new file mode 100644
index 0000000000..d9b941a0a9
--- /dev/null
+++ b/lib/kernel/doc/src/logger_disk_log_h.xml
@@ -0,0 +1,168 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_disk_log_h</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_disk_log_h.xml</file>
+ </header>
+ <module>logger_disk_log_h</module>
+ <modulesummary>A disk_log based handler for Logger</modulesummary>
+
+ <description>
+ <p>This is a handler for Logger that offers circular
+ (wrapped) logs by using <seealso marker="disk_log"><c>disk_log</c></seealso>.
+ Multiple instances of this handler can be added to Logger, and each instance
+ prints to its own disk log file, created with the name and settings specified
+ in the handler configuration.</p>
+ <p>The default standard handler,
+ <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, can be
+ replaced by a disk_log handler at startup of the Kernel application.
+ See an example of this below.</p>
+ <p>The handler has an overload protection mechanism that keeps the handler
+ process and the Kernel application alive during high loads of log
+ events. How overload protection works, and how to configure it, is
+ described in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>To add a new instance of the disk_log handler, use
+ <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c>
+ </seealso>. The handler configuration argument is a map which can contain
+ general configuration parameters, as documented in the
+ <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c>
+ </seealso>, and handler specific parameters. The specific data
+ is stored in a sub map with the key <c>config</c>, and can contain the
+ following parameters:</p>
+ <taglist>
+ <tag><c>file</c></tag>
+ <item>
+ <p>This is the full name of the disk log file. The option
+ corresponds to the <c>name</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+      <p>The value is set when the handler is added, and it cannot
+        be changed at runtime.</p>
+ <p>Defaults to the same name as the handler identity, in the
+ current directory.</p>
+ </item>
+ <tag><c>type</c></tag>
+ <item>
+ <p>This is the disk log type, <c>wrap</c> or <c>halt</c>. The option
+ corresponds to the <c>type</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+      <p>The value is set when the handler is added, and it cannot
+        be changed at runtime.</p>
+ <p>Defaults to <c>wrap</c>.</p>
+ </item>
+ <tag><c>max_no_files</c></tag>
+ <item>
+ <p>This is the maximum number of files that disk_log uses
+ for its circular logging. The option
+ corresponds to the <c>MaxNoFiles</c> element in the <c>size</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+      <p>The value is set when the handler is added, and it cannot
+        be changed at runtime.</p>
+ <p>Defaults to <c>10</c>.</p>
+ <p>The setting has no effect on a halt log.</p>
+ </item>
+ <tag><c>max_no_bytes</c></tag>
+ <item>
+ <p>This is the maximum number of bytes that is written to
+ a log file before disk_log proceeds with the next file in order, or
+ generates an error in case of a full halt log. The option
+ corresponds to the <c>MaxNoBytes</c> element in the <c>size</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+      <p>The value is set when the handler is added, and it cannot
+        be changed at runtime.</p>
+ <p>Defaults to <c>1048576</c> bytes for a wrap log, and
+ <c>infinity</c> for a halt log.</p>
+ </item>
+ <tag><c>filesync_repeat_interval</c></tag>
+ <item>
+ <p>This value, in milliseconds, specifies how often the handler does
+ a disk_log sync operation to write buffered data to disk. The handler attempts
+ the operation repeatedly, but only performs a new sync if something has
+ actually been logged.</p>
+ <p>Defaults to <c>5000</c> milliseconds.</p>
+      <p>If the value is set to <c>no_repeat</c>, the repeated sync operation
+ is disabled. The user can also call the
+ <seealso marker="logger_disk_log_h#filesync-1"><c>filesync/1</c>
+ </seealso> function to perform a disk_log sync.</p>
+ </item>
+ </taglist>
+ <p>Other configuration parameters exist, to be used for customizing
+ the overload protection behaviour. The same parameters are used both in the
+ standard handler and the disk_log handler, and are documented in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>Notice that when changing the configuration of the handler in runtime, the
+ disk_log options (<c>file</c>, <c>type</c>, <c>max_no_files</c>,
+ <c>max_no_bytes</c>) must not be modified.</p>
+ <p>Example of adding a disk_log handler:</p>
+ <code type="none">
+logger:add_handler(my_disk_log_h, logger_disk_log_h,
+                   #{config => #{file => "./my_disk_log",
+                                 type => wrap,
+                                 max_no_files => 4,
+                                 max_no_bytes => 10000,
+                                 filesync_repeat_interval => 1000}}).
+ </code>
+ <p>To use the disk_log handler instead of the default standard
+ handler when starting an Erlang node, change the Kernel default logger to
+ use <c>logger_disk_log_h</c>. Example:</p>
+ <code type="none">
+erl -kernel logger '[{handler,default,logger_disk_log_h,
+ #{config => #{file => "./system_disk_log"}}}]'
+ </code>
+ </description>
+
+ <funcs>
+
+ <func>
+ <name name="filesync" arity="1" clause_i="1"/>
+ <fsummary>Writes buffered data to disk.</fsummary>
+ <desc>
+ <p>Write buffered data to disk.</p>
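+      <p>For example, a disk_log sync can be forced for a handler added
+        under the identity <c>my_disk_log_h</c>, as in the example
+        above:</p>
+      <code type="none">
+logger_disk_log_h:filesync(my_disk_log_h).
+      </code>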
+ </desc>
+ </func>
+
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p><seealso marker="logger"><c>logger(3)</c></seealso>,
+ <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>,
+ <seealso marker="disk_log"><c>disk_log(3)</c></seealso></p>
+ </section>
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_filters.xml b/lib/kernel/doc/src/logger_filters.xml
new file mode 100644
index 0000000000..90f1fcc270
--- /dev/null
+++ b/lib/kernel/doc/src/logger_filters.xml
@@ -0,0 +1,254 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_filters</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_filters.xml</file>
+ </header>
+ <module>logger_filters</module>
+ <modulesummary>Filters to use with Logger.</modulesummary>
+
+ <description>
+ <p>All functions exported from this module can be used as primary
+ or handler
+ filters. See <seealso marker="logger#add_primary_filter-2">
+ <c>logger:add_primary_filter/2</c></seealso>
+ and <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso> for more information
+ about how filters are added.</p>
+ <p>Filters are removed with <seealso marker="logger#remove_primary_filter-1">
+ <c>logger:remove_primary_filter/1</c></seealso>
+ and <seealso marker="logger#remove_handler_filter-2">
+ <c>logger:remove_handler_filter/2</c></seealso>.</p>
+ </description>
+
+ <funcs>
+ <func>
+ <name name="domain" arity="2"/>
+ <fsummary>Filter log events based on the domain field in
+ metadata.</fsummary>
+ <desc>
+ <p>This filter provides a way of filtering log events based on a
+ <c>domain</c> field in <c>Metadata</c>. This field is
+ optional, and the purpose of using it is to group log events
+ from, for example, a specific functional area. This allows
+ filtering or other specialized treatment in a Logger
+ handler.</p>
+
+ <p>A domain field must be a list of atoms, creating smaller
+ and more specialized domains as the list grows longer. The
+ greatest domain is <c>[]</c>, which comprises all possible
+ domains.</p>
+
+ <p>For example, consider the following domains:</p>
+ <pre>
+D1 = [otp]
+D2 = [otp, sasl]</pre>
+
+      <p><c>D1</c> is the greater of the two, and is said to be a
+        super-domain of <c>D2</c>. <c>D2</c> is a
+        sub-domain of <c>D1</c>. Both <c>D1</c> and <c>D2</c> are
+ sub-domains of <c>[]</c>.</p>
+
+ <p>The above domains are used for logs originating from
+ Erlang/OTP. D1 specifies that the log event comes from
+ Erlang/OTP in general, and D2 indicates that the log event
+ is a so
+ called <seealso marker="logger_chapter#sasl_reports">SASL
+ report</seealso>.</p>
+
+ <p>The <c><anno>Extra</anno></c> parameter to
+ the <c>domain/2</c> function is specified when adding the
+ filter via <seealso marker="logger#add_primary_filter-2">
+ <c>logger:add_primary_filter/2</c></seealso>
+ or <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>.</p>
+
+ <p>The filter compares the value of the <c>domain</c> field in
+ the log event's metadata (<c>Domain</c>) against
+ <c><anno>MatchDomain</anno></c>. The filter matches if the
+ value of <c>Compare</c> is:</p>
+
+ <taglist>
+ <tag><c>sub</c></tag>
+ <item>
+ <p>and <c>Domain</c> is equal to or a sub-domain
+ of <c>MatchDomain</c>, that is, if <c>MatchDomain</c> is
+ a prefix of <c>Domain</c>.</p>
+ </item>
+ <tag><c>super</c></tag>
+ <item>
+ <p>and <c>Domain</c> is equal to or a super-domain
+ of <c>MatchDomain</c>, that is, if <c>Domain</c> is a
+ prefix of <c>MatchDomain</c>.</p>
+ </item>
+ <tag><c>equal</c></tag>
+ <item>
+ <p>and <c>Domain</c> is equal to <c>MatchDomain</c>.</p>
+ </item>
+ <tag><c>not_equal</c></tag>
+ <item>
+ <p>and <c>Domain</c> differs from <c>MatchDomain</c>, or
+ if there is no domain field in metadata.</p>
+ </item>
+ <tag><c>undefined</c></tag>
+ <item>
+ <p>and there is no domain field in metadata. In this
+ case <c><anno>MatchDomain</anno></c> must be set
+ to <c>[]</c>.</p>
+ </item>
+ </taglist>
+
+ <p>If the filter matches and <c><anno>Action</anno></c> is
+ <c>log</c>, the log event is allowed. If the filter matches
+ and <c><anno>Action</anno></c> is <c>stop</c>, the log event
+ is stopped.</p>
+
+ <p>If the filter does not match, it returns <c>ignore</c>,
+ meaning that other filters, or the value of the
+ configuration parameter <c>filter_default</c>, decide if the
+ event is allowed or not.</p>
+
+          <p>Log events that do not contain any domain field match only
+ when <c><anno>Compare</anno></c> is equal
+ to <c>undefined</c> or <c>not_equal</c>.</p>
+
+ <p>Example: stop all events with domain <c>[otp,
+ sasl | _]</c></p>
+
+ <code>
+logger:set_handler_config(h1, filter_default, log). % this is the default
+Filter = {fun logger_filters:domain/2, {stop, sub, [otp, sasl]}}.
+logger:add_handler_filter(h1, no_sasl, Filter).
+ok</code>
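+
+        <p>Example: assuming that the domain <c>[my_app]</c> is set in
+          the metadata of all log events from your own application, the
+          following lets only those events through handler <c>h1</c>
+          and stops everything else (the handler and filter identities
+          are only examples):</p>
+
+        <code>
+logger:set_handler_config(h1, filter_default, stop).
+Filter = {fun logger_filters:domain/2, {log, sub, [my_app]}}.
+logger:add_handler_filter(h1, my_app_only, Filter).
+ok</code>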
+ </desc>
+ </func>
+
+ <func>
+ <name name="level" arity="2"/>
+ <fsummary>Filter log events based on the log level.</fsummary>
+ <desc>
+ <p>This filter provides a way of filtering log events based
+ on the log level. It matches log events by comparing the
+          log level with a specified <c>MatchLevel</c>.</p>
+
+ <p>The <c><anno>Extra</anno></c> parameter is specified when
+ adding the filter
+ via <seealso marker="logger#add_primary_filter-2">
+ <c>logger:add_primary_filter/2</c></seealso>
+ or <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>.</p>
+
+ <p>The filter compares the value of the event's log level
+ (<c>Level</c>) to <c><anno>MatchLevel</anno></c> by
+ calling <seealso marker="logger#compare_levels-2">
+ <c>logger:compare_levels(Level, MatchLevel)</c></seealso>.
+ The filter matches if the value
+ of <c><anno>Operator</anno></c> is:</p>
+
+ <taglist>
+ <tag><c>neq</c></tag>
+ <item><p>and the compare function returns <c>lt</c>
+ or <c>gt</c>.</p></item>
+ <tag><c>eq</c></tag>
+ <item><p>and the compare function returns <c>eq</c>.</p></item>
+ <tag><c>lt</c></tag>
+ <item><p>and the compare function returns <c>lt</c>.</p></item>
+ <tag><c>gt</c></tag>
+ <item><p>and the compare function returns <c>gt</c>.</p></item>
+ <tag><c>lteq</c></tag>
+ <item><p>and the compare function returns <c>lt</c>
+ or <c>eq</c>.</p></item>
+ <tag><c>gteq</c></tag>
+ <item><p>and the compare function returns <c>gt</c>
+ or <c>eq</c>.</p></item>
+ </taglist>
+
+ <p>If the filter matches and <c><anno>Action</anno></c> is
+ <c>log</c>, the log event is allowed. If the filter
+ matches and <c><anno>Action</anno></c> is <c>stop</c>, the
+ log event is stopped.</p>
+
+ <p>If the filter does not match, it returns <c>ignore</c>,
+ meaning that other filters, or the value of the
+ configuration parameter <c>filter_default</c>, will decide
+ if the event is allowed or not.</p>
+
+ <p>Example: only allow debug level log events</p>
+
+ <code>
+logger:set_handler_config(h1, filter_default, stop).
+Filter = {fun logger_filters:level/2, {log, eq, debug}}.
+logger:add_handler_filter(h1, debug_only, Filter).
+ok</code>
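+
+        <p>Example: stop all events with severity lower
+          than <c>notice</c>, and let other filters, or the value of
+          <c>filter_default</c>, decide for the rest. The handler and
+          filter identities are only examples:</p>
+
+        <code>
+logger:set_handler_config(h1, filter_default, log).
+Filter = {fun logger_filters:level/2, {stop, lt, notice}}.
+logger:add_handler_filter(h1, no_chatter, Filter).
+ok</code>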
+ </desc>
+ </func>
+
+ <func>
+ <name name="progress" arity="2"/>
+ <fsummary>Filter progress reports from supervisor and application_controller.</fsummary>
+ <desc>
+ <p>This filter matches all progress reports
+ from <c>supervisor</c> and <c>application_controller</c>.</p>
+
+ <p>If <c><anno>Extra</anno></c> is <c>log</c>, the progress
+ reports are allowed. If <c><anno>Extra</anno></c>
+ is <c>stop</c>, the progress reports are stopped.</p>
+
+ <p>The filter returns <c>ignore</c> for all other log events.</p>
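+
+      <p>Example: stop all progress reports by adding the filter as a
+        primary filter. The filter identity <c>stop_progress</c> is
+        only an example:</p>
+
+      <code>
+logger:add_primary_filter(stop_progress, {fun logger_filters:progress/2, stop}).
+ok</code>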
+ </desc>
+ </func>
+
+ <func>
+ <name name="remote_gl" arity="2"/>
+ <fsummary>Filter events with group leader on remote node.</fsummary>
+ <desc>
+ <p>This filter matches all events originating from a process
+ that has its group leader on a remote node.</p>
+
+ <p>If <c><anno>Extra</anno></c> is <c>log</c>, the matching
+ events are allowed. If <c><anno>Extra</anno></c>
+ is <c>stop</c>, the matching events are stopped.</p>
+
+ <p>The filter returns <c>ignore</c> for all other log events.</p>
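+
+      <p>Example: stop all log events from processes with a group
+        leader on a remote node by adding the filter as a primary
+        filter. The filter identity <c>no_remote</c> is only an
+        example:</p>
+
+      <code>
+logger:add_primary_filter(no_remote, {fun logger_filters:remote_gl/2, stop}).
+ok</code>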
+ </desc>
+ </func>
+
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p>
+ <seealso marker="logger"><c>logger(3)</c></seealso>
+ </p>
+ </section>
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml
new file mode 100644
index 0000000000..24772fd6c4
--- /dev/null
+++ b/lib/kernel/doc/src/logger_formatter.xml
@@ -0,0 +1,354 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_formatter</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_formatter.xml</file>
+ </header>
+ <module>logger_formatter</module>
+ <modulesummary>Default formatter for Logger.</modulesummary>
+
+ <description>
+ <p>Each Logger handler has a configured formatter specified as a
+ module and a configuration term. The purpose of the formatter is
+ to translate the log events to a final printable string
+ (<seealso marker="stdlib:unicode#type-chardata"><c>unicode:chardata()</c>
+ </seealso>) which can be written to the output device of the
+ handler. See
+ sections <seealso marker="logger_chapter#handlers">Handlers</seealso>
+ and <seealso marker="logger_chapter#formatters">Formatters</seealso>
+ in the Kernel User's Guide for more information.</p>
+ <p><c>logger_formatter</c> is the default formatter used by
+ Logger.</p>
+ </description>
+
+
+ <datatypes>
+ <datatype>
+ <name name="config"/>
+ <desc>
+ <p>The configuration term for <c>logger_formatter</c> is a
+ <seealso marker="stdlib:maps">map</seealso>, and the
+ following keys can be set as configuration parameters:</p>
+ <taglist>
+ <tag><marker id="chars_limit"/>
+ <c>chars_limit = integer() > 0 | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the value of the option
+ with the same name to be used when calling
+ <seealso marker="stdlib:io_lib#format-3">
+ <c>io_lib:format/3</c></seealso>.
+ This value limits the total number of characters printed
+ for each log event. Notice that this is a soft limit. For a
+ hard truncation limit, see option <c>max_size</c>.</p>
+ <p>Defaults to <c>unlimited</c>.</p>
+ </item>
+ <tag><marker id="depth"/><c>depth = integer() > 0 | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the maximum depth to
+ which terms shall be printed by this formatter. Format
+ strings passed to this formatter are rewritten. The
+ format controls ~p and ~w are replaced with ~P and ~W,
+ respectively, and the value is used as the depth
+ parameter. For details, see
+ <seealso marker="stdlib:io#format-2"><c>io:format/2,3</c></seealso>
+ in STDLIB.</p>
+ <p>Defaults to <c>unlimited</c>.</p>
+ </item>
+ <tag><c>legacy_header = boolean()</c></tag>
+ <item>
+ <p>If set to <c>true</c> a header field is added to
+ logger_formatter's part of <c>Metadata</c>. The value of
+ this field is a string similar to the header created by
+ the
+ old <seealso marker="error_logger"><c>error_logger</c></seealso>
+ event handlers. It can be included in the log event by
+ adding the list <c>[logger_formatter,header]</c> to the
+ template. See the description of
+ the <seealso marker="#type-template"><c>template()</c></seealso>
+ type for more information.</p>
+ <p>Defaults to <c>false</c>.</p>
+ </item>
+ <tag><marker id="max_size"/>
+ <c>max_size = integer() > 0 | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the absolute maximum size a
+ string returned from this formatter can have. If the
+ formatted string is longer, after possibly being limited
+ by <c>chars_limit</c> or <c>depth</c>, it is truncated.</p>
+ <p>Defaults to <c>unlimited</c>.</p>
+ </item>
+ <tag><c>report_cb = </c><seealso marker="logger#type-report_cb">
+ <c>logger:report_cb()</c></seealso></tag>
+ <item>
+ <p>A report callback is used by the formatter to transform
+ log messages on report form to a format string and
+ arguments. The report callback can be specified in the
+ metadata for the log event. If no report callback exists
+ in metadata, <c>logger_formatter</c> will
+ use <seealso marker="logger#format_report-1">
+ <c>logger:format_report/1</c></seealso> as default
+ callback.</p>
+ <p>If this configuration parameter is set, it replaces
+ both the default report callback, and any report
+ callback found in metadata. That is, all reports are
+ converted by this configured function.</p>
+ </item>
+ <tag><c>single_line = boolean()</c></tag>
+ <item>
+ <p>If set to <c>true</c>, each log event is printed as a
+ single line. To achieve this, <c>logger_formatter</c>
+ sets the field width to <c>0</c> for all <c>~p</c>
+          and <c>~P</c> control sequences in the format string
+ (see <seealso marker="stdlib:io#format-2">
+ <c>io:format/2</c></seealso>), and replaces all
+          newlines in the message with <c>", "</c>. Whitespace
+          directly following a newline is removed. Notice
+ that newlines added by the <c>template</c> parameter are
+ not replaced.</p>
+ <p>Defaults to <c>true</c>.</p>
+ </item>
+ <tag><marker id="template"/>
+ <c>template = </c><seealso marker="#type-template"><c>template()</c>
+ </seealso></tag>
+ <item>
+ <p>The template describes how the formatted string is
+ composed by combining different data values from the log
+ event. See the description of
+ the <seealso marker="#type-template"><c>template()</c></seealso>
+ type for more information about this.</p>
+ </item>
+ <tag><c>time_designator = byte()</c></tag>
+ <item>
+ <p>Timestamps are formatted according to RFC3339, and the
+ time designator is the character used as date and time
+ separator.</p>
+ <p>Defaults to <c>$T</c>.</p>
+ <p>The value of this parameter is used as
+ the <c>time_designator</c> option
+ to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2">
+          <c>calendar:system_time_to_rfc3339/2</c></seealso>.</p>
+ </item>
+ <tag><c>time_offset = integer() | [byte()]</c></tag>
+ <item>
+ <p>The time offset, either a string or an integer, to be
+ used when formatting the timestamp.</p>
+ <p>An empty string is interpreted as local time. The
+ values <c>"Z"</c>, <c>"z"</c> or <c>0</c> are
+ interpreted as Universal Coordinated Time (UTC).</p>
+ <p>Strings, other than <c>"Z"</c>, <c>"z"</c>,
+          or <c>""</c>, must be of the form <c>±[hh]:[mm]</c>, for
+ example <c>"-02:00"</c> or <c>"+00:00"</c>.</p>
+ <p>Integers must be in microseconds, meaning that the
+ offset <c>7200000000</c> is equivalent
+ to <c>"+02:00"</c>.</p>
+ <p>Defaults to an empty string, meaning that timestamps
+ are displayed in local time. However, for backwards
+ compatibility, if the SASL configuration
+ parameter <seealso marker="sasl:sasl_app#utc_log">
+ <c>utc_log</c></seealso><c>=true</c>, the default is
+ changed to <c>"Z"</c>, meaning that timestamps are displayed
+ in UTC.</p>
+ <p>The value of this parameter is used as
+ the <c>offset</c> option
+ to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2">
+          <c>calendar:system_time_to_rfc3339/2</c></seealso>.</p>
+ </item>
+ </taglist>
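+        <p>For example, the following changes the formatter
+          configuration of the default handler so that each log event
+          is printed on multiple lines, with the timestamp in UTC. Any
+          other combination of the parameters above can be set in the
+          same way:</p>
+        <code type="none">
+logger:update_formatter_config(default,
+                               #{single_line => false,
+                                 time_offset => "Z"}).
+        </code>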
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="metakey"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="template"/>
+ <desc>
+ <p>The template is a list of atoms, atom lists, tuples and strings. The
+        atoms <c>level</c> and <c>msg</c> are treated as
+ placeholders for the severity level and the log message,
+ respectively. Other atoms or atom lists are interpreted as
+ placeholders for metadata, where atoms are expected to match
+ top level keys, and atom lists represent paths to sub keys when
+        the metadata is a nested map. For example, the
+ list <c>[key1,key2]</c> is replaced by the value of
+ the <c>key2</c> field in the nested map below. The
+ atom <c>key1</c> on its own is replaced by the complete
+ value of the <c>key1</c> field. The values are converted to
+ strings.</p>
+
+ <code>
+#{key1 => #{key2 => my_value,
+ ...}
+ ...}</code>
+
+ <p>Tuples in the template express if-exist tests for metadata
+ keys. For example, the following tuple says that
+ if <c>key1</c> exists in the metadata map,
+ print <c>"key1=Value"</c>, where <c>Value</c> is the value
+ that <c>key1</c> is associated with in the metadata map. If
+ <c>key1</c> does not exist, print nothing.</p>
+ <code>
+{key1, ["key1=",key1], []}</code>
+
+ <p>Strings in the template are printed literally.</p>
+ <p>The default value for the <c>template</c> configuration
+ parameter depends on the value of the <c>single_line</c>
+ and <c>legacy_header</c> configuration parameters as
+ follows.</p>
+
+ <p>The log event used in the examples is:</p>
+ <code>
+?LOG_ERROR("name: ~p~nexit_reason: ~p", [my_name, "It crashed"])</code>
+
+ <taglist>
+ <tag><c>legacy_header = true, single_line = false</c></tag>
+ <item>
+ <p>Default
+ template: <c>[[logger_formatter,header],"\n",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code type="none">
+=ERROR REPORT==== 17-May-2018::18:30:19.453447 ===
+name: my_name
+exit_reason: "It crashed"</code>
+
+ <p>Notice that all eight levels can occur in the heading,
+ not only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c> as
+ <seealso marker="error_logger"><c>error_logger</c></seealso>
+          produces. Also, microseconds are added at the end of the
+          timestamp.</p>
+ </item>
+
+ <tag><c>legacy_header = true, single_line = true</c></tag>
+ <item>
+ <p>Default
+ template: <c>[[logger_formatter,header],"\n",msg,"\n"]</c></p>
+
+ <p>Notice that the template is here the same as
+ for <c>single_line=false</c>, but the resulting log entry
+ differs in that there is only one line after the
+ heading:</p>
+ <code type="none">
+=ERROR REPORT==== 17-May-2018::18:31:06.952665 ===
+name: my_name, exit_reason: "It crashed"</code>
+ </item>
+
+ <tag><c>legacy_header = false, single_line = true</c></tag>
+ <item>
+ <p>Default template: <c>[time," ",level,": ",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code type="none">
+2018-05-17T18:31:31.152864+02:00 error: name: my_name, exit_reason: "It crashed"</code>
+ </item>
+
+ <tag><c>legacy_header = false, single_line = false</c></tag>
+ <item>
+ <p>Default template: <c>[time," ",level,":\n",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code type="none">
+2018-05-17T18:32:20.105422+02:00 error:
+name: my_name
+exit_reason: "It crashed"</code>
+ </item>
+ </taglist>
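+
+      <p>A custom template is set through the formatter configuration
+        of the handler. For example, the following is a sketch of a
+        template that also prints the <c>pid</c> and <c>mfa</c>
+        metadata fields, assuming that the events are issued through
+        the Logger API macros so that <c>mfa</c> is present in the
+        metadata:</p>
+      <code type="none">
+logger:update_formatter_config(default,
+    #{template => [time," ",level," ",pid," ",mfa,": ",msg,"\n"]}).
+      </code>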
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <funcs>
+ <func>
+ <name name="check_config" arity="1"/>
+ <fsummary>Validates the given formatter configuration.</fsummary>
+ <desc>
+ <p>The function is called by Logger when the formatter
+ configuration for a handler is set or modified. It
+ returns <c>ok</c> if the configuration is valid,
+ and <c>{error,term()}</c> if it is faulty.</p>
+ <p>The following Logger API functions can trigger this callback:</p>
+ <list>
+ <item><seealso marker="logger#add_handler-3">
+ <c>logger:add_handler/3</c></seealso></item>
+ <item><seealso marker="logger#set_handler_config-2">
+ <c>logger:set_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#update_handler_config-2">
+          <c>logger:update_handler_config/2</c></seealso></item>
+ <item><seealso marker="logger#update_formatter_config-2">
+ <c>logger:update_formatter_config/2</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+ <func>
+ <name name="format" arity="2"/>
+ <fsummary>Formats the given message.</fsummary>
+ <desc>
+      <p>This is the formatter callback function to be called from
+ handlers. The log event is processed as follows:</p>
+ <list>
+ <item>If the message is on report form, it is converted to
+ <c>{Format,Args}</c> by calling the report callback. See
+ section <seealso marker="logger_chapter#log_message">Log
+ Message</seealso> in the Kernel User's Guide for more
+ information about report callbacks and valid forms of log
+ messages.</item>
+ <item>The message size is limited according to the values of
+ configuration parameters <seealso marker="#chars_limit">
+ <c>chars_limit</c></seealso>
+ and <seealso marker="#depth"><c>depth</c></seealso>.</item>
+ <item>The full log entry is composed according to
+ the <seealso marker="#template"><c>template</c></seealso>.</item>
+ <item>If the final string is too long, it is truncated
+ according to the value of configuration
+ parameter <seealso marker="#max_size"><c>max_size</c></seealso>.</item>
+ </list>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p>
+ <seealso marker="stdlib:calendar"><c>calendar(3)</c></seealso>,
+ <seealso marker="error_logger"><c>error_logger(3)</c></seealso>,
+ <seealso marker="stdlib:io"><c>io(3)</c></seealso>,
+ <seealso marker="stdlib:io_lib"><c>io_lib(3)</c></seealso>,
+ <seealso marker="logger"><c>logger(3)</c></seealso>,
+ <seealso marker="stdlib:maps"><c>maps(3)</c></seealso>,
+ <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso>,
+ <seealso marker="stdlib:unicode"><c>unicode(3)</c></seealso>
+ </p>
+ </section>
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
new file mode 100644
index 0000000000..e156f5719b
--- /dev/null
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -0,0 +1,141 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_std_h</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_std_h.xml</file>
+ </header>
+ <module>logger_std_h</module>
+ <modulesummary>Standard handler for Logger.</modulesummary>
+
+ <description>
+ <p>This is the standard handler for Logger.
+ Multiple instances of this handler can be added to
+ Logger, and each instance prints logs to <c>standard_io</c>,
+ <c>standard_error</c>, or to file.</p>
+ <p>The handler has an overload protection mechanism that keeps the handler
+ process and the Kernel application alive during high loads of log
+ events. How overload protection works, and how to configure it, is
+ described in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>To add a new instance of the standard handler, use
+ <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c>
+ </seealso>. The handler configuration argument is a map which can contain
+ general configuration parameters, as documented in the
+ <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c>
+ </seealso>, and handler specific parameters. The specific data
+ is stored in a sub map with the key <c>config</c>, and can contain the
+ following parameters:</p>
+ <taglist>
+ <tag><marker id="type"/><c>type</c></tag>
+ <item>
+ <p>This has the value <c>standard_io</c>, <c>standard_error</c>,
+ <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>.</p>
+ <p>If <c>LogFileOpts</c> is specified, it replaces the default
+ list of options used when opening the log file. The default
+ list is <c>[raw,append,delayed_write]</c>. One reason to do
+ so can be to change <c>append</c> to, for
+ example, <c>write</c>, ensuring that the old log is
+ truncated when a node is restarted. See the reference manual
+ for <seealso marker="file#open-2"><c>file:open/2</c></seealso>
+ for more information about file options.</p>
+      <p>Log files are always UTF-8 encoded. The encoding cannot be
+ changed by setting the option <c>{encoding,Encoding}</c>
+ in <c>LogFileOpts</c>.</p>
+ <p>Notice that the standard handler does not have support for
+ circular logging. Use the disk_log handler,
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>,
+ for this.</p>
+      <p>The value is set when the handler is added, and it cannot
+        be changed at runtime.</p>
+ <p>Defaults to <c>standard_io</c>.</p>
+ </item>
+ <tag><c>filesync_repeat_interval</c></tag>
+ <item>
+ <p>This value, in milliseconds, specifies how often the handler does
+ a file sync operation to write buffered data to disk. The handler attempts
+ the operation repeatedly, but only performs a new sync if something has
+ actually been logged.</p>
+      <p>If the value is set to <c>no_repeat</c>, the repeated file sync operation
+ is disabled, and it is the operating system settings that determine
+ how quickly or slowly data is written to disk. The user can also call
+ the <seealso marker="logger_std_h#filesync-1"><c>filesync/1</c></seealso>
+ function to perform a file sync.</p>
+ <p>Defaults to <c>5000</c> milliseconds.</p>
+ </item>
+ </taglist>
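+  <p>For example, to get a log file that is truncated each time the
+    node is restarted, the <c>type</c> parameter can be given with
+    explicit file options where <c>append</c> is replaced
+    with <c>write</c>. The handler identity and file name are only
+    examples:</p>
+  <code type="none">
+logger:add_handler(my_truncating_h, logger_std_h,
+                   #{config => #{type => {file, "./info.log",
+                                          [raw, write, delayed_write]}}}).
+  </code>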
+ <p>Other configuration parameters exist, to be used for customizing
+ the overload protection behaviour. The same parameters are used both in the
+ standard handler and the disk_log handler, and are documented in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+  <p>Notice that when changing the configuration of the handler at runtime,
+    the <c>type</c> parameter must not be modified.</p>
+ <p>Example of adding a standard handler:</p>
+ <code type="none">
+logger:add_handler(my_standard_h, logger_std_h,
+ #{config => #{type => {file,"./system_info.log"},
+ filesync_repeat_interval => 1000}}).
+ </code>
+  <p>To make the default handler, which is started with the Kernel
+    application, log to a file instead of <c>standard_io</c>,
+    change the Kernel default logger configuration. Example:</p>
+ <code type="none">
+erl -kernel logger '[{handler,default,logger_std_h,
+ #{config => #{type => {file,"./log.log"}}}}]'
+ </code>
+ <p>An example of how to replace the standard handler with a disk_log handler
+ at startup is found in the
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>
+ manual.</p>
+ </description>
+
+ <funcs>
+
+ <func>
+ <name name="filesync" arity="1" clause_i="1"/>
+ <fsummary>Writes buffered data to disk.</fsummary>
+ <desc>
+ <p>Write buffered data to disk.</p>
+ </desc>
+ </func>
+
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p><seealso marker="logger"><c>logger(3)</c></seealso>,
+ <seealso marker="logger_disk_log_h">
+ <c>logger_disk_log_h(3)</c></seealso></p>
+ </section>
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/net_adm.xml b/lib/kernel/doc/src/net_adm.xml
index 4ef9d361f6..6957a3b5e4 100644
--- a/lib/kernel/doc/src/net_adm.xml
+++ b/lib/kernel/doc/src/net_adm.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2013</year>
+ <year>1996</year><year>2016</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -25,95 +25,105 @@
<title>net_adm</title>
<prepared>Claes Wikstrom</prepared>
<docno>1</docno>
- <date>96-09-10</date>
+ <date>1996-09-10</date>
<rev>A</rev>
</header>
<module>net_adm</module>
- <modulesummary>Various Erlang Net Administration Routines</modulesummary>
+ <modulesummary>Various Erlang net administration routines.</modulesummary>
<description>
<p>This module contains various network utility functions.</p>
</description>
+
<funcs>
<func>
<name name="dns_hostname" arity="1"/>
- <fsummary>Official name of a host</fsummary>
+ <fsummary>Official name of a host.</fsummary>
<desc>
<p>Returns the official name of <c><anno>Host</anno></c>, or
<c>{error, <anno>Host</anno>}</c> if no such name is found. See also
- <c>inet(3)</c>.</p>
+ <seealso marker="inet"><c>inet(3)</c></seealso>.</p>
</desc>
</func>
+
<func>
<name name="host_file" arity="0"/>
- <fsummary>Read the <c>.hosts.erlang</c>file</fsummary>
+ <fsummary>Read file <c>.hosts.erlang</c>.</fsummary>
<desc>
- <p>Reads the <c>.hosts.erlang</c> file, see the section
- <em>Files</em> below. Returns the hosts in this file as a
- list, or returns <c>{error, <anno>Reason</anno>}</c> if the file could not
- be read or the Erlang terms on the file could not be interpreted.</p>
+ <p>Reads file <c>.hosts.erlang</c>, see section
+ <seealso marker="#files">Files</seealso>. Returns the hosts in this
+ file as a list. Returns <c>{error, <anno>Reason</anno>}</c> if the
+ file cannot be read or the Erlang terms on the file cannot be
+ interpreted.</p>
</desc>
</func>
+
<func>
<name name="localhost" arity="0"/>
- <fsummary>Name of the local host</fsummary>
+ <fsummary>Name of the local host.</fsummary>
<desc>
<p>Returns the name of the local host. If Erlang was started
- with the <c>-name</c> command line flag, <c><anno>Name</anno></c> is
+ with command-line flag <c>-name</c>, <c><anno>Name</anno></c> is
the fully qualified name.</p>
</desc>
</func>
+
<func>
<name name="names" arity="0"/>
<name name="names" arity="1"/>
- <fsummary>Names of Erlang nodes at a host</fsummary>
+ <fsummary>Names of Erlang nodes at a host.</fsummary>
<desc>
- <p>Similar to <c>epmd -names</c>, see <c>epmd(1)</c>.
- <c><anno>Host</anno></c> defaults to the local host. Returns the names and
- associated port numbers of the Erlang nodes that <c>epmd</c>
- at the specified host has registered.</p>
- <p>Returns <c>{error, address}</c> if <c>epmd</c> is not
- running.</p>
+ <p>Similar to <c>epmd -names</c>, see
+ <seealso marker="erts:epmd"><c>erts:epmd(1)</c></seealso>.
+ <c><anno>Host</anno></c> defaults to the local host. Returns the
+      names and associated port numbers of the Erlang nodes registered
+      with <c>epmd</c> at the specified host. Returns
+ <c>{error, address}</c> if <c>epmd</c> is not operational.</p>
+ <p><em>Example:</em></p>
<pre>
(arne@dunn)1> <input>net_adm:names().</input>
{ok,[{"arne",40262}]}</pre>
</desc>
</func>
+
<func>
<name name="ping" arity="1"/>
- <fsummary>Set up a connection to a node</fsummary>
+ <fsummary>Set up a connection to a node.</fsummary>
<desc>
- <p>Tries to set up a connection to <c><anno>Node</anno></c>. Returns
- <c>pang</c> if it fails, or <c>pong</c> if it is successful.</p>
+ <p>Sets up a connection to <c><anno>Node</anno></c>. Returns
+ <c>pong</c> if it is successful, otherwise <c>pang</c>.</p>
</desc>
</func>
+
<func>
<name name="world" arity="0"/>
<name name="world" arity="1"/>
- <fsummary>Lookup and connect to all nodes at all hosts in <c>.hosts.erlang</c></fsummary>
+ <fsummary>Lookup and connect to all nodes at all hosts in
+ <c>.hosts.erlang</c>.</fsummary>
<type name="verbosity"/>
<desc>
- <p>This function calls <c>names(Host)</c> for all hosts which
+ <p>Calls <c>names(Host)</c> for all hosts that
are specified in the Erlang host file <c>.hosts.erlang</c>,
- collects the replies and then evaluates <c>ping(Node)</c> on
- all those nodes. Returns the list of all nodes that were,
- successfully pinged.</p>
+ collects the replies, and then evaluates <c>ping(Node)</c> on
+ all those nodes. Returns the list of all nodes that are
+ successfully pinged.</p>
<p><c><anno>Arg</anno></c> defaults to <c>silent</c>.
- If <c><anno>Arg</anno> == verbose</c>, the function writes information about which
- nodes it is pinging to stdout.</p>
+ If <c><anno>Arg</anno> == verbose</c>, the function writes
+ information about which nodes it is pinging to <c>stdout</c>.</p>
<p>This function can be useful when a node is started, and
- the names of the other nodes in the network are not initially
- known.</p>
- <p>Failure: <c>{error, Reason}</c> if <c>host_file()</c>
+ the names of the other network nodes are not initially known.</p>
+ <p>Returns <c>{error, Reason}</c> if <c>host_file()</c>
returns <c>{error, Reason}</c>.</p>
</desc>
</func>
+
<func>
<name name="world_list" arity="1"/>
<name name="world_list" arity="2"/>
- <fsummary>Lookup and connect to all nodes at specified hosts</fsummary>
+ <fsummary>Lookup and connect to all nodes at specified hosts.</fsummary>
<type name="verbosity"/>
<desc>
- <p>As <c>world/0,1</c>, but the hosts are given as argument
+ <p>Same as <seealso marker="#world/1"><c>world/0,1</c></seealso>,
+ but the hosts are specified as argument
instead of being read from <c>.hosts.erlang</c>.</p>
</desc>
</func>
@@ -121,13 +131,14 @@
<section>
<title>Files</title>
- <p>The <c>.hosts.erlang</c> file consists of a number of host names
+ <marker id="files"/>
+ <p>File <c>.hosts.erlang</c> consists of a number of host names
written as Erlang terms. It is looked for in the current work
directory, the user's home directory, and <c>$OTP_ROOT</c>
(the root directory of Erlang/OTP), in that order.</p>
- <p>The format of the <c>.hosts.erlang</c> file must be one host
- name per line. The host names must be within quotes as shown in
- the following example:</p>
+ <p>The format of file <c>.hosts.erlang</c> must be one host
+ name per line. The host names must be within quotes.</p>
+ <p><em>Example:</em></p>
<pre>
'super.eua.ericsson.se'.
'renat.eua.ericsson.se'.
diff --git a/lib/kernel/doc/src/net_kernel.xml b/lib/kernel/doc/src/net_kernel.xml
index 311e0d8ea4..bfbe7a6470 100644
--- a/lib/kernel/doc/src/net_kernel.xml
+++ b/lib/kernel/doc/src/net_kernel.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2013</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -25,41 +25,60 @@
<title>net_kernel</title>
<prepared>Claes Wikstrom</prepared>
<docno>1</docno>
- <date>96-09-10</date>
+ <date>1996-09-10</date>
<rev>A</rev>
</header>
<module>net_kernel</module>
- <modulesummary>Erlang Networking Kernel</modulesummary>
+ <modulesummary>Erlang networking kernel.</modulesummary>
<description>
<p>The net kernel is a system process, registered as
- <c>net_kernel</c>, which must be running for distributed Erlang
+ <c>net_kernel</c>, which must be operational for distributed Erlang
to work. The purpose of this process is to implement parts of
the BIFs <c>spawn/4</c> and <c>spawn_link/4</c>, and to provide
monitoring of the network.</p>
- <p>An Erlang node is started using the command line flag
+ <p>An Erlang node is started using command-line flag
<c>-name</c> or <c>-sname</c>:</p>
-<pre>$ <input>erl -sname foobar</input></pre>
+ <pre>
+$ <input>erl -sname foobar</input></pre>
<p>It is also possible to call <c>net_kernel:start([foobar])</c>
directly from the normal Erlang shell prompt:</p>
-<pre>1> <input>net_kernel:start([foobar, shortnames]).</input>
+ <pre>
+1> <input>net_kernel:start([foobar, shortnames]).</input>
{ok,&lt;0.64.0>}
(foobar@gringotts)2></pre>
- <p>If the node is started with the command line flag <c>-sname</c>,
- the node name will be <c>foobar@Host</c>, where <c>Host</c> is
+ <p>If the node is started with command-line flag <c>-sname</c>,
+ the node name is <c>foobar@Host</c>, where <c>Host</c> is
the short name of the host (not the fully qualified domain name).
- If started with the <c>-name</c> flag, <c>Host</c> is the fully
- qualified domain name. See <c>erl(1)</c>.</p>
+ If started with flag <c>-name</c>, the node name is <c>foobar@Host</c>,
+ where <c>Host</c> is the fully qualified domain name.
+ For more information, see
+ <seealso marker="erts:erl"><c>erl</c></seealso>.</p>
<p>Normally, connections are established automatically when
another node is referenced. This functionality can be disabled
- by setting the Kernel configuration parameter
- <c>dist_auto_connect</c> to <c>false</c>, see
- <seealso marker="kernel_app">kernel(6)</seealso>. In this case,
+ by setting Kernel configuration parameter
+ <c>dist_auto_connect</c> to <c>never</c>, see
+ <seealso marker="kernel_app"><c>kernel(6)</c></seealso>. In this case,
connections must be established explicitly by calling
- <c>net_kernel:connect_node/1</c>.</p>
- <p>Which nodes are allowed to communicate with each other is handled
- by the magic cookie system, see
- <seealso marker="doc/reference_manual:distributed">Distributed Erlang</seealso> in the Erlang Reference Manual.</p>
+ <seealso marker="#connect_node/1"><c>connect_node/1</c></seealso>.</p>
+    <p>Which nodes are allowed to communicate with each other is handled
+ by the magic cookie system, see section
+ <seealso marker="doc/reference_manual:distributed">Distributed Erlang</seealso>
+ in the Erlang Reference Manual.</p>
+ <warning>
+ <p>
+ Starting a distributed node without also specifying
+ <seealso marker="erts:erl#proto_dist"><c>-proto_dist inet_tls</c></seealso>
+        will expose the node to attacks that may give the attacker
+        complete access to the node and, by extension, the cluster.
+        When using insecure distributed nodes, make sure that the
+        network is configured to keep potential attackers out.
+        See the <seealso marker="ssl:ssl_distribution">
+        Using SSL for Erlang Distribution</seealso> User's Guide
+        for details on how to set up a secure distributed node.
+ </p>
+ </warning>
</description>
+
<funcs>
<func>
<name name="allow" arity="1"/>
@@ -77,237 +96,283 @@
an atom.</p>
</desc>
</func>
+
<func>
<name name="connect_node" arity="1"/>
- <fsummary>Establish a connection to a node</fsummary>
+ <fsummary>Establish a connection to a node.</fsummary>
+ <desc>
+ <p>Establishes a connection to <c><anno>Node</anno></c>. Returns
+ <c>true</c> if a connection was established or was already
+ established or if <c><anno>Node</anno></c> is the local node
+ itself. Returns <c>false</c> if the connection attempt failed, and
+ <c>ignored</c> if the local node is not alive.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_net_ticktime" arity="0"/>
+ <fsummary>Get <c>net_ticktime</c>.</fsummary>
+ <desc>
+ <p>Gets <c>net_ticktime</c> (see
+ <seealso marker="kernel_app"><c>kernel(6)</c></seealso>).</p>
+ <p>Defined return values (<c><anno>Res</anno></c>):</p>
+ <taglist>
+ <tag><c><anno>NetTicktime</anno></c></tag>
+ <item><p><c>net_ticktime</c> is <c><anno>NetTicktime</anno></c>
+ seconds.</p></item>
+ <tag><c>{ongoing_change_to, <anno>NetTicktime</anno>}</c></tag>
+ <item><p><c>net_kernel</c> is currently changing
+ <c>net_ticktime</c> to <c><anno>NetTicktime</anno></c>
+ seconds.</p></item>
+ <tag><c>ignored</c></tag>
+ <item><p>The local node is not alive.</p></item>
+ </taglist>
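+ <p>For example, on a node that uses the default
+ <c>net_ticktime</c> of 60 seconds, the call returns the
+ following (illustration only):</p>
+ <pre>
+1> <input>net_kernel:get_net_ticktime().</input>
+60</pre>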
+ </desc>
+ </func>
+
+ <func>
+ <name name="getopts" arity="2"/>
+ <fsummary>Get distribution socket options.</fsummary>
<desc>
- <p>Establishes a connection to <c><anno>Node</anno></c>. Returns <c>true</c>
- if successful, <c>false</c> if not, and <c>ignored</c> if
- the local node is not alive.</p>
+ <p>Get one or more options for the distribution socket
+ connected to <c><anno>Node</anno></c>.</p>
+ <p>If <c><anno>Node</anno></c> is a connected node,
+ the return value is the same as from
+ <seealso marker="inet#getopts/2"><c>inet:getopts(Sock, Options)</c></seealso>
+ where <c>Sock</c> is the distribution socket for <c><anno>Node</anno></c>.</p>
+ <p>Returns <c>ignored</c> if the local node is not alive or
+ <c>{error, noconnection}</c> if <c><anno>Node</anno></c> is not connected.</p>
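+ <p>For example, to read the <c>nodelay</c> option of the
+ distribution socket towards a connected node (the node name and
+ the returned value are illustrative only):</p>
+ <pre>
+1> <input>net_kernel:getopts('foo@gringotts', [nodelay]).</input>
+{ok,[{nodelay,true}]}</pre>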
</desc>
</func>
+
<func>
<name name="monitor_nodes" arity="1"/>
<name name="monitor_nodes" arity="2"/>
- <fsummary>Subscribe to node status change messages</fsummary>
+ <fsummary>Subscribe to node status change messages.</fsummary>
<desc>
<p>The calling process subscribes or unsubscribes to node
status change messages. A <c>nodeup</c> message is delivered
- to all subscribing process when a new node is connected, and
+ to all subscribing processes when a new node is connected, and
a <c>nodedown</c> message is delivered when a node is
disconnected.</p>
- <p>If <c><anno>Flag</anno></c> is <c>true</c>, a new subscription is started.
- If <c><anno>Flag</anno></c> is <c>false</c>, all previous subscriptions --
- started with the same <c><anno>Options</anno></c> -- are stopped. Two
+ <p>If <c><anno>Flag</anno></c> is <c>true</c>, a new subscription is
+ started. If <c><anno>Flag</anno></c> is <c>false</c>, all previous
+ subscriptions started with the same <c><anno>Options</anno></c>
+ are stopped. Two
option lists are considered the same if they contain the same
set of options.</p>
- <p>As of <c>kernel</c> version 2.11.4, and <c>erts</c> version
+ <p>As from Kernel version 2.11.4, and ERTS version
5.5.4, the following is guaranteed:</p>
<list type="bulleted">
- <item><c>nodeup</c> messages will be delivered before delivery
- of any message from the remote node passed through the
- newly established connection.</item>
- <item><c>nodedown</c> messages will not be delivered until all
- messages from the remote node that have been passed
- through the connection have been delivered.</item>
+ <item><p><c>nodeup</c> messages are delivered before delivery
+ of any message from the remote node passed through the
+ newly established connection.</p></item>
+ <item><p><c>nodedown</c> messages are not delivered until all
+ messages from the remote node that have been passed
+ through the connection have been delivered.</p></item>
</list>
- <p>Note, that this is <em>not</em> guaranteed for <c>kernel</c>
+ <p>Notice that this is <em>not</em> guaranteed for Kernel
versions before 2.11.4.</p>
- <p>As of <c>kernel</c> version 2.11.4 subscriptions can also be
- made before the <c>net_kernel</c> server has been started,
- i.e., <c>net_kernel:monitor_nodes/[1,2]</c> does not return
+ <p>As from Kernel version 2.11.4, subscriptions can also be
+ made before the <c>net_kernel</c> server is started, that is,
+ <c>net_kernel:monitor_nodes/[1,2]</c> does not return
<c>ignored</c>.</p>
- <p>As of <c>kernel</c> version 2.13, and <c>erts</c> version
+ <p>As from Kernel version 2.13, and ERTS version
5.7, the following is guaranteed:</p>
<list type="bulleted">
- <item><c>nodeup</c> messages will be delivered after the
- corresponding node appears in results from
- <c>erlang:nodes/X</c>.</item>
- <item><c>nodedown</c> messages will be delivered after the
- corresponding node has disappeared in results from
- <c>erlang:nodes/X</c>.</item>
+ <item><p><c>nodeup</c> messages are delivered after the
+ corresponding node appears in results from
+ <c>erlang:nodes/X</c>.</p></item>
+ <item><p><c>nodedown</c> messages are delivered after the
+ corresponding node has disappeared in results from
+ <c>erlang:nodes/X</c>.</p></item>
</list>
- <p>Note, that this is <em>not</em> guaranteed for <c>kernel</c>
+ <p>Notice that this is <em>not</em> guaranteed for Kernel
versions before 2.13.</p>
<p>The format of the node status change messages depends on
- <c><anno>Options</anno></c>. If <c><anno>Options</anno></c> is [], which is the default,
- the format is:</p>
+ <c><anno>Options</anno></c>. If <c><anno>Options</anno></c> is
+ <c>[]</c>, which is the default, the format is as follows:</p>
<code type="none">
{nodeup, Node} | {nodedown, Node}
Node = node()</code>
- <p>If <c><anno>Options</anno> /= []</c>, the format is:</p>
+ <p>If <c><anno>Options</anno></c> is not <c>[]</c>, the format is
+ as follows:</p>
<code type="none">
{nodeup, Node, InfoList} | {nodedown, Node, InfoList}
Node = node()
InfoList = [{Tag, Val}]</code>
<p><c>InfoList</c> is a list of tuples. Its contents depends on
<c><anno>Options</anno></c>, see below.</p>
- <p>Also, when <c>OptionList == []</c> only visible nodes, that
+ <p>Also, when <c>OptionList == []</c>, only visible nodes, that
is, nodes that appear in the result of
- <seealso marker="erts:erlang#nodes/0">nodes/0</seealso>, are
- monitored.</p>
+ <seealso marker="erts:erlang#nodes/0"><c>erlang:nodes/0</c></seealso>,
+ are monitored.</p>
<p><c><anno>Option</anno></c> can be any of the following:</p>
<taglist>
<tag><c>{node_type, NodeType}</c></tag>
<item>
- <p>Currently valid values for <c>NodeType</c> are:</p>
+ <p>Valid values for <c>NodeType</c>:</p>
<taglist>
<tag><c>visible</c></tag>
- <item>Subscribe to node status change messages for visible
+ <item><p>Subscribe to node status change messages for visible
nodes only. The tuple <c>{node_type, visible}</c> is
- included in <c>InfoList</c>.</item>
+ included in <c>InfoList</c>.</p></item>
<tag><c>hidden</c></tag>
- <item>Subscribe to node status change messages for hidden
+ <item><p>Subscribe to node status change messages for hidden
nodes only. The tuple <c>{node_type, hidden}</c> is
- included in <c>InfoList</c>.</item>
+ included in <c>InfoList</c>.</p></item>
<tag><c>all</c></tag>
- <item>Subscribe to node status change messages for both
+ <item><p>Subscribe to node status change messages for both
visible and hidden nodes. The tuple
- <c>{node_type, visible | hidden}</c> is included in
- <c>InfoList</c>.</item>
+ <c>{node_type, visible | hidden}</c> is included in
+ <c>InfoList</c>.</p></item>
</taglist>
</item>
<tag><c>nodedown_reason</c></tag>
<item>
<p>The tuple <c>{nodedown_reason, Reason}</c> is included in
- <c>InfoList</c> in <c>nodedown</c> messages. <c>Reason</c>
- can be:</p>
+ <c>InfoList</c> in <c>nodedown</c> messages.</p>
+ <p>
+ <c>Reason</c> can be any term, depending on which
+ distribution module or process is used,
+ but for the standard TCP distribution module it is
+ any of the following:
+ </p>
<taglist>
<tag><c>connection_setup_failed</c></tag>
- <item>The connection setup failed (after <c>nodeup</c>
- messages had been sent).</item>
+ <item><p>The connection setup failed (after <c>nodeup</c>
+ messages were sent).</p></item>
<tag><c>no_network</c></tag>
- <item>No network available.</item>
+ <item><p>No network is available.</p></item>
<tag><c>net_kernel_terminated</c></tag>
- <item>The <c>net_kernel</c> process terminated.</item>
+ <item><p>The <c>net_kernel</c> process terminated.</p></item>
<tag><c>shutdown</c></tag>
- <item>Unspecified connection shutdown.</item>
+ <item><p>Unspecified connection shutdown.</p></item>
<tag><c>connection_closed</c></tag>
- <item>The connection was closed.</item>
+ <item><p>The connection was closed.</p></item>
<tag><c>disconnect</c></tag>
- <item>The connection was disconnected (forced from the
- current node).</item>
+ <item><p>The connection was disconnected (forced from the
+ current node).</p></item>
<tag><c>net_tick_timeout</c></tag>
- <item>Net tick timeout.</item>
+ <item><p>Net tick time-out.</p></item>
<tag><c>send_net_tick_failed</c></tag>
- <item>Failed to send net tick over the connection.</item>
+ <item><p>Failed to send net tick over the connection.</p></item>
<tag><c>get_status_failed</c></tag>
- <item>Status information retrieval from the <c>Port</c>
- holding the connection failed.</item>
+ <item><p>Status information retrieval from the <c>Port</c>
+ holding the connection failed.</p></item>
</taglist>
</item>
</taglist>
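+ <p>The following session sketches a typical subscription. The
+ node name <c>bar@gringotts</c> and the flushed messages are
+ illustrative only; they depend on what happens in the cluster
+ after the subscription is started:</p>
+ <pre>
+1> <input>net_kernel:monitor_nodes(true, [{node_type, all}, nodedown_reason]).</input>
+ok
+2> <input>flush().</input>
+Shell got {nodeup,bar@gringotts,[{node_type,visible}]}
+Shell got {nodedown,bar@gringotts,
+           [{node_type,visible},{nodedown_reason,connection_closed}]}
+ok</pre>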
</desc>
</func>
- <func>
- <name name="get_net_ticktime" arity="0"/>
- <fsummary>Get <c>net_ticktime</c></fsummary>
- <desc>
- <p>Gets <c>net_ticktime</c> (see
- <seealso marker="kernel_app">kernel(6)</seealso>).</p>
- <p>Currently defined return values (<c><anno>Res</anno></c>):</p>
- <taglist>
- <tag><c><anno>NetTicktime</anno></c></tag>
- <item>
- <p><c>net_ticktime</c> is <c><anno>NetTicktime</anno></c> seconds.</p>
- </item>
- <tag><c>{ongoing_change_to, <anno>NetTicktime</anno>}</c></tag>
- <item>
- <p><c>net_kernel</c> is currently changing
- <c>net_ticktime</c> to <c><anno>NetTicktime</anno></c> seconds.</p>
- </item>
- <tag><c>ignored</c></tag>
- <item>
- <p>The local node is not alive.</p>
- </item>
- </taglist>
- </desc>
- </func>
+
<func>
<name name="set_net_ticktime" arity="1"/>
<name name="set_net_ticktime" arity="2"/>
- <fsummary>Set <c>net_ticktime</c></fsummary>
+ <fsummary>Set <c>net_ticktime</c>.</fsummary>
<desc>
<p>Sets <c>net_ticktime</c> (see
- <seealso marker="kernel_app">kernel(6)</seealso>) to
- <c><anno>NetTicktime</anno></c> seconds. <c><anno>TransitionPeriod</anno></c> defaults
- to 60.</p>
+ <seealso marker="kernel_app"><c>kernel(6)</c></seealso>) to
+ <c><anno>NetTicktime</anno></c> seconds.
+ <c><anno>TransitionPeriod</anno></c> defaults to <c>60</c>.</p>
<p>Some definitions:</p>
<taglist>
- <tag>The minimum transition traffic interval (<c>MTTI</c>)</tag>
- <item>
- <p><c>minimum(<anno>NetTicktime</anno>, PreviousNetTicktime)*1000 div 4</c> milliseconds.</p>
- </item>
- <tag>The transition period</tag>
- <item>
- <p>The time of the least number of consecutive <c>MTTI</c>s
- to cover <c><anno>TransitionPeriod</anno></c> seconds following
- the call to <c>set_net_ticktime/2</c> (i.e.
- ((<c><anno>TransitionPeriod</anno>*1000 - 1) div MTTI + 1)*MTTI</c>
- milliseconds).</p>
- </item>
+ <tag>Minimum transition traffic interval (<c>MTTI</c>)</tag>
+ <item><p><c>minimum(<anno>NetTicktime</anno>,
+ PreviousNetTicktime)*1000 div 4</c> milliseconds.</p></item>
+ <tag>Transition period</tag>
+ <item><p>The time of the least number of consecutive <c>MTTI</c>s
+ to cover <c><anno>TransitionPeriod</anno></c> seconds following
+ the call to <c>set_net_ticktime/2</c> (that is,
+ ((<c><anno>TransitionPeriod</anno>*1000 - 1) div MTTI + 1)*MTTI</c>
+ milliseconds).</p></item>
</taglist>
- <p>If <c><![CDATA[<anno>NetTicktime</anno> < PreviousNetTicktime]]></c>, the actual
- <c>net_ticktime</c> change will be done at the end of
- the transition period; otherwise, at the beginning. During
- the transition period, <c>net_kernel</c> will ensure that
- there will be outgoing traffic on all connections at least
+ <p>If
+ <c><![CDATA[NetTicktime < PreviousNetTicktime]]></c>,
+ the <c>net_ticktime</c> change is done at the end of
+ the transition period; otherwise at the beginning. During
+ the transition period, <c>net_kernel</c> ensures that
+ there is outgoing traffic on all connections at least
every <c>MTTI</c> millisecond.</p>
<note>
- <p>The <c>net_ticktime</c> changes have to be initiated on all
+ <p>The <c>net_ticktime</c> changes must be initiated on all
nodes in the network (with the same <c><anno>NetTicktime</anno></c>)
before the end of any transition period on any node;
- otherwise, connections may erroneously be disconnected.</p>
+ otherwise connections can erroneously be disconnected.</p>
</note>
<p>Returns one of the following:</p>
<taglist>
<tag><c>unchanged</c></tag>
<item>
- <p><c>net_ticktime</c> already had the value of
- <c><anno>NetTicktime</anno></c> and was left unchanged.</p>
+ <p><c>net_ticktime</c> already has the value of
+ <c><anno>NetTicktime</anno></c> and is left unchanged.</p>
</item>
<tag><c>change_initiated</c></tag>
<item>
- <p><c>net_kernel</c> has initiated the change of
- <c>net_ticktime</c> to <c><anno>NetTicktime</anno></c> seconds.</p>
+ <p><c>net_kernel</c> initiated the change of
+ <c>net_ticktime</c> to <c><anno>NetTicktime</anno></c>
+ seconds.</p>
</item>
<tag><c>{ongoing_change_to, <anno>NewNetTicktime</anno>}</c></tag>
<item>
- <p>The request was <em>ignored</em>; because,
- <c>net_kernel</c> was busy changing <c>net_ticktime</c> to
+ <p>The request is <em>ignored</em> because
+ <c>net_kernel</c> is busy changing <c>net_ticktime</c> to
<c><anno>NewNetTicktime</anno></c> seconds.</p>
</item>
</taglist>
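+ <p>For example, initiating a change of <c>net_ticktime</c> to
+ 30 seconds on a node where it is currently 60 seconds can look
+ as follows (return values are illustrative):</p>
+ <pre>
+1> <input>net_kernel:set_net_ticktime(30).</input>
+change_initiated
+2> <input>net_kernel:get_net_ticktime().</input>
+{ongoing_change_to,30}</pre>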
</desc>
</func>
+
+ <func>
+ <name name="setopts" arity="2"/>
+ <fsummary>Set distribution socket options.</fsummary>
+ <desc>
+ <p>Set one or more options for distribution sockets.
+ Argument <c><anno>Node</anno></c> can be either one node name
+ or the atom <c>new</c> to affect the distribution sockets of all
+ future connected nodes.</p>
+ <p>The return value is the same as from
+ <seealso marker="inet#setopts/2"><c>inet:setopts/2</c></seealso>
+ or <c>{error, noconnection}</c> if <c><anno>Node</anno></c> is not
+ a connected node or <c>new</c>.</p>
+ <p>If <c><anno>Node</anno></c> is <c>new</c>, the <c><anno>Options</anno></c>
+ are also added to Kernel configuration parameters
+ <seealso marker="kernel:kernel_app#inet_dist_listen_options">inet_dist_listen_options</seealso>
+ and
+ <seealso marker="kernel:kernel_app#inet_dist_connect_options">inet_dist_connect_options</seealso>.</p>
+ <p>Returns <c>ignored</c> if the local node is not alive.</p>
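+ <p>For example, a sketch of enabling <c>nodelay</c> on the
+ distribution sockets of all future connections (the option list
+ is illustrative only):</p>
+ <pre>
+1> <input>net_kernel:setopts(new, [{nodelay, true}]).</input>
+ok</pre>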
+ </desc>
+ </func>
+
<func>
<name>start([Name]) -> {ok, pid()} | {error, Reason}</name>
<name>start([Name, NameType]) -> {ok, pid()} | {error, Reason}</name>
<name>start([Name, NameType, Ticktime]) -> {ok, pid()} | {error, Reason}</name>
- <fsummary>Turn an Erlang runtime system into a distributed node</fsummary>
+ <fsummary>Turn an Erlang runtime system into a distributed node.</fsummary>
<type>
<v>Name = atom()</v>
<v>NameType = shortnames | longnames</v>
<v>Reason = {already_started, pid()} | term()</v>
</type>
<desc>
- <p>Note that the argument is a list with exactly one, two or
- three arguments. <c>NameType</c> defaults to <c>longnames</c>
- and <c>Ticktime</c> to 15000.</p>
<p>Turns a non-distributed node into a distributed node by
starting <c>net_kernel</c> and other necessary processes.</p>
+ <p>Notice that the argument is a list with exactly one, two, or
+ three elements. <c>NameType</c> defaults to <c>longnames</c>
+ and <c>Ticktime</c> to <c>15000</c>.</p>
</desc>
</func>
+
<func>
<name name="stop" arity="0"/>
- <fsummary>Turn a node into a non-distributed Erlang runtime system</fsummary>
+ <fsummary>Turn a node into a non-distributed Erlang runtime system.</fsummary>
<desc>
<p>Turns a distributed node into a non-distributed node. For
other nodes in the network, this is the same as the node
- going down. Only possible when the net kernel was started
- using <c>start/1</c>, otherwise returns
- <c>{error, not_allowed}</c>. Returns <c>{error, not_found}</c>
- if the local node is not alive.</p>
+ going down. Only possible when the net kernel was started using
+ <seealso marker="#start/1"><c>start/1</c></seealso>,
+ otherwise <c>{error, not_allowed}</c> is returned. Returns
+ <c>{error, not_found}</c> if the local node is not alive.</p>
</desc>
</func>
</funcs>
diff --git a/lib/kernel/doc/src/notes.xml b/lib/kernel/doc/src/notes.xml
index cf5777d4be..8188ede6a2 100644
--- a/lib/kernel/doc/src/notes.xml
+++ b/lib/kernel/doc/src/notes.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2004</year><year>2013</year>
+ <year>2004</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -31,6 +31,1104 @@
</header>
<p>This document describes the changes made to the Kernel application.</p>
+<section><title>Kernel 6.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ The values <c>all</c> and <c>none</c> are documented as
+ valid value for the Kernel configuration parameter
+ <c>logger_level</c>, but would cause a crash during node
+ start. This is now corrected.</p>
+ <p>
+ Own Id: OTP-15143</p>
+ </item>
+ <item>
+ <p>
+ Fix some potentially buggy behavior in how ticks are sent
+ on inter-node distribution connections. A tick is now sent
+ to a c-node even if there is unsent buffered data, as
+ c-nodes need ticks in order to send reply ticks. The
+ amount of sent data was also calculated wrongly when
+ ticks were suppressed due to unsent buffered data.</p>
+ <p>
+ Own Id: OTP-15162 Aux Id: ERIERL-191 </p>
+ </item>
+ <item>
+ <p>
+ Non-semantic change in dist_util.erl to silence a dialyzer
+ warning.</p>
+ <p>
+ Own Id: OTP-15170</p>
+ </item>
+ <item>
+ <p>
+ Fixed <c>net_kernel:connect_node(node())</c> to return
+ <c>true</c> (and do nothing) as it always has before
+ OTP-21.0. Also documented this successful "self connect"
+ as the expected behavior.</p>
+ <p>
+ Own Id: OTP-15182 Aux Id: ERL-643 </p>
+ </item>
+ <item>
+ <p>
+ The single_line option on logger_formatter would in some
+ cases add an unwanted comma after the association arrows
+ in a map. This is now corrected.</p>
+ <p>
+ Own Id: OTP-15228</p>
+ </item>
+ <item>
+ <p>
+ Improved robustness of distribution connection setup. In
+ OTP-21.0 a truly asynchronous connection setup was
+ introduced. This is a further improvement on that work to
+ make the emulator more robust and also be able to recover
+ in cases when involved Erlang processes misbehave.</p>
+ <p>
+ Own Id: OTP-15297 Aux Id: OTP-15279, OTP-15280 </p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ A new macro, <c>?LOG(Level,...)</c>, is added. This is
+ equivalent to the existing <c>?LOG_&lt;LEVEL&gt;(...)</c>
+ macros.</p>
+ <p>
+ A new variant of Logger report callback is added, which
+ takes an extra argument containing options for size
+ limiting and line breaks. Module <c>proc_lib</c> in
+ <c>STDLIB</c> uses this for crash reports.</p>
+ <p>
+ Logger configuration is now checked a bit more for
+ errors.</p>
+ <p>
+ Own Id: OTP-15132</p>
+ </item>
+ <item>
+ <p>
+ The socket options <c>recvtos</c>, <c>recvttl</c>,
+ <c>recvtclass</c> and <c>pktoptions</c> have been
+ implemented in the socket modules. See the documentation
+ for the <c>gen_tcp</c>, <c>gen_udp</c> and <c>inet</c>
+ modules. Note that support for these in the runtime
+ system is platform-dependent. This is especially true for
+ <c>pktoptions</c>, which is very Linux-specific and
+ obsoleted by the RFCs that defined it.</p>
+ <p>
+ Own Id: OTP-15145 Aux Id: ERIERL-187 </p>
+ </item>
+ <item>
+ <p>
+ Add <c>logger:set_application_level/2</c> for setting the
+ logger level of all modules in one application.</p>
+ <p>
+ Own Id: OTP-15146</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 6.0.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fixed bug in <c>net_kernel</c> that could cause an
+ emulator crash if certain connection attempts failed. Bug
+ exists since kernel-6.0 (OTP-21.0).</p>
+ <p>
+ Own Id: OTP-15280 Aux Id: ERIERL-226, OTP-15279 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 6.0</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p> Clarify the documentation of <c>rpc:multicall/5</c>.
+ </p>
+ <p>
+ Own Id: OTP-10551</p>
+ </item>
+ <item>
+ <p>
+ The DNS resolver, when getting econnrefused from a server,
+ retained an invalid socket, so lookup towards the next
+ server(s) also failed.</p>
+ <p>
+ Own Id: OTP-13133 Aux Id: PR-1557 </p>
+ </item>
+ <item>
+ <p>
+ No resolver backend returns V4Mapped IPv6 addresses any
+ more. This was inconsistent before; some did, some did
+ not. To facilitate working with such addresses, a new
+ function <c>inet:ipv4_mapped_ipv6_address/1</c> has been
+ added.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-13761 Aux Id: ERL-503 </p>
+ </item>
+ <item>
+ <p>
+ The type specifications for <c>file:posix/0</c> and
+ <c>inet:posix/0</c> have been updated according to which
+ errors file and socket operations should be able to
+ return.</p>
+ <p>
+ Own Id: OTP-14019 Aux Id: ERL-550 </p>
+ </item>
+ <item>
+ <p>
+ Fix name resolving in IPv6 only environments when doing
+ the initial distributed connection.</p>
+ <p>
+ Own Id: OTP-14501</p>
+ </item>
+ <item>
+ <p> File operations used to accept <seealso
+ marker="kernel:file#type-name_all">filenames</seealso>
+ containing null characters (integer value zero). This
+ caused the name to be truncated and in some cases
+ arguments to primitive operations to be mixed up.
+ Filenames containing null characters inside the filename
+ are now <em>rejected</em> and will cause primitive file
+ operations to fail. </p> <p> Also environment variable
+ operations used to accept <seealso
+ marker="kernel:os#type-env_var_name">names</seealso> and
+ <seealso
+ marker="kernel:os#type-env_var_value">values</seealso> of
+ environment variables containing null characters (integer
+ value zero). This caused operations to silently produce
+ erroneous results. Environment variable names and values
+ containing null characters inside the name or value are
+ now <em>rejected</em> and will cause environment variable
+ operations to fail. </p> <p>Primitive environment
+ variable operations also used to accept the <c>$=</c>
+ character in environment variable names causing various
+ problems. <c>$=</c> characters in environment variable
+ names are now also <em>rejected</em>. </p> <p>Also
+ <seealso
+ marker="kernel:os#cmd/1"><c>os:cmd/1</c></seealso> now
+ reject null characters inside its <seealso
+ marker="kernel:os#type-os_command">command</seealso>.
+ </p> <p><seealso
+ marker="erts:erlang#open_port/2"><c>erlang:open_port/2</c></seealso>
+ will also reject null characters inside the port name
+ from now on.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-14543 Aux Id: ERL-370 </p>
+ </item>
+ <item>
+ <p><c>os:putenv</c> and <c>os:getenv</c> no longer access
+ the process environment directly and instead work on a
+ thread-safe emulation. The only observable difference is
+ that it's <em>not</em> kept in sync with libc
+ <c>getenv(3)</c> / <c>putenv(3)</c>, so those who relied
+ on that behavior in drivers or NIFs will need to add
+ manual synchronization.</p> <p>On Windows this means that
+ you can no longer resolve DLL dependencies by modifying
+ the <c>PATH</c> just before loading the driver/NIF. To
+ make this less of a problem, the emulator now adds the
+ target DLL's folder to the DLL search path.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-14666</p>
+ </item>
+ <item>
+ <p>
+ Fixed connection tick toward primitive hidden nodes
+ (erl_interface) that could cause faulty tick timeout in
+ rare cases when payload data is sent to hidden node but
+ not received.</p>
+ <p>
+ Own Id: OTP-14681</p>
+ </item>
+ <item>
+ <p>
+ Make group react immediately to an EXIT signal from the shell
+ in, for example, ssh.</p>
+ <p>
+ Own Id: OTP-14991 Aux Id: PR1705 </p>
+ </item>
+ <item>
+ <p>
+ Calls to <c>gen_tcp:send/2</c> on closed sockets now
+ return <c>{error, closed}</c> instead of
+ <c>{error,enotconn}</c>.</p>
+ <p>
+ Own Id: OTP-15001</p>
+ </item>
+ <item>
+ <p>
+ The <c>included_applications</c> key is no longer
+ duplicated as an application environment variable. Earlier,
+ the included applications could be read both with
+ <c>application:get[_all]_env(...)</c> and
+ <c>application:get[_all]_key(...)</c> functions. Now, it
+ can only be read with
+ <c>application:get[_all]_key(...)</c>.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-15071</p>
+ </item>
+ <item>
+ <p>Owner and group changes through
+ <c>file:write_file_info</c>, <c>file:change_owner</c>,
+ and <c>file:change_group</c> will no longer report
+ success on permission errors.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-15118</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>A new logging API is added to Erlang/OTP, see the
+ <seealso
+ marker="kernel:logger"><c>logger(3)</c></seealso> manual
+ page, and section <seealso
+ marker="kernel:logger_chapter">Logging</seealso> in the
+ Kernel User's Guide.</p>
+ <p>Calls to <c>error_logger</c> are automatically
+ redirected to the new API, and legacy error logger event
+ handlers can still be used. It is, however, recommended
+ to use the Logger API directly when writing new code.</p>
+ <p>Notice the following potential incompatibilities:</p>
+ <list> <item><p>The Kernel configuration parameter
+ <c>error_logger</c> still works, but is overruled if the
+ default handler's output destination is configured with
+ Kernel configuration parameter <c>logger</c>.</p> <p>In
+ general, parameters for configuring error logger are
+ overwritten by new parameters for configuring
+ Logger.</p></item> <item><p>The concept of SASL error
+ logging is deprecated, meaning that by default the SASL
+ application does not affect which log events are
+ logged.</p> <p>By default, supervisor reports and crash
+ reports are logged by the default Logger handler started
+ by Kernel, and end up at the same destination (terminal
+ or file) as other standard log events from Erlang/OTP.</p>
+ <p>Progress reports are not logged by default, but can be
+ enabled by setting the primary log level to info, for
+ example with the Kernel configuration parameter
+ <c>logger_level</c>.</p> <p>To obtain backwards
+ compatibility with the SASL error logging functionality
+ from earlier releases, set Kernel configuration parameter
+ <c>logger_sasl_compatible</c> to <c>true</c>. This
+ prevents the default Logger handler from logging any
+ supervisor-, crash-, or progress reports. Instead, SASL
+ adds a separate Logger handler during application start,
+ which takes care of these log events. The SASL
+ configuration parameters <c>sasl_error_logger</c> and
+ <c>sasl_errlog_type</c> specify the destination (terminal
+ or file) and severity level to log for these
+ events.</p></item></list>
+ <p>
+ Since Logger is new in Erlang/OTP 21.0, we do reserve the
+ right to introduce changes to the Logger API and
+ functionality in patches following this release. These
+ changes might or might not be backwards compatible with
+ the initial version.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-13295</p>
+ </item>
+ <item>
+ <p>
+ The function <c>inet:i/0</c> has been documented.</p>
+ <p>
+ Own Id: OTP-13713 Aux Id: PR-1645 </p>
+ </item>
+ <item>
+ <p>
+ Typespecs for <c>netns</c> and <c>bind_to_device</c>
+ options have been added to <c>gen_tcp</c>, <c>gen_udp</c>
+ and <c>gen_sctp</c> functions.</p>
+ <p>
+ Own Id: OTP-14359 Aux Id: PR-1816 </p>
+ </item>
+ <item>
+ <p>
+ New functionality for implementation of alternative
+ carriers for the Erlang distribution has been introduced.
+ This mainly consists of support for usage of distribution
+ controller processes (previously only ports could be used
+ as distribution controllers). For more information see
+ <seealso marker="erts:alt_dist#distribution_module">ERTS
+ User's Guide ➜ How to implement an Alternative Carrier
+ for the Erlang Distribution ➜ Distribution
+ Module</seealso>.</p>
+ <p>
+ Own Id: OTP-14459</p>
+ </item>
+ <item>
+ <p><c>seq_trace</c> labels may now be any Erlang
+ term.</p>
+ <p>
+ Own Id: OTP-14899</p>
+ </item>
+ <item>
+ <p>
+ The SSL distribution protocol <c>-proto_dist inet_tls</c> has
+ stopped setting the SSL option
+ <c>server_name_indication</c>. New verify funs for client
+ and server in <c>inet_tls_dist</c> have been added, not yet
+ documented, that check the node name if present in the peer
+ certificate. Their usage is also yet to be documented.</p>
+ <p>
+ Own Id: OTP-14969 Aux Id: OTP-14465, ERL-598 </p>
+ </item>
+ <item>
+ <p>
+ Changed timeout of <c>gen_server</c> calls to <c>auth</c>
+ server from default 5 seconds to <c>infinity</c>.</p>
+ <p>
+ Own Id: OTP-15009 Aux Id: ERL-601 </p>
+ </item>
+ <item>
+ <p>The callback module passed as <c>-epmd_module</c> to
+ erl has been expanded to be able to do name and port
+ resolving.</p> <p>Documentation has also been added in
+ the <seealso
+ marker="kernel:erl_epmd"><c>erl_epmd</c></seealso>
+ reference manual and ERTS User's Guide <seealso
+ marker="erts:alt_disco">How to Implement an Alternative
+ Service Discovery for Erlang Distribution</seealso>.</p>
+ <p>
+ Own Id: OTP-15086 Aux Id: PR-1694 </p>
+ </item>
+ <item>
+ <p>
+ Included config files specified with relative paths in
+ sys.config are now first searched for relative to the
+ directory of sys.config itself. If not found, it is also
+ searched for relative to the current working directory.
+ The latter is for backwards compatibility.</p>
+ <p>
+ Own Id: OTP-15137 Aux Id: PR-1838 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.4.3.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Non-semantic change in dist_util.erl to silence a dialyzer
+ warning.</p>
+ <p>
+ Own Id: OTP-15170</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.4.3.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix some potentially buggy behavior in how ticks are sent
+ on inter-node distribution connections. A tick is now sent
+ to a c-node even if there is unsent buffered data, as
+ c-nodes need ticks in order to send reply ticks. The
+ amount of sent data was calculated wrongly when ticks
+ were suppressed due to unsent buffered data.</p>
+ <p>
+ Own Id: OTP-15162 Aux Id: ERIERL-191 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.4.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p> Correct a few contracts. </p>
+ <p>
+ Own Id: OTP-14889</p>
+ </item>
+ <item>
+ <p>
+ Reject loading modules with names containing directory
+ separators ('/' or '\' on Windows).</p>
+ <p>
+ Own Id: OTP-14933 Aux Id: ERL-564, PR-1716 </p>
+ </item>
+ <item>
+ <p>
+ Fix bug in handling of os:cmd/2 option max_size on
+ Windows.</p>
+ <p>
+ Own Id: OTP-14940</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.4.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Add <c>os:cmd/2</c> that takes an options map as the
+ second argument.</p>
+ <p>
+ Add <c>max_size</c> as an option to <c>os:cmd/2</c> that
+ controls the maximum size of the result that
+ <c>os:cmd/2</c> will return.</p>
+ <p>
+ Own Id: OTP-14823</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.4.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Refactored an internal API.</p>
+ <p>
+ Own Id: OTP-14784</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.4</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Processes which did output after switching jobs (Ctrl+G)
+ could be left forever stuck in the io request.</p>
+ <p>
+ Own Id: OTP-14571 Aux Id: ERL-472 </p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>Lock counting can now be fully toggled at runtime in
+ the lock counting emulator (<c>-emu_type lcnt</c>).
+ Everything is enabled by default to match the old
+ behavior, but specific categories can be toggled at will
+ with minimal runtime overhead when disabled. Refer to the
+ documentation on <c>lcnt:rt_mask/1</c> for details.</p>
+ <p>
+ Own Id: OTP-13170</p>
+ </item>
+ <item>
+ <p><c>lcnt:collect</c> and <c>lcnt:clear</c> will no
+ longer block all other threads in the runtime system.</p>
+ <p>
+ Own Id: OTP-14412</p>
+ </item>
+ <item>
+ <p>
+ General Unicode improvements.</p>
+ <p>
+ Own Id: OTP-14462</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.3.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>The documentation for the 'quiet' option in
+ disk_log:open/1 had an incorrect default value.</p>
+ <p>
+ Own Id: OTP-14498</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Function <c>inet:ntoa/1</c> has been fixed to return
+ lowercase letters according to RFC 5935, which was
+ approved after this function was written. Previously,
+ uppercase letters were returned, so this may be a
+ backwards incompatible change depending on how the
+ returned address string is used.</p>
+ <p>Function <c>inet:parse_address/1</c> has been fixed to
+ accept %-suffixes on scoped addresses. The addresses do
+ not work yet, but give no parse errors.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-13006 Aux Id: ERIERL-20, ERL-429 </p>
+ </item>
+ <item>
+ <p>
+ Fix bug where gethostname would incorrectly fail with
+ enametoolong on Linux.</p>
+ <p>
+ Own Id: OTP-14310</p>
+ </item>
+ <item>
+ <p>
+ Fix bug causing <c>code:is_module_native</c> to falsely
+ return true when <c>local</c> call trace is enabled for
+ the module.</p>
+ <p>
+ Own Id: OTP-14390</p>
+ </item>
+ <item>
+ <p>
+ Add early reject of invalid node names from distributed
+ nodes.</p>
+ <p>
+ Own Id: OTP-14426</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ Since Unicode is now allowed in atoms, an extra check is
+ needed for node names, which are restricted to Latin-1.</p>
+ <p>
+ Own Id: OTP-13805</p>
+ </item>
+ <item>
+ <p>Replaced usage of deprecated symbolic <seealso
+ marker="erts:erlang#type-time_unit"><c>time
+ unit</c></seealso> representations.</p>
+ <p>
+ Own Id: OTP-13831 Aux Id: OTP-13735 </p>
+ </item>
+ <item>
+ <p><c>file:write_file(Name, Data, [raw])</c> would turn
+ <c>Data</c> into a single binary before writing. This
+ meant it could not take advantage of the <c>writev()</c>
+ system call if it was given a list of binaries and told
+ to write with <c>raw</c> mode.</p>
+ <p>
+ Own Id: OTP-13909</p>
+ </item>
+ <item>
+ <p>The performance of the <c>disk_log</c> has been
+ somewhat improved in some corner cases (big items), and
+ the documentation has been clarified. </p>
+ <p>
+ Own Id: OTP-14057 Aux Id: PR-1245 </p>
+ </item>
+ <item>
+ <p>Functions for detecting changed code have been added.
+ <c>code:modified_modules/0</c> returns all currently
+ loaded modules that have changed on disk.
+ <c>code:module_status/1</c> returns the status for a
+ module. In the shell and in <c>c</c> module, <c>mm/0</c>
+ is short for <c>code:modified_modules/0</c>, and
+ <c>lm/0</c> reloads all currently loaded modules that
+ have changed on disk.</p>
+ <p>
+ Own Id: OTP-14059</p>
+ </item>
+ <item>
+ <p>
+ Introduce an event manager in Erlang to handle OS
+ signals. A subset of OS signals may be subscribed to and
+ those are described in the Kernel application.</p>
+ <p>
+ Own Id: OTP-14186</p>
+ </item>
+ <item>
+ <p> Sockets can now be bound to a device (SO_BINDTODEVICE)
+ on platforms where it is supported. </p> <p> This has
+ been implemented, for example, to support VRF-Lite under Linux; see
+ <url
+ href="https://www.kernel.org/doc/Documentation/networking/vrf.txt">
+ VRF </url>, and GitHub pull request <url
+ href="https://github.com/erlang/otp/pull/1326">#1326</url>.
+ </p>
+ <p>
+ Own Id: OTP-14357 Aux Id: PR-1326 </p>
+ </item>
+ <item>
+ <p>
+ Added option to store shell_history on disk so that the
+ history can be reused between sessions.</p>
+ <p>
+ Own Id: OTP-14409 Aux Id: PR-1420 </p>
+ </item>
+ <item>
+ <p> The size of crash reports created by
+ <c>gen_server</c>, <c>gen_statem</c> and <c>proc_lib</c>
+ is limited with aid of the Kernel application variable
+ <c>error_logger_format_depth</c>. The purpose is to limit
+ the size of the messages sent to the <c>error_logger</c>
+ process when processes with huge message queues or states
+ crash. </p> <p>The crash report generated by
+ <c>proc_lib</c> includes the new tag
+ <c>message_queue_len</c>. The neighbour report also
+ includes the new tag <c>current_stacktrace</c>. Finally,
+ the neighbour report no longer includes the tags
+ <c>messages</c> and <c>dictionary</c>. </p> <p> The new
+ function <c>error_logger:get_format_depth/0</c> can be
+ used to retrieve the value of the Kernel application
+ variable <c>error_logger_format_depth</c>. </p>
+ <p>
+ Own Id: OTP-14417</p>
+ </item>
+ <item>
+ <p> One of the ETS tables used by the <c>global</c>
+ module is created with <c>{read_concurrency, true}</c> in
+ order to reduce contention. </p>
+ <p>
+ Own Id: OTP-14419</p>
+ </item>
+ <item>
+ <p>
+ Warnings have been added to the relevant documentation
+ about not using insecure distributed nodes in exposed
+ environments.</p>
+ <p>
+ Own Id: OTP-14425</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix a race during cleanup of os:cmd that would cause
+ os:cmd to hang indefinitely.</p>
+ <p>
+ Own Id: OTP-14232 Aux Id: seq13275 </p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>The functions in the '<c>file</c>' module that take a
+ list of paths (e.g. <c>file:path_consult/2</c>) will now
+ continue to search in the path if the path contains
+ something that is not a directory.</p>
+ <p>
+ Own Id: OTP-14191</p>
+ </item>
+ <item>
+ <p>Two OTP processes that are known to receive many
+ messages are 'rex' (used by 'rpc') and 'error_logger'.
+ Those processes will now store unprocessed messages
+ outside the process heap, which will potentially decrease
+ the cost of garbage collections.</p>
+ <p>
+ Own Id: OTP-14192</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.1.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ <c>code:add_pathsa/1</c> and command line option
+ <c>-pa</c> both reverse the given list of directories when
+ adding it at the beginning of the code path. This is now
+ documented.</p>
+ <p>
+ Own Id: OTP-13920 Aux Id: ERL-267 </p>
+ </item>
+ <item>
+ <p>
+ Add lost runtime dependency to erts-8.1. This should have
+ been done in kernel-5.1 (OTP-19.1) as it cannot run
+ without at least erts-8.1 (OTP-19.1).</p>
+ <p>
+ Own Id: OTP-14003</p>
+ </item>
+ <item>
+ <p>
+ Type and doc for gen_{tcp,udp,sctp}:controlling_process/2
+ have been improved.</p>
+ <p>
+ Own Id: OTP-14022 Aux Id: PR-1208 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix a memory leak when calling
+ seq_trace:get_system_tracer().</p>
+ <p>
+ Own Id: OTP-13742</p>
+ </item>
+ <item>
+ <p>
+ Fix for the problem that when adding the ebin directory
+ of an application to the code path, the
+ <c>code:priv_dir/1</c> function returns an incorrect path
+ to the priv directory of the same application.</p>
+ <p>
+ Own Id: OTP-13758 Aux Id: ERL-195 </p>
+ </item>
+ <item>
+ <p>
+ Fix code_server crash when adding code paths of two
+ levels.</p>
+ <p>
+ Own Id: OTP-13765 Aux Id: ERL-194 </p>
+ </item>
+ <item>
+ <p>
+ Respect the -proto_dist switch while connecting to EPMD</p>
+ <p>
+ Own Id: OTP-13770 Aux Id: PR-1129 </p>
+ </item>
+ <item>
+ <p>
+ Fixed a bug where init:stop could deadlock if a process
+ with infinite shutdown timeout (e.g. a supervisor)
+ attempted to load code while terminating.</p>
+ <p>
+ Own Id: OTP-13802</p>
+ </item>
+ <item>
+ <p>
+ Close stdin of commands run in os:cmd. This is a
+ backwards compatibility fix that restores the behaviour of
+ pre-19.0 os:cmd.</p>
+ <p>
+ Own Id: OTP-13867 Aux Id: seq13178 </p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ Add <c>net_kernel:setopts/2</c> and
+ <c>net_kernel:getopts/2</c> to control options for
+ distribution sockets in runtime.</p>
+ <p>
+ Own Id: OTP-13564</p>
+ </item>
+ <item>
+ <p>
+ Rudimentary support for DSCP has been implemented
+ in the guise of a <c>tclass</c> socket option
+ for IPv6 sockets.</p>
+ <p>
+ Own Id: OTP-13582</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.0.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ When calling os:cmd from a process that has set trap_exit
+ to true, an 'EXIT' message would be left in the message
+ queue. This bug was introduced in kernel vsn 5.0.1.</p>
+ <p>
+ Own Id: OTP-13813</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.0.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix an os:cmd bug where creating a background job using
+ &amp; would cause os:cmd to hang until the background job
+ terminated or closed its stdout and stderr file
+ descriptors. This bug has existed from kernel 5.0.</p>
+ <p>
+ Own Id: OTP-13741</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.0</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>The handling of <c>on_load</c> functions has been
+ improved. The major improvement is that if a code upgrade
+ fails because the <c>on_load</c> function fails, the
+ previous version of the module will now be retained.</p>
+ <p>
+ Own Id: OTP-12593</p>
+ </item>
+ <item>
+ <p><c>rpc:call()</c> and <c>rpc:block_call()</c> would
+ sometimes cause an exception (which was not mentioned in
+ the documentation). This has been corrected so that
+ <c>{badrpc,Reason}</c> will be returned instead.</p>
+ <p>
+ Own Id: OTP-13409</p>
+ </item>
+ <item>
+ <p>On Windows, for modules that were loaded early (such
+ as the <c>lists</c> module), <c>code:which/1</c> would
+ return the path with mixed slashes and backslashes, for
+ example: <c>"C:\\Program
+ Files\\erl8.0/lib/stdlib-2.7/ebin/lists.beam"</c>. This
+ has been corrected.</p>
+ <p>
+ Own Id: OTP-13410</p>
+ </item>
+ <item>
+ <p>
+ Make file:datasync use fsync instead of fdatasync on Mac
+ OSX.</p>
+ <p>
+ Own Id: OTP-13411</p>
+ </item>
+ <item>
+ <p>
+ The default chunk size for the fallback sendfile
+ implementation, used on platforms that do not have a
+ native sendfile, has been decreased in order to reduce
+ connectivity issues.</p>
+ <p>
+ Own Id: OTP-13444</p>
+ </item>
+ <item>
+ <p>
+ Large file writes (2Gb or more) could fail on some Unix
+ platforms (for example, OS X and FreeBSD).</p>
+ <p>
+ Own Id: OTP-13461</p>
+ </item>
+ <item>
+ <p>
+ A bug has been fixed where the DNS resolver inet_res did
+ not refresh its view of the contents of, for example,
+ resolv.conf immediately after start and hence failed
+ name resolution. Reported and fix suggested by Michal
+ Ptaszek in GitHub pull request #949.</p>
+ <p>
+ Own Id: OTP-13470 Aux Id: Pull #969 </p>
+ </item>
+ <item>
+ <p>
+ Fix process leak from global_group.</p>
+ <p>
+ Own Id: OTP-13516 Aux Id: PR-1008 </p>
+ </item>
+ <item>
+ <p>
+ The function <c>inet:gethostbyname/1</c> now honors the
+ resolver option <c>inet6</c> instead of always looking up
+ IPv4 addresses.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-13622 Aux Id: PR-1065 </p>
+ </item>
+ <item>
+ <p>
+ The <c>Status</c> argument to <c>init:stop/1</c> is now
+ sanity checked to make sure <c>erlang:halt</c> does not
+ fail.</p>
+ <p>
+ Own Id: OTP-13631 Aux Id: PR-911 </p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ Add {line_delim, byte()} option to inet:setopts/2 and
+ decode_packet/3</p>
+ <p>
+ Own Id: OTP-12837</p>
+ </item>
+ <item>
+ <p>
+ Added <seealso
+ marker="kernel:os#perf_counter/1">os:perf_counter/1</seealso>.</p>
+ <p>
+ The perf_counter is a very cheap and high-resolution
+ timer that can be used to timestamp system events. It
+ does not have monotonicity guarantees, but should on most
+ OSs expose a monotonic time.</p>
+ <p>
+ Own Id: OTP-12908</p>
+ </item>
+ <item>
+ <p>
+ The os:cmd call has been optimized on Unix platforms to
+ scale better with the number of schedulers.</p>
+ <p>
+ Own Id: OTP-13089</p>
+ </item>
+ <item>
+ <p>New functions that can load multiple modules at once
+ have been added to the '<c>code</c>' module. The
+ functions are <c>code:atomic_load/1</c>,
+ <c>code:prepare_loading/1</c>,
+ <c>code:finish_loading/1</c>, and
+ <c>code:ensure_modules_loaded/1</c>.</p>
+ <p>
+ Own Id: OTP-13111</p>
+ </item>
+ <item>
+ <p>
+ The code path cache feature turned out not to be very
+ useful in practice and has been removed. If an attempt is
+ made to enable the code path cache, there will be a
+ warning report informing the user that the feature has
+ been removed.</p>
+ <p>
+ Own Id: OTP-13191</p>
+ </item>
+ <item>
+ <p>When an attempt is made to start a distributed Erlang
+ node with the same name as an existing node, the error
+ message will be much shorter and easier to read than
+ before. Example:</p>
+ <p><c>Protocol 'inet_tcp': the name somename@somehost
+ seems to be in use by another Erlang node</c></p>
+ <p>
+ Own Id: OTP-13294</p>
+ </item>
+ <item>
+ <p>
+ The output of the default error logger is somewhat
+ prettier and easier to read. The default error logger is
+ used during start-up of the OTP system. If the start-up
+ fails, the output will be easier to read.</p>
+ <p>
+ Own Id: OTP-13325</p>
+ </item>
+ <item>
+ <p>The functions <c>rpc:safe_multi_server_call/2,3</c>
+ that were deprecated in R12B have been removed.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-13449</p>
+ </item>
+ <item>
+ <p>
+ Update the error reasons in dist_util, and show them in
+ the logs if net_kernel:verbose(1) has been called.</p>
+ <p>
+ Own Id: OTP-13458</p>
+ </item>
+ <item>
+ <p>
+ Experimental support for Unix Domain Sockets has been
+ implemented. Read the sources if you want to try it out.
+ Example: <c>gen_udp:open(0,
+ [{ifaddr,{local,"/tmp/socket"}}])</c>. Documentation will
+ be written after user feedback on the experimental API.</p>
+ <p>
+ Own Id: OTP-13572 Aux Id: PR-612 </p>
+ </item>
+ <item>
+ <p>
+ Allow heart to be configured to not kill the previous
+ emulator before calling the HEART_COMMAND. This is done
+ by setting the environment variable HEART_NO_KILL to
+ TRUE.</p>
+ <p>
+ Own Id: OTP-13650</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Kernel 4.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -623,8 +1721,7 @@
Erlang/OTP has been ported to the realtime operating
system OSE. The port supports both smp and non-smp
emulator. For details around the port and how to started
- see the User's Guide in the <seealso
- marker="ose:ose_intro">ose</seealso> application. </p>
+ see the User's Guide in the ose application. </p>
<p>
Note that not all parts of Erlang/OTP has been ported. </p>
<p>
@@ -1113,7 +2210,7 @@
dependent, so applications aiming to be portable should
consider using <c>{ipv6_v6only,true}</c> when creating an
<c>inet6</c> listening/destination socket, and if
- neccesary also create an <c>inet</c> socket on the same
+ necessary also create an <c>inet</c> socket on the same
port for IPv4 traffic. See the documentation.</p>
<p>
Own Id: OTP-8928 Aux Id: kunagi-193 [104] </p>
@@ -3251,7 +4348,7 @@
types (for instance, <c>ensure_loaded/1</c> now only
accepts an atom as documented; it used to accept a string
too).</p>
- <p><c>Dialyzer</c> will generally emit warnings for any
+ <p>Dialyzer will generally emit warnings for any
calls that use undocumented argument types. Even if the
call happens to still work in R12B, you should correct
your code. A future release will adhere to the
diff --git a/lib/kernel/doc/src/notes_history.xml b/lib/kernel/doc/src/notes_history.xml
index 97ff3e7400..a6de349311 100644
--- a/lib/kernel/doc/src/notes_history.xml
+++ b/lib/kernel/doc/src/notes_history.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2006</year><year>2013</year>
+ <year>2006</year><year>2016</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
diff --git a/lib/kernel/doc/src/os.xml b/lib/kernel/doc/src/os.xml
index 682d4a2eac..c95e615c6b 100644
--- a/lib/kernel/doc/src/os.xml
+++ b/lib/kernel/doc/src/os.xml
@@ -4,14 +4,14 @@
<erlref>
<header>
<copyright>
- <year>1997</year><year>2013</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-
+
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
@@ -19,7 +19,7 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
+
</legalnotice>
<title>os</title>
@@ -29,97 +29,219 @@
<rev></rev>
</header>
<module>os</module>
- <modulesummary>Operating System Specific Functions</modulesummary>
+ <modulesummary>Operating system-specific functions.</modulesummary>
<description>
- <p>The functions in this module are operating system specific.
- Careless use of these functions will result in programs that will
+ <p>The functions in this module are operating system-specific.
+ Careless use of these functions results in programs that will
only run on a specific platform. On the other hand, with careful
- use these functions can be of help in enabling a program to run on
+ use, these functions can be of help in enabling a program to run on
most platforms.</p>
+
+ <note>
+ <p>
+ File operations used to accept filenames containing
+ null characters (integer value zero). This caused
+ the name to be truncated and in some cases arguments
+ to primitive operations to be mixed up. Filenames
+ containing null characters inside the filename
+ are now <em>rejected</em> and will cause primitive
+ file operations to fail.
+ </p>
+ <p>
+ Also environment variable operations used to accept
+ names and values of environment variables containing
+ null characters (integer value zero). This caused
+ operations to silently produce erroneous results.
+ Environment variable names and values containing
+ null characters inside the name or value are now
+ <em>rejected</em> and will cause environment variable
+ operations to fail.
+ </p>
+ </note>
</description>
+
+ <datatypes>
+ <datatype>
+ <name name="env_var_name"/>
+ <desc>
+ <p>A string containing valid characters on the specific
+ OS for environment variable names using
+ <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+ encoding. Note that specifically null characters (integer
+ value zero) and <c>$=</c> characters are not allowed.
+ However, note that not all invalid characters necessarily
+ cause the primitive operations to fail; they may instead
+ produce invalid results.
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="env_var_value"/>
+ <desc>
+ <p>A string containing valid characters on the specific
+ OS for environment variable values using
+ <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+ encoding. Note that specifically null characters (integer
+ value zero) are not allowed. However, note that not all
+ invalid characters necessarily cause the primitive
+ operations to fail; they may instead produce invalid results.
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="env_var_name_value"/>
+ <desc>
+ <p>
+ Assuming that environment variables have been correctly
+ set, a string containing valid characters on the specific
+ OS for environment variable names and values using
+ <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+ encoding. The first <c>$=</c> character appearing in
+ the string separates the environment variable name (on the
+ left) from the environment variable value (on the right).
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="os_command"/>
+ <desc>
+ <p>All characters need to be valid characters on the
+ specific OS using
+ <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+ encoding. Note that specifically null characters (integer
+ value zero) are not allowed. However, note that not all
+ invalid characters necessarily cause
+ <seealso marker="#cmd/1"><c>os:cmd/1</c></seealso>
+ to fail; they may instead produce invalid results.
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="os_command_opts"/>
+ <desc>
+ <p>Options for <seealso marker="#cmd/2"><c>os:cmd/2</c></seealso></p>
+ <taglist>
+ <tag><c>max_size</c></tag>
+ <item>
+ <p>The maximum size of the data returned by the <c>os:cmd</c> call.
+ See the <seealso marker="#cmd/2"><c>os:cmd/2</c></seealso>
+ documentation for more details.</p>
+ </item>
+ </taglist>
+ </desc>
+ </datatype>
+ </datatypes>
+
<funcs>
<func>
<name name="cmd" arity="1"/>
- <fsummary>Execute a command in a shell of the target OS</fsummary>
+ <name name="cmd" arity="2"/>
+ <fsummary>Execute a command in a shell of the target OS.</fsummary>
<desc>
- <p>Executes <c><anno>Command</anno></c> in a command shell of the target OS,
- captures the standard output of the command and returns this
- result as a string. This function is a replacement of
- the previous <c>unix:cmd/1</c>; on a Unix platform they are
- equivalent.</p>
- <p>Examples:</p>
+ <p>Executes <c><anno>Command</anno></c> in a command shell of the
+ target OS, captures the standard output of the command,
+ and returns this result as a string.</p>
+ <warning><p>The previous implementation allowed all characters
+ as long as they were integer values greater than or equal to zero.
+ This sometimes led to unwanted results, since null characters
+ (integer value zero) are often interpreted as string termination. The
+ current implementation rejects these.</p></warning>
+ <p><em>Examples:</em></p>
<code type="none">
LsOut = os:cmd("ls"), % on unix platform
DirOut = os:cmd("dir"), % on Win32 platform</code>
- <p>Note that in some cases, standard output of a command when
+ <p>Notice that in some cases, standard output of a command when
called from another program (for example, <c>os:cmd/1</c>)
- may differ, compared to the standard output of the command
+ can differ, compared with the standard output of the command
when called directly from an OS command shell.</p>
+ <p><c>os:cmd/2</c> was added in kernel-5.5 (OTP-20.2.1). It makes it
+ possible to pass an options map as the second argument in order to
+ control the behaviour of <c>os:cmd</c>. The possible options are:
+ </p>
+ <taglist>
+ <tag><c>max_size</c></tag>
+ <item>
+ <p>The maximum size of the data returned by the <c>os:cmd</c> call.
+ This option is a safety feature that should be used when the command
+ executed can return a very large, possibly infinite, result.</p>
+ <code type="none">
+> os:cmd("cat /dev/zero", #{ max_size => 20 }).
+[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]</code>
+ </item>
+ </taglist>
</desc>
</func>
+
<func>
<name name="find_executable" arity="1"/>
<name name="find_executable" arity="2"/>
- <fsummary>Absolute filename of a program</fsummary>
+ <fsummary>Absolute filename of a program.</fsummary>
<desc>
- <p>These two functions look up an executable program given its
- name and a search path, in the same way as the underlying
- operating system. <c>find_executable/1</c> uses the current
- execution path (that is, the environment variable PATH on
+ <p>These two functions look up an executable program, with the
+ specified name and a search path, in the same way as the underlying
+ OS. <c>find_executable/1</c> uses the current
+ execution path (that is, the environment variable <c>PATH</c> on
Unix and Windows).</p>
- <p><c><anno>Path</anno></c>, if given, should conform to the syntax of
- execution paths on the operating system. The absolute
- filename of the executable program <c><anno>Name</anno></c> is returned,
- or <c>false</c> if the program was not found.</p>
+ <p><c><anno>Path</anno></c>, if specified, is to conform to the syntax
+ of execution paths on the OS. Returns the absolute filename of the
+ executable program <c><anno>Name</anno></c>,
+ or <c>false</c> if the program is not found.</p>
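+        <p>For example, on a Unix system (the returned path is illustrative
+        and depends on the installation):</p>
+        <pre>
+1> <input>os:find_executable("erl").</input>
+"/usr/local/bin/erl"</pre>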
</desc>
</func>
+
<func>
<name name="getenv" arity="0"/>
- <fsummary>List all environment variables</fsummary>
+ <fsummary>List all environment variables.</fsummary>
<desc>
<p>Returns a list of all environment variables.
- Each environment variable is given as a single string on
+ Each environment variable is expressed as a single string on
the format <c>"VarName=Value"</c>, where <c>VarName</c> is
the name of the variable and <c>Value</c> its value.</p>
- <p>If Unicode file name encoding is in effect (see the <seealso
- marker="erts:erl#file_name_encoding">erl manual
- page</seealso>), the strings may contain characters with
- codepoints > 255.</p>
+ <p>If Unicode filename encoding is in effect (see the
+ <seealso marker="erts:erl#file_name_encoding"><c>erl</c> manual
+ page</seealso>), the strings can contain characters with
+ codepoints &gt; 255.</p>
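+        <p>For example (illustrative output, shortened; a real environment
+        usually contains many more variables):</p>
+        <pre>
+1> <input>os:getenv().</input>
+["HOME=/home/otp","LANG=en_US.UTF-8","TERM=xterm"]</pre>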
</desc>
</func>
+
<func>
<name name="getenv" arity="1"/>
- <fsummary>Get the value of an environment variable</fsummary>
+ <fsummary>Get the value of an environment variable.</fsummary>
<desc>
<p>Returns the <c><anno>Value</anno></c> of the environment variable
- <c><anno>VarName</anno></c>, or <c>false</c> if the environment variable
- is undefined.</p>
- <p>If Unicode file name encoding is in effect (see the <seealso
- marker="erts:erl#file_name_encoding">erl manual
- page</seealso>), the strings (both <c><anno>VarName</anno></c> and
- <c><anno>Value</anno></c>) may contain characters with codepoints > 255.</p>
+ <c><anno>VarName</anno></c>, or <c>false</c> if the environment
+ variable is undefined.</p>
+ <p>If Unicode filename encoding is in effect (see the
+ <seealso marker="erts:erl#file_name_encoding"><c>erl</c> manual
+ page</seealso>), the strings <c><anno>VarName</anno></c> and
+ <c><anno>Value</anno></c> can contain characters with
+ codepoints &gt; 255.</p>
</desc>
</func>
+
<func>
<name name="getenv" arity="2"/>
- <fsummary>Get the value of an environment variable</fsummary>
+ <fsummary>Get the value of an environment variable.</fsummary>
<desc>
<p>Returns the <c><anno>Value</anno></c> of the environment variable
- <c><anno>VarName</anno></c>, or <c>DefaultValue</c> if the environment variable
- is undefined.</p>
- <p>If Unicode file name encoding is in effect (see the <seealso
- marker="erts:erl#file_name_encoding">erl manual
- page</seealso>), the strings (both <c><anno>VarName</anno></c> and
- <c><anno>Value</anno></c>) may contain characters with codepoints > 255.</p>
+ <c><anno>VarName</anno></c>, or <c>DefaultValue</c> if the
+ environment variable is undefined.</p>
+ <p>If Unicode filename encoding is in effect (see the
+ <seealso marker="erts:erl#file_name_encoding"><c>erl</c> manual
+ page</seealso>), the strings <c><anno>VarName</anno></c> and
+ <c><anno>Value</anno></c> can contain characters with
+ codepoints &gt; 255.</p>
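+        <p>For example (illustrative values):</p>
+        <pre>
+1> <input>os:getenv("SHELL").</input>
+"/bin/sh"
+2> <input>os:getenv("NO_SUCH_VARIABLE", "default").</input>
+"default"</pre>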
</desc>
</func>
+
<func>
<name name="getpid" arity="0"/>
- <fsummary>Return the process identifier of the emulator process</fsummary>
+ <fsummary>Return the process identifier of the emulator
+ process.</fsummary>
<desc>
<p>Returns the process identifier of the current Erlang emulator
- in the format most commonly used by the operating system
- environment. <c><anno>Value</anno></c> is returned as a string containing
+ in the format most commonly used by the OS environment.
+ Returns <c><anno>Value</anno></c> as a string containing
the (usually) numerical identifier for a process. On Unix,
this is typically the return value of the <c>getpid()</c>
system call. On Windows,
@@ -127,125 +249,192 @@ DirOut = os:cmd("dir"), % on Win32 platform</code>
system call is used.</p>
</desc>
</func>
+
<func>
<name name="putenv" arity="2"/>
- <fsummary>Set a new value for an environment variable</fsummary>
+ <fsummary>Set a new value for an environment variable.</fsummary>
<desc>
- <p>Sets a new <c><anno>Value</anno></c> for the environment variable
+ <p>Sets a new <c><anno>Value</anno></c> for environment variable
<c><anno>VarName</anno></c>.</p>
- <p>If Unicode filename encoding is in effect (see the <seealso
- marker="erts:erl#file_name_encoding">erl manual
- page</seealso>), the strings (both <c><anno>VarName</anno></c> and
- <c><anno>Value</anno></c>) may contain characters with codepoints > 255.</p>
- <p>On Unix platforms, the environment will be set using UTF-8 encoding
- if Unicode file name translation is in effect. On Windows the
- environment is set using wide character interfaces.</p>
+ <p>If Unicode filename encoding is in effect (see the
+ <seealso marker="erts:erl#file_name_encoding"><c>erl</c> manual
+ page</seealso>), the strings <c><anno>VarName</anno></c> and
+ <c><anno>Value</anno></c> can contain characters with
+ codepoints &gt; 255.</p>
+ <p>On Unix platforms, the environment is set using UTF-8 encoding
+ if Unicode filename translation is in effect. On Windows, the
+ environment is set using wide character interfaces.</p>
+ <note>
+ <p>
+          <c><anno>VarName</anno></c> is not allowed to contain
+          an <c>$=</c> character. Previous implementations let
+          the <c>$=</c> character through, which silently
+          caused erroneous results. The current implementation
+          instead throws a <c>badarg</c> exception.
+ </p>
+ </note>
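+        <p>Example of setting and reading back a variable (the name
+        <c>"FOO"</c> is used only for illustration):</p>
+        <pre>
+1> <input>os:putenv("FOO", "bar").</input>
+true
+2> <input>os:getenv("FOO").</input>
+"bar"</pre>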
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_signal" arity="2"/>
+ <fsummary>Enables or disables handling of OS signals.</fsummary>
+ <desc>
+      <p>Enables or disables handling of OS signals.</p>
+      <p>Each signal can be set to one of the following options:</p>
+ <taglist>
+ <tag><c>ignore</c></tag>
+ <item>
+ This signal will be ignored.
+ </item>
+
+ <tag><c>default</c></tag>
+ <item>
+ This signal will use the default signal handler for the operating system.
+ </item>
+
+ <tag><c>handle</c></tag>
+ <item>
+ This signal will notify
+ <seealso marker="kernel_app#erl_signal_server"><c>erl_signal_server</c></seealso>
+ when it is received by the Erlang runtime system.
+ </item>
+ </taglist>
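+      <p>For example, to let
+      <seealso marker="kernel_app#erl_signal_server"><c>erl_signal_server</c></seealso>
+      be notified about <c>sighup</c> (a minimal sketch; handling the
+      resulting notification is application-specific):</p>
+      <code type="none">
+ok = os:set_signal(sighup, handle).</code>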
</desc>
</func>
+
<func>
<name name="system_time" arity="0"/>
- <fsummary>Current OS system time</fsummary>
+ <fsummary>Current OS system time.</fsummary>
<desc>
- <p>Returns current
+ <p>Returns the current
<seealso marker="erts:time_correction#OS_System_Time">OS system time</seealso>
in <c>native</c>
<seealso marker="erts:erlang#type_time_unit">time unit</seealso>.</p>
-
- <note><p>This time is <em>not</em> a monotonically increasing time.</p></note>
+ <note><p>This time is <em>not</em> a monotonically increasing time.</p>
+ </note>
</desc>
</func>
+
<func>
<name name="system_time" arity="1"/>
- <fsummary>Current OS system time</fsummary>
+ <fsummary>Current OS system time.</fsummary>
<desc>
- <p>Returns current
+ <p>Returns the current
<seealso marker="erts:time_correction#OS_System_Time">OS system time</seealso>
converted into the <c><anno>Unit</anno></c> passed as argument.</p>
-
- <p>Calling <c>os:system_time(<anno>Unit</anno>)</c> is equivalent to:
- <seealso marker="erts:erlang#convert_time_unit/3"><c>erlang:convert_time_unit</c></seealso><c>(</c><seealso marker="#system_time/0"><c>os:system_time()</c></seealso><c>,
+ <p>Calling <c>os:system_time(<anno>Unit</anno>)</c> is equivalent to
+ <seealso marker="erts:erlang#convert_time_unit/3"><c>erlang:convert_time_unit</c></seealso>(<seealso marker="#system_time/0"><c>os:system_time()</c></seealso><c>,
native, <anno>Unit</anno>)</c>.</p>
-
- <note><p>This time is <em>not</em> a monotonically increasing time.</p></note>
+ <note><p>This time is <em>not</em> a monotonically increasing time.</p>
+ </note>
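+        <p>For example, to get the OS system time in microseconds
+        (the value shown is illustrative):</p>
+        <pre>
+1> <input>os:system_time(microsecond).</input>
+1526987778151392</pre>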
</desc>
</func>
+
<func>
<name name="timestamp" arity="0"/>
- <fsummary>Current OS system time on the erlang:timestamp/0 format</fsummary>
+ <fsummary>Current OS system time on the <c>erlang:timestamp/0</c> format.</fsummary>
<type_desc variable="Timestamp">Timestamp = {MegaSecs, Secs, MicroSecs}</type_desc>
<desc>
- <p>Returns current
+ <p>Returns the current
<seealso marker="erts:time_correction#OS_System_Time">OS system time</seealso>
- in the same format as <seealso marker="erts:erlang#timestamp/0">erlang:timestamp/0</seealso>.
- The tuple can be used together with the function
- <seealso marker="stdlib:calendar#now_to_universal_time/1">calendar:now_to_universal_time/1</seealso>
- or <seealso marker="stdlib:calendar#now_to_local_time/1">calendar:now_to_local_time/1</seealso> to
- get calendar time. Using the calendar time together with the <c>MicroSecs</c> part of the return
- tuple from this function allows you to log timestamps in high resolution and consistent with the
- time in the rest of the operating system.</p>
- <p>Example of code formatting a string in the format &quot;DD Mon YYYY HH:MM:SS.mmmmmm&quot;, where
- DD is the day of month, Mon is the textual month name, YYYY is the year, HH:MM:SS is the time and
- mmmmmm is the microseconds in six positions:</p>
-<code>
+ in the same format as
+ <seealso marker="erts:erlang#timestamp/0"><c>erlang:timestamp/0</c></seealso>.
+ The tuple can be used together with function
+ <seealso marker="stdlib:calendar#now_to_universal_time/1"><c>calendar:now_to_universal_time/1</c></seealso>
+ or <seealso marker="stdlib:calendar#now_to_local_time/1"><c>calendar:now_to_local_time/1</c></seealso>
+ to get calendar time. Using the calendar time, together with the
+ <c>MicroSecs</c> part of the return tuple from this function, allows
+ you to log time stamps in high resolution and consistent with the
+ time in the rest of the OS.</p>
+ <p>Example of code formatting a string in format
+ &quot;DD Mon YYYY HH:MM:SS.mmmmmm&quot;, where DD is the day of month,
+ Mon is the textual month name, YYYY is the year, HH:MM:SS is the time,
+ and mmmmmm is the microseconds in six positions:</p>
+ <code>
-module(print_time).
-export([format_utc_timestamp/0]).
format_utc_timestamp() ->
TS = {_,_,Micro} = os:timestamp(),
- {{Year,Month,Day},{Hour,Minute,Second}} =
- calendar:now_to_universal_time(TS),
+ {{Year,Month,Day},{Hour,Minute,Second}} =
+        calendar:now_to_universal_time(TS),
Mstr = element(Month,{"Jan","Feb","Mar","Apr","May","Jun","Jul",
- "Aug","Sep","Oct","Nov","Dec"}),
+ "Aug","Sep","Oct","Nov","Dec"}),
io_lib:format("~2w ~s ~4w ~2w:~2..0w:~2..0w.~6..0w",
- [Day,Mstr,Year,Hour,Minute,Second,Micro]).
-</code>
-
- <p>The module above could be used in the following way:</p>
-<pre>
+ [Day,Mstr,Year,Hour,Minute,Second,Micro]).</code>
+ <p>This module can be used as follows:</p>
+ <pre>
1> <input>io:format("~s~n",[print_time:format_utc_timestamp()]).</input>
-29 Apr 2009 9:55:30.051711
-</pre>
+29 Apr 2009 9:55:30.051711</pre>
        <p>OS system time can also be retrieved by
- <seealso marker="#system_time/0"><c>os:system_time/0</c></seealso>,
- and <seealso marker="#system_time/1"><c>os:system_time/1</c></seealso>.</p>
+ <seealso marker="#system_time/0"><c>system_time/0</c></seealso> and
+ <seealso marker="#system_time/1"><c>system_time/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="perf_counter" arity="0"/>
+      <fsummary>Returns a performance counter.</fsummary>
+ <desc>
+ <p>Returns the current performance counter value in <c>perf_counter</c>
+ <seealso marker="erts:erlang#type_time_unit">time unit</seealso>.
+ This is a highly optimized call that might not be traceable.
+ </p>
+ </desc>
+ </func>
+ <func>
+ <name name="perf_counter" arity="1"/>
+      <fsummary>Returns a performance counter.</fsummary>
+      <desc><p>Returns a performance counter that can be used as a very fast and
+      high-resolution timestamp. This counter is read directly from the hardware
+      or operating system, so it gives only the guarantees that they provide.
+      This means that two consecutive calls to the function are not guaranteed
+      to be monotonic, though they most likely will be.
+      The performance counter is converted to the resolution passed as an argument.</p>
+ <pre>1> <input>T1 = os:perf_counter(1000),receive after 10000 -> ok end,T2 = os:perf_counter(1000).</input>
+176525861
+2> <input>T2 - T1.</input>
+10004</pre>
</desc>
</func>
<func>
<name name="type" arity="0"/>
- <fsummary>Return the OS family and, in some cases, OS name of the current operating system</fsummary>
+ <fsummary>Return the OS family and, in some cases, the OS name of the
+ current OS.</fsummary>
<desc>
- <p>Returns the <c><anno>Osfamily</anno></c> and, in some cases, <c><anno>Osname</anno></c>
- of the current operating system.</p>
- <p>On Unix, <c><anno>Osname</anno></c> will have same value as
+ <p>Returns the <c><anno>Osfamily</anno></c> and, in some cases, the
+ <c><anno>Osname</anno></c> of the current OS.</p>
+ <p>On Unix, <c><anno>Osname</anno></c> has the same value as
<c>uname -s</c> returns, but in lower case. For example, on
- Solaris 1 and 2, it will be <c>sunos</c>.</p>
- <p>In Windows, <c><anno>Osname</anno></c> will be either <c>nt</c> (on
- Windows NT), or <c>windows</c> (on Windows 95).</p>
+ Solaris 1 and 2, it is <c>sunos</c>.</p>
+ <p>On Windows, <c><anno>Osname</anno></c> is <c>nt</c>.</p>
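+        <p>For example, on a Linux system (illustrative result):</p>
+        <pre>
+1> <input>os:type().</input>
+{unix,linux}</pre>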
<note>
- <p>Think twice before using this function. Use the
- <c>filename</c> module if you want to inspect or build
- file names in a portable way.
- Avoid matching on the <c><anno>Osname</anno></c> atom.</p>
+ <p>Think twice before using this function. Use module
+ <seealso marker="stdlib:filename"><c>filename</c></seealso>
+ if you want to inspect or build filenames in a portable way.
+ Avoid matching on atom <c><anno>Osname</anno></c>.</p>
</note>
</desc>
</func>
+
<func>
<name name="unsetenv" arity="1"/>
- <fsummary>Delete an environment variable</fsummary>
+ <fsummary>Delete an environment variable.</fsummary>
<desc>
<p>Deletes the environment variable <c><anno>VarName</anno></c>.</p>
- <p>If Unicode filename encoding is in effect (see the <seealso
- marker="erts:erl#file_name_encoding">erl manual
- page</seealso>), the string (<c><anno>VarName</anno></c>) may
- contain characters with codepoints > 255.</p>
+ <p>If Unicode filename encoding is in effect (see the
+ <seealso marker="erts:erl#file_name_encoding"><c>erl</c> manual
+ page</seealso>), the string <c><anno>VarName</anno></c> can
+ contain characters with codepoints &gt; 255.</p>
</desc>
</func>
+
<func>
<name name="version" arity="0"/>
- <fsummary>Return the Operating System version</fsummary>
+      <fsummary>Return the OS version.</fsummary>
<desc>
- <p>Returns the operating system version.
+ <p>Returns the OS version.
On most systems, this function returns a tuple, but a string
- will be returned instead if the system has versions which
+ is returned instead if the system has versions that
cannot be expressed as three numbers.</p>
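+        <p>For example, on a Linux system (the value shown is illustrative
+        and depends on the kernel release):</p>
+        <pre>
+1> <input>os:version().</input>
+{4,15,0}</pre>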
<note>
<p>Think twice before using this function. If you still need
@@ -255,4 +444,3 @@ format_utc_timestamp() ->
</func>
</funcs>
</erlref>
-
diff --git a/lib/kernel/doc/src/part_notes.xml b/lib/kernel/doc/src/part.xml
index ec19cfa80d..fa7e92835f 100644
--- a/lib/kernel/doc/src/part_notes.xml
+++ b/lib/kernel/doc/src/part.xml
@@ -4,7 +4,7 @@
<part xmlns:xi="http://www.w3.org/2001/XInclude">
<header>
<copyright>
- <year>2004</year><year>2013</year>
+ <year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -22,19 +22,16 @@
</legalnotice>
- <title>Kernel Release Notes</title>
- <prepared></prepared>
+ <title>Kernel User's Guide</title>
+ <prepared>OTP Team</prepared>
<docno></docno>
- <date></date>
- <rev></rev>
+ <date>2018-06-06</date>
+ <file>part.xml</file>
</header>
<description>
- <p>The <em>Kernel</em> application has all the code necessary to run
- the Erlang runtime system itself; File servers and code servers
- etc.</p>
- <p>For information about older versions, see
- <url href="part_notes_history_frame.html">Release Notes History</url>.</p>
+ <p></p>
</description>
- <xi:include href="notes.xml"/>
+ <xi:include href="introduction_chapter.xml"/>
+ <xi:include href="logger_chapter.xml"/>
</part>
diff --git a/lib/kernel/doc/src/part_notes_history.xml b/lib/kernel/doc/src/part_notes_history.xml
deleted file mode 100644
index 87c32c3fba..0000000000
--- a/lib/kernel/doc/src/part_notes_history.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0" encoding="utf-8" ?>
-<!DOCTYPE part SYSTEM "part.dtd">
-
-<part>
- <header>
- <copyright>
- <year>2006</year>
- <year>2013</year>
- <holder>Ericsson AB, All Rights Reserved</holder>
- </copyright>
- <legalnotice>
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
- The Initial Developer of the Original Code is Ericsson AB.
- </legalnotice>
-
- <title>Kernel Release Notes History</title>
- <prepared></prepared>
- <docno></docno>
- <date></date>
- <rev></rev>
- </header>
- <description>
- <p>The <em>Kernel</em> application has all the code necessary to run
- the Erlang runtime system itself; File servers and code servers
- etc.</p>
- </description>
- <include file="notes_history"></include>
-</part>
-
diff --git a/lib/kernel/doc/src/pg2.xml b/lib/kernel/doc/src/pg2.xml
index 8c2fdb4cd3..0631b317b4 100644
--- a/lib/kernel/doc/src/pg2.xml
+++ b/lib/kernel/doc/src/pg2.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1997</year><year>2013</year>
+ <year>1997</year><year>2016</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -30,135 +30,136 @@
<checked>[email protected]</checked>
<date>1997-08-18</date>
<rev>A2</rev>
- <file>pg2.sgml</file>
+ <file>pg2.xml</file>
</header>
<module>pg2</module>
- <modulesummary>Distributed Named Process Groups</modulesummary>
+ <modulesummary>Distributed named process groups.</modulesummary>
<description>
- <p>This module implements process groups. Each message may be sent
- to one, some, or all members of the group.
- </p>
+ <p>This module implements process groups. Each message can be sent
+ to one, some, or all group members.</p>
<p>A group of processes can be accessed by a common name. For
example, if there is a group named <c>foobar</c>, there can be a
- set of processes (which can be located on different nodes) which
+ set of processes (which can be located on different nodes) that
are all members of the group <c>foobar</c>. There are no special
functions for sending a message to the group. Instead, client
- functions should be written with the functions
- <c>get_members/1</c> and <c>get_local_members/1</c> to find out
- which processes are members of the group. Then the message can be
- sent to one or more members of the group.
- </p>
- <p>If a member terminates, it is automatically removed from the
- group.
- </p>
+ functions are to be written with the functions
+ <seealso marker="#get_members/1"><c>get_members/1</c></seealso> and
+ <seealso marker="#get_local_members/1"><c>get_local_members/1</c></seealso>
+ to determine which processes are members of the group.
+ Then the message can be sent to one or more group members.</p>
+ <p>If a member terminates, it is automatically removed from the group.</p>
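+    <p>A small example of creating a group, joining the calling process to
+    it, and looking up the members (the group name <c>foobar</c> and the
+    shell output are used only for illustration):</p>
+    <pre>
+1> <input>pg2:create(foobar).</input>
+ok
+2> <input>pg2:join(foobar, self()).</input>
+ok
+3> <input>pg2:get_members(foobar).</input>
+[&lt;0.82.0&gt;]</pre>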
<warning>
- <p>This module is used by the <c>disk_log</c> module for
+ <p>This module is used by module
+ <seealso marker="disk_log"><c>disk_log</c></seealso> for
managing distributed disk logs. The disk log names are used as
- group names, which means that some action may need to be taken
+      group names, which means that some action might be needed
to avoid name clashes.</p>
</warning>
</description>
+
<datatypes>
<datatype>
<name name="name"/>
<desc><p>The name of a process group.</p></desc>
</datatype>
</datatypes>
+
<funcs>
<func>
<name name="create" arity="1"/>
- <fsummary>Create a new, empty process group</fsummary>
+ <fsummary>Create a new, empty process group.</fsummary>
<desc>
<p>Creates a new, empty process group. The group is globally
- visible on all nodes. If the group exists, nothing happens.
- </p>
+ visible on all nodes. If the group exists, nothing happens.</p>
</desc>
</func>
+
<func>
<name name="delete" arity="1"/>
- <fsummary>Delete a process group</fsummary>
+ <fsummary>Delete a process group.</fsummary>
<desc>
- <p>Deletes a process group.
- </p>
+ <p>Deletes a process group.</p>
</desc>
</func>
+
<func>
<name name="get_closest_pid" arity="1"/>
- <fsummary>Common dispatch function</fsummary>
+ <fsummary>Common dispatch function.</fsummary>
<desc>
- <p>This is a useful dispatch function which can be used from
+ <p>A useful dispatch function that can be used from
client functions. It returns a process on the local node, if
- such a process exist. Otherwise, it chooses one randomly.
- </p>
+ such a process exists. Otherwise, it selects one randomly.</p>
</desc>
</func>
+
<func>
- <name name="get_members" arity="1"/>
- <fsummary>Return all processes in a group</fsummary>
+ <name name="get_local_members" arity="1"/>
+ <fsummary>Return all local processes in a group.</fsummary>
<desc>
- <p>Returns all processes in the group <c>Name</c>. This
- function should be used from within a client function that
- accesses the group. It is therefore optimized for speed.
- </p>
+ <p>Returns all processes running on the local node in the
+ group <c>Name</c>. This function is to be used from
+ within a client function that accesses the group. It is therefore
+ optimized for speed.</p>
</desc>
</func>
+
<func>
- <name name="get_local_members" arity="1"/>
- <fsummary>Return all local processes in a group</fsummary>
+ <name name="get_members" arity="1"/>
+ <fsummary>Return all processes in a group.</fsummary>
<desc>
- <p>Returns all processes running on the local node in the
- group <c>Name</c>. This function should to be used from
- within a client function that accesses the group. It is therefore
- optimized for speed.
- </p>
+ <p>Returns all processes in the group <c>Name</c>. This
+ function is to be used from within a client function that
+ accesses the group. It is therefore optimized for speed.</p>
</desc>
</func>
+
<func>
<name name="join" arity="2"/>
- <fsummary>Join a process to a group</fsummary>
+ <fsummary>Join a process to a group.</fsummary>
<desc>
<p>Joins the process <c>Pid</c> to the group <c>Name</c>.
- A process can join a group several times; it must then
- leave the group the same number of times.
- </p>
+ A process can join a group many times and must then
+ leave the group the same number of times.</p>
</desc>
</func>
+
<func>
<name name="leave" arity="2"/>
- <fsummary>Make a process leave a group</fsummary>
+ <fsummary>Make a process leave a group.</fsummary>
<desc>
<p>Makes the process <c>Pid</c> leave the group <c>Name</c>.
If the process is not a member of the group, <c>ok</c> is
- returned.
- </p>
- </desc>
- </func>
- <func>
- <name name="which_groups" arity="0"/>
- <fsummary>Return a list of all known groups</fsummary>
- <desc>
- <p>Returns a list of all known groups.
- </p>
+ returned.</p>
</desc>
</func>
+
<func>
<name name="start" arity="0"/>
<name name="start_link" arity="0"/>
- <fsummary>Start the pg2 server</fsummary>
+ <fsummary>Start the <c>pg2</c> server.</fsummary>
<desc>
- <p>Starts the pg2 server. Normally, the server does not need
+ <p>Starts the <c>pg2</c> server. Normally, the server does not need
to be started explicitly, as it is started dynamically if it
is needed. This is useful during development, but in a
- target system the server should be started explicitly. Use
- configuration parameters for <c>kernel</c> for this.
- </p>
+ target system the server is to be started explicitly. Use the
+ configuration parameters for
+ <seealso marker="kernel_app#start_pg2"><c>kernel(6)</c></seealso>
+ for this.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="which_groups" arity="0"/>
+ <fsummary>Return a list of all known groups.</fsummary>
+ <desc>
+ <p>Returns a list of all known groups.</p>
</desc>
</func>
</funcs>
<section>
<title>See Also</title>
- <p><seealso marker="kernel_app">kernel(6)</seealso></p>
+ <p><seealso marker="kernel_app"><c>kernel(6)</c></seealso></p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/ref_man.xml b/lib/kernel/doc/src/ref_man.xml
index 7eb48a5f1d..d3b947527f 100644
--- a/lib/kernel/doc/src/ref_man.xml
+++ b/lib/kernel/doc/src/ref_man.xml
@@ -4,7 +4,7 @@
<application xmlns:xi="http://www.w3.org/2001/XInclude">
<header>
<copyright>
- <year>1996</year><year>2013</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -29,31 +29,37 @@
<rev></rev>
</header>
<description>
- <p>The <em>Kernel</em> application has all the code necessary to run
- the Erlang runtime system itself: file servers and code servers
- and so on.</p>
+
</description>
<xi:include href="kernel_app.xml"/>
+ <xi:include href="app.xml"/>
<xi:include href="application.xml"/>
<xi:include href="auth.xml"/>
<xi:include href="code.xml"/>
+ <xi:include href="config.xml"/>
<xi:include href="disk_log.xml"/>
<xi:include href="erl_boot_server.xml"/>
<xi:include href="erl_ddll.xml"/>
+ <xi:include href="erl_epmd.xml"/>
<xi:include href="erl_prim_loader_stub.xml"/>
<xi:include href="erlang_stub.xml"/>
<xi:include href="error_handler.xml"/>
<xi:include href="error_logger.xml"/>
<xi:include href="file.xml"/>
+ <xi:include href="gen_sctp.xml"/>
<xi:include href="gen_tcp.xml"/>
<xi:include href="gen_udp.xml"/>
- <xi:include href="gen_sctp.xml"/>
<xi:include href="global.xml"/>
<xi:include href="global_group.xml"/>
<xi:include href="heart.xml"/>
<xi:include href="inet.xml"/>
<xi:include href="inet_res.xml"/>
<xi:include href="init_stub.xml"/>
+ <xi:include href="logger.xml"/>
+ <xi:include href="logger_filters.xml"/>
+ <xi:include href="logger_formatter.xml"/>
+ <xi:include href="logger_std_h.xml"/>
+ <xi:include href="logger_disk_log_h.xml"/>
<xi:include href="net_adm.xml"/>
<xi:include href="net_kernel.xml"/>
<xi:include href="os.xml"/>
@@ -63,6 +69,4 @@
<xi:include href="user.xml"/>
<xi:include href="wrap_log_reader.xml"/>
<xi:include href="zlib_stub.xml"/>
- <xi:include href="app.xml"/>
- <xi:include href="config.xml"/>
</application>
diff --git a/lib/kernel/doc/src/ref_man.xml.src b/lib/kernel/doc/src/ref_man.xml.src
deleted file mode 100644
index 7eb48a5f1d..0000000000
--- a/lib/kernel/doc/src/ref_man.xml.src
+++ /dev/null
@@ -1,68 +0,0 @@
-<?xml version="1.0" encoding="utf-8" ?>
-<!DOCTYPE application SYSTEM "application.dtd">
-
-<application xmlns:xi="http://www.w3.org/2001/XInclude">
- <header>
- <copyright>
- <year>1996</year><year>2013</year>
- <holder>Ericsson AB. All Rights Reserved.</holder>
- </copyright>
- <legalnotice>
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
- </legalnotice>
-
- <title>Kernel Reference Manual</title>
- <prepared></prepared>
- <docno></docno>
- <date></date>
- <rev></rev>
- </header>
- <description>
- <p>The <em>Kernel</em> application has all the code necessary to run
- the Erlang runtime system itself: file servers and code servers
- and so on.</p>
- </description>
- <xi:include href="kernel_app.xml"/>
- <xi:include href="application.xml"/>
- <xi:include href="auth.xml"/>
- <xi:include href="code.xml"/>
- <xi:include href="disk_log.xml"/>
- <xi:include href="erl_boot_server.xml"/>
- <xi:include href="erl_ddll.xml"/>
- <xi:include href="erl_prim_loader_stub.xml"/>
- <xi:include href="erlang_stub.xml"/>
- <xi:include href="error_handler.xml"/>
- <xi:include href="error_logger.xml"/>
- <xi:include href="file.xml"/>
- <xi:include href="gen_tcp.xml"/>
- <xi:include href="gen_udp.xml"/>
- <xi:include href="gen_sctp.xml"/>
- <xi:include href="global.xml"/>
- <xi:include href="global_group.xml"/>
- <xi:include href="heart.xml"/>
- <xi:include href="inet.xml"/>
- <xi:include href="inet_res.xml"/>
- <xi:include href="init_stub.xml"/>
- <xi:include href="net_adm.xml"/>
- <xi:include href="net_kernel.xml"/>
- <xi:include href="os.xml"/>
- <xi:include href="pg2.xml"/>
- <xi:include href="rpc.xml"/>
- <xi:include href="seq_trace.xml"/>
- <xi:include href="user.xml"/>
- <xi:include href="wrap_log_reader.xml"/>
- <xi:include href="zlib_stub.xml"/>
- <xi:include href="app.xml"/>
- <xi:include href="config.xml"/>
-</application>
diff --git a/lib/kernel/doc/src/rpc.xml b/lib/kernel/doc/src/rpc.xml
index c323a84e50..fab616e630 100644
--- a/lib/kernel/doc/src/rpc.xml
+++ b/lib/kernel/doc/src/rpc.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2013</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -25,328 +25,405 @@
<title>rpc</title>
<prepared>Claes Wikstrom</prepared>
<docno>1</docno>
- <date>96-09-10</date>
+ <date>1996-09-10</date>
<rev>A</rev>
</header>
<module>rpc</module>
- <modulesummary>Remote Procedure Call Services</modulesummary>
+ <modulesummary>Remote Procedure Call services.</modulesummary>
<description>
- <p>This module contains services which are similar to remote
- procedure calls. It also contains broadcast facilities and
+ <p>This module contains services similar to Remote
+ Procedure Calls. It also contains broadcast facilities and
parallel evaluators. A remote procedure call is a method to call
a function on a remote node and collect the answer. It is used
for collecting information on a remote node, or for running a
function with some specific side effects on the remote node.</p>
</description>
+
<datatypes>
<datatype>
<name name="key"/>
<desc>
- <p>As returned by <seealso marker="#async_call/4">
- <c>async_call/4</c>.</seealso></p>
+ <p>As returned by
+ <seealso marker="#async_call/4"><c>async_call/4</c></seealso>.</p>
</desc>
</datatype>
</datatypes>
+
<funcs>
<func>
- <name name="call" arity="4"/>
- <fsummary>Evaluate a function call on a node</fsummary>
+ <name name="abcast" arity="2"/>
+ <fsummary>Broadcast a message asynchronously to a registered process on
+ all nodes.</fsummary>
<desc>
- <p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>, <anno>Args</anno>)</c> on the node
- <c><anno>Node</anno></c> and returns the corresponding value <c><anno>Res</anno></c>, or
- <c>{badrpc, <anno>Reason</anno>}</c> if the call fails.</p>
+ <p>Equivalent to <c>abcast([node()|nodes()], <anno>Name</anno>,
+ <anno>Msg</anno>)</c>.</p>
</desc>
</func>
+
<func>
- <name name="call" arity="5"/>
- <fsummary>Evaluate a function call on a node</fsummary>
+ <name name="abcast" arity="3"/>
+ <fsummary>Broadcast a message asynchronously to a registered process on
+ specific nodes.</fsummary>
<desc>
- <p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>, <anno>Args</anno>)</c> on the node
- <c><anno>Node</anno></c> and returns the corresponding value <c><anno>Res</anno></c>, or
- <c>{badrpc, <anno>Reason</anno>}</c> if the call fails. <c><anno>Timeout</anno></c> is
- a timeout value in milliseconds. If the call times out,
- <c><anno>Reason</anno></c> is <c>timeout</c>.</p>
- <p>If the reply arrives after the call times out, no message
- will contaminate the caller's message queue, since this
- function spawns off a middleman process to act as (a void)
- destination for such an orphan reply. This feature also makes
- this function more expensive than <c>call/4</c> at
- the caller's end.</p>
+ <p>Broadcasts the message <c><anno>Msg</anno></c> asynchronously to
+ the registered process <c><anno>Name</anno></c> on the specified
+ nodes.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="async_call" arity="4"/>
+ <fsummary>Evaluate a function call on a node, asynchronous
+ version.</fsummary>
+ <desc>
+ <p>Implements <em>call streams with promises</em>, a type of
+ RPC that does not suspend the caller until the result is
+ finished. Instead, a key is returned, which can be used
+ later to collect the value. The key can be viewed as a
+ promise to deliver the answer.</p>
+ <p>In this case, the key <c><anno>Key</anno></c> is returned, which
+ can be used in a subsequent call to
+ <seealso marker="#yield/1"><c>yield/1</c></seealso> or
+ <seealso marker="#nb_yield/1"><c>nb_yield/1,2</c></seealso>
+ to retrieve the value of evaluating <c>apply(<anno>Module</anno>,
+ <anno>Function</anno>, <anno>Args</anno>)</c> on node
+ <c><anno>Node</anno></c>.</p>
+ <note>
+ <p><seealso marker="#yield/1"><c>yield/1</c></seealso> and
+ <seealso marker="#nb_yield/1"><c>nb_yield/1,2</c></seealso>
+          must be called by the same process that made this call,
+          otherwise they will never yield correctly.</p>
+ </note>
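+        <p>A brief sketch of the calling pattern (<c>Node</c> is assumed to
+        be a connected node):</p>
+        <code type="none">
+Key = rpc:async_call(Node, lists, seq, [1, 5]),
+%% ... do other work here ...
+[1,2,3,4,5] = rpc:yield(Key).</code>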
</desc>
</func>
+
<func>
<name name="block_call" arity="4"/>
- <fsummary>Evaluate a function call on a node in the RPC server's context</fsummary>
+ <fsummary>Evaluate a function call on a node in the RPC server's
+ context.</fsummary>
<desc>
- <p>Like <c>call/4</c>, but the RPC server at <c><anno>Node</anno></c> does
+ <p>Same as <seealso marker="#call/4"><c>call/4</c></seealso>,
+ but the RPC server at <c><anno>Node</anno></c> does
not create a separate process to handle the call. Thus,
this function can be used if the intention of the call is to
block the RPC server from any other incoming requests until
- the request has been handled. The function can also be used
+ the request has been handled. The function can also be used
for efficiency reasons when very small fast functions are
- evaluated, for example BIFs that are guaranteed not to
+ evaluated, for example, BIFs that are guaranteed not to
suspend.</p>
</desc>
</func>
+
<func>
<name name="block_call" arity="5"/>
- <fsummary>Evaluate a function call on a node in the RPC server's context</fsummary>
+ <fsummary>Evaluate a function call on a node in the RPC server's
+ context.</fsummary>
<desc>
- <p>Like <c>block_call/4</c>, but with a timeout value in
- the same manner as <c>call/5</c>.</p>
+ <p>Same as
+ <seealso marker="#block_call/4"><c>block_call/4</c></seealso>,
+ but with a time-out value in the same manner as
+ <seealso marker="#call/5"><c>call/5</c></seealso>.</p>
</desc>
</func>
+
<func>
- <name name="async_call" arity="4"/>
- <fsummary>Evaluate a function call on a node, asynchronous version</fsummary>
+ <name name="call" arity="4"/>
+ <fsummary>Evaluate a function call on a node.</fsummary>
<desc>
- <p>Implements <em>call streams with promises</em>, a type of
- RPC which does not suspend the caller until the result is
- finished. Instead, a key is returned which can be used at a
- later stage to collect the value. The key can be viewed as a
- promise to deliver the answer.</p>
- <p>In this case, the key <c><anno>Key</anno></c> is returned, which can be
- used in a subsequent call to <c>yield/1</c> or
- <c>nb_yield/1,2</c> to retrieve the value of evaluating
- <c>apply(<anno>Module</anno>, <anno>Function</anno>, <anno>Args</anno>)</c> on the node <c><anno>Node</anno></c>.</p>
+ <p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>,
+ <anno>Args</anno>)</c> on node <c><anno>Node</anno></c> and returns
+ the corresponding value <c><anno>Res</anno></c>, or
+ <c>{badrpc, <anno>Reason</anno>}</c> if the call fails.</p>
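+        <p>For example, to read the OTP release of a remote node
+        (<c>Node</c> is assumed to be a connected node):</p>
+        <code type="none">
+Release = rpc:call(Node, erlang, system_info, [otp_release]).</code>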
</desc>
</func>
+
<func>
- <name name="yield" arity="1"/>
- <fsummary>Deliver the result of evaluating a function call on a node (blocking)</fsummary>
+ <name name="call" arity="5"/>
+ <fsummary>Evaluate a function call on a node.</fsummary>
<desc>
- <p>Returns the promised answer from a previous
- <c>async_call/4</c>. If the answer is available, it is
- returned immediately. Otherwise, the calling process is
- suspended until the answer arrives from <c>Node</c>.</p>
+ <p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>,
+ <anno>Args</anno>)</c> on node <c><anno>Node</anno></c> and returns
+ the corresponding value <c><anno>Res</anno></c>, or
+ <c>{badrpc, <anno>Reason</anno>}</c> if the call fails.
+ <c><anno>Timeout</anno></c> is
+ a time-out value in milliseconds. If the call times out,
+ <c><anno>Reason</anno></c> is <c>timeout</c>.</p>
+ <p>If the reply arrives after the call times out, no message
+ contaminates the caller's message queue, as this
+ function spawns off a middleman process to act as (a void)
+ destination for such an orphan reply. This feature also makes
+ this function more expensive than <c>call/4</c> at
+ the caller's end.</p>
</desc>
</func>
+
<func>
- <name name="nb_yield" arity="1"/>
- <fsummary>Deliver the result of evaluating a function call on a node (non-blocking)</fsummary>
+ <name name="cast" arity="4"/>
+ <fsummary>Run a function on a node ignoring the result.</fsummary>
<desc>
- <p>Equivalent to <c>nb_yield(<anno>Key</anno>, 0)</c>.</p>
+ <p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>,
+ <anno>Args</anno>)</c> on node
+ <c><anno>Node</anno></c>. No response is delivered and the calling
+ process is not suspended until the evaluation is complete, as
+ is the case with
+ <seealso marker="#call/4"><c>call/4,5</c></seealso>.</p>
</desc>
</func>
+
<func>
- <name name="nb_yield" arity="2"/>
- <fsummary>Deliver the result of evaluating a function call on a node (non-blocking)</fsummary>
+ <name name="eval_everywhere" arity="3"/>
+ <fsummary>Run a function on all nodes, ignoring the result.</fsummary>
<desc>
- <p>This is a non-blocking version of <c>yield/1</c>. It returns
- the tuple <c>{value, <anno>Val</anno>}</c> when the computation has
- finished, or <c>timeout</c> when <c><anno>Timeout</anno></c> milliseconds
- has elapsed.</p>
+ <p>Equivalent to <c>eval_everywhere([node()|nodes()],
+ <anno>Module</anno>, <anno>Function</anno>,
+ <anno>Args</anno>)</c>.</p>
</desc>
</func>
+
+ <func>
+ <name name="eval_everywhere" arity="4"/>
+ <fsummary>Run a function on specific nodes, ignoring the
+ result.</fsummary>
+ <desc>
+ <p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>,
+ <anno>Args</anno>)</c> on
+ the specified nodes. No answers are collected.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="multi_server_call" arity="2"/>
+ <fsummary>Interact with the servers on a number of nodes.</fsummary>
+ <desc>
+ <p>Equivalent to <c>multi_server_call([node()|nodes()],
+ <anno>Name</anno>, <anno>Msg</anno>)</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="multi_server_call" arity="3"/>
+ <fsummary>Interact with the servers on a number of nodes.</fsummary>
+ <desc>
+ <p>Can be used when interacting with servers called
+ <c><anno>Name</anno></c> on the specified nodes. It is assumed that
+ the servers receive messages in the format
+ <c>{From, <anno>Msg</anno>}</c> and reply using
+ <c>From ! {<anno>Name</anno>, Node, <anno>Reply</anno>}</c>, where
+ <c>Node</c> is the name of the node where the server is located.
+ The function returns <c>{<anno>Replies</anno>,
+ <anno>BadNodes</anno>}</c>, where <c><anno>Replies</anno></c> is a
+ list of all <c><anno>Reply</anno></c> values, and
+ <c><anno>BadNodes</anno></c> is one of the following:</p>
+ <list type="bulleted">
+ <item>A list of the nodes that do not exist</item>
+ <item>A list of the nodes where the server does not exist</item>
+ <item>A list of the nodes where the server terminated before sending
+ any reply.</item>
+ </list>
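+        <p>A minimal sketch of a server loop that speaks this protocol (the
+        registered name <c>my_server</c> and the reply term are assumptions
+        for illustration):</p>
+        <code type="none">
+loop() ->
+    receive
+        {From, Msg} ->
+            %% Reply in the expected {Name, Node, Reply} format.
+            From ! {my_server, node(), {ok, Msg}},
+            loop()
+    end.</code>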
+ </desc>
+ </func>
+
<func>
<name name="multicall" arity="3"/>
- <fsummary>Evaluate a function call on a number of nodes</fsummary>
+ <fsummary>Evaluate a function call on a number of nodes.</fsummary>
<desc>
- <p>Equivalent to <c>multicall([node()|nodes()], <anno>Module</anno>, <anno>Function</anno>, <anno>Args</anno>, infinity)</c>.</p>
+ <p>Equivalent to <c>multicall([node()|nodes()], <anno>Module</anno>,
+ <anno>Function</anno>, <anno>Args</anno>, infinity)</c>.</p>
</desc>
</func>
+
<func>
<name name="multicall" arity="4" clause_i="1"/>
- <fsummary>Evaluate a function call on a number of nodes</fsummary>
+ <fsummary>Evaluate a function call on a number of nodes.</fsummary>
<desc>
- <p>Equivalent to <c>multicall(<anno>Nodes</anno>, <anno>Module</anno>, <anno>Function</anno>, <anno>Args</anno>, infinity)</c>.</p>
+ <p>Equivalent to <c>multicall(<anno>Nodes</anno>, <anno>Module</anno>,
+ <anno>Function</anno>, <anno>Args</anno>, infinity)</c>.</p>
</desc>
</func>
+
<func>
<name name="multicall" arity="4" clause_i="2"/>
- <fsummary>Evaluate a function call on a number of nodes</fsummary>
+ <fsummary>Evaluate a function call on a number of nodes.</fsummary>
<desc>
- <p>Equivalent to <c>multicall([node()|nodes()], <anno>Module</anno>, <anno>Function</anno>, <anno>Args</anno>, <anno>Timeout</anno>)</c>.</p>
+ <p>Equivalent to <c>multicall([node()|nodes()], <anno>Module</anno>,
+ <anno>Function</anno>, <anno>Args</anno>,
+ <anno>Timeout</anno>)</c>.</p>
</desc>
</func>
+
<func>
<name name="multicall" arity="5"/>
- <fsummary>Evaluate a function call on a number of nodes</fsummary>
+ <fsummary>Evaluate a function call on a number of nodes.</fsummary>
<desc>
- <p>In contrast to an RPC, a multicall is an RPC which is sent
+ <p>In contrast to an RPC, a multicall is an RPC that is sent
concurrently from one client to multiple servers. This is
- useful for collecting some information from a set of nodes,
+ useful for collecting information from a set of nodes,
or for calling a function on a set of nodes to achieve some
side effects. It is semantically the same as iteratively
making a series of RPCs on all the nodes, but the multicall
- is faster as all the requests are sent at the same time
+ is faster, as all the requests are sent at the same time
and are collected one by one as they come back.</p>
- <p>The function evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>, <anno>Args</anno>)</c>
- on the specified nodes and collects the answers. It returns
- <c>{<anno>ResL</anno>, <anno>BadNodes</anno>}</c>, where <c><anno>BadNodes</anno></c> is a list
- of the nodes that terminated or timed out during computation,
- and <c><anno>ResL</anno></c> is a list of the return values.
+ <p>The function evaluates <c>apply(<anno>Module</anno>,
+ <anno>Function</anno>, <anno>Args</anno>)</c>
+ on the specified nodes and collects the answers. It returns
+ <c>{<anno>ResL</anno>, <anno>BadNodes</anno>}</c>, where
+ <c><anno>BadNodes</anno></c> is a list
+ of the nodes that do not exist,
+ and <c><anno>ResL</anno></c> is a list of the return values,
+ or <c>{badrpc, <anno>Reason</anno>}</c> for failing calls.
<c><anno>Timeout</anno></c> is a time (integer) in milliseconds, or
<c>infinity</c>.</p>
<p>The following example is useful when new object code is to
- be loaded on all nodes in the network, and also indicates
- some side effects RPCs may produce:</p>
+ be loaded on all nodes in the network, and indicates
+ some side effects that RPCs can produce:</p>
<code type="none">
-%% Find object code for module Mod
-{Mod, Bin, File} = code:get_object_code(Mod),
+%% Find object code for module Mod
+{Mod, Bin, File} = code:get_object_code(Mod),
-%% and load it on all nodes including this one
+%% and load it on all nodes including this one
{ResL, _} = rpc:multicall(code, load_binary, [Mod, File, Bin]),
%% and then maybe check the ResL list.</code>
</desc>
</func>
+
<func>
- <name name="cast" arity="4"/>
- <fsummary>Run a function on a node ignoring the result</fsummary>
+ <name name="nb_yield" arity="1"/>
+ <fsummary>Deliver the result of evaluating a function call on a node
+ (non-blocking).</fsummary>
<desc>
- <p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>, <anno>Args</anno>)</c> on the node
- <c><anno>Node</anno></c>. No response is delivered and the calling
- process is not suspended until the evaluation is complete, as
- is the case with <c>call/4,5</c>.</p>
+ <p>Equivalent to <c>nb_yield(<anno>Key</anno>, 0)</c>.</p>
</desc>
</func>
+
<func>
- <name name="eval_everywhere" arity="3"/>
- <fsummary>Run a function on all nodes, ignoring the result</fsummary>
+ <name name="nb_yield" arity="2"/>
+ <fsummary>Deliver the result of evaluating a function call on a node
+ (non-blocking).</fsummary>
<desc>
- <p>Equivalent to <c>eval_everywhere([node()|nodes()], <anno>Module</anno>, <anno>Function</anno>, <anno>Args</anno>)</c>.</p>
+ <p>Non-blocking version of
+ <seealso marker="#yield/1"><c>yield/1</c></seealso>. It returns
+ the tuple <c>{value, <anno>Val</anno>}</c> when the computation is
+ finished, or <c>timeout</c> when <c><anno>Timeout</anno></c>
+ milliseconds has elapsed.</p>
+ <note>
+          <p>This function must be called by the same process that made
+          the <seealso marker="#async_call/4"><c>async_call/4</c></seealso>
+          call, otherwise it only returns <c>timeout</c>.</p>
+ </note>
</desc>
</func>
+
<func>
- <name name="eval_everywhere" arity="4"/>
- <fsummary>Run a function on specific nodes, ignoring the result</fsummary>
+ <name name="parallel_eval" arity="1"/>
+ <fsummary>Evaluate many function calls on all nodes in
+ parallel.</fsummary>
<desc>
- <p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>, <anno>Args</anno>)</c> on
- the specified nodes. No answers are collected.</p>
+ <p>Evaluates, for every tuple in <c><anno>FuncCalls</anno></c>,
+ <c>apply(<anno>Module</anno>, <anno>Function</anno>,
+ <anno>Args</anno>)</c> on some node in
+ the network. Returns the list of return values, in the same
+ order as in <c><anno>FuncCalls</anno></c>.</p>
</desc>
</func>
+
<func>
- <name name="abcast" arity="2"/>
- <fsummary>Broadcast a message asynchronously to a registered process on all nodes</fsummary>
+ <name name="pinfo" arity="1"/>
+ <fsummary>Information about a process.</fsummary>
<desc>
- <p>Equivalent to <c>abcast([node()|nodes()], <anno>Name</anno>, <anno>Msg</anno>)</c>.</p>
+ <p>Location transparent version of the BIF
+ <seealso marker="erts:erlang#process_info/1"><c>erlang:process_info/1</c></seealso> in ERTS.</p>
</desc>
</func>
+
<func>
- <name name="abcast" arity="3"/>
- <fsummary>Broadcast a message asynchronously to a registered process on specific nodes</fsummary>
+ <name name="pinfo" arity="2" clause_i="1"/>
+ <name name="pinfo" arity="2" clause_i="2"/>
+ <fsummary>Information about a process.</fsummary>
<desc>
- <p>Broadcasts the message <c><anno>Msg</anno></c> asynchronously to
- the registered process <c><anno>Name</anno></c> on the specified nodes.</p>
+ <p>Location transparent version of the BIF
+ <seealso marker="erts:erlang#process_info/2"><c>erlang:process_info/2</c></seealso> in ERTS.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="pmap" arity="3"/>
+ <fsummary>Parallel evaluation of mapping a function over a
+ list.</fsummary>
+ <desc>
+ <p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>,
+ [<anno>Elem</anno>|<anno>ExtraArgs</anno>])</c> for every element
+ <c><anno>Elem</anno></c> in <c><anno>List1</anno></c>, in parallel.
+ Returns the list of return values, in the same order as in
+ <c><anno>List1</anno></c>.</p>
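+        <p>For example, reversing a number of lists in parallel:</p>
+        <code type="none">
+[[3,2,1],[6,5,4]] = rpc:pmap({lists, reverse}, [], [[1,2,3],[4,5,6]]).</code>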
</desc>
</func>
+
<func>
<name name="sbcast" arity="2"/>
- <fsummary>Broadcast a message synchronously to a registered process on all nodes</fsummary>
+ <fsummary>Broadcast a message synchronously to a registered process on
+ all nodes.</fsummary>
<desc>
- <p>Equivalent to <c>sbcast([node()|nodes()], <anno>Name</anno>, <anno>Msg</anno>)</c>.</p>
+ <p>Equivalent to <c>sbcast([node()|nodes()], <anno>Name</anno>,
+ <anno>Msg</anno>)</c>.</p>
</desc>
</func>
+
<func>
<name name="sbcast" arity="3"/>
- <fsummary>Broadcast a message synchronously to a registered process on specific nodes</fsummary>
+ <fsummary>Broadcast a message synchronously to a registered process on
+ specific nodes.</fsummary>
<desc>
<p>Broadcasts the message <c><anno>Msg</anno></c> synchronously to
- the registered process <c><anno>Name</anno></c> on the specified nodes.</p>
- <p>Returns <c>{<anno>GoodNodes</anno>, <anno>BadNodes</anno>}</c>, where <c><anno>GoodNodes</anno></c>
- is the list of nodes which have <c><anno>Name</anno></c> as a registered
- process.</p>
+ the registered process <c><anno>Name</anno></c> on the specified
+ nodes.</p>
+ <p>Returns <c>{<anno>GoodNodes</anno>, <anno>BadNodes</anno>}</c>,
+ where <c><anno>GoodNodes</anno></c> is the list of nodes that have
+ <c><anno>Name</anno></c> as a registered process.</p>
<p>The function is synchronous in the sense that it is known
that all servers have received the message when the call
returns. It is not possible to know that the servers have
- actually processed the message.</p>
+ processed the message.</p>
<p>Any further messages sent to the servers, after this
- function has returned, will be received by all servers after
+ function has returned, are received by all servers after
this message.</p>
</desc>
</func>
+
<func>
<name name="server_call" arity="4"/>
- <fsummary>Interact with a server on a node</fsummary>
+ <fsummary>Interact with a server on a node.</fsummary>
<desc>
- <p>This function can be used when interacting with a server
- called <c><anno>Name</anno></c> at node <c><anno>Node</anno></c>. It is assumed that
- the server receives messages in the format
- <c>{From, <anno>Msg</anno>}</c> and replies using <c>From ! {<anno>ReplyWrapper</anno>, <anno>Node</anno>, <anno>Reply</anno>}</c>. This function makes such
+ <p>Can be used when interacting with a server called
+ <c><anno>Name</anno></c> on node <c><anno>Node</anno></c>. It is
+ assumed that the server receives messages in the format
+ <c>{From, <anno>Msg</anno>}</c> and replies using
+ <c>From ! {<anno>ReplyWrapper</anno>, <anno>Node</anno>,
+ <anno>Reply</anno>}</c>. This function makes such
a server call and ensures that the entire call is packed into
- an atomic transaction which either succeeds or fails. It
+ an atomic transaction, which either succeeds or fails. It
never hangs, unless the server itself hangs.</p>
- <p>The function returns the answer <c><anno>Reply</anno></c> as produced by
- the server <c><anno>Name</anno></c>, or <c>{error, <anno>Reason</anno>}</c>.</p>
- </desc>
- </func>
- <func>
- <name name="multi_server_call" arity="2"/>
- <fsummary>Interact with the servers on a number of nodes</fsummary>
- <desc>
- <p>Equivalent to <c>multi_server_call([node()|nodes()], <anno>Name</anno>, <anno>Msg</anno>)</c>.</p>
- </desc>
- </func>
- <func>
- <name name="multi_server_call" arity="3"/>
- <fsummary>Interact with the servers on a number of nodes</fsummary>
- <desc>
- <p>This function can be used when interacting with servers
- called <c><anno>Name</anno></c> on the specified nodes. It is assumed that
- the servers receive messages in the format <c>{From, <anno>Msg</anno>}</c>
- and reply using <c>From ! {<anno>Name</anno>, Node, <anno>Reply</anno>}</c>, where
- <c>Node</c> is the name of the node where the server is
- located. The function returns <c>{<anno>Replies</anno>, <anno>BadNodes</anno>}</c>,
- where <c><anno>Replies</anno></c> is a list of all <c><anno>Reply</anno></c> values and
- <c><anno>BadNodes</anno></c> is a list of the nodes which did not exist, or
- where the server did not exist, or where the server terminated
- before sending any reply.</p>
- </desc>
- </func>
- <func>
- <name name="safe_multi_server_call" arity="2"/>
- <name name="safe_multi_server_call" arity="3"/>
- <fsummary>Interact with the servers on a number of nodes (deprecated)</fsummary>
- <desc>
- <warning>
- <p>This function is deprecated. Use
- <c>multi_server_call/2,3</c> instead.</p>
- </warning>
- <p>In Erlang/OTP R6B and earlier releases,
- <c>multi_server_call/2,3</c> could not handle the case
- where the remote node exists, but there is no server called
- <c><anno>Name</anno></c>. Instead this function had to be used. In
- Erlang/OTP R7B and later releases, however, the functions are
- equivalent, except for this function being slightly slower.</p>
- </desc>
- </func>
- <func>
- <name name="parallel_eval" arity="1"/>
- <fsummary>Evaluate several function calls on all nodes in parallel</fsummary>
- <desc>
- <p>For every tuple in <c><anno>FuncCalls</anno></c>, evaluates
- <c>apply(<anno>Module</anno>, <anno>Function</anno>, <anno>Args</anno>)</c> on some node in
- the network. Returns the list of return values, in the same
- order as in <c><anno>FuncCalls</anno></c>.</p>
- </desc>
- </func>
- <func>
- <name name="pmap" arity="3"/>
- <fsummary>Parallell evaluation of mapping a function over a list </fsummary>
- <desc>
- <p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>, [<anno>Elem</anno>|<anno>ExtraArgs</anno>])</c>,
- for every element <c><anno>Elem</anno></c> in <c><anno>List1</anno></c>, in parallel.
- Returns the list of return values, in the same order as in
- <c><anno>List1</anno></c>.</p>
- </desc>
- </func>
- <func>
- <name name="pinfo" arity="1"/>
- <fsummary>Information about a process</fsummary>
- <desc>
- <p>Location transparent version of the BIF
- <seealso marker="erts:erlang#process_info/1">
- <c>process_info/1</c></seealso>.</p>
+ <p>The function returns the answer <c><anno>Reply</anno></c> as
+ produced by the server <c><anno>Name</anno></c>, or
+ <c>{error, <anno>Reason</anno>}</c>.</p>
</desc>
</func>
+
<func>
- <name name="pinfo" arity="2"/>
- <fsummary>Information about a process</fsummary>
+ <name name="yield" arity="1"/>
+ <fsummary>Deliver the result of evaluating a function call on a node
+ (blocking).</fsummary>
<desc>
- <p>Location transparent version of the BIF
- <seealso marker="erts:erlang#process_info/2">
- <c>process_info/2</c></seealso>.</p>
+ <p>Returns the promised answer from a previous
+ <seealso marker="#async_call/4"><c>async_call/4</c></seealso>.
+ If the answer is available, it is
+ returned immediately. Otherwise, the calling process is
+ suspended until the answer arrives from <c>Node</c>.</p>
+ <note>
+          <p>This function must be called by the same process that made
+          the <seealso marker="#async_call/4"><c>async_call/4</c></seealso>
+          call, otherwise it will never return.</p>
+ </note>
</desc>
</func>
</funcs>
diff --git a/lib/kernel/doc/src/seq_trace.xml b/lib/kernel/doc/src/seq_trace.xml
index f4fcd222ec..1a4a74419a 100644
--- a/lib/kernel/doc/src/seq_trace.xml
+++ b/lib/kernel/doc/src/seq_trace.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1998</year><year>2013</year>
+ <year>1998</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -29,24 +29,17 @@
<rev>A</rev>
</header>
<module>seq_trace</module>
- <modulesummary>Sequential Tracing of Messages</modulesummary>
+ <modulesummary>Sequential tracing of messages.</modulesummary>
<description>
<p>Sequential tracing makes it possible to trace all messages
resulting from one initial message. Sequential tracing is
- completely independent of the ordinary tracing in Erlang, which
- is controlled by the <c>erlang:trace/3</c> BIF. See the chapter
- <seealso marker="#whatis">What is Sequential Tracing</seealso>
- below for more information about what sequential tracing is and
- how it can be used.</p>
- <p><c>seq_trace</c> provides functions which control all aspects of
+ independent of the ordinary tracing in Erlang, which
+ is controlled by the <c>erlang:trace/3</c> BIF. For more information
+ about what sequential tracing is and how it can be used, see section
+ <seealso marker="#whatis">Sequential Tracing</seealso>.</p>
+ <p><c>seq_trace</c> provides functions that control all aspects of
sequential tracing. There are functions for activation,
- deactivation, inspection and for collection of the trace output.</p>
- <note>
- <p>The implementation of sequential tracing is in beta status.
- This means that the programming interface still might undergo
- minor adjustments (possibly incompatible) based on feedback
- from users.</p>
- </note>
+ deactivation, inspection, and for collection of the trace output.</p>
</description>
<datatypes>
<datatype>
@@ -87,13 +80,18 @@ seq_trace:set_token(OldToken), % activate the trace token again
<p>Sets the individual <c><anno>Component</anno></c> of the trace token to
<c><anno>Val</anno></c>. Returns the previous value of the component.</p>
<taglist>
- <tag><c>set_token(label, <anno>Integer</anno>)</c></tag>
+ <tag><c>set_token(label, <anno>Label</anno>)</c></tag>
<item>
- <p>The <c>label</c> component is an integer which
+ <p>The <c>label</c> component is a term which
identifies all events belonging to the same sequential
trace. If several sequential traces can be active
simultaneously, <c>label</c> is used to identify
the separate traces. Default is 0.</p>
+ <warning>
+ <p>Labels were restricted to small signed integers (28 bits)
+            prior to OTP 21. The trace token will be silently dropped if it
+ crosses over to a node that does not support the label.</p>
+ </warning>
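+          <p>As an illustrative example (the label value is arbitrary), any
+            term can be used as label on OTP 21 and later:</p>
+          <code type="none">
+seq_trace:set_token(label, {my_request, self()}).</code>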
</item>
<tag><c>set_token(serial, SerialValue)</c></tag>
<item>
@@ -136,7 +134,7 @@ seq_trace:set_token(OldToken), % activate the trace token again
<seealso marker="erts:time_correction#Erlang_Monotonic_Time">Erlang
monotonic time</seealso> and a monotonically increasing
integer. The time-stamp has the same format and value
- as produced by <c>{erlang:monotonic_time(nano_seconds),
+ as produced by <c>{erlang:monotonic_time(nanosecond),
erlang:unique_integer([monotonic])}</c>.</p>
</item>
<tag><c>set_token(monotonic_timestamp, <anno>Bool</anno>)</c></tag>
@@ -148,15 +146,15 @@ seq_trace:set_token(OldToken), % activate the trace token again
<seealso marker="erts:time_correction#Erlang_Monotonic_Time">Erlang
monotonic time</seealso>. The time-stamp has the same
format and value as produced by
- <c>erlang:monotonic_time(nano_seconds)</c>.</p>
+ <c>erlang:monotonic_time(nanosecond)</c>.</p>
</item>
+ </taglist>
<p>If multiple timestamp flags are passed, <c>timestamp</c> has
precedence over <c>strict_monotonic_timestamp</c> which
in turn has precedence over <c>monotonic_timestamp</c>. All
timestamp flags are remembered, so if two are passed
and the one with highest precedence later is disabled
the other one will become active.</p>
- </taglist>
</desc>
</func>
<func>
@@ -220,9 +218,10 @@ seq_trace:set_token(OldToken), % activate the trace token again
<type name="tracer"/>
<desc>
<p>Sets the system tracer. The system tracer can be either a
- process or port denoted by <c><anno>Tracer</anno></c>. Returns the previous
- value (which can be <c>false</c> if no system tracer is
- active).</p>
+ process, port or <seealso marker="erts:erl_tracer">tracer module</seealso>
+ denoted by <c><anno>Tracer</anno></c>.
+ Returns the previous value (which can be <c>false</c> if no system
+ tracer is active).</p>
<p>Failure: <c>{badarg, Info}}</c> if <c><anno>Pid</anno></c> is not an
existing local pid.</p>
</desc>
@@ -232,27 +231,28 @@ seq_trace:set_token(OldToken), % activate the trace token again
<fsummary>Return the pid() or port() of the current system tracer.</fsummary>
<type name="tracer"/>
<desc>
- <p>Returns the pid or port identifier of the current system
+ <p>Returns the pid, port identifier or tracer module of the current system
tracer or <c>false</c> if no system tracer is activated.</p>
</desc>
</func>
</funcs>
<section>
- <title>Trace Messages Sent To the System Tracer</title>
- <p>The format of the messages are:</p>
+ <title>Trace Messages Sent to the System Tracer</title>
+    <p>The format of the messages is one of the following, depending on whether
+ flag <c>timestamp</c> of the trace token is set to <c>true</c> or
+ <c>false</c>:</p>
<code type="none">
{seq_trace, Label, SeqTraceInfo, TimeStamp}</code>
<p>or</p>
<code type="none">
{seq_trace, Label, SeqTraceInfo}</code>
- <p>depending on whether the <c>timestamp</c> flag of the trace
- token is set to <c>true</c> or <c>false</c>. Where:</p>
+ <p>Where:</p>
<code type="none">
Label = int()
TimeStamp = {Seconds, Milliseconds, Microseconds}
Seconds = Milliseconds = Microseconds = int()</code>
- <p>The <c>SeqTraceInfo</c> can have the following formats:</p>
+ <p><c>SeqTraceInfo</c> can have the following formats:</p>
<taglist>
<tag><c>{send, Serial, From, To, Message}</c></tag>
<item>
@@ -262,141 +262,151 @@ TimeStamp = {Seconds, Milliseconds, Microseconds}
<tag><c>{'receive', Serial, From, To, Message}</c></tag>
<item>
<p>Used when a process <c>To</c> receives a message with a
- trace token that has the <c>'receive'</c> flag set to
- <c>true</c>.</p>
+ trace token that has flag <c>'receive'</c> set to <c>true</c>.</p>
</item>
<tag><c>{print, Serial, From, _, Info}</c></tag>
<item>
- <p>Used when a process <c>From</c> has called
+ <p>Used when a process <c>From</c> has called
<c>seq_trace:print(Label, TraceInfo)</c> and has a trace
- token with the <c>print</c> flag set to <c>true</c> and
+ token with flag <c>print</c> set to <c>true</c>, and
<c>label</c> set to <c>Label</c>.</p>
</item>
</taglist>
<p><c>Serial</c> is a tuple <c>{PreviousSerial, ThisSerial}</c>,
- where the first integer <c>PreviousSerial</c> denotes the serial
- counter passed in the last received message which carried a trace
- token. If the process is the first one in a new sequential trace,
- <c>PreviousSerial</c> is set to the value of the process internal
- "trace clock". The second integer <c>ThisSerial</c> is the serial
- counter that a process sets on outgoing messages and it is based
- on the process internal "trace clock" which is incremented by one
- before it is attached to the trace token in the message.</p>
+ where:</p>
+ <list type="bulleted">
+ <item><p>Integer <c>PreviousSerial</c> denotes the serial
+ counter passed in the last received message that carried a trace
+ token. If the process is the first in a new sequential trace,
+ <c>PreviousSerial</c> is set to the value of the process internal
+ "trace clock".</p></item>
+ <item><p>Integer <c>ThisSerial</c> is the serial
+ counter that a process sets on outgoing messages. It is based
+ on the process internal "trace clock", which is incremented by one
+ before it is attached to the trace token in the message.</p></item>
+ </list>
</section>
<section>
<marker id="whatis"></marker>
- <title>What is Sequential Tracing</title>
+ <title>Sequential Tracing</title>
<p>Sequential tracing is a way to trace a sequence of messages sent
between different local or remote processes, where the sequence
- is initiated by one single message. In short it works like this:</p>
+ is initiated by a single message. In short, it works as follows:</p>
<p>Each process has a <em>trace token</em>, which can be empty or
- not empty. When not empty the trace token can be seen as
+ not empty. When not empty, the trace token can be seen as
the tuple <c>{Label, Flags, Serial, From}</c>. The trace token is
passed invisibly with each message.</p>
- <p>In order to start a sequential trace the user must explicitly set
+ <p>To start a sequential trace, the user must explicitly set
the trace token in the process that will send the first message
in a sequence.</p>
<p>The trace token of a process is set each time the process
matches a message in a receive statement, according to the trace
token carried by the received message, empty or not.</p>
- <p>On each Erlang node a process can be set as the <em>system tracer</em>. This process will receive trace messages each time
+ <p>On each Erlang node, a process can be set as the <em>system tracer</em>.
+ This process will receive trace messages each time
a message with a trace token is sent or received (if the trace
token flag <c>send</c> or <c>'receive'</c> is set). The system
- tracer can then print each trace event, write it to a file or
+ tracer can then print each trace event, write it to a file, or
whatever suitable.</p>
<note>
- <p>The system tracer will only receive those trace events that
+ <p>The system tracer only receives those trace events that
occur locally within the Erlang node. To get the whole picture
- of a sequential trace that involves processes on several Erlang
+ of a sequential trace, involving processes on many Erlang
nodes, the output from the system tracer on each involved node
- must be merged (off line).</p>
+ must be merged (offline).</p>
</note>
- <p>In the following sections Sequential Tracing and its most
- fundamental concepts are described.</p>
+ <p>The following sections describe sequential tracing and its most
+ fundamental concepts.</p>
</section>
<section>
<title>Trace Token</title>
- <p>Each process has a current trace token. Initially the token is
+ <p>Each process has a current trace token. Initially, the token is
empty. When the process sends a message to another process, a
- copy of the current token will be sent "invisibly" along with
+ copy of the current token is sent "invisibly" along with
the message.</p>
- <p>The current token of a process is set in two ways, either</p>
- <list type="ordered">
+ <p>The current token of a process is set in one of the following two
+ ways:</p>
+ <list type="bulleted">
<item>
- <p>explicitly by the process itself, through a call to
- <c>seq_trace:set_token</c>, or</p>
+ <p>Explicitly by the process itself, through a call to
+ <c>seq_trace:set_token/1,2</c></p>
</item>
<item>
- <p>when a message is received.</p>
+ <p>When a message is received</p>
</item>
</list>
- <p>In both cases the current token will be set. In particular, if
- the token of a message received is empty, the current token of
+ <p>In both cases, the current token is set. In particular, if
+ the token of a received message is empty, the current token of
the process is set to empty.</p>
- <p>A trace token contains a label, and a set of flags. Both
- the label and the flags are set in 1 and 2 above.</p>
+ <p>A trace token contains a label and a set of flags. Both
+ the label and the flags are set in both alternatives above.</p>
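+    <p>As an illustrative sketch (the label value and the receiving process
+      are arbitrary), a process can start a new trace sequence like this:</p>
+    <code type="none">
+seq_trace:set_token(label, 17),   % identify this trace sequence
+seq_trace:set_token(send, true),  % trace messages sent from this process
+Receiver ! first_message,         % the token follows the message invisibly
+seq_trace:set_token([]).          % reset the token to empty again</code>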
</section>
<section>
<title>Serial</title>
- <p>The trace token contains a component which is called
- <c>serial</c>. It consists of two integers <c>Previous</c> and
+ <p>The trace token contains a component called
+ <c>serial</c>. It consists of two integers, <c>Previous</c> and
<c>Current</c>. The purpose is to uniquely identify each traced
- event within a trace sequence and to order the messages
- chronologically and in the different branches if any.</p>
+ event within a trace sequence, as well as to order the messages
+ chronologically and in the different branches, if any.</p>
<p>The algorithm for updating <c>Serial</c> can be described as
follows:</p>
- <p>Let each process have two counters <c>prev_cnt</c> and
- <c>curr_cnt</c> which both are set to 0 when a process is created.
+ <p>Let each process have two counters, <c>prev_cnt</c> and
+      <c>curr_cnt</c>, both set to <c>0</c> when a process is created.
The counters are updated at the following occasions:</p>
<list type="bulleted">
<item>
- <p><em>When the process is about to send a message and the trace token is not empty.</em></p>
+ <p><em>When the process is about to send a message and the trace token
+ is not empty.</em></p>
<p>Let the serial of the trace token be <c>tprev</c> and
- <c>tcurr</c>. <br></br>
-<c>curr_cnt := curr_cnt + 1</c> <br></br>
-<c>tprev := prev_cnt</c> <br></br>
-<c>tcurr := curr_cnt</c></p>
+ <c>tcurr</c>.</p>
+ <pre>
+curr_cnt := curr_cnt + 1
+tprev := prev_cnt
+tcurr := curr_cnt</pre>
<p>The trace token with <c>tprev</c> and <c>tcurr</c> is then
passed along with the message.</p>
</item>
<item>
- <p><em>When the process calls</em><c>seq_trace:print(Label, Info)</c>, <em>Label matches the label part of the trace token and the trace token print flag is true.</em></p>
- <p>The same algorithm as for send above.</p>
+ <p><em>When the process calls</em> <c>seq_trace:print(Label, Info)</c>,
+ <c>Label</c> <em>matches the label part of the trace token and the
+ trace token print flag is <c>true</c>.</em></p>
+ <p>The algorithm is the same as for send above.</p>
</item>
<item>
- <p><em>When a message is received and contains a nonempty trace token.</em></p>
+ <p><em>When a message is received and contains a non-empty trace
+ token.</em></p>
<p>The process trace token is set to the trace token from
the message.</p>
<p>Let the serial of the trace token be <c>tprev</c> and
- <c>tcurr</c>. <br></br>
-<c><![CDATA[if (curr_cnt < tcurr )]]></c> <br></br>
-
- &nbsp; &nbsp; &nbsp; &nbsp;<c>curr_cnt := tcurr</c> <br></br>
-<c>prev_cnt := tcurr</c></p>
+ <c>tcurr</c>.</p>
+ <code>
+<![CDATA[if (curr_cnt < tcurr )]]>
+ curr_cnt := tcurr
+prev_cnt := tcurr</code>
</item>
</list>
- <p>The <c>curr_cnt</c> of a process is incremented each time
+ <p><c>curr_cnt</c> of a process is incremented each time
the process is involved in a sequential trace. The counter can
reach its limit (27 bits) if a process is very long-lived and is
- involved in much sequential tracing. If the counter overflows it
- will not be possible to use the serial for ordering of the trace
- events. To prevent the counter from overflowing in the middle of
- a sequential trace the function <c>seq_trace:reset_trace/0</c>
- can be called to reset the <c>prev_cnt</c> and <c>curr_cnt</c> of
- all processes in the Erlang node. This function will also set all
- trace tokens in processes and their message queues to empty and
- will thus stop all ongoing sequential tracing.</p>
+ involved in much sequential tracing. If the counter overflows, the
+ serial for ordering of the trace events cannot be used. To prevent
+ the counter from overflowing in the middle of
+ a sequential trace, function <c>seq_trace:reset_trace/0</c>
+ can be called to reset <c>prev_cnt</c> and <c>curr_cnt</c> of
+ all processes in the Erlang node. This function also sets all
+ trace tokens in processes and their message queues to empty, and
+ thus stops all ongoing sequential tracing.</p>
</section>
<section>
- <title>Performance considerations</title>
- <p>The performance degradation for a system which is enabled for
- Sequential Tracing is negligible as long as no tracing is
- activated. When tracing is activated there will of course be an
- extra cost for each traced message but all other messages will be
+ <title>Performance Considerations</title>
+ <p>The performance degradation for a system that is enabled for
+ sequential tracing is negligible as long as no tracing is
+ activated. When tracing is activated, there is an
+ extra cost for each traced message, but all other messages are
unaffected.</p>
</section>
@@ -404,13 +414,13 @@ TimeStamp = {Seconds, Milliseconds, Microseconds}
<title>Ports</title>
<p>Sequential tracing is not performed across ports.</p>
<p>If the user for some reason wants to pass the trace token to a
- port this has to be done manually in the code of the port
+ port, this must be done manually in the code of the port
controlling process. The port controlling processes have to check
the appropriate sequential trace settings (as obtained from
- <c>seq_trace:get_token/1</c> and include trace information in
+ <c>seq_trace:get_token/1</c>) and include trace information in
the message data sent to their respective ports.</p>
<p>Similarly, for messages received from a port, a port controller
- has to retrieve trace specific information, and set appropriate
+ has to retrieve trace-specific information, and set appropriate
sequential trace flags through calls to
<c>seq_trace:set_token/2</c>.</p>
</section>
@@ -418,23 +428,17 @@ TimeStamp = {Seconds, Milliseconds, Microseconds}
<section>
<title>Distribution</title>
<p>Sequential tracing between nodes is performed transparently.
- This applies to C-nodes built with Erl_Interface too. A C-node
- built with Erl_Interface only maintains one trace token, which
- means that the C-node will appear as one process from
+ This applies to C-nodes built with <c>Erl_Interface</c> too. A C-node
+ built with <c>Erl_Interface</c> only maintains one trace token, which
+ means that the C-node appears as one process from
the sequential tracing point of view.</p>
- <p>In order to be able to perform sequential tracing between
- distributed Erlang nodes, the distribution protocol has been
- extended (in a backward compatible way). An Erlang node which
- supports sequential tracing can communicate with an older
- (OTP R3B) node but messages passed within that node can of course
- not be traced.</p>
</section>
<section>
- <title>Example of Usage</title>
- <p>The example shown here will give rough idea of how the new
- primitives can be used and what kind of output it will produce.</p>
- <p>Assume that we have an initiating process with
+ <title>Example of Use</title>
+ <p>This example gives a rough idea of how the new
+      primitives can be used and what kind of output they produce.</p>
+ <p>Assume that you have an initiating process with
<c><![CDATA[Pid == <0.30.0>]]></c> like this:</p>
<code type="none">
-module(seqex).
@@ -463,8 +467,8 @@ loop() ->
PortController ! {ack,Ack}
end,
loop().</code>
- <p>A possible output from the system's sequential_tracer (inspired
- by AXE-10 and MD-110) could look like:</p>
+    <p>A possible output from the system's <c>sequential_tracer</c> can
+      look like this:</p>
<pre>
17:&lt;0.30.0> Info {0,1} WITH
"**** Trace Started ****"
@@ -475,7 +479,7 @@ loop() ->
17:&lt;0.30.0> Received {2,4} FROM &lt;0.31.0> WITH
{ack,{received,the_message}}</pre>
<p>The implementation of a system tracer process that produces
- the printout above could look like this:</p>
+ this printout can look like this:</p>
<code type="none">
tracer() ->
receive
@@ -502,16 +506,15 @@ print_trace({'receive',Serial,From,To,Message}) ->
print_trace({send,Serial,From,To,Message}) ->
io:format("~p Sent ~p TO ~p WITH~n~p~n",
[From,Serial,To,Message]).</code>
- <p>The code that creates a process that runs the tracer function
- above and sets that process as the system tracer could look like
- this:</p>
+ <p>The code that creates a process that runs this tracer function
+ and sets that process as the system tracer can look like this:</p>
<code type="none">
start() ->
Pid = spawn(?MODULE,tracer,[]),
seq_trace:set_system_tracer(Pid), % set Pid as the system tracer
ok.</code>
- <p>With a function like <c>test/0</c> below the whole example can be
- started.</p>
+ <p>With a function like <c>test/0</c>, the whole example can be
+ started:</p>
<code type="none">
test() ->
P = spawn(?MODULE, loop, [port]),
diff --git a/lib/kernel/doc/src/specs.xml b/lib/kernel/doc/src/specs.xml
index 29d52f23bb..b8c25ca53b 100644
--- a/lib/kernel/doc/src/specs.xml
+++ b/lib/kernel/doc/src/specs.xml
@@ -6,6 +6,7 @@
<xi:include href="../specs/specs_disk_log.xml"/>
<xi:include href="../specs/specs_erl_boot_server.xml"/>
<xi:include href="../specs/specs_erl_ddll.xml"/>
+ <xi:include href="../specs/specs_erl_epmd.xml"/>
<xi:include href="../specs/specs_erl_prim_loader_stub.xml"/>
<xi:include href="../specs/specs_erlang_stub.xml"/>
<xi:include href="../specs/specs_error_handler.xml"/>
@@ -20,6 +21,11 @@
<xi:include href="../specs/specs_inet.xml"/>
<xi:include href="../specs/specs_inet_res.xml"/>
<xi:include href="../specs/specs_init_stub.xml"/>
+ <xi:include href="../specs/specs_logger.xml"/>
+ <xi:include href="../specs/specs_logger_filters.xml"/>
+ <xi:include href="../specs/specs_logger_formatter.xml"/>
+ <xi:include href="../specs/specs_logger_std_h.xml"/>
+ <xi:include href="../specs/specs_logger_disk_log_h.xml"/>
<xi:include href="../specs/specs_net_adm.xml"/>
<xi:include href="../specs/specs_net_kernel.xml"/>
<xi:include href="../specs/specs_os.xml"/>
diff --git a/lib/kernel/doc/src/user.xml b/lib/kernel/doc/src/user.xml
index 24119bea82..38eac1c221 100644
--- a/lib/kernel/doc/src/user.xml
+++ b/lib/kernel/doc/src/user.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>1996</year>
- <year>2013</year>
+ <year>2016</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -27,13 +27,13 @@
<title>user</title>
<prepared>Robert Virding</prepared>
<docno>1</docno>
- <date>96-10-10</date>
+ <date>1996-10-10</date>
<rev>A</rev>
</header>
<module>user</module>
- <modulesummary>Standard I/O Server</modulesummary>
+ <modulesummary>Standard I/O server.</modulesummary>
<description>
- <p><c>user</c> is a server which responds to all the messages
+ <p><c>user</c> is a server that responds to all messages
defined in the I/O interface. The code in <c>user.erl</c> can be
used as a model for building alternative I/O servers.</p>
</description>
diff --git a/lib/kernel/doc/src/user_guide.gif b/lib/kernel/doc/src/user_guide.gif
deleted file mode 100644
index e6275a803d..0000000000
--- a/lib/kernel/doc/src/user_guide.gif
+++ /dev/null
Binary files differ
diff --git a/lib/kernel/doc/src/wrap_log_reader.xml b/lib/kernel/doc/src/wrap_log_reader.xml
index c021f42cac..7fb9c1c023 100644
--- a/lib/kernel/doc/src/wrap_log_reader.xml
+++ b/lib/kernel/doc/src/wrap_log_reader.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1998</year><year>2013</year>
+ <year>1998</year><year>2016</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -26,39 +26,43 @@
<prepared>Esko Vierum&auml;ki</prepared>
<responsible>Esko Vierum&auml;ki</responsible>
<docno></docno>
- <approved>nobody</approved>
- <checked>no</checked>
- <date>98-09-21</date>
+ <approved></approved>
+ <checked></checked>
+ <date>1998-09-21</date>
<rev>A</rev>
<file>wrap_log_reader.sgml</file>
</header>
<module>wrap_log_reader</module>
- <modulesummary>A function to read internally formatted wrap disk logs</modulesummary>
+ <modulesummary>A service to read internally formatted wrap disk logs.
+ </modulesummary>
<description>
- <p><c>wrap_log_reader</c> is a function to read internally formatted
- wrap disk logs, refer to disk_log(3). <c>wrap_log_reader</c> does not
- interfere with disk_log activities; there is however a known bug in this
- version of the <c>wrap_log_reader</c>, see chapter <c>bugs</c> below.
- </p>
- <p>A wrap disk log file consists of several files, called index files.
- A log file can be opened and closed. It is also possible to open just one index file
- separately. If an non-existent or a non-internally formatted file is opened,
- an error message is returned. If the file is corrupt, no attempt to repair it
- will be done but an error message is returned.
- </p>
- <p>If a log is configured to be distributed, there is a possibility that all items
- are not loggen on all nodes. <c>wrap_log_reader</c> does only read the log on
- the called node, it is entirely up to the user to be sure that all items are read.
- </p>
+ <p>This module makes it possible to read internally formatted
+ wrap disk logs, see
+ <seealso marker="disk_log"><c>disk_log(3)</c></seealso>.
+ <c>wrap_log_reader</c> does not
+    interfere with <c>disk_log</c> activities; there is, however, a known
+    limitation in this version of <c>wrap_log_reader</c>, see section
+ <seealso marker="#bugs">Known Limitations</seealso>.</p>
+ <p>A wrap disk log file consists of many files, called index files. A log
+ file can be opened and closed. Also, a single index file can be opened
+ separately. If a non-existent or non-internally formatted file is opened,
+ an error message is returned. If the file is corrupt, no attempt is made
+ to repair it, but an error message is returned.</p>
+    <p>If a log is configured to be distributed, it is possible that not all
+      items are logged on all nodes. <c>wrap_log_reader</c> only reads the log on
+ the called node; it is up to the user to be sure that all items
+ are read.</p>
</description>
+
<datatypes>
<datatype>
<name name="continuation"/>
- <desc><p>Continuation returned by
- <c>open/1,2</c> or <c>chunk/1,2</c>.</p>
+ <desc>
+ <p>Continuation returned by <c>open/1,2</c> or <c>chunk/1,2</c>.</p>
</desc>
</datatype>
</datatypes>
+
<funcs>
<func>
<name name="chunk" arity="1"/>
@@ -66,87 +70,83 @@
<fsummary>Read a chunk of objects written to a wrap log.</fsummary>
<type name="chunk_ret"/>
<desc>
- <p>This function makes it possible to efficiently read the
- terms which have been appended to a log. It minimises disk
- I/O by reading large 8K chunks from the file.
- </p>
- <p>The first time <c>chunk</c> is called an initial
- continuation returned from the <c>open/1</c>, <c>open/2</c> must be provided.
- </p>
+        <p>Enables efficient reading of the
+          terms that are appended to a log. Minimises disk
+ I/O by reading 64 kilobyte chunks from the file.</p>
+ <p>The first time <c>chunk()</c> is called, an initial
+ continuation returned from <c>open/1</c> or <c>open/2</c> must be
+ provided.</p>
<p>When <c>chunk/3</c> is called, <c><anno>N</anno></c> controls the
maximum number of terms that are read from the log in each
- chunk. Default is <c>infinity</c>, which means that all the
+ chunk. Defaults to <c>infinity</c>, which means that all the
terms contained in the 8K chunk are read. If less than
- <c><anno>N</anno></c> terms are returned, this does not necessarily mean
- that end of file is reached.
- </p>
- <p>The <c>chunk</c> function returns a tuple
- <c>{<anno>Continuation2</anno>, <anno>Terms</anno>}</c>, where <c><anno>Terms</anno></c> is a list
+ <c><anno>N</anno></c> terms are returned, this does not necessarily
+ mean that end of file is reached.</p>
+ <p>Returns a tuple <c>{<anno>Continuation2</anno>,
+ <anno>Terms</anno>}</c>, where <c><anno>Terms</anno></c> is a list
of terms found in the log. <c><anno>Continuation2</anno></c> is yet
- another continuation which must be passed on into any
- subsequent calls to <c>chunk</c>. With a series of calls to
- <c>chunk</c> it is then possible to extract all terms from a
- log.
- </p>
- <p>The <c>chunk</c> function returns a tuple
- <c>{<anno>Continuation2</anno>, <anno>Terms</anno>, <anno>Badbytes</anno>}</c> if the log is opened
- in read only mode and the read chunk is corrupt. <c><anno>Badbytes</anno></c>
+ another continuation that must be passed on to any
+ subsequent calls to <c>chunk()</c>. With a series of calls to
+ <c>chunk()</c>, it is then possible to extract all terms from a log.</p>
+ <p>Returns a tuple <c>{<anno>Continuation2</anno>,
+ <anno>Terms</anno>, <anno>Badbytes</anno>}</c> if the log is opened
+ in read only mode and the read chunk is corrupt.
+ <c><anno>Badbytes</anno></c>
indicates the number of non-Erlang terms found in the chunk.
- Note also that the log is not repaired.
- </p>
- <p><c>chunk</c> returns <c>{<anno>Continuation2</anno>, eof}</c> when the end of the log is
- reached, and <c>{error, <anno>Reason</anno>}</c> if an error occurs.
- </p>
- <p>The returned continuation may or may not be valid in the next call to
- <c>chunk</c>. This is because the log may wrap and delete
- the file into which the continuation points. To make sure
- this does not happen, the log can be blocked during the
- search.
- </p>
+ Notice that the log is not repaired.</p>
+ <p>Returns <c>{<anno>Continuation2</anno>, eof}</c> when
+ the end of the log is reached, and <c>{error, <anno>Reason</anno>}</c>
+ if an error occurs.</p>
+        <p>The returned continuation is not necessarily valid in the next call
+ to this function. This is because the log can wrap and delete
+ the file into which the continuation points. To ensure
+ this does not occur, the log can be blocked during the search.</p>
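+        <p>As an illustrative sketch (the helper function and the log name
+          are not part of this module, and the corrupt-chunk return
+          <c>{Continuation2, Terms, Badbytes}</c> is not handled), all terms
+          of a wrap log can be read like this:</p>
+        <code type="none">
+read_all(WrapLogName) ->
+    {ok, Cont} = wrap_log_reader:open(WrapLogName),
+    read_all(Cont, []).
+
+read_all(Cont, Acc) ->
+    case wrap_log_reader:chunk(Cont) of
+        {Cont1, eof} ->
+            wrap_log_reader:close(Cont1),
+            lists:append(lists:reverse(Acc));
+        {Cont1, Terms} when is_list(Terms) ->
+            read_all(Cont1, [Terms | Acc]);
+        {error, Reason} ->
+            {error, Reason}
+    end.</code>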
</desc>
</func>
+
<func>
<name name="close" arity="1"/>
- <fsummary>Close a log</fsummary>
+ <fsummary>Close a log.</fsummary>
<desc>
- <p>This function closes a log file properly.
- </p>
+ <p>Closes a log file properly.</p>
</desc>
</func>
+
<func>
<name name="open" arity="1"/>
<name name="open" arity="2"/>
- <fsummary>Open a log file</fsummary>
+ <fsummary>Open a log file.</fsummary>
<type name="open_ret"/>
<desc>
- <p><c><anno>Filename</anno></c> specifies the name of the file which is to be read. </p>
- <p><c><anno>N</anno></c> specifies the index of the file which is to be read.
- If <c><anno>N</anno></c> is omitted the whole wrap log file will be read; if it
- is specified only the specified index file will be read.
- </p>
- <p>The <c>open</c> function returns <c>{ok, <anno>Continuation</anno>}</c> if the
- log/index file was successfully opened. The <c><anno>Continuation</anno></c>
- is to be used when chunking or closing the file.
- </p>
- <p>The function returns <c>{error, <anno>Reason</anno>}</c> for all errors.
- </p>
+ <p><c><anno>Filename</anno></c> specifies the name of the file to be
+ read.</p>
+ <p><c><anno>N</anno></c> specifies the index of the file to be read.
+ If <c><anno>N</anno></c> is omitted, the whole wrap log file is read;
+ if it is specified, only the specified index file is read.</p>
+ <p>Returns <c>{ok, <anno>Continuation</anno>}</c> if the
+ log/index file is opened successfully.
+ <c><anno>Continuation</anno></c>
+ is to be used when chunking or closing the file.</p>
+ <p>Returns <c>{error, <anno>Reason</anno>}</c> for all errors.</p>
</desc>
</func>
</funcs>
<section>
- <title>Bugs</title>
- <p>This version of the <c>wrap_log_reader</c> does not detect if the <c>disk_log</c>
- wraps to a new index file between a <c>wrap_log_reader:open</c> and the first
- <c>wrap_log_reader:chunk</c>.
- In this case the chuck will actually read the last logged items in the log file,
- because the opened index file was truncated by the <c>disk_log</c>.
- </p>
+ <title>Known Limitations</title>
+ <marker id="bugs"/>
+ <p>This version of <c>wrap_log_reader</c> does not detect if
+ <c>disk_log</c> wraps to a new index file between a call to
+ <c>wrap_log_reader:open()</c> and the first call to
+ <c>wrap_log_reader:chunk()</c>.
+ If this occurs, the call to <c>chunk()</c> reads the last logged
+ items in the log file, as the opened index file was truncated by
+ <c>disk_log</c>.</p>
</section>
<section>
<title>See Also</title>
- <p><seealso marker="disk_log">disk_log(3)</seealso></p>
+ <p><seealso marker="disk_log"><c>disk_log(3)</c></seealso></p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/zlib_stub.xml b/lib/kernel/doc/src/zlib_stub.xml
index d1823b01aa..9ab9c4eb62 100644
--- a/lib/kernel/doc/src/zlib_stub.xml
+++ b/lib/kernel/doc/src/zlib_stub.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>1997</year>
- <year>2013</year>
+ <year>2016</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -31,13 +31,9 @@
<rev>A</rev>
</header>
<module>zlib</module>
- <modulesummary>Zlib Compression interface.</modulesummary>
- <description><p>
-
- The module zlib is moved to the runtime system
- application. Please see <seealso
- marker="erts:zlib">zlib(3)</seealso> in the
- erts reference manual instead.
-
- </p></description>
+ <modulesummary>Zlib compression interface.</modulesummary>
+ <description>
+ <p>This module is moved to the
+ <seealso marker="erts:zlib">ERTS</seealso> application.</p>
+ </description>
</erlref>
diff --git a/lib/kernel/examples/Makefile b/lib/kernel/examples/Makefile
index 26ec58f571..f86e662838 100644
--- a/lib/kernel/examples/Makefile
+++ b/lib/kernel/examples/Makefile
@@ -45,7 +45,7 @@ RELSYSDIR = $(RELEASE_PATH)/lib/kernel-$(KERNEL_VSN)/examples
# Pack and install the complete directory structure from
# here (CWD) and down, for all examples.
-EXAMPLES = uds_dist
+EXAMPLES = uds_dist gen_tcp_dist
release_spec:
$(INSTALL_DIR) "$(RELSYSDIR)"
diff --git a/lib/kernel/examples/gen_tcp_dist/Makefile b/lib/kernel/examples/gen_tcp_dist/Makefile
new file mode 100644
index 0000000000..65513a1729
--- /dev/null
+++ b/lib/kernel/examples/gen_tcp_dist/Makefile
@@ -0,0 +1,20 @@
+RM=rm -f
+CP=cp
+EBIN=ebin
+ERLC=erlc
+# Works if building in the OTP (open source) source tree
+KERNEL_INCLUDE=$(ERL_TOP)/lib/kernel/include
+ERLCFLAGS+= -W -I$(KERNEL_INCLUDE)
+
+MODULES=gen_tcp_dist
+
+TARGET_FILES=$(MODULES:%=$(EBIN)/%.beam)
+
+opt: $(TARGET_FILES)
+
+$(EBIN)/%.beam: src/%.erl
+ $(ERLC) $(ERLCFLAGS) -o$(EBIN) $<
+
+clean:
+ $(RM) $(TARGET_FILES)
+
diff --git a/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore b/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore
diff --git a/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl b/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl
new file mode 100644
index 0000000000..98554ed805
--- /dev/null
+++ b/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl
@@ -0,0 +1,781 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(gen_tcp_dist).
+
+%%
+%% This is an example of how to plug in an arbitrary distribution
+%% carrier for Erlang using distribution processes.
+%%
+%% This example uses gen_tcp for transportation of data, but
+%% you can use whatever underlying protocol you want as long
+%% as your implementation reliably delivers data chunks to the
+%% receiving VM in the order they were sent from the sending
+%% VM.
+%%
+%% This code is a rewrite of the lib/kernel/src/inet_tcp_dist.erl
+%% distribution implementation for TCP used by default. That
+%% implementation uses distribution ports instead of distribution
+%% processes and is more efficient compared to this implementation,
+%% since this implementation more or less just puts distribution
+%% processes in between the VM and the ports without any
+%% specific gain.
+%%
+
+-export([listen/1, accept/1, accept_connection/5,
+ setup/5, close/1, select/1, is_node_name/1]).
+
+%% Optional
+-export([setopts/2, getopts/2]).
+
+%% internal exports
+
+-export([dist_cntrlr_setup/1, dist_cntrlr_input_setup/3,
+ dist_cntrlr_tick_handler/1]).
+
+-export([accept_loop/2,do_accept/6,do_setup/6]).
+
+-import(error_logger,[error_msg/2]).
+
+-include("net_address.hrl").
+
+-include("dist.hrl").
+-include("dist_util.hrl").
+
+%% ------------------------------------------------------------
+%% Select this protocol based on node name
+%% select(Node) => Bool
+%% ------------------------------------------------------------
+
+select(Node) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [_, Host] ->
+ case inet:getaddr(Host, inet) of
+ {ok,_} -> true;
+ _ -> false
+ end;
+ _ -> false
+ end.
+
+%% ------------------------------------------------------------
+%% Create the listen socket, i.e. the port that this erlang
+%% node is accessible through.
+%% ------------------------------------------------------------
+
+listen(Name) ->
+ case do_listen([binary, {active, false}, {packet,2}, {reuseaddr, true}]) of
+ {ok, Socket} ->
+ TcpAddress = get_tcp_address(Socket),
+ {_,Port} = TcpAddress#net_address.address,
+ ErlEpmd = net_kernel:epmd_module(),
+ case ErlEpmd:register_node(Name, Port) of
+ {ok, Creation} ->
+ {ok, {Socket, TcpAddress, Creation}};
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+do_listen(Options) ->
+ {First,Last} = case application:get_env(kernel,inet_dist_listen_min) of
+ {ok,N} when is_integer(N) ->
+ case application:get_env(kernel,
+ inet_dist_listen_max) of
+ {ok,M} when is_integer(M) ->
+ {N,M};
+ _ ->
+ {N,N}
+ end;
+ _ ->
+ {0,0}
+ end,
+ do_listen(First, Last, listen_options([{backlog,128}|Options])).
+
+do_listen(First,Last,_) when First > Last ->
+ {error,eaddrinuse};
+do_listen(First,Last,Options) ->
+ case gen_tcp:listen(First, Options) of
+ {error, eaddrinuse} ->
+ do_listen(First+1,Last,Options);
+ Other ->
+ Other
+ end.
+
+listen_options(Opts0) ->
+ Opts1 =
+ case application:get_env(kernel, inet_dist_use_interface) of
+ {ok, Ip} ->
+ [{ip, Ip} | Opts0];
+ _ ->
+ Opts0
+ end,
+ case application:get_env(kernel, inet_dist_listen_options) of
+ {ok,ListenOpts} ->
+ ListenOpts ++ Opts1;
+ _ ->
+ Opts1
+ end.
+
+
+%% ------------------------------------------------------------
+%% Accepts new connection attempts from other Erlang nodes.
+%% ------------------------------------------------------------
+
+accept(Listen) ->
+ spawn_opt(?MODULE, accept_loop, [self(), Listen], [link, {priority, max}]).
+
+accept_loop(Kernel, Listen) ->
+ ?trace("~p~n",[{?MODULE, accept_loop, self()}]),
+ case gen_tcp:accept(Listen) of
+ {ok, Socket} ->
+ DistCtrl = spawn_dist_cntrlr(Socket),
+ ?trace("~p~n",[{?MODULE, accept_loop, accepted, Socket, DistCtrl, self()}]),
+ flush_controller(DistCtrl, Socket),
+ gen_tcp:controlling_process(Socket, DistCtrl),
+ flush_controller(DistCtrl, Socket),
+ Kernel ! {accept,self(),DistCtrl,inet,tcp},
+ receive
+ {Kernel, controller, Pid} ->
+ call_ctrlr(DistCtrl, {supervisor, Pid}),
+ Pid ! {self(), controller};
+ {Kernel, unsupported_protocol} ->
+ exit(unsupported_protocol)
+ end,
+ accept_loop(Kernel, Listen);
+ Error ->
+ exit(Error)
+ end.
+
+flush_controller(Pid, Socket) ->
+ receive
+ {tcp, Socket, Data} ->
+ Pid ! {tcp, Socket, Data},
+ flush_controller(Pid, Socket);
+ {tcp_closed, Socket} ->
+ Pid ! {tcp_closed, Socket},
+ flush_controller(Pid, Socket)
+ after 0 ->
+ ok
+ end.
+
+%% ------------------------------------------------------------
+%% Accepts a new connection attempt from another Erlang node.
+%% Performs the handshake with the other side.
+%% ------------------------------------------------------------
+
+accept_connection(AcceptPid, DistCtrl, MyNode, Allowed, SetupTime) ->
+ spawn_opt(?MODULE, do_accept,
+ [self(), AcceptPid, DistCtrl, MyNode, Allowed, SetupTime],
+ [link, {priority, max}]).
+
+do_accept(Kernel, AcceptPid, DistCtrl, MyNode, Allowed, SetupTime) ->
+ ?trace("~p~n",[{?MODULE, do_accept, self(), MyNode}]),
+ receive
+ {AcceptPid, controller} ->
+ Timer = dist_util:start_timer(SetupTime),
+ case check_ip(DistCtrl) of
+ true ->
+ HSData0 = hs_data_common(DistCtrl),
+ HSData = HSData0#hs_data{kernel_pid = Kernel,
+ this_node = MyNode,
+ socket = DistCtrl,
+ timer = Timer,
+ this_flags = 0,
+ allowed = Allowed},
+ dist_util:handshake_other_started(HSData);
+ {false,IP} ->
+ error_msg("** Connection attempt from "
+ "disallowed IP ~w ** ~n", [IP]),
+ ?shutdown(no_node)
+ end
+ end.
+
+%% we may not always want the nodelay behaviour
+%% for performance reasons
+
+nodelay() ->
+ case application:get_env(kernel, dist_nodelay) of
+ undefined ->
+ {nodelay, true};
+ {ok, true} ->
+ {nodelay, true};
+ {ok, false} ->
+ {nodelay, false};
+ _ ->
+ {nodelay, true}
+ end.
+
+%% ------------------------------------------------------------
+%% Setup a new connection to another Erlang node.
+%% Performs the handshake with the other side.
+%% ------------------------------------------------------------
+
+setup(Node, Type, MyNode, LongOrShortNames,SetupTime) ->
+ spawn_opt(?MODULE, do_setup,
+ [self(), Node, Type, MyNode, LongOrShortNames, SetupTime],
+ [link, {priority, max}]).
+
+do_setup(Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
+ ?trace("~p~n",[{?MODULE, do_setup, self(), Node}]),
+ [Name, Address] = splitnode(Node, LongOrShortNames),
+ case inet:getaddr(Address, inet) of
+ {ok, Ip} ->
+ Timer = dist_util:start_timer(SetupTime),
+ ErlEpmd = net_kernel:epmd_module(),
+ case ErlEpmd:port_please(Name, Ip) of
+ {port, TcpPort, Version} ->
+ ?trace("port_please(~p) -> version ~p~n",
+ [Node,Version]),
+ dist_util:reset_timer(Timer),
+ case
+ gen_tcp:connect(
+ Ip, TcpPort,
+ connect_options([binary, {active, false}, {packet, 2}]))
+ of
+ {ok, Socket} ->
+ DistCtrl = spawn_dist_cntrlr(Socket),
+ call_ctrlr(DistCtrl, {supervisor, self()}),
+ flush_controller(DistCtrl, Socket),
+ gen_tcp:controlling_process(Socket, DistCtrl),
+ flush_controller(DistCtrl, Socket),
+ HSData0 = hs_data_common(DistCtrl),
+ HSData = HSData0#hs_data{kernel_pid = Kernel,
+ other_node = Node,
+ this_node = MyNode,
+ socket = DistCtrl,
+ timer = Timer,
+ this_flags = 0,
+ other_version = Version,
+ request_type = Type},
+ dist_util:handshake_we_started(HSData);
+ _ ->
+ %% Other Node may have closed since
+ %% port_please !
+ ?trace("other node (~p) "
+ "closed since port_please.~n",
+ [Node]),
+ ?shutdown(Node)
+ end;
+ _ ->
+ ?trace("port_please (~p) "
+ "failed.~n", [Node]),
+ ?shutdown(Node)
+ end;
+ _Other ->
+ ?trace("inet_getaddr(~p) "
+ "failed (~p).~n", [Node,_Other]),
+ ?shutdown(Node)
+ end.
+
+connect_options(Opts) ->
+ case application:get_env(kernel, inet_dist_connect_options) of
+ {ok,ConnectOpts} ->
+ ConnectOpts ++ Opts;
+ _ ->
+ Opts
+ end.
+
+%%
+%% Close a socket.
+%%
+close(Listen) ->
+ gen_tcp:close(Listen).
+
+
+%% If Node is illegal terminate the connection setup!!
+splitnode(Node, LongOrShortNames) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [Name|Tail] when Tail =/= [] ->
+ Host = lists:append(Tail),
+ case split_node(Host, $., []) of
+ [_] when LongOrShortNames =:= longnames ->
+ case inet:parse_address(Host) of
+ {ok, _} ->
+ [Name, Host];
+ _ ->
+ error_msg("** System running to use "
+ "fully qualified "
+ "hostnames **~n"
+ "** Hostname ~ts is illegal **~n",
+ [Host]),
+ ?shutdown(Node)
+ end;
+ L when length(L) > 1, LongOrShortNames =:= shortnames ->
+ error_msg("** System NOT running to use fully qualified "
+ "hostnames **~n"
+ "** Hostname ~ts is illegal **~n",
+ [Host]),
+ ?shutdown(Node);
+ _ ->
+ [Name, Host]
+ end;
+ [_] ->
+ error_msg("** Nodename ~p illegal, no '@' character **~n",
+ [Node]),
+ ?shutdown(Node);
+ _ ->
+ error_msg("** Nodename ~p illegal **~n", [Node]),
+ ?shutdown(Node)
+ end.
+
+split_node([Chr|T], Chr, Ack) -> [lists:reverse(Ack)|split_node(T, Chr, [])];
+split_node([H|T], Chr, Ack) -> split_node(T, Chr, [H|Ack]);
+split_node([], _, Ack) -> [lists:reverse(Ack)].
+
+%% ------------------------------------------------------------
+%% Fetch local information about a Socket.
+%% ------------------------------------------------------------
+get_tcp_address(Socket) ->
+ {ok, Address} = inet:sockname(Socket),
+ {ok, Host} = inet:gethostname(),
+ #net_address {
+ address = Address,
+ host = Host,
+ protocol = tcp,
+ family = inet
+ }.
+
+%% ------------------------------------------------------------
+%% Only accept new connection attempts from nodes on our
+%% own LAN, if the check_ip environment parameter is true.
+%% ------------------------------------------------------------
+check_ip(DistCtrl) ->
+ case application:get_env(check_ip) of
+ {ok, true} ->
+ case get_ifs(DistCtrl) of
+ {ok, IFs, IP} ->
+ check_ip(IFs, IP);
+ _ ->
+ ?shutdown(no_node)
+ end;
+ _ ->
+ true
+ end.
+
+get_ifs(DistCtrl) ->
+ Socket = call_ctrlr(DistCtrl, socket),
+ case inet:peername(Socket) of
+ {ok, {IP, _}} ->
+ case inet:getif(Socket) of
+ {ok, IFs} -> {ok, IFs, IP};
+ Error -> Error
+ end;
+ Error ->
+ Error
+ end.
+
+check_ip([{OwnIP, _, Netmask}|IFs], PeerIP) ->
+ case {inet_tcp:mask(Netmask, PeerIP), inet_tcp:mask(Netmask, OwnIP)} of
+ {M, M} -> true;
+ _ -> check_ip(IFs, PeerIP)
+ end;
+check_ip([], PeerIP) ->
+ {false, PeerIP}.
+
+is_node_name(Node) when is_atom(Node) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [_, _Host] -> true;
+ _ -> false
+ end;
+is_node_name(_Node) ->
+ false.
+
+hs_data_common(DistCtrl) ->
+ TickHandler = call_ctrlr(DistCtrl, tick_handler),
+ Socket = call_ctrlr(DistCtrl, socket),
+ #hs_data{f_send = send_fun(),
+ f_recv = recv_fun(),
+ f_setopts_pre_nodeup = setopts_pre_nodeup_fun(),
+ f_setopts_post_nodeup = setopts_post_nodeup_fun(),
+ f_getll = getll_fun(),
+ f_handshake_complete = handshake_complete_fun(),
+ f_address = address_fun(),
+ mf_setopts = setopts_fun(DistCtrl, Socket),
+ mf_getopts = getopts_fun(DistCtrl, Socket),
+ mf_getstat = getstat_fun(DistCtrl, Socket),
+ mf_tick = tick_fun(DistCtrl, TickHandler)}.
+
+%%% ------------------------------------------------------------
+%%% Distribution controller processes
+%%% ------------------------------------------------------------
+
+%%
+%% There will be five parties working together when the
+%% connection is up:
+%% - The gen_tcp socket. Providing a tcp/ip connection
+%% to the other node.
+%% - The output handler. It will dispatch all outgoing
+%% traffic from the VM to the gen_tcp socket. This
+%% process is registered as distribution controller
+%% for this channel with the VM.
+%% - The input handler. It will dispatch all incoming
+%% traffic from the gen_tcp socket to the VM. This
+%% process is also the socket owner and receives
+%% incoming traffic using active-N.
+%% - The tick handler. Dispatches asynchronous tick
+%% requests to the socket. It executes on max priority
+%% since it is important to get ticks through to the
+%% other end.
+%% - The channel supervisor (provided by dist_util). It
+%%   monitors traffic. It issues tick requests to the tick
+%%   handler when no outgoing traffic is seen and brings
+%%   the connection down if no incoming traffic is seen.
+%% This process also executes on max priority.
+%%
+%% These parties are linked together, so should one
+%% of them fail, all of them are terminated and the
+%% connection is taken down.
+%%
+
+%% In order to avoid issues with lingering signal binaries
+%% we enable off-heap message queue data as well as fullsweep
+%% after 0. The fullsweeps will be cheap since we have more
+%% or less no live data.
+-define(DIST_CNTRL_COMMON_SPAWN_OPTS,
+ [{message_queue_data, off_heap},
+ {fullsweep_after, 0}]).
+
+tick_fun(DistCtrl, TickHandler) ->
+ fun (Ctrl) when Ctrl == DistCtrl ->
+ TickHandler ! tick
+ end.
+
+getstat_fun(DistCtrl, Socket) ->
+ fun (Ctrl) when Ctrl == DistCtrl ->
+ case inet:getstat(Socket, [recv_cnt, send_cnt, send_pend]) of
+ {ok, Stat} ->
+ split_stat(Stat,0,0,0);
+ Error ->
+ Error
+ end
+ end.
+
+split_stat([{recv_cnt, R}|Stat], _, W, P) ->
+ split_stat(Stat, R, W, P);
+split_stat([{send_cnt, W}|Stat], R, _, P) ->
+ split_stat(Stat, R, W, P);
+split_stat([{send_pend, P}|Stat], R, W, _) ->
+ split_stat(Stat, R, W, P);
+split_stat([], R, W, P) ->
+ {ok, R, W, P}.
+
+setopts_fun(DistCtrl, Socket) ->
+ fun (Ctrl, Opts) when Ctrl == DistCtrl ->
+ setopts(Socket, Opts)
+ end.
+
+getopts_fun(DistCtrl, Socket) ->
+ fun (Ctrl, Opts) when Ctrl == DistCtrl ->
+ getopts(Socket, Opts)
+ end.
+
+setopts(S, Opts) ->
+ case [Opt || {K,_}=Opt <- Opts,
+ K =:= active orelse K =:= deliver orelse K =:= packet] of
+ [] -> inet:setopts(S,Opts);
+ Opts1 -> {error, {badopts,Opts1}}
+ end.
+
+getopts(S, Opts) ->
+ inet:getopts(S, Opts).
+
+send_fun() ->
+ fun (Ctrlr, Packet) ->
+ call_ctrlr(Ctrlr, {send, Packet})
+ end.
+
+recv_fun() ->
+ fun (Ctrlr, Length, Timeout) ->
+ case call_ctrlr(Ctrlr, {recv, Length, Timeout}) of
+ {ok, Bin} when is_binary(Bin) ->
+ {ok, binary_to_list(Bin)};
+ Other ->
+ Other
+ end
+ end.
+
+getll_fun() ->
+ fun (Ctrlr) ->
+ call_ctrlr(Ctrlr, getll)
+ end.
+
+address_fun() ->
+ fun (Ctrlr, Node) ->
+ case call_ctrlr(Ctrlr, {address, Node}) of
+ {error, no_node} -> %% No '@' or more than one '@' in node name.
+ ?shutdown(no_node);
+ Res ->
+ Res
+ end
+ end.
+
+setopts_pre_nodeup_fun() ->
+ fun (Ctrlr) ->
+ call_ctrlr(Ctrlr, pre_nodeup)
+ end.
+
+setopts_post_nodeup_fun() ->
+ fun (Ctrlr) ->
+ call_ctrlr(Ctrlr, post_nodeup)
+ end.
+
+handshake_complete_fun() ->
+ fun (Ctrlr, Node, DHandle) ->
+ call_ctrlr(Ctrlr, {handshake_complete, Node, DHandle})
+ end.
+
+call_ctrlr(Ctrlr, Msg) ->
+ Ref = erlang:monitor(process, Ctrlr),
+ Ctrlr ! {Ref, self(), Msg},
+ receive
+ {Ref, Res} ->
+ erlang:demonitor(Ref, [flush]),
+ Res;
+ {'DOWN', Ref, process, Ctrlr, Reason} ->
+ exit({dist_controller_exit, Reason})
+ end.
+
+%%
+%% The tick handler process writes a tick to the
+%% socket when it receives a 'tick' message from
+%% the connection supervisor.
+%%
+%% We are not allowed to block the connection
+%% supervisor when writing a tick and we also want
+%% the tick to go through even during a heavily
+%% loaded system. gen_tcp does not have a
+%% non-blocking send operation exposed in its API
+%% and we don't want to run the distribution
+%% controller under high priority. Therefore, this
+%% separate process with max priority dispatches the
+%% ticks.
+%%
+dist_cntrlr_tick_handler(Socket) ->
+ receive
+ tick ->
+ %% May block due to busy port...
+ sock_send(Socket, "");
+ _ ->
+ ok
+ end,
+ dist_cntrlr_tick_handler(Socket).
+
+spawn_dist_cntrlr(Socket) ->
+ spawn_opt(?MODULE, dist_cntrlr_setup, [Socket],
+ [{priority, max}] ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS).
+
+dist_cntrlr_setup(Socket) ->
+ TickHandler = spawn_opt(?MODULE, dist_cntrlr_tick_handler,
+ [Socket],
+ [link, {priority, max}]
+ ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS),
+ dist_cntrlr_setup_loop(Socket, TickHandler, undefined).
+
+%%
+%% During the handshake phase we loop in dist_cntrlr_setup().
+%% When the connection is up we spawn an input handler and
+%% continue as output handler.
+%%
+dist_cntrlr_setup_loop(Socket, TickHandler, Sup) ->
+ receive
+ {tcp_closed, Socket} ->
+ exit(connection_closed);
+
+ {Ref, From, {supervisor, Pid}} ->
+ Res = link(Pid),
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Pid);
+
+ {Ref, From, tick_handler} ->
+ From ! {Ref, TickHandler},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, socket} ->
+ From ! {Ref, Socket},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, {send, Packet}} ->
+ Res = gen_tcp:send(Socket, Packet),
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, {recv, Length, Timeout}} ->
+ Res = gen_tcp:recv(Socket, Length, Timeout),
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, getll} ->
+ From ! {Ref, {ok, self()}},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, {address, Node}} ->
+ Res = case inet:peername(Socket) of
+ {ok, Address} ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [_,Host] ->
+ #net_address{address=Address,host=Host,
+ protocol=tcp, family=inet};
+ _ ->
+ {error, no_node}
+ end
+ end,
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, pre_nodeup} ->
+ Res = inet:setopts(Socket,
+ [{active, false},
+ {packet, 4},
+ nodelay()]),
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, post_nodeup} ->
+ Res = inet:setopts(Socket,
+ [{active, false},
+ {packet, 4},
+ nodelay()]),
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, {handshake_complete, _Node, DHandle}} ->
+ From ! {Ref, ok},
+ %% Handshake complete! Begin dispatching traffic...
+
+            %% We use a separate process for dispatching input. This
+            %% is not necessary, but it enables parallel execution
+            %% of independent workloads at the same time as it
+            %% simplifies the implementation...
+ InputHandler = spawn_opt(?MODULE, dist_cntrlr_input_setup,
+ [DHandle, Socket, Sup],
+ [link] ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS),
+
+ flush_controller(InputHandler, Socket),
+ gen_tcp:controlling_process(Socket, InputHandler),
+ flush_controller(InputHandler, Socket),
+
+ ok = erlang:dist_ctrl_input_handler(DHandle, InputHandler),
+
+ InputHandler ! DHandle,
+
+ %% From now on we execute on normal priority
+ process_flag(priority, normal),
+ erlang:dist_ctrl_get_data_notification(DHandle),
+ dist_cntrlr_output_loop(DHandle, Socket)
+ end.
+
+%% We use active 10 for good throughput while still
+%% maintaining back-pressure if the input controller
+%% isn't able to handle all incoming messages...
+-define(ACTIVE_INPUT, 10).
+
+dist_cntrlr_input_setup(DHandle, Socket, Sup) ->
+ link(Sup),
+    %% Ensure we don't try to put data before being registered
+ %% as input handler...
+ receive
+ DHandle ->
+ dist_cntrlr_input_loop(DHandle, Socket, 0)
+ end.
+
+dist_cntrlr_input_loop(DHandle, Socket, N) when N =< ?ACTIVE_INPUT/2 ->
+ inet:setopts(Socket, [{active, ?ACTIVE_INPUT - N}]),
+ dist_cntrlr_input_loop(DHandle, Socket, ?ACTIVE_INPUT);
+dist_cntrlr_input_loop(DHandle, Socket, N) ->
+ receive
+ {tcp_closed, Socket} ->
+ %% Connection to remote node terminated...
+ exit(connection_closed);
+
+ {tcp, Socket, Data} ->
+ %% Incoming data from remote node...
+ try erlang:dist_ctrl_put_data(DHandle, Data)
+ catch _ : _ -> death_row()
+ end,
+ dist_cntrlr_input_loop(DHandle, Socket, N-1);
+
+ _ ->
+ %% Ignore...
+ dist_cntrlr_input_loop(DHandle, Socket, N)
+ end.
+
+dist_cntrlr_send_data(DHandle, Socket) ->
+ case erlang:dist_ctrl_get_data(DHandle) of
+ none ->
+ erlang:dist_ctrl_get_data_notification(DHandle);
+ Data ->
+ sock_send(Socket, Data),
+ dist_cntrlr_send_data(DHandle, Socket)
+ end.
+
+
+dist_cntrlr_output_loop(DHandle, Socket) ->
+ receive
+ dist_data ->
+ %% Outgoing data from this node...
+ try dist_cntrlr_send_data(DHandle, Socket)
+ catch _ : _ -> death_row()
+ end,
+ dist_cntrlr_output_loop(DHandle, Socket);
+
+ {send, From, Ref, Data} ->
+ %% This is for testing only!
+ %%
+ %% Needed by some OTP distribution
+ %% test suites...
+ sock_send(Socket, Data),
+ From ! {Ref, ok},
+ dist_cntrlr_output_loop(DHandle, Socket);
+
+ _ ->
+ %% Drop garbage message...
+ dist_cntrlr_output_loop(DHandle, Socket)
+
+ end.
+
+sock_send(Socket, Data) ->
+ try gen_tcp:send(Socket, Data) of
+ ok -> ok;
+ {error, Reason} -> death_row({send_error, Reason})
+ catch
+ Type : Reason -> death_row({send_error, {Type, Reason}})
+ end.
+
+death_row() ->
+ death_row(connection_closed).
+
+death_row(normal) ->
+ %% We do not want to exit with normal
+    %% exit reason since it won't bring down
+ %% linked processes...
+ death_row();
+death_row(Reason) ->
+ %% When the connection is on its way down operations
+ %% begin to fail. We catch the failures and call
+ %% this function waiting for termination. We should
+ %% be terminated by one of our links to the other
+ %% involved parties that began bringing the
+ %% connection down. By waiting for termination we
+ %% avoid altering the exit reason for the connection
+ %% teardown. We however limit the wait to 5 seconds
+ %% and bring down the connection ourselves if not
+ %% terminated...
+ receive after 5000 -> exit(Reason) end.
diff --git a/lib/kernel/include/dist.hrl b/lib/kernel/include/dist.hrl
index 47d9459a19..003852f1b0 100644
--- a/lib/kernel/include/dist.hrl
+++ b/lib/kernel/include/dist.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -39,3 +39,9 @@
-define(DFLAG_SMALL_ATOM_TAGS, 16#4000).
-define(DFLAG_UTF8_ATOMS, 16#10000).
-define(DFLAG_MAP_TAG, 16#20000).
+-define(DFLAG_BIG_CREATION, 16#40000).
+-define(DFLAG_SEND_SENDER, 16#80000).
+-define(DFLAG_BIG_SEQTRACE_LABELS, 16#100000).
+
+%% Also update dflag2str() in ../src/dist_util.erl
+%% when adding flags...
diff --git a/lib/kernel/include/dist_util.hrl b/lib/kernel/include/dist_util.hrl
index d5df2be4ec..56f775f060 100644
--- a/lib/kernel/include/dist_util.hrl
+++ b/lib/kernel/include/dist_util.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -29,9 +29,9 @@
-endif.
-ifdef(dist_trace).
--define(trace(Fmt,Args), io:format("~p ~p:~s",[erlang:now(),node(),lists:flatten(io_lib:format(Fmt, Args))])).
+-define(trace(Fmt,Args), io:format("~p ~p:~s",[erlang:convert_time_unit(erlang:monotonic_time()-erlang:system_info(start_time), native, microsecond),node(),lists:flatten(io_lib:format(Fmt, Args))])).
% Use the one below for config-file (early boot) connection tracing
-%-define(trace(Fmt,Args), erlang:display([erlang:now(),node(),lists:flatten(io_lib:format(Fmt, Args))])).
+%-define(trace(Fmt,Args), erlang:display([erlang:convert_time_unit(erlang:monotonic_time()-erlang:system_info(start_time), native, microsecond),node(),lists:flatten(io_lib:format(Fmt, Args))])).
-define(trace_factor,8).
-else.
-define(trace(Fmt,Args), ok).
@@ -63,7 +63,7 @@
f_getll, %% Get low level port or pid.
f_address, %% The address of the "socket",
%% generated from Socket,Node
- %% These two are used in the tick loop,
+ %% These three are used in the tick loop,
%% so they are not fun's to avoid holding old code.
mf_tick, %% Takes the socket as parameters and
%% sends a tick, this is no fun, it
@@ -74,7 +74,17 @@
%% {ok, RecvCnt, SendCnt, SendPend} for
%% a given socket. This is a {M,F},
%% returning {error, Reason on failure}
- request_type = normal
+ request_type = normal,
+
+ %% New in kernel-5.1 (OTP 19.1):
+ mf_setopts, %% netkernel:setopts on active connection
+ mf_getopts, %% netkernel:getopts on active connection
+
+ %% New in kernel-6.0 (OTP 21.0)
+ f_handshake_complete, %% Notify handshake complete
+ add_flags, %% dflags to add
+ reject_flags, %% dflags not to use (not all can be rejected)
+ require_flags %% dflags that are required
}).
diff --git a/lib/kernel/include/file.hrl b/lib/kernel/include/file.hrl
index 7cf033f7f5..36112bb040 100644
--- a/lib/kernel/include/file.hrl
+++ b/lib/kernel/include/file.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2015. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -23,37 +23,40 @@
%%--------------------------------------------------------------------------
-record(file_info,
- {size :: non_neg_integer(), % Size of file in bytes.
- type :: 'device' | 'directory' | 'other' | 'regular' | 'symlink',
- access :: 'read' | 'write' | 'read_write' | 'none',
- atime :: file:date_time() | non_neg_integer(),
+ {size :: non_neg_integer() | 'undefined', % Size of file in bytes.
+ type :: 'device' | 'directory' | 'other' | 'regular' | 'symlink'
+ | 'undefined',
+ access :: 'read' | 'write' | 'read_write' | 'none' | 'undefined',
+ atime :: file:date_time() | non_neg_integer() | 'undefined',
% The local time the file was last read:
% {{Year, Mon, Day}, {Hour, Min, Sec}}.
% atime, ctime, mtime may also be unix epochs()
- mtime :: file:date_time() | non_neg_integer(),
+ mtime :: file:date_time() | non_neg_integer() | 'undefined',
% The local time the file was last written.
- ctime :: file:date_time() | non_neg_integer(),
+ ctime :: file:date_time() | non_neg_integer() | 'undefined',
% The interpretation of this time field
% is dependent on operating system.
% On Unix it is the last time the file
% or the inode was changed. On Windows,
% it is the creation time.
- mode :: non_neg_integer(), % File permissions. On Windows,
+ mode :: non_neg_integer() | 'undefined',
+ % File permissions. On Windows,
% the owner permissions will be
% duplicated for group and user.
- links :: non_neg_integer(),
+ links :: non_neg_integer() | 'undefined',
% Number of links to the file (1 if the
% filesystem doesn't support links).
- major_device :: non_neg_integer(),
+ major_device :: non_neg_integer() | 'undefined',
% Identifies the file system (Unix),
% or the drive number (A: = 0, B: = 1)
% (Windows).
%% The following are Unix specific.
%% They are set to zero on other operating systems.
- minor_device :: non_neg_integer(), % Only valid for devices.
- inode :: non_neg_integer(), % Inode number for file.
- uid :: non_neg_integer(), % User id for owner.
- gid :: non_neg_integer()}). % Group id for owner.
+ minor_device :: non_neg_integer() | 'undefined',
+ % Only valid for devices.
+ inode :: non_neg_integer() | 'undefined', % Inode number for file.
+ uid :: non_neg_integer() | 'undefined', % User id for owner.
+ gid :: non_neg_integer() | 'undefined'}). % Group id for owner.
-record(file_descriptor,
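With 'undefined' now allowed in every field type, code that matches on #file_info{} can make the missing-value case explicit. A minimal sketch, not part of the patch:

    -include_lib("kernel/include/file.hrl").

    file_size(Path) ->
        case file:read_file_info(Path) of
            {ok, #file_info{size = Size}} when is_integer(Size) ->
                {ok, Size};
            {ok, #file_info{}} ->
                unknown;
            {error, Reason} ->
                {error, Reason}
        end.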
diff --git a/lib/kernel/include/inet.hrl b/lib/kernel/include/inet.hrl
index d983fa9e72..daa2e14b46 100644
--- a/lib/kernel/include/inet.hrl
+++ b/lib/kernel/include/inet.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@
-record(hostent,
{
- h_name :: inet:hostname(), %% offical name of host
+ h_name :: inet:hostname(), %% official name of host
h_aliases = [] :: [inet:hostname()], %% alias list
h_addrtype :: 'inet' | 'inet6', %% host address type
h_length :: non_neg_integer(), %% length of address
diff --git a/lib/kernel/include/inet_sctp.hrl b/lib/kernel/include/inet_sctp.hrl
index d6376e452e..ddb3cdc26c 100644
--- a/lib/kernel/include/inet_sctp.hrl
+++ b/lib/kernel/include/inet_sctp.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/include/logger.hrl b/lib/kernel/include/logger.hrl
new file mode 100644
index 0000000000..b09977e0f2
--- /dev/null
+++ b/lib/kernel/include/logger.hrl
@@ -0,0 +1,53 @@
+-ifndef(LOGGER_HRL).
+-define(LOGGER_HRL,true).
+-define(LOG_EMERGENCY(A),?DO_LOG(emergency,[A])).
+-define(LOG_EMERGENCY(A,B),?DO_LOG(emergency,[A,B])).
+-define(LOG_EMERGENCY(A,B,C),?DO_LOG(emergency,[A,B,C])).
+
+-define(LOG_ALERT(A),?DO_LOG(alert,[A])).
+-define(LOG_ALERT(A,B),?DO_LOG(alert,[A,B])).
+-define(LOG_ALERT(A,B,C),?DO_LOG(alert,[A,B,C])).
+
+-define(LOG_CRITICAL(A),?DO_LOG(critical,[A])).
+-define(LOG_CRITICAL(A,B),?DO_LOG(critical,[A,B])).
+-define(LOG_CRITICAL(A,B,C),?DO_LOG(critical,[A,B,C])).
+
+-define(LOG_ERROR(A),?DO_LOG(error,[A])).
+-define(LOG_ERROR(A,B),?DO_LOG(error,[A,B])).
+-define(LOG_ERROR(A,B,C),?DO_LOG(error,[A,B,C])).
+
+-define(LOG_WARNING(A),?DO_LOG(warning,[A])).
+-define(LOG_WARNING(A,B),?DO_LOG(warning,[A,B])).
+-define(LOG_WARNING(A,B,C),?DO_LOG(warning,[A,B,C])).
+
+-define(LOG_NOTICE(A),?DO_LOG(notice,[A])).
+-define(LOG_NOTICE(A,B),?DO_LOG(notice,[A,B])).
+-define(LOG_NOTICE(A,B,C),?DO_LOG(notice,[A,B,C])).
+
+-define(LOG_INFO(A),?DO_LOG(info,[A])).
+-define(LOG_INFO(A,B),?DO_LOG(info,[A,B])).
+-define(LOG_INFO(A,B,C),?DO_LOG(info,[A,B,C])).
+
+-define(LOG_DEBUG(A),?DO_LOG(debug,[A])).
+-define(LOG_DEBUG(A,B),?DO_LOG(debug,[A,B])).
+-define(LOG_DEBUG(A,B,C),?DO_LOG(debug,[A,B,C])).
+
+-define(LOG(L,A),?DO_LOG(L,[A])).
+-define(LOG(L,A,B),?DO_LOG(L,[A,B])).
+-define(LOG(L,A,B,C),?DO_LOG(L,[A,B,C])).
+
+-define(LOCATION,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ line=>?LINE,
+ file=>?FILE}).
+
+%%%-----------------------------------------------------------------
+%%% Internal, i.e. not intended for direct use in code - use above
+%%% macros instead!
+-define(DO_LOG(Level,Args),
+ case logger:allow(Level,?MODULE) of
+ true ->
+ apply(logger,macro_log,[?LOCATION,Level|Args]);
+ false ->
+ ok
+ end).
+-endif.
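A minimal usage sketch, not part of the patch (the module and messages are made up). Note that ?DO_LOG builds the argument list inside the logger:allow/2 test, so the message arguments are only evaluated when the event is actually allowed for the module:

    -module(my_mod).
    -include_lib("kernel/include/logger.hrl").
    -export([divide/2]).

    divide(A, 0) ->
        ?LOG_ERROR("~p cannot be divided by zero", [A]),
        {error, badarith};
    divide(A, B) ->
        ?LOG_DEBUG(#{what => divide, a => A, b => B}),
        {ok, A / B}.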
diff --git a/lib/kernel/include/net_address.hrl b/lib/kernel/include/net_address.hrl
index 9dd4b78cd4..4988175593 100644
--- a/lib/kernel/include/net_address.hrl
+++ b/lib/kernel/include/net_address.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile
index 57dacebde3..57f17defc8 100644
--- a/lib/kernel/src/Makefile
+++ b/lib/kernel/src/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2013. All Rights Reserved.
+# Copyright Ericsson AB 1996-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -71,6 +71,7 @@ MODULES = \
erl_distribution \
erl_epmd \
erl_reply \
+ erl_signal_handler \
erts_debug \
error_handler \
error_logger \
@@ -84,6 +85,7 @@ MODULES = \
global_group \
global_search \
group \
+ group_history \
heart \
hipe_unified_loader \
inet \
@@ -104,6 +106,21 @@ MODULES = \
inet_sctp \
kernel \
kernel_config \
+ kernel_refc \
+ local_udp \
+ local_tcp \
+ logger \
+ logger_backend \
+ logger_config \
+ logger_handler_watcher \
+ logger_std_h \
+ logger_disk_log_h \
+ logger_h_common \
+ logger_filters \
+ logger_formatter \
+ logger_server \
+ logger_simple_h \
+ logger_sup \
net \
net_adm \
net_kernel \
@@ -116,17 +133,25 @@ MODULES = \
user \
user_drv \
user_sup \
+ raw_file_io \
+ raw_file_io_compressed \
+ raw_file_io_inflate \
+ raw_file_io_deflate \
+ raw_file_io_delayed \
+ raw_file_io_list \
+ raw_file_io_raw \
wrap_log_reader
HRL_FILES= ../include/file.hrl ../include/inet.hrl ../include/inet_sctp.hrl \
../include/dist.hrl ../include/dist_util.hrl \
- ../include/net_address.hrl
+ ../include/net_address.hrl ../include/logger.hrl
INTERNAL_HRL_FILES= application_master.hrl disk_log.hrl \
- erl_epmd.hrl hipe_ext_format.hrl \
+ erl_epmd.hrl file_int.hrl hipe_ext_format.hrl \
inet_dns.hrl inet_res.hrl \
inet_boot.hrl inet_config.hrl inet_int.hrl \
- inet_dns_record_adts.hrl
+ inet_dns_record_adts.hrl \
+ logger_internal.hrl logger_h_common.hrl
ERL_FILES= $(MODULES:%=%.erl)
@@ -211,7 +236,7 @@ release_docs_spec:
# Include dependencies -- list below added by Kostis Sagonas
-$(EBIN)/application_controller.beam: application_master.hrl
+$(EBIN)/application_controller.beam: application_master.hrl ../include/logger.hrl
$(EBIN)/application_master.beam: application_master.hrl
$(EBIN)/auth.beam: ../include/file.hrl
$(EBIN)/code.beam: ../include/file.hrl
@@ -222,7 +247,9 @@ $(EBIN)/disk_log_server.beam: disk_log.hrl
$(EBIN)/dist_util.beam: ../include/dist_util.hrl ../include/dist.hrl
$(EBIN)/erl_boot_server.beam: inet_boot.hrl
$(EBIN)/erl_epmd.beam: inet_int.hrl erl_epmd.hrl
-$(EBIN)/file.beam: ../include/file.hrl
+$(EBIN)/error_logger.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/file.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/file_io_server.beam: ../include/file.hrl file_int.hrl
$(EBIN)/gen_tcp.beam: inet_int.hrl
$(EBIN)/gen_udp.beam: inet_int.hrl
$(EBIN)/gen_sctp.beam: ../include/inet_sctp.hrl
@@ -244,7 +271,26 @@ $(EBIN)/inet_tcp.beam: inet_int.hrl
$(EBIN)/inet_udp_dist.beam: ../include/net_address.hrl ../include/dist.hrl ../include/dist_util.hrl
$(EBIN)/inet_udp.beam: inet_int.hrl
$(EBIN)/inet_sctp.beam: inet_int.hrl ../include/inet_sctp.hrl
+$(EBIN)/local_udp.beam: inet_int.hrl
+$(EBIN)/local_tcp.beam: inet_int.hrl
+$(EBIN)/logger.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_backend.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_config.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_disk_log_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl
+$(EBIN)/logger_filters.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_formatter.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_server.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_simple_h.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_std_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl
+$(EBIN)/logger_h_common.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl
$(EBIN)/net_kernel.beam: ../include/net_address.hrl
$(EBIN)/os.beam: ../include/file.hrl
$(EBIN)/ram_file.beam: ../include/file.hrl
$(EBIN)/wrap_log_reader.beam: disk_log.hrl ../include/file.hrl
+$(EBIN)/raw_file_io.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_compressed.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_inflate.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_deflate.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_delayed.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_list.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_raw.beam: ../include/file.hrl file_int.hrl
diff --git a/lib/kernel/src/application.erl b/lib/kernel/src/application.erl
index 1abfbfb9ec..bc6be2f8f5 100644
--- a/lib/kernel/src/application.erl
+++ b/lib/kernel/src/application.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl
index b1ca2ea64f..a074d2e74b 100644
--- a/lib/kernel/src/application_controller.erl
+++ b/lib/kernel/src/application_controller.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -44,6 +44,7 @@
keyfind/3, keydelete/3, keyreplace/4]).
-include("application_master.hrl").
+-include("logger.hrl").
-define(AC, ?MODULE). % Name of process
@@ -1271,9 +1272,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) ->
NewEnv = merge_app_env(ApplEnv, ConfEnv),
CmdLineEnv = get_cmd_env(Name),
NewEnv2 = merge_app_env(NewEnv, CmdLineEnv),
- NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2,
- {included_applications, IncApps}),
- add_env(Name, NewEnv3),
+ add_env(Name, NewEnv2),
Appl = #appl{name = Name, descr = Descr, id = Id, vsn = Vsn,
appl_data = ApplData, inc_apps = IncApps, apps = Apps},
ets:insert(ac_tab, {{loaded, Name}, Appl}),
@@ -1291,7 +1290,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) ->
{ok, NewS}.
unload(AppName, S) ->
- {ok, IncApps} = get_env(AppName, included_applications),
+ {ok, IncApps} = get_key(AppName, included_applications),
del_env(AppName),
ets:delete(ac_tab, {loaded, AppName}),
foldl(fun(App, S1) ->
@@ -1546,9 +1545,8 @@ do_change_apps(Applications, Config, OldAppls) ->
%% Report errors, but do not terminate
%% (backwards compatible behaviour)
lists:foreach(fun({error, {SysFName, Line, Str}}) ->
- Str2 = lists:flatten(io_lib:format("~tp: ~w: ~ts~n",
- [SysFName, Line, Str])),
- error_logger:format(Str2, [])
+ ?LOG_ERROR("~tp: ~w: ~ts~n",[SysFName, Line, Str],
+ #{error_logger=>#{tag=>error}})
end,
Errors),
@@ -1583,13 +1581,9 @@ do_change_appl({ok, {ApplData, Env, IncApps, Descr, Id, Vsn, Apps}},
CmdLineEnv = get_cmd_env(AppName),
NewEnv2 = merge_app_env(NewEnv1, CmdLineEnv),
- %% included_apps is made into an env parameter as well
- NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2,
- {included_applications, IncApps}),
-
%% Update ets table with new application env
del_env(AppName),
- add_env(AppName, NewEnv3),
+ add_env(AppName, NewEnv2),
OldAppl#appl{appl_data=ApplData,
descr=Descr,
@@ -1620,7 +1614,7 @@ conv(_) -> [].
make_term(Str) ->
case erl_scan:string(Str) of
{ok, Tokens, _} ->
- case erl_parse:parse_term(Tokens ++ [{dot, 1}]) of
+ case erl_parse:parse_term(Tokens ++ [{dot, erl_anno:new(1)}]) of
{ok, Term} ->
Term;
{error, {_,M,Reason}} ->
@@ -1631,8 +1625,9 @@ make_term(Str) ->
end.
handle_make_term_error(Mod, Reason, Str) ->
- error_logger:format("application_controller: ~ts: ~ts~n",
- [Mod:format_error(Reason), Str]),
+ ?LOG_ERROR("application_controller: ~ts: ~ts~n",
+ [Mod:format_error(Reason), Str],
+ #{error_logger=>#{tag=>error}}),
throw({error, {bad_environment_value, Str}}).
get_env_i(Name, #state{conf_data = ConfData}) when is_list(ConfData) ->
@@ -1819,8 +1814,9 @@ check_conf() ->
%% Therefore read and merge contents.
if
BFName =:= "sys" ->
+ DName = filename:dirname(FName),
{ok, SysEnv, Errors} =
- check_conf_sys(NewEnv),
+ check_conf_sys(NewEnv, [], [], DName),
%% Report first error, if any, and
%% terminate
@@ -1842,20 +1838,31 @@ check_conf() ->
end.
check_conf_sys(Env) ->
- check_conf_sys(Env, [], []).
+ check_conf_sys(Env, [], [], []).
-check_conf_sys([File|T], SysEnv, Errors) when is_list(File) ->
+check_conf_sys([File|T], SysEnv, Errors, DName) when is_list(File),is_list(DName) ->
BFName = filename:basename(File, ".config"),
FName = filename:join(filename:dirname(File), BFName ++ ".config"),
- case load_file(FName) of
+ LName = case filename:pathtype(FName) of
+ relative when (DName =/= []) ->
+ % Check if relative to the sys.config dir, otherwise use legacy mode,
+ % i.e. relative to cwd.
+ RName = filename:join(DName, FName),
+ case erl_prim_loader:read_file_info(RName) of
+ {ok, _} -> RName ;
+ error -> FName
+ end;
+ _ -> FName
+ end,
+ case load_file(LName) of
{ok, NewEnv} ->
- check_conf_sys(T, merge_env(SysEnv, NewEnv), Errors);
+ check_conf_sys(T, merge_env(SysEnv, NewEnv), Errors, DName);
{error, {Line, _Mod, Str}} ->
- check_conf_sys(T, SysEnv, [{error, {FName, Line, Str}}|Errors])
+ check_conf_sys(T, SysEnv, [{error, {LName, Line, Str}}|Errors], DName)
end;
-check_conf_sys([Tuple|T], SysEnv, Errors) ->
- check_conf_sys(T, merge_env(SysEnv, [Tuple]), Errors);
-check_conf_sys([], SysEnv, Errors) ->
+check_conf_sys([Tuple|T], SysEnv, Errors, DName) ->
+ check_conf_sys(T, merge_env(SysEnv, [Tuple]), Errors, DName);
+check_conf_sys([], SysEnv, Errors, _) ->
{ok, SysEnv, lists:reverse(Errors)}.
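The effect is that a file name listed inside sys.config is first resolved relative to the directory of sys.config itself, falling back to the legacy interpretation relative to the current working directory. A hypothetical layout, not part of the patch:

    %% erl -config /opt/app/releases/1.0/sys
    %% where /opt/app/releases/1.0/sys.config contains:
    [{kernel, [{logger_level, info}]},
     "other"].
    %% "other.config" is now looked up as
    %% /opt/app/releases/1.0/other.config before falling back to
    %% ./other.config.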
load_file(File) ->
@@ -1913,19 +1920,25 @@ config_error() ->
"configuration file must contain ONE list ended by <dot>"}}.
%%-----------------------------------------------------------------
-%% Info messages sent to error_logger
+%% Info messages sent to logger
%%-----------------------------------------------------------------
info_started(Name, Node) ->
- Rep = [{application, Name},
- {started_at, Node}],
- error_logger:info_report(progress, Rep).
+ ?LOG_INFO(#{label=>{application_controller,progress},
+ report=>[{application, Name},
+ {started_at, Node}]},
+ #{domain=>[otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"PROGRESS REPORT"},
+ error_logger=>#{tag=>info_report,type=>progress}}).
info_exited(Name, Reason, Type) ->
- Rep = [{application, Name},
- {exited, Reason},
- {type, Type}],
- error_logger:info_report(Rep).
-
+ ?LOG_NOTICE(#{label=>{application_controller,exit},
+ report=>[{application, Name},
+ {exited, Reason},
+ {type, Type}]},
+ #{domain=>[otp],
+ report_cb=>fun logger:format_otp_report/1,
+ error_logger=>#{tag=>info_report,type=>std_info}}).
%%-----------------------------------------------------------------
%% Reply to all processes waiting this application to be started.
@@ -2012,5 +2025,5 @@ to_string(Term) ->
true ->
Term;
false ->
- lists:flatten(io_lib:format("~134217728p", [Term]))
+ lists:flatten(io_lib:format("~0p", [Term]))
end.
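Both reports now carry a domain in their metadata ([otp,sasl] for the progress report, [otp] for the exit report), so handlers decide through domain filters whether to show them. A minimal sketch of such a filter, not part of the patch (my_handler is a hypothetical handler id):

    %% Pass only events tagged with the [otp,sasl] domain; a
    %% hand-rolled equivalent of logger_filters:domain/2.
    sasl_only(#{meta := #{domain := [otp, sasl]}} = Event, _Extra) ->
        Event;
    sasl_only(_Event, _Extra) ->
        stop.

    %% logger:add_handler_filter(my_handler, sasl_only,
    %%                           {fun sasl_only/2, []}).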
diff --git a/lib/kernel/src/application_master.erl b/lib/kernel/src/application_master.erl
index 85b7efc402..8697143dfb 100644
--- a/lib/kernel/src/application_master.erl
+++ b/lib/kernel/src/application_master.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -118,6 +118,10 @@ init(Parent, Starter, ApplData, Type) ->
link(Parent),
process_flag(trap_exit, true),
OldGleader = group_leader(),
+ %% We become the group leader, but forward all I/O to OldGleader.
+ %% This is just a way to identify processes that belong to the
+ %% application. Used for example to find ourselves from any
+ %% process, or, reciprocally, to kill them all when we terminate.
group_leader(self(), self()),
%% Insert ourselves as master for the process. This ensures that
%% the processes in the application can use get_env/1 at startup.
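This group-leader chain is, for example, what lets application:get_application/1 map any process running under an application back to its application name. A minimal sketch, not part of the patch:

    %% Called from any process started under an application's
    %% supervision tree:
    which_app() ->
        case application:get_application(self()) of
            {ok, App} -> App;            %% e.g. kernel, stdlib
            undefined -> no_application
        end.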
diff --git a/lib/kernel/src/application_master.hrl b/lib/kernel/src/application_master.hrl
index f252ce8f16..b03074dbce 100644
--- a/lib/kernel/src/application_master.hrl
+++ b/lib/kernel/src/application_master.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/application_starter.erl b/lib/kernel/src/application_starter.erl
index 692681b515..f51f9bbc45 100644
--- a/lib/kernel/src/application_starter.erl
+++ b/lib/kernel/src/application_starter.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/auth.erl b/lib/kernel/src/auth.erl
index 78cf1e77be..4d18daf9e4 100644
--- a/lib/kernel/src/auth.erl
+++ b/lib/kernel/src/auth.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -107,7 +107,7 @@ get_cookie() ->
get_cookie(_Node) when node() =:= nonode@nohost ->
nocookie;
get_cookie(Node) ->
- gen_server:call(auth, {get_cookie, Node}).
+ gen_server:call(auth, {get_cookie, Node}, infinity).
-spec set_cookie(Cookie :: cookie()) -> 'true'.
@@ -119,12 +119,12 @@ set_cookie(Cookie) ->
set_cookie(_Node, _Cookie) when node() =:= nonode@nohost ->
erlang:error(distribution_not_started);
set_cookie(Node, Cookie) ->
- gen_server:call(auth, {set_cookie, Node, Cookie}).
+ gen_server:call(auth, {set_cookie, Node, Cookie}, infinity).
-spec sync_cookie() -> any().
sync_cookie() ->
- gen_server:call(auth, sync_cookie).
+ gen_server:call(auth, sync_cookie, infinity).
-spec print(Node :: node(), Format :: string(), Args :: [_]) -> 'ok'.
diff --git a/lib/kernel/src/code.erl b/lib/kernel/src/code.erl
index 7237550786..7faef93609 100644
--- a/lib/kernel/src/code.erl
+++ b/lib/kernel/src/code.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -28,11 +28,15 @@
get_path/0,
load_file/1,
ensure_loaded/1,
+ ensure_modules_loaded/1,
load_abs/1,
load_abs/2,
load_binary/3,
load_native_partial/2,
load_native_sticky/3,
+ atomic_load/1,
+ prepare_loading/1,
+ finish_loading/1,
delete/1,
purge/1,
soft_purge/1,
@@ -60,15 +64,20 @@
del_path/1,
replace_path/2,
rehash/0,
- start_link/0, start_link/1,
+ start_link/0,
which/1,
where_is_file/1,
where_is_file/2,
set_primary_archive/4,
clash/0,
- get_mode/0]).
+ module_status/1,
+ modified_modules/0,
+ get_mode/0]).
+
+-deprecated({rehash,0,next_major_release}).
-export_type([load_error_rsn/0, load_ret/0]).
+-export_type([prepared_code/0]).
-include_lib("kernel/include/file.hrl").
@@ -86,6 +95,11 @@
-type loaded_ret_atoms() :: 'cover_compiled' | 'preloaded'.
-type loaded_filename() :: (Filename :: file:filename()) | loaded_ret_atoms().
+-define(PREPARED, '$prepared$').
+-opaque prepared_code() ::
+ {?PREPARED,[{module(),{binary(),string(),_}}]}.
+
+
%%% BIFs
-export([get_chunk/2, is_module_native/1, make_stub_module/3, module_md5/1]).
@@ -104,8 +118,8 @@ get_chunk(_, _) ->
is_module_native(_) ->
erlang:nif_error(undef).
--spec make_stub_module(Module, Beam, Info) -> Module when
- Module :: module(),
+-spec make_stub_module(LoaderState, Beam, Info) -> module() when
+ LoaderState :: binary(),
Beam :: binary(),
Info :: {list(), list(), binary()}.
@@ -135,8 +149,11 @@ load_file(Mod) when is_atom(Mod) ->
-spec ensure_loaded(Module) -> {module, Module} | {error, What} when
Module :: module(),
What :: embedded | badfile | nofile | on_load_failure.
-ensure_loaded(Mod) when is_atom(Mod) ->
- call({ensure_loaded,Mod}).
+ensure_loaded(Mod) when is_atom(Mod) ->
+ case erlang:module_loaded(Mod) of
+ true -> {module, Mod};
+ false -> call({ensure_loaded,Mod})
+ end.
%% XXX File as an atom is allowed only for backwards compatibility.
-spec load_abs(Filename) -> load_ret() when
@@ -246,7 +263,7 @@ is_sticky(Mod) when is_atom(Mod) -> call({is_sticky,Mod}).
-spec set_path(Path) -> 'true' | {'error', What} when
Path :: [Dir :: file:filename()],
- What :: 'bad_directory' | 'bad_path'.
+ What :: 'bad_directory'.
set_path(PathList) when is_list(PathList) -> call({set_path,PathList}).
-spec get_path() -> Path when
@@ -294,23 +311,328 @@ replace_path(Name, Dir) when (is_atom(Name) orelse is_list(Name)),
call({replace_path,Name,Dir}).
-spec rehash() -> 'ok'.
-rehash() -> call(rehash).
+rehash() ->
+ cache_warning(),
+ ok.
-spec get_mode() -> 'embedded' | 'interactive'.
get_mode() -> call(get_mode).
+%%%
+%%% Loading of several modules in parallel.
+%%%
+
+-spec ensure_modules_loaded([Module]) ->
+ 'ok' | {'error',[{Module,What}]} when
+ Module :: module(),
+ What :: badfile | nofile | on_load_failure.
+
+ensure_modules_loaded(Modules) when is_list(Modules) ->
+ case prepare_ensure(Modules, []) of
+ Ms when is_list(Ms) ->
+ ensure_modules_loaded_1(Ms);
+ error ->
+ error(function_clause, [Modules])
+ end.
+
+ensure_modules_loaded_1(Ms0) ->
+ Ms = lists:usort(Ms0),
+ {Prep,Error0} = load_mods(Ms),
+ {OnLoad,Normal} = partition_on_load(Prep),
+ Error1 = case finish_loading(Normal, true) of
+ ok -> Error0;
+ {error,Err} -> Err ++ Error0
+ end,
+ ensure_modules_loaded_2(OnLoad, Error1).
+
+ensure_modules_loaded_2([{M,_}|Ms], Errors) ->
+ case ensure_loaded(M) of
+ {module,M} ->
+ ensure_modules_loaded_2(Ms, Errors);
+ {error,Err} ->
+ ensure_modules_loaded_2(Ms, [{M,Err}|Errors])
+ end;
+ensure_modules_loaded_2([], []) ->
+ ok;
+ensure_modules_loaded_2([], [_|_]=Errors) ->
+ {error,Errors}.
+
+prepare_ensure([M|Ms], Acc) when is_atom(M) ->
+ case erlang:module_loaded(M) of
+ true ->
+ prepare_ensure(Ms, Acc);
+ false ->
+ prepare_ensure(Ms, [M|Acc])
+ end;
+prepare_ensure([], Acc) ->
+ Acc;
+prepare_ensure(_, _) ->
+ error.
+
+-spec atomic_load(Modules) -> 'ok' | {'error',[{Module,What}]} when
+ Modules :: [Module | {Module, Filename, Binary}],
+ Module :: module(),
+ Filename :: file:filename(),
+ Binary :: binary(),
+ What :: 'badfile' | 'nofile' | 'on_load_not_allowed' | 'duplicated' |
+ 'not_purged' | 'sticky_directory' | 'pending_on_load'.
+
+atomic_load(Modules) ->
+ case do_prepare_loading(Modules) of
+ {ok,Prep} ->
+ finish_loading(Prep, false);
+ {error,_}=Error ->
+ Error;
+ badarg ->
+ error(function_clause, [Modules])
+ end.
+
+-spec prepare_loading(Modules) ->
+ {'ok',Prepared} | {'error',[{Module,What}]} when
+ Modules :: [Module | {Module, Filename, Binary}],
+ Module :: module(),
+ Filename :: file:filename(),
+ Binary :: binary(),
+ Prepared :: prepared_code(),
+ What :: 'badfile' | 'nofile' | 'on_load_not_allowed' | 'duplicated'.
+
+prepare_loading(Modules) ->
+ case do_prepare_loading(Modules) of
+ {ok,Prep} ->
+ {ok,{?PREPARED,Prep}};
+ {error,_}=Error ->
+ Error;
+ badarg ->
+ error(function_clause, [Modules])
+ end.
+
+-spec finish_loading(Prepared) -> 'ok' | {'error',[{Module,What}]} when
+ Prepared :: prepared_code(),
+ Module :: module(),
+ What :: 'not_purged' | 'sticky_directory' | 'pending_on_load'.
+
+finish_loading({?PREPARED,Prepared}=Arg) when is_list(Prepared) ->
+ case verify_prepared(Prepared) of
+ ok ->
+ finish_loading(Prepared, false);
+ error ->
+ error(function_clause, [Arg])
+ end.
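A minimal usage sketch of the two entry points, not part of the patch (the module lists are hypothetical and their .beam files are assumed to be on the code path):

    %% Load all modules, or none of them:
    upgrade_now(Mods) ->
        case code:atomic_load(Mods) of
            ok -> ok;
            {error, Reasons} -> {aborted, Reasons}
        end.

    %% Stage first, commit later (e.g. after suspending processes):
    staged_upgrade(Mods) ->
        {ok, Prepared} = code:prepare_loading(Mods),
        %% ... suspend processes, run checks, etc. ...
        code:finish_loading(Prepared).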
+
+partition_load([Item|T], Bs, Ms) ->
+ case Item of
+ {M,File,Bin} when is_atom(M) andalso
+ is_list(File) andalso
+ is_binary(Bin) ->
+ partition_load(T, [Item|Bs], Ms);
+ M when is_atom(M) ->
+ partition_load(T, Bs, [Item|Ms]);
+ _ ->
+ error
+ end;
+partition_load([], Bs, Ms) ->
+ {Bs,Ms}.
+
+do_prepare_loading(Modules) ->
+ case partition_load(Modules, [], []) of
+ {ModBins,Ms} ->
+ case prepare_loading_1(ModBins, Ms) of
+ {error,_}=Error ->
+ Error;
+ Prep when is_list(Prep) ->
+ {ok,Prep}
+ end;
+ error ->
+ badarg
+ end.
+
+prepare_loading_1(ModBins, Ms) ->
+ %% erlang:finish_loading/1 *will* detect duplicates.
+ %% However, we want to detect all errors that can be detected
+ %% by only examining the input data before calling the LastAction
+ %% fun.
+ case prepare_check_uniq(ModBins, Ms) of
+ ok ->
+ prepare_loading_2(ModBins, Ms);
+ Error ->
+ Error
+ end.
+
+prepare_loading_2(ModBins, Ms) ->
+ {Prep0,Error0} = load_bins(ModBins),
+ {Prep1,Error1} = load_mods(Ms),
+ case Error0 ++ Error1 of
+ [] ->
+ prepare_loading_3(Prep0 ++ Prep1);
+ [_|_]=Error ->
+ {error,Error}
+ end.
+
+prepare_loading_3(Prep) ->
+ case partition_on_load(Prep) of
+ {[_|_]=OnLoad,_} ->
+ Error = [{M,on_load_not_allowed} || {M,_} <- OnLoad],
+ {error,Error};
+ {[],_} ->
+ Prep
+ end.
+
+prepare_check_uniq([{M,_,_}|T], Ms) ->
+ prepare_check_uniq(T, [M|Ms]);
+prepare_check_uniq([], Ms) ->
+ prepare_check_uniq_1(lists:sort(Ms), []).
+
+prepare_check_uniq_1([M|[M|_]=Ms], Acc) ->
+ prepare_check_uniq_1(Ms, [{M,duplicated}|Acc]);
+prepare_check_uniq_1([_|Ms], Acc) ->
+ prepare_check_uniq_1(Ms, Acc);
+prepare_check_uniq_1([], []) ->
+ ok;
+prepare_check_uniq_1([], [_|_]=Errors) ->
+ {error,Errors}.
+
+partition_on_load(Prep) ->
+ P = fun({_,{PC,_,_}}) ->
+ erlang:has_prepared_code_on_load(PC)
+ end,
+ lists:partition(P, Prep).
+
+verify_prepared([{M,{Prep,Name,_Native}}|T])
+ when is_atom(M), is_list(Name) ->
+ try erlang:has_prepared_code_on_load(Prep) of
+ false ->
+ verify_prepared(T);
+ _ ->
+ error
+ catch
+ error:_ ->
+ error
+ end;
+verify_prepared([]) ->
+ ok;
+verify_prepared(_) ->
+ error.
+
+finish_loading(Prepared0, EnsureLoaded) ->
+ Prepared = [{M,{Bin,File}} || {M,{Bin,File,_}} <- Prepared0],
+ Native0 = [{M,Code} || {M,{_,_,Code}} <- Prepared0,
+ Code =/= undefined],
+ case call({finish_loading,Prepared,EnsureLoaded}) of
+ ok ->
+ finish_loading_native(Native0);
+ {error,Errors}=E when EnsureLoaded ->
+ S0 = sofs:relation(Errors),
+ S1 = sofs:domain(S0),
+ R0 = sofs:relation(Native0),
+ R1 = sofs:drestriction(R0, S1),
+ Native = sofs:to_external(R1),
+ finish_loading_native(Native),
+ E;
+ {error,_}=E ->
+ E
+ end.
+
+finish_loading_native([{Mod,Code}|Ms]) ->
+ _ = load_native_partial(Mod, Code),
+ finish_loading_native(Ms);
+finish_loading_native([]) ->
+ ok.
+
+load_mods([]) ->
+ {[],[]};
+load_mods(Mods) ->
+ Path = get_path(),
+ F = prepare_loading_fun(),
+ {ok,{Succ,Error0}} = erl_prim_loader:get_modules(Mods, F, Path),
+ Error = [case E of
+ badfile -> {M,E};
+ _ -> {M,nofile}
+ end || {M,E} <- Error0],
+ {Succ,Error}.
+
+load_bins([]) ->
+ {[],[]};
+load_bins(BinItems) ->
+ F = prepare_loading_fun(),
+ do_par(F, BinItems).
+
+-type prep_fun_type() :: fun((module(), file:filename(), binary()) ->
+ {ok,_} | {error,_}).
+
+-spec prepare_loading_fun() -> prep_fun_type().
+
+prepare_loading_fun() ->
+ GetNative = get_native_fun(),
+ fun(Mod, FullName, Beam) ->
+ case erlang:prepare_loading(Mod, Beam) of
+ {error,_}=Error ->
+ Error;
+ Prepared ->
+ {ok,{Prepared,FullName,GetNative(Beam)}}
+ end
+ end.
+
+get_native_fun() ->
+ Architecture = erlang:system_info(hipe_architecture),
+ try hipe_unified_loader:chunk_name(Architecture) of
+ ChunkTag ->
+ fun(Beam) -> code:get_chunk(Beam, ChunkTag) end
+ catch _:_ ->
+ fun(_) -> undefined end
+ end.
+
+do_par(Fun, L) ->
+ {_,Ref} = spawn_monitor(do_par_fun(Fun, L)),
+ receive
+ {'DOWN',Ref,process,_,Res} ->
+ Res
+ end.
+
+-spec do_par_fun(prep_fun_type(), list()) -> fun(() -> no_return()).
+
+do_par_fun(Fun, L) ->
+ fun() ->
+ _ = [spawn_monitor(do_par_fun_2(Fun, Item)) ||
+ Item <- L],
+ exit(do_par_recv(length(L), [], []))
+ end.
+
+-spec do_par_fun_2(prep_fun_type(),
+ {module(),file:filename(),binary()}) ->
+ fun(() -> no_return()).
+
+do_par_fun_2(Fun, Item) ->
+ fun() ->
+ {Mod,Filename,Bin} = Item,
+ try Fun(Mod, Filename, Bin) of
+ {ok,Res} ->
+ exit({good,{Mod,Res}});
+ {error,Error} ->
+ exit({bad,{Mod,Error}})
+ catch
+ _:Error ->
+ exit({bad,{Mod,Error}})
+ end
+ end.
+
+do_par_recv(0, Good, Bad) ->
+ {Good,Bad};
+do_par_recv(N, Good, Bad) ->
+ receive
+ {'DOWN',_,process,_,{good,Res}} ->
+ do_par_recv(N-1, [Res|Good], Bad);
+ {'DOWN',_,process,_,{bad,Res}} ->
+ do_par_recv(N-1, Good, [Res|Bad])
+ end.
+
%%-----------------------------------------------------------------
call(Req) ->
- code_server:call(code_server, Req).
+ code_server:call(Req).
--spec start_link() -> {'ok', pid()} | {'error', 'crash'}.
+-spec start_link() -> {'ok', pid()}.
start_link() ->
- start_link([stick]).
-
--spec start_link(Flags :: [atom()]) -> {'ok', pid()} | {'error', 'crash'}.
-start_link(Flags) ->
- do_start(Flags).
+ do_start().
%%-----------------------------------------------------------------
%% In the init phase, code must not use any modules not yet loaded,
@@ -322,35 +644,21 @@ start_link(Flags) ->
%% us, so the module is loaded.
%%-----------------------------------------------------------------
-do_start(Flags) ->
+do_start() ->
+ maybe_warn_for_cache(),
load_code_server_prerequisites(),
- Mode = get_mode(Flags),
- case init:get_argument(root) of
- {ok,[[Root0]]} ->
- Root = filename:join([Root0]), % Normalize. Use filename
- case code_server:start_link([Root,Mode]) of
- {ok,_Pid} = Ok2 ->
- if
- Mode =:= interactive ->
- case lists:member(stick, Flags) of
- true -> do_stick_dirs();
- _ -> ok
- end;
- true ->
- ok
- end,
- %% Quietly load native code for all modules loaded so far
- Architecture = erlang:system_info(hipe_architecture),
- load_native_code_for_all_loaded(Architecture),
- Ok2;
- Other ->
- Other
- end;
- Other ->
- error_logger:error_msg("Can not start code server ~w ~n", [Other]),
- {error, crash}
- end.
+ {ok,[[Root0]]} = init:get_argument(root),
+ Mode = start_get_mode(),
+ Root = filename:join([Root0]), % Normalize.
+ Res = code_server:start_link([Root,Mode]),
+
+ maybe_stick_dirs(Mode),
+
+ %% Quietly load native code for all modules loaded so far.
+ Architecture = erlang:system_info(hipe_architecture),
+ load_native_code_for_all_loaded(Architecture),
+ Res.
%% Make sure that all modules that the code_server process calls
%% (directly or indirectly) are loaded. Otherwise the code_server
@@ -370,6 +678,16 @@ load_code_server_prerequisites() ->
_ = [M = M:module_info(module) || M <- Needed],
ok.
+maybe_stick_dirs(interactive) ->
+ case init:get_argument(nostick) of
+ {ok,[[]]} ->
+ ok;
+ _ ->
+ do_stick_dirs()
+ end;
+maybe_stick_dirs(_) ->
+ ok.
+
do_stick_dirs() ->
do_s(compiler),
do_s(stdlib),
@@ -387,19 +705,12 @@ do_s(Lib) ->
ok
end.
-get_mode(Flags) ->
- case lists:member(embedded, Flags) of
- true ->
+start_get_mode() ->
+ case init:get_argument(mode) of
+ {ok,[["embedded"]]} ->
embedded;
- _Otherwise ->
- case init:get_argument(mode) of
- {ok,[["embedded"]]} ->
- embedded;
- {ok,[["minimal"]]} ->
- minimal;
- _Else ->
- interactive
- end
+ _ ->
+ interactive
end.
%% Find out which version of a particular module we would
@@ -413,38 +724,14 @@ get_mode(Flags) ->
which(Module) when is_atom(Module) ->
case is_loaded(Module) of
false ->
- which2(Module);
+ which(Module, get_path());
{file, File} ->
File
end.
-which2(Module) ->
- Base = atom_to_list(Module),
- File = filename:basename(Base) ++ objfile_extension(),
- Path = get_path(),
- which(File, filename:dirname(Base), Path).
-
--spec which(file:filename(), file:filename(), [file:filename()]) ->
- 'non_existing' | file:filename().
-
-which(_, _, []) ->
- non_existing;
-which(File, Base, [Directory|Tail]) ->
- Path = if
- Base =:= "." -> Directory;
- true -> filename:join(Directory, Base)
- end,
- case erl_prim_loader:list_dir(Path) of
- {ok,Files} ->
- case lists:member(File,Files) of
- true ->
- filename:append(Path, File);
- false ->
- which(File, Base, Tail)
- end;
- _Error ->
- which(File, Base, Tail)
- end.
+which(Module, Path) when is_atom(Module) ->
+ File = atom_to_list(Module) ++ objfile_extension(),
+ where_is_file(Path, File).
%% Search the code path for a specific file. Try to locate
%% it in the code path cache if possible.
@@ -453,29 +740,33 @@ which(File, Base, [Directory|Tail]) ->
Filename :: file:filename(),
Absname :: file:filename().
where_is_file(File) when is_list(File) ->
- case call({is_cached,File}) of
- no ->
- Path = get_path(),
- which(File, ".", Path);
- Dir ->
- filename:join(Dir, File)
- end.
+ Path = get_path(),
+ where_is_file(Path, File).
--spec where_is_file(Path :: file:filename(), Filename :: file:filename()) ->
- file:filename() | 'non_existing'.
+%% To avoid unnecessary work when looking at many modules, this also
+%% accepts pairs of directories and pre-fetched contents in the path.
+-spec where_is_file(Path :: [Dir|{Dir,Files}], Filename :: file:filename()) ->
+ 'non_existing' | file:filename() when
+ Dir :: file:filename(), Files :: [file:filename()].
-where_is_file(Path, File) when is_list(Path), is_list(File) ->
- CodePath = get_path(),
- if
- Path =:= CodePath ->
- case call({is_cached, File}) of
- no ->
- which(File, ".", Path);
- Dir ->
- filename:join(Dir, File)
- end;
- true ->
- which(File, ".", Path)
+where_is_file([], _) ->
+ non_existing;
+where_is_file([{Path, Files}|Tail], File) ->
+ where_is_file(Tail, File, Path, Files);
+where_is_file([Path|Tail], File) ->
+ case erl_prim_loader:list_dir(Path) of
+ {ok,Files} ->
+ where_is_file(Tail, File, Path, Files);
+ _Error ->
+ where_is_file(Tail, File)
+ end.
+
+where_is_file(Tail, File, Path, Files) ->
+ case lists:member(File, Files) of
+ true ->
+ filename:append(Path, File);
+ false ->
+ where_is_file(Tail, File)
end.
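A minimal sketch of the pre-fetched form, not part of the patch; it mirrors what path_files/0 later in this module does internally:

    find_all(Files) ->
        PathFiles = [{Dir, Fs} ||
                        Dir <- code:get_path(),
                        {ok, Fs} <- [erl_prim_loader:list_dir(Dir)]],
        [{F, code:where_is_file(PathFiles, F)} || F <- Files].

    %% find_all(["lists.beam", "kernel.app"]).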
-spec set_primary_archive(ArchiveFile :: file:filename(),
@@ -554,6 +845,22 @@ has_ext(Ext, Extlen, File) ->
end.
%%%
+%%% Warning for deprecated code path cache.
+%%%
+
+maybe_warn_for_cache() ->
+ case init:get_argument(code_path_cache) of
+ {ok, _} ->
+ cache_warning();
+ error ->
+ ok
+ end.
+
+cache_warning() ->
+ W = "The code path cache functionality has been removed",
+ error_logger:warning_report(W).
+
+%%%
%%% Silently load native code for all modules loaded so far.
%%%
@@ -593,3 +900,97 @@ load_all_native_1([{Mod,BeamFilename}|T], ChunkTag) ->
load_all_native_1(T, ChunkTag);
load_all_native_1([], _) ->
ok.
+
+%% Returns the status of the module in relation to object file on disk.
+-spec module_status(Module :: module()) -> not_loaded | loaded | modified | removed.
+module_status(Module) ->
+ module_status(Module, code:get_path()).
+
+%% Note that we don't want to go via which/1, since it doesn't look at the
+%% disk contents at all if the module is already loaded.
+module_status(Module, PathFiles) ->
+ case code:is_loaded(Module) of
+ false -> not_loaded;
+ {file, preloaded} -> loaded;
+ {file, cover_compiled} ->
+ %% cover compilation loads directly to memory and does not
+ %% create a beam file, so report 'modified' if a file exists
+ case which(Module, PathFiles) of
+ non_existing -> removed;
+ _File -> modified
+ end;
+ {file, []} -> loaded; % no beam file - generated code
+ {file, OldFile} when is_list(OldFile) ->
+ %% we don't care whether or not the file is in the same location
+ %% as when last loaded, as long as it can be found in the path
+ case which(Module, PathFiles) of
+ non_existing -> removed;
+ Path ->
+ case module_changed_on_disk(Module, Path) of
+ true -> modified;
+ false -> loaded
+ end
+ end
+ end.
+
+%% Detects actual code changes only, e.g. to decide whether a module should
+%% be reloaded; does not care about file timestamps or compilation time
+module_changed_on_disk(Module, Path) ->
+ MD5 = erlang:get_module_info(Module, md5),
+ case erlang:system_info(hipe_architecture) of
+ undefined ->
+ %% straightforward, since native is not supported
+ MD5 =/= beam_file_md5(Path);
+ Architecture ->
+ case code:is_module_native(Module) of
+ true ->
+ %% MD5 is for native code, so we check only the native
+ %% code on disk, ignoring the beam code
+ MD5 =/= beam_file_native_md5(Path, Architecture);
+ _ ->
+ %% MD5 is for beam code, so check only the beam code on
+ %% disk, even if the file contains native code as well
+ MD5 =/= beam_file_md5(Path)
+ end
+ end.
+
+beam_file_md5(Path) ->
+ case beam_lib:md5(Path) of
+ {ok,{_Mod,MD5}} -> MD5;
+ _ -> undefined
+ end.
+
+beam_file_native_md5(Path, Architecture) ->
+ try
+ get_beam_chunk(Path, hipe_unified_loader:chunk_name(Architecture))
+ of
+ NativeCode when is_binary(NativeCode) ->
+ erlang:md5(NativeCode)
+ catch
+ _:_ -> undefined
+ end.
+
+get_beam_chunk(Path, Chunk) ->
+ {ok, {_, [{_, Bin}]}} = beam_lib:chunks(Path, [Chunk]),
+ Bin.
+
+%% Returns a list of all modules modified on disk.
+-spec modified_modules() -> [module()].
+modified_modules() ->
+ PathFiles = path_files(),
+ [M || {M, _} <- code:all_loaded(),
+ module_status(M, PathFiles) =:= modified].
+
+%% prefetch the directory contents of code path directories
+path_files() ->
+ path_files(code:get_path()).
+
+path_files([]) ->
+ [];
+path_files([Path|Tail]) ->
+ case erl_prim_loader:list_dir(Path) of
+ {ok, Files} ->
+ [{Path,Files} | path_files(Tail)];
+ _Error ->
+ path_files(Tail)
+ end.
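A minimal sketch, not part of the patch, of how a reloader tool might combine the new functions; it assumes the modified modules are non-sticky and safe to purge:

    reload_changed() ->
        [begin
             _ = code:purge(M),
             {module, M} = code:load_file(M),
             M
         end || M <- code:modified_modules()].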
diff --git a/lib/kernel/src/code_server.erl b/lib/kernel/src/code_server.erl
index 614219794c..1b4a67ecb7 100644
--- a/lib/kernel/src/code_server.erl
+++ b/lib/kernel/src/code_server.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -22,29 +22,33 @@
%% This file holds the server part of the code_server.
-export([start_link/1,
- call/2,
- system_continue/3,
- system_terminate/4,
+ call/1,
system_code_change/4,
error_msg/2, info_msg/2
]).
-include_lib("kernel/include/file.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
-import(lists, [foreach/2]).
--define(ANY_NATIVE_CODE_LOADED, any_native_code_loaded).
+-type on_load_action() ::
+ fun((term(), state()) -> {'reply',term(),state()} |
+ {'noreply',state()}).
--record(state, {supervisor,
- root,
- path,
- moddb,
- namedb,
- cache = no_cache,
- mode = interactive,
- on_load = []}).
+-type on_load_item() :: {{pid(),reference()},module(),
+ [{pid(),on_load_action()}]}.
+
+-record(state, {supervisor :: pid(),
+ root :: file:name_all(),
+ path :: [file:name_all()],
+ moddb :: ets:tab(),
+ namedb :: ets:tab(),
+ mode = interactive :: 'interactive' | 'embedded',
+ on_load = [] :: [on_load_item()]}).
-type state() :: #state{}.
+-spec start_link([term()]) -> {'ok', pid()}.
start_link(Args) ->
Ref = make_ref(),
Parent = self(),
@@ -59,7 +63,7 @@ start_link(Args) ->
%% Init the code_server process.
%% -----------------------------------------------------------
-init(Ref, Parent, [Root,Mode0]) ->
+init(Ref, Parent, [Root,Mode]) ->
register(?MODULE, self()),
process_flag(trap_exit, true),
@@ -68,20 +72,16 @@ init(Ref, Parent, [Root,Mode0]) ->
%% Pre-loaded modules are always sticky.
ets:insert(Db, [{M,preloaded},{{sticky,M},true}])
end, erlang:pre_loaded()),
- ets:insert(Db, init:fetch_loaded()),
-
- Mode =
- case Mode0 of
- minimal -> interactive;
- _ -> Mode0
- end,
+ Loaded0 = init:fetch_loaded(),
+ Loaded = [{M,filename:join([P])} || {M,P} <- Loaded0], %Normalize.
+ ets:insert(Db, Loaded),
IPath =
case Mode of
interactive ->
LibDir = filename:append(Root, "lib"),
{ok,Dirs} = erl_prim_loader:list_dir(LibDir),
- {Paths,_Libs} = make_path(LibDir, Dirs),
+ Paths = make_path(LibDir, Dirs),
UserLibPaths = get_user_lib_dirs(),
["."] ++ UserLibPaths ++ Paths;
_ ->
@@ -89,24 +89,15 @@ init(Ref, Parent, [Root,Mode0]) ->
end,
Path = add_loader_path(IPath, Mode),
- State0 = #state{root = Root,
- path = Path,
- moddb = Db,
- namedb = init_namedb(Path),
- mode = Mode},
-
- State =
- case init:get_argument(code_path_cache) of
- {ok, _} ->
- create_cache(State0);
- error ->
- State0
- end,
-
- put(?ANY_NATIVE_CODE_LOADED, false),
+ State = #state{supervisor = Parent,
+ root = Root,
+ path = Path,
+ moddb = Db,
+ namedb = init_namedb(Path),
+ mode = Mode},
Parent ! {Ref,{ok,self()}},
- loop(State#state{supervisor = Parent}).
+ loop(State).
get_user_lib_dirs() ->
case os:getenv("ERL_LIBS") of
@@ -125,7 +116,7 @@ get_user_lib_dirs() ->
get_user_lib_dirs_1([Dir|DirList]) ->
case erl_prim_loader:list_dir(Dir) of
{ok, Dirs} ->
- {Paths,_Libs} = make_path(Dir, Dirs),
+ Paths = make_path(Dir, Dirs),
%% Only add paths trailing with ./ebin.
[P || P <- Paths, filename:basename(P) =:= "ebin"] ++
get_user_lib_dirs_1(DirList);
@@ -142,11 +133,16 @@ split_paths([C|T], S, Path, Paths) ->
split_paths([], _S, Path, Paths) ->
lists:reverse(Paths, [lists:reverse(Path)]).
-call(Name, Req) ->
- Name ! {code_call, self(), Req},
+-spec call(term()) -> term().
+call(Req) ->
+ Ref = erlang:monitor(process, ?MODULE),
+ ?MODULE ! {code_call, self(), Req},
receive
{?MODULE, Reply} ->
- Reply
+ erlang:demonitor(Ref,[flush]),
+ Reply;
+ {'DOWN',Ref,process,_,_} ->
+ exit({'DOWN',code_server,Req})
end.
reply(Pid, Res) ->
@@ -155,7 +151,7 @@ reply(Pid, Res) ->
loop(#state{supervisor=Supervisor}=State0) ->
receive
{code_call, Pid, Req} ->
- case handle_call(Req, {Pid, call}, State0) of
+ case handle_call(Req, Pid, State0) of
{reply, Res, State} ->
_ = reply(Pid, Res),
loop(State);
@@ -168,8 +164,8 @@ loop(#state{supervisor=Supervisor}=State0) ->
system_terminate(Reason, Supervisor, [], State0);
{system, From, Msg} ->
handle_system_msg(running,Msg, From, Supervisor, State0);
- {'DOWN',Ref,process,_,Res} ->
- State = finish_on_load(Ref, Res, State0),
+ {'DOWN',Ref,process,Pid,Res} ->
+ State = finish_on_load({Pid,Ref}, Res, State0),
loop(State);
_Msg ->
loop(State0)
@@ -238,278 +234,144 @@ system_code_change(State, _Module, _OldVsn, _Extra) ->
%% The gen_server call back functions.
%%
-handle_call({stick_dir,Dir}, {_From,_Tag}, S) ->
+handle_call({stick_dir,Dir}, _From, S) ->
{reply,stick_dir(Dir, true, S),S};
-handle_call({unstick_dir,Dir}, {_From,_Tag}, S) ->
+handle_call({unstick_dir,Dir}, _From, S) ->
{reply,stick_dir(Dir, false, S),S};
-handle_call({stick_mod,Mod}, {_From,_Tag}, S) ->
+handle_call({stick_mod,Mod}, _From, S) ->
{reply,stick_mod(Mod, true, S),S};
-handle_call({unstick_mod,Mod}, {_From,_Tag}, S) ->
+handle_call({unstick_mod,Mod}, _From, S) ->
{reply,stick_mod(Mod, false, S),S};
-handle_call({dir,Dir}, {_From,_Tag}, S) ->
+handle_call({dir,Dir}, _From, S) ->
Root = S#state.root,
Resp = do_dir(Root,Dir,S#state.namedb),
{reply,Resp,S};
-handle_call({load_file,Mod}, Caller, St) ->
- case modp(Mod) of
- false ->
- {reply,{error,badarg},St};
- true ->
- load_file(Mod, Caller, St)
- end;
+handle_call({load_file,Mod}, From, St) when is_atom(Mod) ->
+ load_file(Mod, From, St);
-handle_call({add_path,Where,Dir0}, {_From,_Tag},
- #state{cache=Cache0,namedb=Namedb,path=Path0}=S) ->
- case Cache0 of
- no_cache ->
- {Resp,Path} = add_path(Where, Dir0, Path0, Namedb),
- {reply,Resp,S#state{path=Path}};
- _ ->
- Dir = absname(Dir0), %% Cache always expands the path
- {Resp,Path} = add_path(Where, Dir, Path0, Namedb),
- Cache = update_cache([Dir], Where, Cache0),
- {reply,Resp,S#state{path=Path,cache=Cache}}
- end;
+handle_call({add_path,Where,Dir0}, _From,
+ #state{namedb=Namedb,path=Path0}=S) ->
+ {Resp,Path} = add_path(Where, Dir0, Path0, Namedb),
+ {reply,Resp,S#state{path=Path}};
-handle_call({add_paths,Where,Dirs0}, {_From,_Tag},
- #state{cache=Cache0,namedb=Namedb,path=Path0}=S) ->
- case Cache0 of
- no_cache ->
- {Resp,Path} = add_paths(Where, Dirs0, Path0, Namedb),
- {reply,Resp,S#state{path=Path}};
- _ ->
- %% Cache always expands the path
- Dirs = [absname(Dir) || Dir <- Dirs0],
- {Resp,Path} = add_paths(Where, Dirs, Path0, Namedb),
- Cache=update_cache(Dirs,Where,Cache0),
- {reply,Resp,S#state{cache=Cache,path=Path}}
- end;
+handle_call({add_paths,Where,Dirs0}, _From,
+ #state{namedb=Namedb,path=Path0}=S) ->
+ {Resp,Path} = add_paths(Where, Dirs0, Path0, Namedb),
+ {reply,Resp,S#state{path=Path}};
-handle_call({set_path,PathList}, {_From,_Tag},
+handle_call({set_path,PathList}, _From,
#state{path=Path0,namedb=Namedb}=S) ->
{Resp,Path,NewDb} = set_path(PathList, Path0, Namedb),
- {reply,Resp,rehash_cache(S#state{path=Path,namedb=NewDb})};
+ {reply,Resp,S#state{path=Path,namedb=NewDb}};
-handle_call({del_path,Name}, {_From,_Tag},
+handle_call({del_path,Name}, _From,
#state{path=Path0,namedb=Namedb}=S) ->
{Resp,Path} = del_path(Name, Path0, Namedb),
- {reply,Resp,rehash_cache(S#state{path=Path})};
+ {reply,Resp,S#state{path=Path}};
-handle_call({replace_path,Name,Dir}, {_From,_Tag},
+handle_call({replace_path,Name,Dir}, _From,
#state{path=Path0,namedb=Namedb}=S) ->
{Resp,Path} = replace_path(Name, Dir, Path0, Namedb),
- {reply,Resp,rehash_cache(S#state{path=Path})};
+ {reply,Resp,S#state{path=Path}};
-handle_call(rehash, {_From,_Tag}, S0) ->
- S = create_cache(S0),
- {reply,ok,S};
-
-handle_call(get_path, {_From,_Tag}, S) ->
+handle_call(get_path, _From, S) ->
{reply,S#state.path,S};
%% Messages to load, delete and purge modules/files.
-handle_call({load_abs,File,Mod}, Caller, S) when is_atom(Mod) ->
+handle_call({load_abs,File,Mod}, From, S) when is_atom(Mod) ->
case modp(File) of
false ->
{reply,{error,badarg},S};
true ->
- load_abs(File, Mod, Caller, S)
+ load_abs(File, Mod, From, S)
end;
-handle_call({load_binary,Mod,File,Bin}, Caller, S) ->
- do_load_binary(Mod, File, Bin, Caller, S);
+handle_call({load_binary,Mod,File,Bin}, From, S) when is_atom(Mod) ->
+ do_load_binary(Mod, File, Bin, From, S);
-handle_call({load_native_partial,Mod,Bin}, {_From,_Tag}, S) ->
+handle_call({load_native_partial,Mod,Bin}, _From, S) ->
Architecture = erlang:system_info(hipe_architecture),
Result = (catch hipe_unified_loader:load(Mod, Bin, Architecture)),
- Status = hipe_result_to_status(Result),
+ Status = hipe_result_to_status(Result, S),
{reply,Status,S};
-handle_call({load_native_sticky,Mod,Bin,WholeModule}, {_From,_Tag}, S) ->
+handle_call({load_native_sticky,Mod,Bin,WholeModule}, _From, S) ->
Architecture = erlang:system_info(hipe_architecture),
Result = (catch hipe_unified_loader:load_module(Mod, Bin, WholeModule,
Architecture)),
- Status = hipe_result_to_status(Result),
+ Status = hipe_result_to_status(Result, S),
{reply,Status,S};
-handle_call({ensure_loaded,Mod0}, Caller, St0) ->
- Fun = fun (M, St) ->
- case erlang:module_loaded(M) of
- true ->
- {reply,{module,M},St};
- false when St#state.mode =:= interactive ->
- load_file(M, Caller, St);
- false ->
- {reply,{error,embedded},St}
- end
- end,
- do_mod_call(Fun, Mod0, {error,badarg}, St0);
-
-handle_call({delete,Mod0}, {_From,_Tag}, S) ->
- Fun = fun (M, St) ->
- case catch erlang:delete_module(M) of
- true ->
- ets:delete(St#state.moddb, M),
- {reply,true,St};
- _ ->
- {reply,false,St}
- end
- end,
- do_mod_call(Fun, Mod0, false, S);
+handle_call({ensure_loaded,Mod}, From, St) when is_atom(Mod) ->
+ case erlang:module_loaded(Mod) of
+ true ->
+ {reply,{module,Mod},St};
+ false when St#state.mode =:= interactive ->
+ ensure_loaded(Mod, From, St);
+ false ->
+ {reply,{error,embedded},St}
+ end;
-handle_call({purge,Mod0}, {_From,_Tag}, St0) ->
- do_mod_call(fun (M, St) ->
- {reply,do_purge(M),St}
- end, Mod0, false, St0);
+handle_call({delete,Mod}, _From, St) when is_atom(Mod) ->
+ case catch erlang:delete_module(Mod) of
+ true ->
+ ets:delete(St#state.moddb, Mod),
+ {reply,true,St};
+ _ ->
+ {reply,false,St}
+ end;
+
+handle_call({purge,Mod}, _From, St) when is_atom(Mod) ->
+ {reply,do_purge(Mod),St};
-handle_call({soft_purge,Mod0}, {_From,_Tag}, St0) ->
- do_mod_call(fun (M, St) ->
- {reply,do_soft_purge(M),St}
- end, Mod0, true, St0);
+handle_call({soft_purge,Mod}, _From, St) when is_atom(Mod) ->
+ {reply,do_soft_purge(Mod),St};
-handle_call({is_loaded,Mod0}, {_From,_Tag}, St0) ->
- do_mod_call(fun (M, St) ->
- {reply,is_loaded(M, St#state.moddb),St}
- end, Mod0, false, St0);
+handle_call({is_loaded,Mod}, _From, St) when is_atom(Mod) ->
+ {reply,is_loaded(Mod, St#state.moddb),St};
-handle_call(all_loaded, {_From,_Tag}, S) ->
+handle_call(all_loaded, _From, S) ->
Db = S#state.moddb,
{reply,all_loaded(Db),S};
-handle_call({get_object_code,Mod0}, {_From,_Tag}, St0) ->
- Fun = fun(M, St) ->
- Path = St#state.path,
- case mod_to_bin(Path, atom_to_list(M)) of
- {_,Bin,FName} -> {reply,{M,Bin,FName},St};
- Error -> {reply,Error,St}
- end
- end,
- do_mod_call(Fun, Mod0, error, St0);
+handle_call({get_object_code,Mod}, _From, St) when is_atom(Mod) ->
+ case get_object_code(St, Mod) of
+ {_,Bin,FName} -> {reply,{Mod,Bin,FName},St};
+ Error -> {reply,Error,St}
+ end;
-handle_call({is_sticky, Mod}, {_From,_Tag}, S) ->
+handle_call({is_sticky, Mod}, _From, S) ->
Db = S#state.moddb,
{reply, is_sticky(Mod,Db), S};
-handle_call(stop,{_From,_Tag}, S) ->
+handle_call(stop,_From, S) ->
{stop,normal,stopped,S};
-handle_call({is_cached,_File}, {_From,_Tag}, S=#state{cache=no_cache}) ->
- {reply, no, S};
-
-handle_call({set_primary_archive, File, ArchiveBin, FileInfo, ParserFun}, {_From,_Tag}, S=#state{mode=Mode}) ->
- case erl_prim_loader:set_primary_archive(File, ArchiveBin, FileInfo, ParserFun) of
+handle_call({set_primary_archive, File, ArchiveBin, FileInfo, ParserFun},
+ _From, S=#state{mode=Mode}) ->
+ case erl_prim_loader:set_primary_archive(File, ArchiveBin, FileInfo,
+ ParserFun) of
{ok, Files} ->
{reply, {ok, Mode, Files}, S};
{error, _Reason} = Error ->
{reply, Error, S}
end;
-handle_call({is_cached,File}, {_From,_Tag}, S=#state{cache=Cache}) ->
- ObjExt = objfile_extension(),
- Ext = filename:extension(File),
- Type = case Ext of
- ObjExt -> obj;
- ".app" -> app;
- _ -> undef
- end,
- if Type =:= undef ->
- {reply, no, S};
- true ->
- Key = {Type,list_to_atom(filename:rootname(File, Ext))},
- case ets:lookup(Cache, Key) of
- [] ->
- {reply, no, S};
- [{Key,Dir}] ->
- {reply, Dir, S}
- end
- end;
-
-handle_call(get_mode, {_From,_Tag}, S=#state{mode=Mode}) ->
+handle_call(get_mode, _From, S=#state{mode=Mode}) ->
{reply, Mode, S};
-handle_call(Other,{_From,_Tag}, S) ->
+handle_call({finish_loading,Prepared,EnsureLoaded}, _From, S) ->
+ {reply,finish_loading(Prepared, EnsureLoaded, S),S};
+
+handle_call(Other,_From, S) ->
error_msg(" ** Codeserver*** ignoring ~w~n ",[Other]),
{noreply,S}.
-do_mod_call(Action, Module, _Error, St) when is_atom(Module) ->
- Action(Module, St);
-do_mod_call(Action, Module, Error, St) ->
- try list_to_atom(Module) of
- Atom when is_atom(Atom) ->
- Action(Atom, St)
- catch
- error:badarg ->
- {reply,Error,St}
- end.
-
-%% --------------------------------------------------------------
-%% Cache functions
-%% --------------------------------------------------------------
-
-create_cache(St = #state{cache = no_cache}) ->
- Cache = ets:new(code_cache, [protected]),
- rehash_cache(Cache, St);
-create_cache(St) ->
- rehash_cache(St).
-
-rehash_cache(St = #state{cache = no_cache}) ->
- St;
-rehash_cache(St = #state{cache = OldCache}) ->
- ets:delete(OldCache),
- Cache = ets:new(code_cache, [protected]),
- rehash_cache(Cache, St).
-
-rehash_cache(Cache, St = #state{path = Path}) ->
- Exts = [{obj,objfile_extension()}, {app,".app"}],
- {Cache,NewPath} = locate_mods(lists:reverse(Path), first, Exts, Cache, []),
- St#state{cache = Cache, path=NewPath}.
-
-update_cache(Dirs, Where, Cache0) ->
- Exts = [{obj,objfile_extension()}, {app,".app"}],
- {Cache, _} = locate_mods(Dirs, Where, Exts, Cache0, []),
- Cache.
-
-locate_mods([Dir0|Path], Where, Exts, Cache, Acc) ->
- Dir = absname(Dir0), %% Cache always expands the path
- case erl_prim_loader:list_dir(Dir) of
- {ok, Files} ->
- Cache = filter_mods(Files, Where, Exts, Dir, Cache),
- locate_mods(Path, Where, Exts, Cache, [Dir|Acc]);
- error ->
- locate_mods(Path, Where, Exts, Cache, Acc)
- end;
-locate_mods([], _, _, Cache, Path) ->
- {Cache,Path}.
-
-filter_mods([File|Rest], Where, Exts, Dir, Cache) ->
- Ext = filename:extension(File),
- Root = list_to_atom(filename:rootname(File, Ext)),
- case lists:keyfind(Ext, 2, Exts) of
- {Type, _} ->
- Key = {Type,Root},
- case Where of
- first ->
- true = ets:insert(Cache, {Key,Dir});
- last ->
- case ets:lookup(Cache, Key) of
- [] ->
- true = ets:insert(Cache, {Key,Dir});
- _ ->
- ignore
- end
- end;
- false ->
- ok
- end,
- filter_mods(Rest, Where, Exts, Dir, Cache);
-filter_mods([], _, _, _, Cache) ->
- Cache.
-
%% --------------------------------------------------------------
%% Path handling functions.
%% --------------------------------------------------------------
@@ -519,7 +381,7 @@ filter_mods([], _, _, _, Cache) ->
%%
make_path(BundleDir, Bundles0) ->
Bundles = choose_bundles(Bundles0),
- make_path(BundleDir, Bundles, [], []).
+ make_path(BundleDir, Bundles, []).
choose_bundles(Bundles) ->
ArchiveExt = archive_extension(),
@@ -529,12 +391,10 @@ choose_bundles(Bundles) ->
create_bundle(FullName, ArchiveExt) ->
BaseName = filename:basename(FullName, ArchiveExt),
- case split(BaseName, "-") of
- [_, _|_] = Toks ->
- VsnStr = lists:last(Toks),
+ case split_base(BaseName) of
+ {Name, VsnStr} ->
case vsn_to_num(VsnStr) of
{ok, VsnNum} ->
- Name = join(lists:sublist(Toks, length(Toks)-1),"-"),
{Name,VsnNum,FullName};
false ->
{FullName,[0],FullName}
@@ -605,41 +465,44 @@ choose([{Name,NumVsn,NewFullName}=New|Bs], Acc, ArchiveExt) ->
choose([],Acc, _ArchiveExt) ->
Acc.
-make_path(_,[],Res,Bs) ->
- {Res,Bs};
-make_path(BundleDir,[Bundle|Tail],Res,Bs) ->
- Dir = filename:append(BundleDir,Bundle),
- Ebin = filename:append(Dir,"ebin"),
+make_path(_, [], Res) ->
+ Res;
+make_path(BundleDir, [Bundle|Tail], Res) ->
+ Dir = filename:append(BundleDir, Bundle),
+ Ebin = filename:append(Dir, "ebin"),
%% First try with /ebin
- case erl_prim_loader:read_file_info(Ebin) of
- {ok,#file_info{type=directory}} ->
- make_path(BundleDir,Tail,[Ebin|Res],[Bundle|Bs]);
- _ ->
+ case is_dir(Ebin) of
+ true ->
+ make_path(BundleDir, Tail, [Ebin|Res]);
+ false ->
%% Second try with archive
Ext = archive_extension(),
- Base = filename:basename(Dir, Ext),
- Ebin2 = filename:join([filename:dirname(Dir), Base ++ Ext, Base, "ebin"]),
+ Base = filename:basename(Bundle, Ext),
+ Ebin2 = filename:join([BundleDir, Base ++ Ext, Base, "ebin"]),
Ebins =
- case split(Base, "-") of
- [_, _|_] = Toks ->
- AppName = join(lists:sublist(Toks, length(Toks)-1),"-"),
- Ebin3 = filename:join([filename:dirname(Dir), Base ++ Ext, AppName, "ebin"]),
+ case split_base(Base) of
+ {AppName,_} ->
+ Ebin3 = filename:join([BundleDir, Base ++ Ext,
+ AppName, "ebin"]),
[Ebin3, Ebin2, Dir];
_ ->
[Ebin2, Dir]
end,
- try_ebin_dirs(Ebins,BundleDir,Tail,Res,Bundle, Bs)
+ case try_ebin_dirs(Ebins) of
+ {ok,FoundEbin} ->
+ make_path(BundleDir, Tail, [FoundEbin|Res]);
+ error ->
+ make_path(BundleDir, Tail, Res)
+ end
end.
-try_ebin_dirs([Ebin | Ebins],BundleDir,Tail,Res,Bundle,Bs) ->
- case erl_prim_loader:read_file_info(Ebin) of
- {ok,#file_info{type=directory}} ->
- make_path(BundleDir,Tail,[Ebin|Res],[Bundle|Bs]);
- _ ->
- try_ebin_dirs(Ebins,BundleDir,Tail,Res,Bundle,Bs)
+try_ebin_dirs([Ebin|Ebins]) ->
+ case is_dir(Ebin) of
+ true -> {ok,Ebin};
+ false -> try_ebin_dirs(Ebins)
end;
-try_ebin_dirs([],BundleDir,Tail,Res,_Bundle,Bs) ->
- make_path(BundleDir,Tail,Res,Bs).
+try_ebin_dirs([]) ->
+ error.
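%% NOTE (annotation, not part of the original patch): make_path/3 now
%% returns only the list of ebin directories. For each bundle it first
%% tries <Bundle>/ebin, then the matching paths inside a <Bundle>.ez
%% archive, and silently skips bundles where no ebin directory exists.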
%%
@@ -757,19 +620,34 @@ exclude(Dir,Path) ->
%%
%%
get_name(Dir) ->
- get_name2(get_name1(Dir), []).
+ get_name_from_splitted(filename:split(Dir)).
+
+get_name_from_splitted([DirName,"ebin"]) ->
+ discard_after_hyphen(DirName);
+get_name_from_splitted([DirName]) ->
+ discard_after_hyphen(DirName);
+get_name_from_splitted([_|T]) ->
+ get_name_from_splitted(T);
+get_name_from_splitted([]) ->
+ "". %No name.
+
+discard_after_hyphen("-"++_) ->
+ [];
+discard_after_hyphen([H|T]) ->
+ [H|discard_after_hyphen(T)];
+discard_after_hyphen([]) ->
+ [].
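%% NOTE (annotation, not part of the original patch): assuming
%% filename:split/1 semantics, the rewritten helpers should behave as:
%%   get_name("lib/mnesia-4.15.3/ebin")    -> "mnesia"
%%   discard_after_hyphen("mnesia-4.15.3") -> "mnesia"
%%   discard_after_hyphen("stdlib")        -> "stdlib"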
-get_name1(Dir) ->
- case lists:reverse(filename:split(Dir)) of
- ["ebin",DirName|_] -> DirName;
- [DirName|_] -> DirName;
- _ -> "" % No name !
+split_base(BaseName) ->
+ case split(BaseName, "-") of
+ [_, _|_] = Toks ->
+ Vsn = lists:last(Toks),
+ AllButLast = lists:droplast(Toks),
+ {join(AllButLast, "-"),Vsn};
+ [_|_] ->
+ BaseName
end.
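%% NOTE (annotation, not part of the original patch): split_base/1
%% centralizes the old "split on the last hyphen" logic. With the split/2
%% and join/2 helpers in this module it should evaluate as:
%%   split_base("kernel-5.4.3") -> {"kernel","5.4.3"}
%%   split_base("my-app-1.0")   -> {"my-app","1.0"}
%%   split_base("kernel")       -> "kernel"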
-get_name2([$-|_],Acc) -> lists:reverse(Acc);
-get_name2([H|T],Acc) -> get_name2(T,[H|Acc]);
-get_name2(_,Acc) -> lists:reverse(Acc).
-
check_path(Path) ->
PathChoice = init:code_path_choice(),
ArchiveExt = archive_extension(),
@@ -778,23 +656,23 @@ check_path(Path) ->
do_check_path([], _PathChoice, _ArchiveExt, Acc) ->
{ok, lists:reverse(Acc)};
do_check_path([Dir | Tail], PathChoice, ArchiveExt, Acc) ->
- case catch erl_prim_loader:read_file_info(Dir) of
- {ok, #file_info{type=directory}} ->
+ case is_dir(Dir) of
+ true ->
do_check_path(Tail, PathChoice, ArchiveExt, [Dir | Acc]);
- _ when PathChoice =:= strict ->
+ false when PathChoice =:= strict ->
%% Be strict. Only use dir as explicitly stated
{error, bad_directory};
- _ when PathChoice =:= relaxed ->
+ false when PathChoice =:= relaxed ->
%% Be relaxed
case catch lists:reverse(filename:split(Dir)) of
{'EXIT', _} ->
{error, bad_directory};
["ebin", App] ->
Dir2 = filename:join([App ++ ArchiveExt, App, "ebin"]),
- case erl_prim_loader:read_file_info(Dir2) of
- {ok, #file_info{type = directory}} ->
+ case is_dir(Dir2) of
+ true ->
do_check_path(Tail, PathChoice, ArchiveExt, [Dir2 | Acc]);
- _ ->
+ false ->
{error, bad_directory}
end;
["ebin", App, OptArchive | RevTop] ->
@@ -814,10 +692,10 @@ do_check_path([Dir | Tail], PathChoice, ArchiveExt, Acc) ->
Top = lists:reverse([OptArchive | RevTop]),
filename:join(Top ++ [App ++ ArchiveExt, App, "ebin"])
end,
- case erl_prim_loader:read_file_info(Dir2) of
- {ok, #file_info{type = directory}} ->
+ case is_dir(Dir2) of
+ true ->
do_check_path(Tail, PathChoice, ArchiveExt, [Dir2 | Acc]);
- _ ->
+ false ->
{error, bad_directory}
end;
_ ->
@@ -916,7 +794,7 @@ init_namedb(Path) ->
Db.
init_namedb([P|Path], Db) ->
- insert_name(P, Db),
+ insert_dir(P, Db),
init_namedb(Path, Db);
init_namedb([], _) ->
ok.
@@ -929,59 +807,45 @@ clear_namedb([], _) ->
ok.
-endif.
-insert_name(Dir, Db) ->
- case get_name(Dir) of
- Dir -> false;
- Name -> insert_name(Name, Dir, Db)
- end.
+%% Dir must be a complete pathname (not only a name).
+insert_dir(Dir, Db) ->
+ Splitted = filename:split(Dir),
+ case get_name_from_splitted(Splitted) of
+ Name when Name /= "ebin", Name /= "." ->
+ Name;
+ _ ->
+ SplittedAbsName = filename:split(absname(Dir)),
+ Name = get_name_from_splitted(SplittedAbsName)
+ end,
+ AppDir = filename:join(del_ebin_1(Splitted)),
+ do_insert_name(Name, AppDir, Db).
insert_name(Name, Dir, Db) ->
AppDir = del_ebin(Dir),
+ do_insert_name(Name, AppDir, Db).
+
+do_insert_name(Name, AppDir, Db) ->
{Base, SubDirs} = archive_subdirs(AppDir),
ets:insert(Db, {Name, AppDir, Base, SubDirs}),
true.
archive_subdirs(AppDir) ->
- IsDir =
- fun(RelFile) ->
- File = filename:join([AppDir, RelFile]),
- case erl_prim_loader:read_file_info(File) of
- {ok, #file_info{type = directory}} ->
- false;
- _ ->
- true
- end
- end,
- {Base, ArchiveDirs} = all_archive_subdirs(AppDir),
- {Base, lists:filter(IsDir, ArchiveDirs)}.
-
-all_archive_subdirs(AppDir) ->
- Ext = archive_extension(),
Base = filename:basename(AppDir),
- Dirs =
- case split(Base, "-") of
- [_, _|_] = Toks ->
- Base2 = join(lists:sublist(Toks, length(Toks)-1), "-"),
- [Base2, Base];
- _ ->
- [Base]
+ Dirs = case split_base(Base) of
+ {Name, _} -> [Name, Base];
+ _ -> [Base]
end,
+ Ext = archive_extension(),
try_archive_subdirs(AppDir ++ Ext, Base, Dirs).
try_archive_subdirs(Archive, Base, [Dir | Dirs]) ->
- ArchiveDir = filename:join([Archive, Dir]),
+ ArchiveDir = filename:append(Archive, Dir),
case erl_prim_loader:list_dir(ArchiveDir) of
{ok, Files} ->
- IsDir =
- fun(RelFile) ->
- File = filename:join([ArchiveDir, RelFile]),
- case erl_prim_loader:read_file_info(File) of
- {ok, #file_info{type = directory}} ->
- true;
- _ ->
- false
- end
- end,
+ IsDir = fun(RelFile) ->
+ File = filename:append(ArchiveDir, RelFile),
+ is_dir(File)
+ end,
{Dir, lists:filter(IsDir, Files)};
_ ->
try_archive_subdirs(Archive, Base, Dirs)
@@ -1075,22 +939,32 @@ check_pars(Name,Dir) ->
end.
del_ebin(Dir) ->
- case filename:basename(Dir) of
- "ebin" ->
- Dir2 = filename:dirname(Dir),
- Dir3 = filename:dirname(Dir2),
- Ext = archive_extension(),
- case filename:extension(Dir3) of
- E when E =:= Ext ->
- %% Strip archive extension
- filename:join([filename:dirname(Dir3),
- filename:basename(Dir3, Ext)]);
- _ ->
- Dir2
- end;
+ filename:join(del_ebin_1(filename:split(Dir))).
+
+del_ebin_1([Parent,App,"ebin"]) ->
+ case filename:basename(Parent) of
+ [] ->
+ %% Parent is the root directory
+ [Parent,App];
_ ->
- Dir
- end.
+ Ext = archive_extension(),
+ case filename:basename(Parent, Ext) of
+ Parent ->
+ %% Plain directory.
+ [Parent,App];
+ Archive ->
+ %% Archive.
+ [Archive]
+ end
+ end;
+del_ebin_1(Path = [_App,"ebin"]) ->
+ del_ebin_1(filename:split(absname(filename:join(Path))));
+del_ebin_1(["ebin"]) ->
+ del_ebin_1(filename:split(absname("ebin")));
+del_ebin_1([H|T]) ->
+ [H|del_ebin_1(T)];
+del_ebin_1([]) ->
+ [].
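%% NOTE (annotation, not part of the original patch): del_ebin/1 strips a
%% trailing "ebin" component and, for archives, also the archive extension:
%%   del_ebin("/usr/lib/mnesia-4.15.3/ebin")
%%       -> "/usr/lib/mnesia-4.15.3"
%%   del_ebin("/usr/lib/mnesia-4.15.3.ez/mnesia-4.15.3/ebin")
%%       -> "/usr/lib/mnesia-4.15.3"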
replace_name(Dir, Db) ->
case get_name(Dir) of
@@ -1206,14 +1080,14 @@ add_paths(Where,[Dir|Tail],Path,NameDb) ->
add_paths(_,_,Path,_) ->
{ok,Path}.
-do_load_binary(Module, File, Binary, Caller, St) ->
- case modp(Module) andalso modp(File) andalso is_binary(Binary) of
+do_load_binary(Module, File, Binary, From, St) ->
+ case modp(File) andalso is_binary(Binary) of
true ->
- case erlang:module_loaded(to_atom(Module)) of
+ case erlang:module_loaded(Module) of
true -> do_purge(Module);
false -> ok
end,
- try_load_module(File, Module, Binary, Caller, St);
+ try_load_module(File, Module, Binary, From, St);
false ->
{reply,{error,badarg},St}
end.
@@ -1222,158 +1096,124 @@ modp(Atom) when is_atom(Atom) -> true;
modp(List) when is_list(List) -> int_list(List);
modp(_) -> false.
-load_abs(File, Mod, Caller, St) ->
+load_abs(File, Mod, From, St) ->
Ext = objfile_extension(),
FileName0 = lists:concat([File, Ext]),
FileName = absname(FileName0),
case erl_prim_loader:get_file(FileName) of
{ok,Bin,_} ->
- try_load_module(FileName, Mod, Bin, Caller, St);
+ try_load_module(FileName, Mod, Bin, From, St);
error ->
{reply,{error,nofile},St}
end.
-try_load_module(Mod, Dir, Caller, St) ->
- File = filename:append(Dir, to_list(Mod) ++
- objfile_extension()),
- case erl_prim_loader:get_file(File) of
- error ->
- {reply,error,St};
- {ok,Binary,FName} ->
- try_load_module(absname(FName), Mod, Binary, Caller, St)
- end.
-
-try_load_module(File, Mod, Bin, {From,_}=Caller, St0) ->
- M = to_atom(Mod),
- case pending_on_load(M, From, St0) of
- no ->
- try_load_module_1(File, M, Bin, Caller, St0);
- {yes,St} ->
- {noreply,St}
- end.
+try_load_module(File, Mod, Bin, From, St) ->
+ Action = fun(_, S) ->
+ try_load_module_1(File, Mod, Bin, From, S)
+ end,
+ handle_pending_on_load(Action, Mod, From, St).
-try_load_module_1(File, Mod, Bin, Caller, #state{moddb=Db}=St) ->
+try_load_module_1(File, Mod, Bin, From, #state{moddb=Db}=St) ->
case is_sticky(Mod, Db) of
true -> %% Sticky file; reject the load
error_msg("Can't load module '~w' that resides in sticky dir\n",[Mod]),
{reply,{error,sticky_directory},St};
false ->
Architecture = erlang:system_info(hipe_architecture),
- try_load_module_2(File, Mod, Bin, Caller, Architecture, St)
+ try_load_module_2(File, Mod, Bin, From, Architecture, St)
end.
-try_load_module_2(File, Mod, Bin, Caller, undefined, St) ->
- try_load_module_3(File, Mod, Bin, Caller, undefined, St);
-try_load_module_2(File, Mod, Bin, Caller, Architecture,
+try_load_module_2(File, Mod, Bin, From, undefined, St) ->
+ try_load_module_3(File, Mod, Bin, From, undefined, St);
+try_load_module_2(File, Mod, Bin, From, Architecture,
#state{moddb=Db}=St) ->
- case catch load_native_code(Mod, Bin, Architecture) of
+ case catch hipe_unified_loader:load_native_code(Mod, Bin, Architecture) of
{module,Mod} = Module ->
- ets:insert(Db, {Mod,File}),
+ ets:insert(Db, {Mod,File}),
{reply,Module,St};
no_native ->
- try_load_module_3(File, Mod, Bin, Caller, Architecture, St);
+ try_load_module_3(File, Mod, Bin, From, Architecture, St);
Error ->
error_msg("Native loading of ~ts failed: ~p\n", [File,Error]),
- {reply,ok,St}
+ {reply,{error,Error},St}
end.
-try_load_module_3(File, Mod, Bin, Caller, Architecture,
- #state{moddb=Db}=St) ->
- case erlang:load_module(Mod, Bin) of
- {module,Mod} = Module ->
- ets:insert(Db, {Mod,File}),
- post_beam_load(Mod, Architecture),
- {reply,Module,St};
- {error,on_load} ->
- handle_on_load(Mod, File, Caller, St);
- {error,What} = Error ->
- error_msg("Loading of ~ts failed: ~p\n", [File, What]),
- {reply,Error,St}
- end.
-
-load_native_code(Mod, Bin, Architecture) ->
- %% During bootstrapping of Open Source Erlang, we don't have any hipe
- %% loader modules, but the Erlang emulator might be hipe enabled.
- %% Therefore we must test for that the loader modules are available
- %% before trying to to load native code.
- case erlang:module_loaded(hipe_unified_loader) of
- false ->
- no_native;
- true ->
- Result = hipe_unified_loader:load_native_code(Mod, Bin,
- Architecture),
- case Result of
- {module,_} ->
- put(?ANY_NATIVE_CODE_LOADED, true);
- _ ->
- ok
- end,
- Result
- end.
-
-hipe_result_to_status(Result) ->
+try_load_module_3(File, Mod, Bin, From, _Architecture, St0) ->
+ Action = fun({module,_}=Module, #state{moddb=Db}=S) ->
+ ets:insert(Db, {Mod,File}),
+ {reply,Module,S};
+ ({error,on_load_failure}=Error, S) ->
+ {reply,Error,S};
+ ({error,What}=Error, S) ->
+ error_msg("Loading of ~ts failed: ~p\n", [File, What]),
+ {reply,Error,S}
+ end,
+ Res = erlang:load_module(Mod, Bin),
+ handle_on_load(Res, Action, Mod, From, St0).
+
+hipe_result_to_status(Result, #state{}) ->
case Result of
{module,_} ->
- put(?ANY_NATIVE_CODE_LOADED, true),
Result;
_ ->
{error,Result}
end.
-post_beam_load(Mod, Architecture) ->
- %% post_beam_load/2 can potentially be very expensive because it
- %% blocks multi-scheduling; thus we want to avoid the call if we
- %% know that it is not needed.
- case get(?ANY_NATIVE_CODE_LOADED) of
- true -> hipe_unified_loader:post_beam_load(Mod, Architecture);
- false -> ok
- end.
int_list([H|T]) when is_integer(H) -> int_list(T);
int_list([_|_]) -> false;
int_list([]) -> true.
-load_file(Mod0, {From,_}=Caller, St0) ->
- Mod = to_atom(Mod0),
- case pending_on_load(Mod, From, St0) of
- no -> load_file_1(Mod, Caller, St0);
- {yes,St} -> {noreply,St}
- end.
-
-load_file_1(Mod, Caller, #state{path=Path,cache=no_cache}=St) ->
- case mod_to_bin(Path, Mod) of
+ensure_loaded(Mod, From, St0) ->
+ Action = fun(_, S) ->
+ case erlang:module_loaded(Mod) of
+ true ->
+ {reply,{module,Mod},S};
+ false ->
+ load_file_1(Mod, From, S)
+ end
+ end,
+ handle_pending_on_load(Action, Mod, From, St0).
+
+load_file(Mod, From, St0) ->
+ Action = fun(_, S) ->
+ load_file_1(Mod, From, S)
+ end,
+ handle_pending_on_load(Action, Mod, From, St0).
+
+load_file_1(Mod, From, St) ->
+ case get_object_code(St, Mod) of
error ->
{reply,{error,nofile},St};
{Mod,Binary,File} ->
- try_load_module(File, Mod, Binary, Caller, St)
- end;
-load_file_1(Mod, Caller, #state{cache=Cache}=St0) ->
- Key = {obj,Mod},
- case ets:lookup(Cache, Key) of
- [] ->
- St = rehash_cache(St0),
- case ets:lookup(St#state.cache, Key) of
- [] ->
- {reply,{error,nofile},St};
- [{Key,Dir}] ->
- try_load_module(Mod, Dir, Caller, St)
- end;
- [{Key,Dir}] ->
- try_load_module(Mod, Dir, Caller, St0)
+ try_load_module_1(File, Mod, Binary, From, St)
+ end.
+
+get_object_code(#state{path=Path}, Mod) when is_atom(Mod) ->
+ ModStr = atom_to_list(Mod),
+ case erl_prim_loader:is_basename(ModStr) of
+ true ->
+ mod_to_bin(Path, Mod, ModStr ++ objfile_extension());
+ false ->
+ error
end.
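%% NOTE (annotation, not part of the original patch): the
%% erl_prim_loader:is_basename/1 guard appears to reject module names that
%% contain directory separators, so get_object_code(St, 'foo/bar') is
%% expected to return error instead of probing the code path.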
-mod_to_bin([Dir|Tail], Mod) ->
- File = filename:append(Dir, to_list(Mod) ++ objfile_extension()),
+mod_to_bin([Dir|Tail], Mod, ModFile) ->
+ File = filename:append(Dir, ModFile),
case erl_prim_loader:get_file(File) of
error ->
- mod_to_bin(Tail, Mod);
- {ok,Bin,FName} ->
- {Mod,Bin,absname(FName)}
+ mod_to_bin(Tail, Mod, ModFile);
+ {ok,Bin,_} ->
+ case filename:pathtype(File) of
+ absolute ->
+ {Mod,Bin,File};
+ _ ->
+ {Mod,Bin,absname(File)}
+ end
end;
-mod_to_bin([], Mod) ->
+mod_to_bin([], Mod, ModFile) ->
%% Finally, also try erl_prim_loader's own method
- File = to_list(Mod) ++ objfile_extension(),
- case erl_prim_loader:get_file(File) of
+ case erl_prim_loader:get_file(ModFile) of
error ->
error; % No more alternatives !
{ok,Bin,FName} ->
@@ -1416,307 +1256,155 @@ absname_vr([[X, $:]|Name], _, _AbsBase) ->
absname(filename:join(Name), Dcwd).
-%% do_purge(Module)
-%% Kill all processes running code from *old* Module, and then purge the
-%% module. Return true if any processes killed, else false.
-
-do_purge(Mod0) ->
- Mod = to_atom(Mod0),
- case erlang:check_old_code(Mod) of
- false ->
- false;
- true ->
- Res = check_proc_code(erlang:processes(), Mod, true),
- try
- erlang:purge_module(Mod)
- catch
- _:_ -> ignore
- end,
- Res
+is_loaded(M, Db) ->
+ case ets:lookup(Db, M) of
+ [{M,File}] -> {file,File};
+ [] -> false
end.
-%% do_soft_purge(Module)
-%% Purge old code only if no procs remain that run old code.
-%% Return true in that case, false if procs remain (in this
-%% case old code is not purged)
+do_purge(Mod) ->
+ {_WasOld, DidKill} = erts_code_purger:purge(Mod),
+ DidKill.
-do_soft_purge(Mod0) ->
- Mod = to_atom(Mod0),
- case erlang:check_old_code(Mod) of
- false ->
- true;
- true ->
- case check_proc_code(erlang:processes(), Mod, false) of
- false ->
- false;
- true ->
- try
- erlang:purge_module(Mod)
- catch
- _:_ -> ignore
- end,
- true
- end
- end.
+do_soft_purge(Mod) ->
+ erts_code_purger:soft_purge(Mod).
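%% NOTE (annotation, not part of the original patch): purging is now
%% delegated to the erts_code_purger process. Judging from the calls above,
%% erts_code_purger:purge/1 returns {WasOld, DidKill} and soft_purge/1
%% returns a boolean saying whether the old code could be removed.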
-%%
-%% check_proc_code(Pids, Mod, Hard) - Send asynchronous
-%% requests to all processes to perform a check_process_code
-%% operation. Each process will check their own state and
-%% reply with the result. If 'Hard' equals
-%% - true, processes that refer 'Mod' will be killed. If
-%% any processes were killed true is returned; otherwise,
-%% false.
-%% - false, and any processes refer 'Mod', false will
-%% returned; otherwise, true.
-%%
-%% Requests will be sent to all processes identified by
-%% Pids at once, but without allowing GC to be performed.
-%% Check process code operations that are aborted due to
-%% GC need, will be restarted allowing GC. However, only
-%% ?MAX_CPC_GC_PROCS outstanding operation allowing GC at
-%% a time will be allowed. This in order not to blow up
-%% memory wise.
-%%
-%% We also only allow ?MAX_CPC_NO_OUTSTANDING_KILLS
-%% outstanding kills. This both in order to avoid flooding
-%% our message queue with 'DOWN' messages and limiting the
-%% amount of memory used to keep references to all
-%% outstanding kills.
-%%
+is_dir(Path) ->
+ case erl_prim_loader:read_file_info(Path) of
+ {ok,#file_info{type=directory}} -> true;
+ _ -> false
+ end.
-%% We maybe should allow more than two outstanding
-%% GC requests, but for now we play it safe...
--define(MAX_CPC_GC_PROCS, 2).
--define(MAX_CPC_NO_OUTSTANDING_KILLS, 10).
-
--record(cpc_static, {hard, module, tag}).
-
--record(cpc_kill, {outstanding = [],
- no_outstanding = 0,
- waiting = [],
- killed = false}).
-
-check_proc_code(Pids, Mod, Hard) ->
- Tag = erlang:make_ref(),
- CpcS = #cpc_static{hard = Hard,
- module = Mod,
- tag = Tag},
- check_proc_code(CpcS, cpc_init(CpcS, Pids, 0), 0, [], #cpc_kill{}, true).
-
-check_proc_code(#cpc_static{hard = true}, 0, 0, [],
- #cpc_kill{outstanding = [], waiting = [], killed = Killed},
- true) ->
- %% No outstanding requests. We did a hard check, so result is whether or
- %% not we killed any processes...
- Killed;
-check_proc_code(#cpc_static{hard = false}, 0, 0, [], _KillState, Success) ->
- %% No outstanding requests and we did a soft check...
- Success;
-check_proc_code(#cpc_static{hard = false, tag = Tag} = CpcS, NoReq0, NoGcReq0,
- [], _KillState, false) ->
- %% Failed soft check; just cleanup the remaining replies corresponding
- %% to the requests we've sent...
- {NoReq1, NoGcReq1} = receive
- {check_process_code, {Tag, _P, GC}, _Res} ->
- case GC of
- false -> {NoReq0-1, NoGcReq0};
- true -> {NoReq0, NoGcReq0-1}
- end
- end,
- check_proc_code(CpcS, NoReq1, NoGcReq1, [], _KillState, false);
-check_proc_code(#cpc_static{tag = Tag} = CpcS, NoReq0, NoGcReq0, NeedGC0,
- KillState0, Success) ->
-
- %% Check if we should request a GC operation
- {NoGcReq1, NeedGC1} = case NoGcReq0 < ?MAX_CPC_GC_PROCS of
- GcOpAllowed when GcOpAllowed == false;
- NeedGC0 == [] ->
- {NoGcReq0, NeedGC0};
- _ ->
- {NoGcReq0+1, cpc_request_gc(CpcS,NeedGC0)}
- end,
-
- %% Wait for a cpc reply or 'DOWN' message
- {NoReq1, NoGcReq2, Pid, Result, KillState1} = cpc_recv(Tag,
- NoReq0,
- NoGcReq1,
- KillState0),
-
- %% Check the result of the reply
- case Result of
- aborted ->
- %% Operation aborted due to the need to GC in order to
- %% determine if the process is referring the module.
- %% Schedule the operation for restart allowing GC...
- check_proc_code(CpcS, NoReq1, NoGcReq2, [Pid|NeedGC1], KillState1,
- Success);
- false ->
- %% Process not referring the module; done with this process...
- check_proc_code(CpcS, NoReq1, NoGcReq2, NeedGC1, KillState1,
- Success);
- true ->
- %% Process referring the module...
- case CpcS#cpc_static.hard of
- false ->
- %% ... and soft check. The whole operation failed so
- %% no point continuing; clean up and fail...
- check_proc_code(CpcS, NoReq1, NoGcReq2, [], KillState1,
- false);
- true ->
- %% ... and hard check; schedule kill of it...
- check_proc_code(CpcS, NoReq1, NoGcReq2, NeedGC1,
- cpc_sched_kill(Pid, KillState1), Success)
- end;
- 'DOWN' ->
- %% Handled 'DOWN' message
- check_proc_code(CpcS, NoReq1, NoGcReq2, NeedGC1,
- KillState1, Success)
+%%%
+%%% Loading of multiple modules in parallel.
+%%%
+
+finish_loading(Prepared, EnsureLoaded, #state{moddb=Db}=St) ->
+ Ps = [fun(L) -> finish_loading_ensure(L, EnsureLoaded) end,
+ fun(L) -> abort_if_pending_on_load(L, St) end,
+ fun(L) -> abort_if_sticky(L, Db) end,
+ fun(L) -> do_finish_loading(L, St) end],
+ run(Ps, Prepared).
+
+finish_loading_ensure(Prepared, true) ->
+ {ok,[P || {M,_}=P <- Prepared, not erlang:module_loaded(M)]};
+finish_loading_ensure(Prepared, false) ->
+ {ok,Prepared}.
+
+abort_if_pending_on_load(L, #state{on_load=[]}) ->
+ {ok,L};
+abort_if_pending_on_load(L, #state{on_load=OnLoad}) ->
+ Pending = [{M,pending_on_load} ||
+ {M,_} <- L,
+ lists:keymember(M, 2, OnLoad)],
+ case Pending of
+ [] -> {ok,L};
+ [_|_] -> {error,Pending}
end.
-cpc_recv(Tag, NoReq, NoGcReq, #cpc_kill{outstanding = []} = KillState) ->
- receive
- {check_process_code, {Tag, Pid, GC}, Res} ->
- cpc_handle_cpc(NoReq, NoGcReq, GC, Pid, Res, KillState)
- end;
-cpc_recv(Tag, NoReq, NoGcReq,
- #cpc_kill{outstanding = [R0, R1, R2, R3, R4 | _]} = KillState) ->
- receive
- {'DOWN', R, process, _, _} when R == R0;
- R == R1;
- R == R2;
- R == R3;
- R == R4 ->
- cpc_handle_down(NoReq, NoGcReq, R, KillState);
- {check_process_code, {Tag, Pid, GC}, Res} ->
- cpc_handle_cpc(NoReq, NoGcReq, GC, Pid, Res, KillState)
- end;
-cpc_recv(Tag, NoReq, NoGcReq, #cpc_kill{outstanding = [R|_]} = KillState) ->
- receive
- {'DOWN', R, process, _, _} ->
- cpc_handle_down(NoReq, NoGcReq, R, KillState);
- {check_process_code, {Tag, Pid, GC}, Res} ->
- cpc_handle_cpc(NoReq, NoGcReq, GC, Pid, Res, KillState)
+abort_if_sticky(L, Db) ->
+ Sticky = [{M,sticky_directory} || {M,_} <- L, is_sticky(M, Db)],
+ case Sticky of
+ [] -> {ok,L};
+ [_|_] -> {error,Sticky}
end.
-cpc_handle_down(NoReq, NoGcReq, R, #cpc_kill{outstanding = Rs,
- no_outstanding = N} = KillState) ->
- {NoReq, NoGcReq, undefined, 'DOWN',
- cpc_sched_kill_waiting(KillState#cpc_kill{outstanding = cpc_list_rm(R, Rs),
- no_outstanding = N-1})}.
-
-cpc_list_rm(R, [R|Rs]) ->
- Rs;
-cpc_list_rm(R0, [R1|Rs]) ->
- [R1|cpc_list_rm(R0, Rs)].
-
-cpc_handle_cpc(NoReq, NoGcReq, false, Pid, Res, KillState) ->
- {NoReq-1, NoGcReq, Pid, Res, KillState};
-cpc_handle_cpc(NoReq, NoGcReq, true, Pid, Res, KillState) ->
- {NoReq, NoGcReq-1, Pid, Res, KillState}.
-
-cpc_sched_kill_waiting(#cpc_kill{waiting = []} = KillState) ->
- KillState;
-cpc_sched_kill_waiting(#cpc_kill{outstanding = Rs,
- no_outstanding = N,
- waiting = [P|Ps]} = KillState) ->
- R = erlang:monitor(process, P),
- exit(P, kill),
- KillState#cpc_kill{outstanding = [R|Rs],
- no_outstanding = N+1,
- waiting = Ps,
- killed = true}.
-
-cpc_sched_kill(Pid, #cpc_kill{no_outstanding = N, waiting = Pids} = KillState)
- when N >= ?MAX_CPC_NO_OUTSTANDING_KILLS ->
- KillState#cpc_kill{waiting = [Pid|Pids]};
-cpc_sched_kill(Pid,
- #cpc_kill{outstanding = Rs, no_outstanding = N} = KillState) ->
- R = erlang:monitor(process, Pid),
- exit(Pid, kill),
- KillState#cpc_kill{outstanding = [R|Rs],
- no_outstanding = N+1,
- killed = true}.
-
-cpc_request(#cpc_static{tag = Tag, module = Mod}, Pid, AllowGc) ->
- erlang:check_process_code(Pid, Mod, [{async, {Tag, Pid, AllowGc}},
- {allow_gc, AllowGc}]).
-
-cpc_request_gc(CpcS, [Pid|Pids]) ->
- cpc_request(CpcS, Pid, true),
- Pids.
-
-cpc_init(_CpcS, [], NoReqs) ->
- NoReqs;
-cpc_init(CpcS, [Pid|Pids], NoReqs) ->
- cpc_request(CpcS, Pid, false),
- cpc_init(CpcS, Pids, NoReqs+1).
-
-% end of check_proc_code() implementation.
+do_finish_loading(Prepared, #state{moddb=Db}) ->
+ MagicBins = [B || {_,{B,_}} <- Prepared],
+ case erlang:finish_loading(MagicBins) of
+ ok ->
+ MFs = [{M,F} || {M,{_,F}} <- Prepared],
+ true = ets:insert(Db, MFs),
+ ok;
+ {Reason,Ms} ->
+ {error,[{M,Reason} || M <- Ms]}
+ end.
-is_loaded(M, Db) ->
- case ets:lookup(Db, M) of
- [{M,File}] -> {file,File};
- [] -> false
+run([F], Data) ->
+ F(Data);
+run([F|Fs], Data0) ->
+ case F(Data0) of
+ {ok,Data} ->
+ run(Fs, Data);
+ {error,_}=Error ->
+ Error
end.
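%% NOTE (annotation, not part of the original patch): run/2 threads Data
%% through the list of funs and stops at the first {error,_}; for example:
%%   run([fun(N) -> {ok,N+1} end, fun(N) -> N*2 end], 1)   -> 4
%%   run([fun(_) -> {error,boom} end, fun(N) -> N end], 1) -> {error,boom}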
%% -------------------------------------------------------
%% The on_load functionality.
%% -------------------------------------------------------
-handle_on_load(Mod, File, {From,_}, #state{on_load=OnLoad0}=St0) ->
+handle_on_load({error,on_load}, Action, Mod, From, St0) ->
+ #state{on_load=OnLoad0} = St0,
Fun = fun() ->
Res = erlang:call_on_load_function(Mod),
exit(Res)
end,
- {_,Ref} = spawn_monitor(Fun),
- OnLoad = [{Ref,Mod,File,[From]}|OnLoad0],
+ PidRef = spawn_monitor(Fun),
+ PidAction = {From,Action},
+ OnLoad = [{PidRef,Mod,[PidAction]}|OnLoad0],
St = St0#state{on_load=OnLoad},
- {noreply,St}.
+ {noreply,St};
+handle_on_load(Res, Action, _, _, St) ->
+ Action(Res, St).
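%% NOTE (annotation, not part of the original patch): as used above, an
%% Action is a fun(Result, State) returning {reply,Reply,State'} or
%% {noreply,State'}. When the module has an on_load function, the Action is
%% queued in the on_load list and applied later by finish_on_load_2/3.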
-pending_on_load(_, _, #state{on_load=[]}) ->
- no;
-pending_on_load(Mod, From, #state{on_load=OnLoad0}=St) ->
- case lists:keymember(Mod, 2, OnLoad0) of
+handle_pending_on_load(Action, Mod, From, #state{on_load=OnLoad0}=St) ->
+ case lists:keyfind(Mod, 2, OnLoad0) of
false ->
- no;
- true ->
- OnLoad = pending_on_load_1(Mod, From, OnLoad0),
- {yes,St#state{on_load=OnLoad}}
+ Action(ok, St);
+ {{From,_Ref},Mod,_Pids} ->
+ %% The on_load function tried to make an external
+ %% call to its own module. That would be a deadlock.
+ %% Fail the call. (The call is probably from error_handler,
+ %% and it will ignore the actual error reason and cause
+ %% an undef exception.)
+ {reply,{error,deadlock},St};
+ {_,_,_} ->
+ OnLoad = handle_pending_on_load_1(Mod, {From,Action}, OnLoad0),
+ {noreply,St#state{on_load=OnLoad}}
end.
-pending_on_load_1(Mod, From, [{Ref,Mod,File,Pids}|T]) ->
- [{Ref,Mod,File,[From|Pids]}|T];
-pending_on_load_1(Mod, From, [H|T]) ->
- [H|pending_on_load_1(Mod, From, T)];
-pending_on_load_1(_, _, []) -> [].
+handle_pending_on_load_1(Mod, From, [{PidRef,Mod,Pids}|T]) ->
+ [{PidRef,Mod,[From|Pids]}|T];
+handle_pending_on_load_1(Mod, From, [H|T]) ->
+ [H|handle_pending_on_load_1(Mod, From, T)];
+handle_pending_on_load_1(_, _, []) -> [].
-finish_on_load(Ref, OnLoadRes, #state{on_load=OnLoad0,moddb=Db}=State) ->
- case lists:keyfind(Ref, 1, OnLoad0) of
+finish_on_load(PidRef, OnLoadRes, #state{on_load=OnLoad0}=St0) ->
+ case lists:keyfind(PidRef, 1, OnLoad0) of
false ->
%% Since this process in general silently ignores messages
%% it doesn't understand, it should also ignore a 'DOWN'
%% message with an unknown reference.
- State;
- {Ref,Mod,File,WaitingPids} ->
- finish_on_load_1(Mod, File, OnLoadRes, WaitingPids, Db),
- OnLoad = [E || {R,_,_,_}=E <- OnLoad0, R =/= Ref],
- State#state{on_load=OnLoad}
+ St0;
+ {PidRef,Mod,Waiting} ->
+ St = finish_on_load_1(Mod, OnLoadRes, Waiting, St0),
+ OnLoad = [E || {R,_,_}=E <- OnLoad0, R =/= PidRef],
+ St#state{on_load=OnLoad}
end.
-finish_on_load_1(Mod, File, OnLoadRes, WaitingPids, Db) ->
+finish_on_load_1(Mod, OnLoadRes, Waiting, St) ->
Keep = OnLoadRes =:= ok,
- erlang:finish_after_on_load(Mod, Keep),
+ erts_code_purger:finish_after_on_load(Mod, Keep),
Res = case Keep of
false ->
_ = finish_on_load_report(Mod, OnLoadRes),
{error,on_load_failure};
true ->
- ets:insert(Db, {Mod,File}),
{module,Mod}
end,
- _ = [reply(Pid, Res) || Pid <- WaitingPids],
- ok.
+ finish_on_load_2(Waiting, Res, St).
+
+finish_on_load_2([{Pid,Action}|T], Res, St0) ->
+ case Action(Res, St0) of
+ {reply,Rep,St} ->
+ _ = reply(Pid, Rep),
+ finish_on_load_2(T, Res, St);
+ {noreply,St} ->
+ finish_on_load_2(T, Res, St)
+ end;
+finish_on_load_2([], _, St) ->
+ St.
finish_on_load_report(_Mod, Atom) when is_atom(Atom) ->
%% No error reports for atoms.
@@ -1728,7 +1416,7 @@ finish_on_load_report(Mod, Term) ->
%% from the code_server process.
spawn(fun() ->
F = "The on_load function for module "
- "~s returned ~P\n",
+ "~s returned:~n~P\n",
%% Express the call as an apply to simplify
%% the ext_mod_dep/1 test case.
@@ -1741,29 +1429,25 @@ finish_on_load_report(Mod, Term) ->
%% -------------------------------------------------------
all_loaded(Db) ->
- all_l(Db, ets:slot(Db, 0), 1, []).
+ Ms = ets:fun2ms(fun({M,_}=T) when is_atom(M) -> T end),
+ ets:select(Db, Ms).
-all_l(_Db, '$end_of_table', _, Acc) ->
- Acc;
-all_l(Db, ModInfo, N, Acc) ->
- NewAcc = strip_mod_info(ModInfo,Acc),
- all_l(Db, ets:slot(Db, N), N + 1, NewAcc).
-
-
-strip_mod_info([{{sticky,_},_}|T], Acc) -> strip_mod_info(T, Acc);
-strip_mod_info([H|T], Acc) -> strip_mod_info(T, [H|Acc]);
-strip_mod_info([], Acc) -> Acc.
-
-% error_msg(Format) ->
-% error_msg(Format,[]).
+-spec error_msg(io:format(), [term()]) -> 'ok'.
error_msg(Format, Args) ->
- Msg = {notify,{error, group_leader(), {self(), Format, Args}}},
- error_logger ! Msg,
+ logger ! {log,error,Format,Args,
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:system_time(microsecond),
+ error_logger=>#{tag=>error}}},
ok.
+-spec info_msg(io:format(), [term()]) -> 'ok'.
info_msg(Format, Args) ->
- Msg = {notify,{info_msg, group_leader(), {self(), Format, Args}}},
- error_logger ! Msg,
+ logger ! {log,info,Format,Args,
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:system_time(microsecond),
+ error_logger=>#{tag=>info_msg}}},
ok.
objfile_extension() ->
@@ -1774,6 +1458,3 @@ archive_extension() ->
to_list(X) when is_list(X) -> X;
to_list(X) when is_atom(X) -> atom_to_list(X).
-
-to_atom(X) when is_atom(X) -> X;
-to_atom(X) when is_list(X) -> list_to_atom(X).
diff --git a/lib/kernel/src/disk_log.erl b/lib/kernel/src/disk_log.erl
index f5450f30af..99ea8dc384 100644
--- a/lib/kernel/src/disk_log.erl
+++ b/lib/kernel/src/disk_log.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -67,7 +67,7 @@
%%-define(PROFILE(C), C).
-define(PROFILE(C), void).
--compile({inline,[{log_loop,5},{log_end_sync,2},{replies,2},{rflat,1}]}).
+-compile({inline,[{log_loop,6},{log_end_sync,2},{replies,2},{rflat,1}]}).
%%%----------------------------------------------------------------------
%%% Contract type specifications
@@ -75,8 +75,6 @@
-opaque continuation() :: #continuation{}.
--type bytes() :: binary() | [byte()].
-
-type file_error() :: term(). % XXX: refine
-type invalid_header() :: term(). % XXX: refine
@@ -127,28 +125,28 @@ open(A) ->
Log :: log(),
Term :: term().
log(Log, Term) ->
- req(Log, {log, term_to_binary(Term)}).
+ req(Log, {log, internal, [term_to_binary(Term)]}).
-spec blog(Log, Bytes) -> ok | {error, Reason :: log_error_rsn()} when
Log :: log(),
- Bytes :: bytes().
+ Bytes :: iodata().
blog(Log, Bytes) ->
- req(Log, {blog, check_bytes(Bytes)}).
+ req(Log, {log, external, [ensure_binary(Bytes)]}).
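%% NOTE (annotation, not part of the original patch): both calls now funnel
%% into a single {log,Format,Bins} request, e.g.:
%%   disk_log:log(my_log, {event,1})          %% internal format, any term
%%   disk_log:blog(my_log, <<"raw bytes\n">>) %% external format, iodata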
-spec log_terms(Log, TermList) -> ok | {error, Reason :: log_error_rsn()} when
Log :: log(),
TermList :: [term()].
log_terms(Log, Terms) ->
Bs = terms2bins(Terms),
- req(Log, {log, Bs}).
+ req(Log, {log, internal, Bs}).
-spec blog_terms(Log, BytesList) ->
ok | {error, Reason :: log_error_rsn()} when
Log :: log(),
- BytesList :: [bytes()].
+ BytesList :: [iodata()].
blog_terms(Log, Bytess) ->
- Bs = check_bytes_list(Bytess, Bytess),
- req(Log, {blog, Bs}).
+ Bs = ensure_binary_list(Bytess),
+ req(Log, {log, external, Bs}).
-type notify_ret() :: 'ok' | {'error', 'no_such_log'}.
@@ -156,27 +154,27 @@ blog_terms(Log, Bytess) ->
Log :: log(),
Term :: term().
alog(Log, Term) ->
- notify(Log, {alog, term_to_binary(Term)}).
+ notify(Log, {alog, internal, [term_to_binary(Term)]}).
-spec alog_terms(Log, TermList) -> notify_ret() when
Log :: log(),
TermList :: [term()].
alog_terms(Log, Terms) ->
Bs = terms2bins(Terms),
- notify(Log, {alog, Bs}).
+ notify(Log, {alog, internal, Bs}).
-spec balog(Log, Bytes) -> notify_ret() when
Log :: log(),
- Bytes :: bytes().
+ Bytes :: iodata().
balog(Log, Bytes) ->
- notify(Log, {balog, check_bytes(Bytes)}).
+ notify(Log, {alog, external, [ensure_binary(Bytes)]}).
-spec balog_terms(Log, ByteList) -> notify_ret() when
Log :: log(),
- ByteList :: [bytes()].
+ ByteList :: [iodata()].
balog_terms(Log, Bytess) ->
- Bs = check_bytes_list(Bytess, Bytess),
- notify(Log, {balog, Bs}).
+ Bs = ensure_binary_list(Bytess),
+ notify(Log, {alog, external, Bs}).
-type close_error_rsn() ::'no_such_log' | 'nonode'
| {'file_error', file:filename(), file_error()}.
@@ -219,9 +217,9 @@ truncate(Log, Head) ->
-spec btruncate(Log, BHead) -> 'ok' | {'error', trunc_error_rsn()} when
Log :: log(),
- BHead :: bytes().
+ BHead :: iodata().
btruncate(Log, Head) ->
- req(Log, {truncate, {ok, check_bytes(Head)}, btruncate, 2}).
+ req(Log, {truncate, {ok, ensure_binary(Head)}, btruncate, 2}).
-type reopen_error_rsn() :: no_such_log
| nonode
@@ -248,9 +246,9 @@ reopen(Log, NewFile, NewHead) ->
-spec breopen(Log, File, BHead) -> 'ok' | {'error', reopen_error_rsn()} when
Log :: log(),
File :: file:filename(),
- BHead :: bytes().
+ BHead :: iodata().
breopen(Log, NewFile, NewHead) ->
- req(Log, {reopen, NewFile, {ok, check_bytes(NewHead)}, breopen, 3}).
+ req(Log, {reopen, NewFile, {ok, ensure_binary(NewHead)}, breopen, 3}).
-type inc_wrap_error_rsn() :: 'no_such_log' | 'nonode'
| {'read_only_mode', log()}
@@ -268,7 +266,7 @@ inc_wrap_file(Log) ->
Size :: dlog_size(),
Reason :: no_such_log | nonode | {read_only_mode, Log}
| {blocked_log, Log}
- | {new_size_too_small, CurrentSize :: pos_integer()}
+ | {new_size_too_small, Log, CurrentSize :: pos_integer()}
| {badarg, size}
| {file_error, file:filename(), file_error()}.
change_size(Log, NewSize) ->
@@ -640,6 +638,8 @@ check_arg([{mode, read_only}|Tail], Res) ->
check_arg(Tail, Res#arg{mode = read_only});
check_arg([{mode, read_write}|Tail], Res) ->
check_arg(Tail, Res#arg{mode = read_write});
+check_arg([{quiet, Boolean}|Tail], Res) when is_boolean(Boolean) ->
+ check_arg(Tail, Res#arg{quiet = Boolean});
check_arg(Arg, _) ->
{error, {badarg, Arg}}.
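%% NOTE (annotation, not part of the original patch): {quiet,Boolean} is a
%% new open option (see the disk_log_1:set_quiet/1 call below); presumably
%% it suppresses the log's own error reports, e.g.:
%%   disk_log:open([{name,my_log}, {file,"my_log.LOG"}, {quiet,true}])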
@@ -670,13 +670,12 @@ init(Parent, Server) ->
process_flag(trap_exit, true),
loop(#state{parent = Parent, server = Server}).
-loop(State) when State#state.messages =:= [] ->
+loop(#state{messages = []}=State) ->
receive
Message ->
handle(Message, State)
end;
-loop(State) ->
- [M | Ms] = State#state.messages,
+loop(#state{messages = [M | Ms]}=State) ->
handle(M, State#state{messages = Ms}).
handle({From, write_cache}, S) when From =:= self() ->
@@ -686,106 +685,79 @@ handle({From, write_cache}, S) when From =:= self() ->
Error ->
loop(S#state{cache_error = Error})
end;
-handle({From, {log, B}}, S) ->
+handle({From, {log, Format, B}}=Message, S) ->
case get(log) of
- L when L#log.mode =:= read_only ->
+ #log{mode = read_only}=L ->
reply(From, {error, {read_only_mode, L#log.name}}, S);
- L when L#log.status =:= ok, L#log.format =:= internal ->
- log_loop(S, From, [B], [], iolist_size(B));
- L when L#log.status =:= ok, L#log.format =:= external ->
+ #log{status = ok, format=external}=L when Format =:= internal ->
reply(From, {error, {format_external, L#log.name}}, S);
- L when L#log.status =:= {blocked, false} ->
+ #log{status = ok, format=LogFormat} ->
+ log_loop(S, From, [B], [], iolist_size(B), LogFormat);
+ #log{status = {blocked, false}}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
- L when L#log.blocked_by =:= From ->
+ #log{blocked_by = From}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
_ ->
- loop(S#state{queue = [{From, {log, B}} | S#state.queue]})
- end;
-handle({From, {blog, B}}, S) ->
- case get(log) of
- L when L#log.mode =:= read_only ->
- reply(From, {error, {read_only_mode, L#log.name}}, S);
- L when L#log.status =:= ok ->
- log_loop(S, From, [B], [], iolist_size(B));
- L when L#log.status =:= {blocked, false} ->
- reply(From, {error, {blocked_log, L#log.name}}, S);
- L when L#log.blocked_by =:= From ->
- reply(From, {error, {blocked_log, L#log.name}}, S);
- _ ->
- loop(S#state{queue = [{From, {blog, B}} | S#state.queue]})
+ enqueue(Message, S)
end;
-handle({alog, B}, S) ->
+handle({alog, Format, B}=Message, S) ->
case get(log) of
- L when L#log.mode =:= read_only ->
+ #log{mode = read_only} ->
notify_owners({read_only,B}),
loop(S);
- L when L#log.status =:= ok, L#log.format =:= internal ->
- log_loop(S, [], [B], [], iolist_size(B));
- L when L#log.status =:= ok ->
+ #log{status = ok, format = external} when Format =:= internal ->
notify_owners({format_external, B}),
loop(S);
- L when L#log.status =:= {blocked, false} ->
- notify_owners({blocked_log, B}),
- loop(S);
- _ ->
- loop(S#state{queue = [{alog, B} | S#state.queue]})
- end;
-handle({balog, B}, S) ->
- case get(log) of
- L when L#log.mode =:= read_only ->
- notify_owners({read_only,B}),
- loop(S);
- L when L#log.status =:= ok ->
- log_loop(S, [], [B], [], iolist_size(B));
- L when L#log.status =:= {blocked, false} ->
+ #log{status = ok, format=LogFormat} ->
+ log_loop(S, [], [B], [], iolist_size(B), LogFormat);
+ #log{status = {blocked, false}} ->
notify_owners({blocked_log, B}),
loop(S);
_ ->
- loop(S#state{queue = [{balog, B} | S#state.queue]})
+ enqueue(Message, S)
end;
-handle({From, {block, QueueLogRecs}}, S) ->
+handle({From, {block, QueueLogRecs}}=Message, S) ->
case get(log) of
- L when L#log.status =:= ok ->
+ #log{status = ok}=L ->
do_block(From, QueueLogRecs, L),
reply(From, ok, S);
- L when L#log.status =:= {blocked, false} ->
+ #log{status = {blocked, false}}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
- L when L#log.blocked_by =:= From ->
+ #log{blocked_by = From}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
_ ->
- loop(S#state{queue = [{From, {block, QueueLogRecs}} |
- S#state.queue]})
+ enqueue(Message, S)
end;
handle({From, unblock}, S) ->
case get(log) of
- L when L#log.status =:= ok ->
+ #log{status = ok}=L ->
reply(From, {error, {not_blocked, L#log.name}}, S);
- L when L#log.blocked_by =:= From ->
+ #log{blocked_by = From}=L ->
S2 = do_unblock(L, S),
reply(From, ok, S2);
L ->
reply(From, {error, {not_blocked_by_pid, L#log.name}}, S)
end;
-handle({From, sync}, S) ->
+handle({From, sync}=Message, S) ->
case get(log) of
- L when L#log.mode =:= read_only ->
+ #log{mode = read_only}=L ->
reply(From, {error, {read_only_mode, L#log.name}}, S);
- L when L#log.status =:= ok ->
- sync_loop([From], S);
- L when L#log.status =:= {blocked, false} ->
+ #log{status = ok, format=LogFormat} ->
+ log_loop(S, [], [], [From], 0, LogFormat);
+ #log{status = {blocked, false}}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
- L when L#log.blocked_by =:= From ->
+ #log{blocked_by = From}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
_ ->
- loop(S#state{queue = [{From, sync} | S#state.queue]})
+ enqueue(Message, S)
end;
-handle({From, {truncate, Head, F, A}}, S) ->
+handle({From, {truncate, Head, F, A}}=Message, S) ->
case get(log) of
- L when L#log.mode =:= read_only ->
+ #log{mode = read_only}=L ->
reply(From, {error, {read_only_mode, L#log.name}}, S);
- L when L#log.status =:= ok, S#state.cache_error =/= ok ->
+ #log{status = ok} when S#state.cache_error =/= ok ->
loop(cache_error(S, [From]));
- L when L#log.status =:= ok ->
+ #log{status = ok}=L ->
H = merge_head(Head, L#log.head),
case catch do_trunc(L, H) of
ok ->
@@ -796,48 +768,46 @@ handle({From, {truncate, Head, F, A}}, S) ->
Error ->
do_exit(S, From, Error, ?failure(Error, F, A))
end;
- L when L#log.status =:= {blocked, false} ->
+ #log{status = {blocked, false}}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
- L when L#log.blocked_by =:= From ->
+ #log{blocked_by = From}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
_ ->
- loop(S#state{queue = [{From, {truncate, Head, F, A}}
- | S#state.queue]})
+ enqueue(Message, S)
end;
-handle({From, {chunk, Pos, B, N}}, S) ->
+handle({From, {chunk, Pos, B, N}}=Message, S) ->
case get(log) of
- L when L#log.status =:= ok, S#state.cache_error =/= ok ->
+ #log{status = ok} when S#state.cache_error =/= ok ->
loop(cache_error(S, [From]));
- L when L#log.status =:= ok ->
+ #log{status = ok}=L ->
R = do_chunk(L, Pos, B, N),
reply(From, R, S);
- L when L#log.blocked_by =:= From ->
+ #log{blocked_by = From}=L ->
R = do_chunk(L, Pos, B, N),
reply(From, R, S);
- L when L#log.status =:= {blocked, false} ->
+ #log{status = {blocked, false}}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
_L ->
- loop(S#state{queue = [{From, {chunk, Pos, B, N}} | S#state.queue]})
+ enqueue(Message, S)
end;
-handle({From, {chunk_step, Pos, N}}, S) ->
+handle({From, {chunk_step, Pos, N}}=Message, S) ->
case get(log) of
- L when L#log.status =:= ok, S#state.cache_error =/= ok ->
+ #log{status = ok} when S#state.cache_error =/= ok ->
loop(cache_error(S, [From]));
- L when L#log.status =:= ok ->
+ #log{status = ok}=L ->
R = do_chunk_step(L, Pos, N),
reply(From, R, S);
- L when L#log.blocked_by =:= From ->
+ #log{blocked_by = From}=L ->
R = do_chunk_step(L, Pos, N),
reply(From, R, S);
- L when L#log.status =:= {blocked, false} ->
+ #log{status = {blocked, false}}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
_ ->
- loop(S#state{queue = [{From, {chunk_step, Pos, N}}
- | S#state.queue]})
+ enqueue(Message, S)
end;
-handle({From, {change_notify, Pid, NewNotify}}, S) ->
+handle({From, {change_notify, Pid, NewNotify}}=Message, S) ->
case get(log) of
- L when L#log.status =:= ok ->
+ #log{status = ok}=L ->
case do_change_notify(L, Pid, NewNotify) of
{ok, L1} ->
put(log, L1),
@@ -845,39 +815,37 @@ handle({From, {change_notify, Pid, NewNotify}}, S) ->
Error ->
reply(From, Error, S)
end;
- L when L#log.status =:= {blocked, false} ->
+ #log{status = {blocked, false}}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
- L when L#log.blocked_by =:= From ->
+ #log{blocked_by = From}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
_ ->
- loop(S#state{queue = [{From, {change_notify, Pid, NewNotify}}
- | S#state.queue]})
+ enqueue(Message, S)
end;
-handle({From, {change_header, NewHead}}, S) ->
+handle({From, {change_header, NewHead}}=Message, S) ->
case get(log) of
- L when L#log.mode =:= read_only ->
+ #log{mode = read_only}=L ->
reply(From, {error, {read_only_mode, L#log.name}}, S);
- L when L#log.status =:= ok ->
- case check_head(NewHead, L#log.format) of
+ #log{status = ok, format = Format}=L ->
+ case check_head(NewHead, Format) of
{ok, Head} ->
- put(log, L#log{head = mk_head(Head, L#log.format)}),
+ put(log, L#log{head = mk_head(Head, Format)}),
reply(From, ok, S);
Error ->
reply(From, Error, S)
end;
- L when L#log.status =:= {blocked, false} ->
+ #log{status = {blocked, false}}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
- L when L#log.blocked_by =:= From ->
+ #log{blocked_by = From}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
_ ->
- loop(S#state{queue = [{From, {change_header, NewHead}}
- | S#state.queue]})
+ enqueue(Message, S)
end;
-handle({From, {change_size, NewSize}}, S) ->
+handle({From, {change_size, NewSize}}=Message, S) ->
case get(log) of
- L when L#log.mode =:= read_only ->
+ #log{mode = read_only}=L ->
reply(From, {error, {read_only_mode, L#log.name}}, S);
- L when L#log.status =:= ok ->
+ #log{status = ok}=L ->
case check_size(L#log.type, NewSize) of
ok ->
case catch do_change_size(L, NewSize) of % does the put
@@ -894,23 +862,22 @@ handle({From, {change_size, NewSize}}, S) ->
not_ok ->
reply(From, {error, {badarg, size}}, S)
end;
- L when L#log.status =:= {blocked, false} ->
+ #log{status = {blocked, false}}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
- L when L#log.blocked_by =:= From ->
+ #log{blocked_by = From}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
_ ->
- loop(S#state{queue = [{From, {change_size, NewSize}}
- | S#state.queue]})
+ enqueue(Message, S)
end;
-handle({From, inc_wrap_file}, S) ->
+handle({From, inc_wrap_file}=Message, S) ->
case get(log) of
- L when L#log.mode =:= read_only ->
+ #log{mode = read_only}=L ->
reply(From, {error, {read_only_mode, L#log.name}}, S);
- L when L#log.type =:= halt ->
+ #log{type = halt}=L ->
reply(From, {error, {halt_log, L#log.name}}, S);
- L when L#log.status =:= ok, S#state.cache_error =/= ok ->
+ #log{status = ok} when S#state.cache_error =/= ok ->
loop(cache_error(S, [From]));
- L when L#log.status =:= ok ->
+ #log{status = ok}=L ->
case catch do_inc_wrap_file(L) of
{ok, L2, Lost} ->
put(log, L2),
@@ -920,20 +887,22 @@ handle({From, inc_wrap_file}, S) ->
put(log, L2),
reply(From, Error, state_err(S, Error))
end;
- L when L#log.status =:= {blocked, false} ->
+ #log{status = {blocked, false}}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
- L when L#log.blocked_by =:= From ->
+ #log{blocked_by = From}=L ->
reply(From, {error, {blocked_log, L#log.name}}, S);
_ ->
- loop(S#state{queue = [{From, inc_wrap_file} | S#state.queue]})
+ enqueue(Message, S)
end;
handle({From, {reopen, NewFile, Head, F, A}}, S) ->
case get(log) of
- L when L#log.mode =:= read_only ->
+ #log{mode = read_only}=L ->
reply(From, {error, {read_only_mode, L#log.name}}, S);
- L when L#log.status =:= ok, S#state.cache_error =/= ok ->
+ #log{status = ok} when S#state.cache_error =/= ok ->
loop(cache_error(S, [From]));
- L when L#log.status =:= ok, L#log.filename =/= NewFile ->
+ #log{status = ok, filename = NewFile}=L ->
+ reply(From, {error, {same_file_name, L#log.name}}, S);
+ #log{status = ok}=L ->
case catch close_disk_log2(L) of
closed ->
File = L#log.filename,
@@ -966,8 +935,6 @@ handle({From, {reopen, NewFile, Head, F, A}}, S) ->
Error ->
do_exit(S, From, Error, ?failure(Error, F, A))
end;
- L when L#log.status =:= ok ->
- reply(From, {error, {same_file_name, L#log.name}}, S);
L ->
reply(From, {error, {blocked_log, L#log.name}}, S)
end;
@@ -1005,11 +972,11 @@ handle({From, close}, S) ->
end;
handle({From, info}, S) ->
reply(From, do_info(get(log), S#state.cnt), S);
-handle({'EXIT', From, Reason}, S) when From =:= S#state.parent ->
+handle({'EXIT', From, Reason}, #state{parent=From}=S) ->
%% Parent orders shutdown.
_ = do_stop(S),
exit(Reason);
-handle({'EXIT', From, Reason}, S) when From =:= S#state.server ->
+handle({'EXIT', From, Reason}, #state{server=From}=S) ->
%% The server is gone.
_ = do_stop(S),
exit(Reason);
@@ -1034,57 +1001,59 @@ handle({system, From, Req}, S) ->
handle(_, S) ->
loop(S).
-sync_loop(From, S) ->
- log_loop(S, [], [], From, 0).
+enqueue(Message, #state{queue = Queue}=S) ->
+ loop(S#state{queue = [Message | Queue]}).
+
+%% Collect further log and sync requests already in the mailbox or queued
-define(MAX_LOOK_AHEAD, 64*1024).
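%% NOTE (annotation, not part of the original patch): log_loop/6 below keeps
%% draining pending log and sync requests until about ?MAX_LOOK_AHEAD
%% (64 KiB) of data has been collected, then writes the whole batch with one
%% do_log/3 call and replies to all waiting callers at once.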
%% Inlined.
-log_loop(#state{cache_error = CE}=S, Pids, _Bins, _Sync, _Sz) when CE =/= ok ->
+log_loop(#state{cache_error = CE}=S, Pids, _Bins, _Sync, _Sz, _F) when CE =/= ok ->
loop(cache_error(S, Pids));
-log_loop(#state{}=S, Pids, Bins, Sync, Sz) when Sz > ?MAX_LOOK_AHEAD ->
- loop(log_end(S, Pids, Bins, Sync));
-log_loop(#state{messages = []}=S, Pids, Bins, Sync, Sz) ->
- receive
+log_loop(#state{}=S, Pids, Bins, Sync, Sz, _F) when Sz > ?MAX_LOOK_AHEAD ->
+ loop(log_end(S, Pids, Bins, Sync, Sz));
+log_loop(#state{messages = []}=S, Pids, Bins, Sync, Sz, F) ->
+ receive
Message ->
- log_loop(Message, Pids, Bins, Sync, Sz, S, get(log))
+ log_loop(Message, Pids, Bins, Sync, Sz, F, S)
after 0 ->
- loop(log_end(S, Pids, Bins, Sync))
+ loop(log_end(S, Pids, Bins, Sync, Sz))
end;
-log_loop(#state{messages = [M | Ms]}=S, Pids, Bins, Sync, Sz) ->
+log_loop(#state{messages = [M | Ms]}=S, Pids, Bins, Sync, Sz, F) ->
S1 = S#state{messages = Ms},
- log_loop(M, Pids, Bins, Sync, Sz, S1, get(log)).
+ log_loop(M, Pids, Bins, Sync, Sz, F, S1).
%% Items logged after the last sync request found are synced as well.
-log_loop({alog,B}, Pids, Bins, Sync, Sz, S, #log{format = internal}) ->
- %% {alog, _} allowed for the internal format only.
- log_loop(S, Pids, [B | Bins], Sync, Sz+iolist_size(B));
-log_loop({balog, B}, Pids, Bins, Sync, Sz, S, _L) ->
- log_loop(S, Pids, [B | Bins], Sync, Sz+iolist_size(B));
-log_loop({From, {log, B}}, Pids, Bins, Sync, Sz, S, #log{format = internal}) ->
- %% {log, _} allowed for the internal format only.
- log_loop(S, [From | Pids], [B | Bins], Sync, Sz+iolist_size(B));
-log_loop({From, {blog, B}}, Pids, Bins, Sync, Sz, S, _L) ->
- log_loop(S, [From | Pids], [B | Bins], Sync, Sz+iolist_size(B));
-log_loop({From, sync}, Pids, Bins, Sync, Sz, S, _L) ->
- log_loop(S, Pids, Bins, [From | Sync], Sz);
-log_loop(Message, Pids, Bins, Sync, _Sz, S, _L) ->
- NS = log_end(S, Pids, Bins, Sync),
+log_loop({alog, internal, B}, Pids, Bins, Sync, Sz, internal=F, S) ->
+ %% alog of terms allowed for the internal format only
+ log_loop(S, Pids, [B | Bins], Sync, Sz+iolist_size(B), F);
+log_loop({alog, binary, B}, Pids, Bins, Sync, Sz, F, S) ->
+ log_loop(S, Pids, [B | Bins], Sync, Sz+iolist_size(B), F);
+log_loop({From, {log, internal, B}}, Pids, Bins, Sync, Sz, internal=F, S) ->
+ %% log of terms allowed for the internal format only
+ log_loop(S, [From | Pids], [B | Bins], Sync, Sz+iolist_size(B), F);
+log_loop({From, {log, binary, B}}, Pids, Bins, Sync, Sz, F, S) ->
+ log_loop(S, [From | Pids], [B | Bins], Sync, Sz+iolist_size(B), F);
+log_loop({From, sync}, Pids, Bins, Sync, Sz, F, S) ->
+ log_loop(S, Pids, Bins, [From | Sync], Sz, F);
+log_loop(Message, Pids, Bins, Sync, Sz, _F, S) ->
+ NS = log_end(S, Pids, Bins, Sync, Sz),
handle(Message, NS).
-log_end(S, [], [], Sync) ->
+log_end(S, [], [], Sync, _Sz) ->
log_end_sync(S, Sync);
-log_end(S, Pids, Bins, Sync) ->
- case do_log(get(log), rflat(Bins)) of
+log_end(#state{cnt = Cnt}=S, Pids, Bins, Sync, Sz) ->
+ case do_log(get(log), rflat(Bins), Sz) of
N when is_integer(N) ->
ok = replies(Pids, ok),
- S1 = (state_ok(S))#state{cnt = S#state.cnt+N},
+ S1 = (state_ok(S))#state{cnt = Cnt + N},
log_end_sync(S1, Sync);
{error, {error, {full, _Name}}, N} when Pids =:= [] ->
- log_end_sync(state_ok(S#state{cnt = S#state.cnt + N}), Sync);
+ log_end_sync(state_ok(S#state{cnt = Cnt + N}), Sync);
{error, Error, N} ->
ok = replies(Pids, Error),
- state_err(S#state{cnt = S#state.cnt + N}, Error)
+ state_err(S#state{cnt = Cnt + N}, Error)
end.
%% Inlined.
@@ -1096,12 +1065,9 @@ log_end_sync(S, Sync) ->
state_err(S, Res).
%% Inlined.
-rflat([B]=L) when is_binary(B) -> L;
rflat([B]) -> B;
rflat(B) -> rflat(B, []).
-rflat([B | Bs], L) when is_binary(B) ->
- rflat(Bs, [B | L]);
rflat([B | Bs], L) ->
rflat(Bs, B ++ L);
rflat([], L) -> L.
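%% NOTE (annotation, not part of the original patch): Bins is accumulated
%% newest-first by log_loop, so rflat/1 restores logging order:
%%   rflat([[<<"c">>], [<<"a">>,<<"b">>]]) -> [<<"a">>,<<"b">>,<<"c">>]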
@@ -1138,17 +1104,17 @@ close_owner(Pid, L, S) ->
S2 = do_unblock(Pid, get(log), S),
unlink(Pid),
do_close2(L1, S2).
-
+
%% -> {stop, S} | {continue, S}
-close_user(Pid, L, S) when L#log.users > 0 ->
- L1 = L#log{users = L#log.users - 1},
+close_user(Pid, #log{users=Users}=L, S) when Users > 0 ->
+ L1 = L#log{users = Users - 1},
put(log, L1),
S2 = do_unblock(Pid, get(log), S),
do_close2(L1, S2);
close_user(_Pid, _L, S) ->
{continue, S}.
-do_close2(L, S) when L#log.users =:= 0, L#log.owners =:= [] ->
+do_close2(#log{users = 0, owners = []}, S) ->
{stop, S};
do_close2(_L, S) ->
{continue, S}.
@@ -1227,14 +1193,14 @@ add_pid(Pid, Notify, L) when is_pid(Pid) ->
add_pid(_NotAPid, _Notify, L) ->
{ok, L#log{users = L#log.users + 1}}.
-unblock_pid(L) when L#log.blocked_by =:= none ->
+unblock_pid(#log{blocked_by = none}) ->
ok;
-unblock_pid(L) ->
- case is_owner(L#log.blocked_by, L) of
+unblock_pid(#log{blocked_by = Pid}=L) ->
+ case is_owner(Pid, L) of
{true, _Notify} ->
ok;
false ->
- unlink(L#log.blocked_by)
+ unlink(Pid)
end.
%% -> true | false
@@ -1310,16 +1276,24 @@ compare_arg(_Attr, _Val, _A) ->
%% -> {ok, Res, log(), Cnt} | Error
do_open(A) ->
- L = #log{name = A#arg.name,
- filename = A#arg.file,
- size = A#arg.size,
- head = mk_head(A#arg.head, A#arg.format),
- mode = A#arg.mode,
- version = A#arg.version},
- do_open2(L, A).
+ #arg{type = Type, format = Format, name = Name, head = Head0,
+ file = FName, repair = Repair, size = Size, mode = Mode,
+ quiet = Quiet, version = V} = A,
+ disk_log_1:set_quiet(Quiet),
+ Head = mk_head(Head0, Format),
+ case do_open2(Type, Format, Name, FName, Repair, Size, Mode, Head, V) of
+ {ok, Ret, Extra, FormatType, NoItems} ->
+ L = #log{name = Name, type = Type, format = Format,
+ filename = FName, size = Size,
+ format_type = FormatType, head = Head, mode = Mode,
+ version = V, extra = Extra},
+ {ok, Ret, L, NoItems};
+ Error ->
+ Error
+ end.
mk_head({head, Term}, internal) -> {ok, term_to_binary(Term)};
-mk_head({head, Bytes}, external) -> {ok, check_bytes(Bytes)};
+mk_head({head, Bytes}, external) -> {ok, ensure_binary(Bytes)};
mk_head(H, _) -> H.
terms2bins([T | Ts]) ->
@@ -1327,30 +1301,29 @@ terms2bins([T | Ts]) ->
terms2bins([]) ->
[].
-check_bytes_list([B | Bs], Bs0) when is_binary(B) ->
- check_bytes_list(Bs, Bs0);
-check_bytes_list([], Bs0) ->
+ensure_binary_list(Bs) ->
+ ensure_binary_list(Bs, Bs).
+
+ensure_binary_list([B | Bs], Bs0) when is_binary(B) ->
+ ensure_binary_list(Bs, Bs0);
+ensure_binary_list([], Bs0) ->
Bs0;
-check_bytes_list(_, Bs0) ->
- check_bytes_list(Bs0).
-
-check_bytes_list([B | Bs]) when is_binary(B) ->
- [B | check_bytes_list(Bs)];
-check_bytes_list([B | Bs]) ->
- [list_to_binary(B) | check_bytes_list(Bs)];
-check_bytes_list([]) ->
+ensure_binary_list(_, Bs0) ->
+ make_binary_list(Bs0).
+
+make_binary_list([B | Bs]) ->
+ [ensure_binary(B) | make_binary_list(Bs)];
+make_binary_list([]) ->
[].
-check_bytes(Binary) when is_binary(Binary) ->
- Binary;
-check_bytes(Bytes) ->
- list_to_binary(Bytes).
+ensure_binary(Bytes) ->
+ iolist_to_binary(Bytes).
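%% NOTE (annotation, not part of the original patch):
%%   ensure_binary(["ab", <<"cd">>, [$e]]) -> <<"abcde">>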
%%-----------------------------------------------------------------
%% Change size of the logs in runtime.
%%-----------------------------------------------------------------
%% -> ok | {big, CurSize} | throw(Error)
-do_change_size(L, NewSize) when L#log.type =:= halt ->
+do_change_size(#log{type = halt}=L, NewSize) ->
Halt = L#log.extra,
CurB = Halt#halt.curB,
NewLog = L#log{extra = Halt#halt{size = NewSize}},
@@ -1366,7 +1339,7 @@ do_change_size(L, NewSize) when L#log.type =:= halt ->
true ->
{big, CurB}
end;
-do_change_size(L, NewSize) when L#log.type =:= wrap ->
+do_change_size(#log{type = wrap}=L, NewSize) ->
#log{extra = Extra, version = Version} = L,
{ok, Handle} = disk_log_1:change_size_wrap(Extra, NewSize, Version),
erase(is_full),
@@ -1381,7 +1354,7 @@ check_head({head_func, {M, F, A}}, _Format) when is_atom(M),
is_list(A) ->
{ok, {M, F, A}};
check_head({head, Head}, external) ->
- case catch check_bytes(Head) of
+ case catch ensure_binary(Head) of
{'EXIT', _} ->
{error, {badarg, head}};
_ ->
@@ -1432,57 +1405,44 @@ do_inc_wrap_file(L) ->
%%-----------------------------------------------------------------
%% -> {ok, Reply, log(), Cnt} | Error
%% Note: the header is always written, even if the log size is too small.
-do_open2(L, #arg{type = halt, format = internal, name = Name,
- file = FName, repair = Repair, size = Size, mode = Mode}) ->
- case catch disk_log_1:int_open(FName, Repair, Mode, L#log.head) of
+do_open2(halt, internal, Name, FName, Repair, Size, Mode, Head, _V) ->
+ case catch disk_log_1:int_open(FName, Repair, Mode, Head) of
{ok, {_Alloc, FdC, {NoItems, _NoBytes}, FileSize}} ->
Halt = #halt{fdc = FdC, curB = FileSize, size = Size},
- {ok, {ok, Name}, L#log{format_type = halt_int, extra = Halt},
- NoItems};
+ {ok, {ok, Name}, Halt, halt_int, NoItems};
{repaired, FdC, Rec, Bad, FileSize} ->
Halt = #halt{fdc = FdC, curB = FileSize, size = Size},
{ok, {repaired, Name, {recovered, Rec}, {badbytes, Bad}},
- L#log{format_type = halt_int, extra = Halt},
- Rec};
+ Halt, halt_int, Rec};
Error ->
Error
end;
-do_open2(L, #arg{type = wrap, format = internal, size = {MaxB, MaxF},
- name = Name, repair = Repair, file = FName, mode = Mode,
- version = V}) ->
+do_open2(wrap, internal, Name, FName, Repair, Size, Mode, Head, V) ->
+ {MaxB, MaxF} = Size,
case catch
- disk_log_1:mf_int_open(FName, MaxB, MaxF, Repair, Mode, L#log.head, V) of
+ disk_log_1:mf_int_open(FName, MaxB, MaxF, Repair, Mode, Head, V) of
{ok, Handle, Cnt} ->
- {ok, {ok, Name}, L#log{type = wrap,
- format_type = wrap_int,
- extra = Handle}, Cnt};
+ {ok, {ok, Name}, Handle, wrap_int, Cnt};
{repaired, Handle, Rec, Bad, Cnt} ->
{ok, {repaired, Name, {recovered, Rec}, {badbytes, Bad}},
- L#log{type = wrap, format_type = wrap_int, extra = Handle}, Cnt};
+ Handle, wrap_int, Cnt};
Error ->
Error
end;
-do_open2(L, #arg{type = halt, format = external, file = FName, name = Name,
- size = Size, repair = Repair, mode = Mode}) ->
- case catch disk_log_1:ext_open(FName, Repair, Mode, L#log.head) of
+do_open2(halt, external, Name, FName, Repair, Size, Mode, Head, _V) ->
+ case catch disk_log_1:ext_open(FName, Repair, Mode, Head) of
{ok, {_Alloc, FdC, {NoItems, _NoBytes}, FileSize}} ->
Halt = #halt{fdc = FdC, curB = FileSize, size = Size},
- {ok, {ok, Name},
- L#log{format_type = halt_ext, format = external, extra = Halt},
- NoItems};
+ {ok, {ok, Name}, Halt, halt_ext, NoItems};
Error ->
Error
end;
-do_open2(L, #arg{type = wrap, format = external, size = {MaxB, MaxF},
- name = Name, file = FName, repair = Repair, mode = Mode,
- version = V}) ->
+do_open2(wrap, external, Name, FName, Repair, Size, Mode, Head, V) ->
+ {MaxB, MaxF} = Size,
case catch
- disk_log_1:mf_ext_open(FName, MaxB, MaxF, Repair, Mode, L#log.head, V) of
+ disk_log_1:mf_ext_open(FName, MaxB, MaxF, Repair, Mode, Head, V) of
{ok, Handle, Cnt} ->
- {ok, {ok, Name}, L#log{type = wrap,
- format_type = wrap_ext,
- extra = Handle,
- format = external}, Cnt};
+ {ok, {ok, Name}, Handle, wrap_ext, Cnt};
Error ->
Error
end.
@@ -1680,7 +1640,7 @@ do_block(Pid, QueueLogRecs, L) ->
link(Pid)
end.
-do_unblock(Pid, L, S) when L#log.blocked_by =:= Pid ->
+do_unblock(Pid, #log{blocked_by = Pid}=L, S) ->
do_unblock(L, S);
do_unblock(_Pid, _L, S) ->
S.
@@ -1698,10 +1658,13 @@ do_unblock(L, S) ->
-spec do_log(#log{}, [binary()]) -> integer() | {'error', _, integer()}.
-do_log(L, B) when L#log.type =:= halt ->
+do_log(L, B) ->
+ do_log(L, B, iolist_size(B)).
+
+do_log(#log{type = halt}=L, B, BSz) ->
#log{format = Format, extra = Halt} = L,
#halt{curB = CurSize, size = Sz} = Halt,
- {Bs, BSize} = bsize(B, Format),
+ {Bs, BSize} = logl(B, Format, BSz),
case get(is_full) of
true ->
{error, {error, {full, L#log.name}}, 0};
@@ -1710,7 +1673,7 @@ do_log(L, B) when L#log.type =:= halt ->
undefined ->
halt_write_full(L, B, Format, 0)
end;
-do_log(L, B) when L#log.format_type =:= wrap_int ->
+do_log(#log{format_type = wrap_int}=L, B, _BSz) ->
case disk_log_1:mf_int_log(L#log.extra, B, L#log.head) of
{ok, Handle, Logged, Lost, Wraps} ->
notify_owners_wrap(Wraps),
@@ -1723,7 +1686,7 @@ do_log(L, B) when L#log.format_type =:= wrap_int ->
put(log, L#log{extra = Handle}),
{error, Error, Logged - Lost}
end;
-do_log(L, B) when L#log.format_type =:= wrap_ext ->
+do_log(#log{format_type = wrap_ext}=L, B, _BSz) ->
case disk_log_1:mf_ext_log(L#log.extra, B, L#log.head) of
{ok, Handle, Logged, Lost, Wraps} ->
notify_owners_wrap(Wraps),
@@ -1737,17 +1700,16 @@ do_log(L, B) when L#log.format_type =:= wrap_ext ->
{error, Error, Logged - Lost}
end.
-bsize(B, external) ->
- {B, xsz(B, 0)};
-bsize(B, internal) ->
+logl(B, external, undefined) ->
+ {B, iolist_size(B)};
+logl(B, external, Sz) ->
+ {B, Sz};
+logl(B, internal, _Sz) ->
disk_log_1:logl(B).
-xsz([B|T], Sz) -> xsz(T, byte_size(B) + Sz);
-xsz([], Sz) -> Sz.
-
halt_write_full(L, [Bin | Bins], Format, N) ->
B = [Bin],
- {Bs, BSize} = bsize(B, Format),
+ {Bs, BSize} = logl(B, Format, undefined),
Halt = L#log.extra,
#halt{curB = CurSize, size = Sz} = Halt,
if
@@ -1799,7 +1761,7 @@ do_sync(#log{type = wrap, extra = Handle} = Log) ->
Reply.
%% -> ok | Error | throw(Error)
-do_trunc(L, Head) when L#log.type =:= halt ->
+do_trunc(#log{type = halt}=L, Head) ->
#log{filename = FName, extra = Halt} = L,
FdC = Halt#halt.fdc,
{Reply1, FdC2} =
@@ -1828,7 +1790,7 @@ do_trunc(L, Head) when L#log.type =:= halt ->
end,
put(log, L#log{extra = NewHalt}),
Reply;
-do_trunc(L, Head) when L#log.type =:= wrap ->
+do_trunc(#log{type = wrap}=L, Head) ->
Handle = L#log.extra,
OldHead = L#log.head,
{MaxB, MaxF} = disk_log_1:get_wrap_size(Handle),
@@ -2022,8 +1984,7 @@ notify_owners(Note) ->
(_) -> ok
end, L#log.owners).
-cache_error(S, Pids) ->
- Error = S#state.cache_error,
+cache_error(#state{cache_error=Error}=S, Pids) ->
ok = replies(Pids, Error),
state_err(S#state{cache_error = ok}, Error).
diff --git a/lib/kernel/src/disk_log.hrl b/lib/kernel/src/disk_log.hrl
index 6c0aea070f..a362881f40 100644
--- a/lib/kernel/src/disk_log.hrl
+++ b/lib/kernel/src/disk_log.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -39,6 +39,7 @@
-define(MAX_FILES, 65000).
-define(MAX_BYTES, ((1 bsl 64) - 1)).
-define(MAX_CHUNK_SIZE, 65536).
+-define(MAX_FWRITE_CACHE, 65536).
%% Object defines
-define(LOGMAGIC, <<1,2,3,4>>).
@@ -54,11 +55,10 @@
%% Types -- alphabetically
%%------------------------------------------------------------------------
--type dlog_byte() :: [dlog_byte()] | byte().
-type dlog_format() :: 'external' | 'internal'.
-type dlog_format_type() :: 'halt_ext' | 'halt_int' | 'wrap_ext' | 'wrap_int'.
-type dlog_head() :: 'none' | {'ok', binary()} | mfa().
--type dlog_head_opt() :: none | term() | binary() | [dlog_byte()].
+-type dlog_head_opt() :: none | term() | iodata().
-type log() :: term(). % XXX: refine
-type dlog_mode() :: 'read_only' | 'read_write'.
-type dlog_name() :: atom() | string().
@@ -69,13 +69,14 @@
| {file, FileName :: file:filename()}
| {linkto, LinkTo :: none | pid()}
| {repair, Repair :: true | false | truncate}
- | {type, Type :: dlog_type}
+ | {type, Type :: dlog_type()}
| {format, Format :: dlog_format()}
| {size, Size :: dlog_size()}
| {distributed, Nodes :: [node()]}
| {notify, boolean()}
| {head, Head :: dlog_head_opt()}
| {head_func, MFA :: {atom(), atom(), list()}}
+ | {quiet, boolean()}
| {mode, Mode :: dlog_mode()}.
-type dlog_options() :: [dlog_option()].
-type dlog_repair() :: 'truncate' | boolean().
@@ -102,6 +103,7 @@
head = none,
mode = read_write :: dlog_mode(),
notify = false :: boolean(),
+ quiet = false :: boolean(),
options = [] :: dlog_options()}).
-record(cache, %% Cache for logged terms (per file descriptor).
@@ -152,8 +154,8 @@
users = 0 :: non_neg_integer(), %% non-linked users
filename :: file:filename(), %% real name of the file
owners = [] :: [{pid(), boolean()}],%% [{pid, notify}]
- type = halt :: dlog_type(),
- format = internal :: dlog_format(),
+ type :: dlog_type(),
+ format :: dlog_format(),
format_type :: dlog_format_type(),
head = none, %% none | {head, H} | {M,F,A}
%% called when wraplog wraps
diff --git a/lib/kernel/src/disk_log_1.erl b/lib/kernel/src/disk_log_1.erl
index 2e61363aa6..41ef33c613 100644
--- a/lib/kernel/src/disk_log_1.erl
+++ b/lib/kernel/src/disk_log_1.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -37,6 +37,7 @@
-export([get_wrap_size/1]).
-export([is_head/1]).
-export([position/3, truncate_at/3, fwrite/4, fclose/2]).
+-export([set_quiet/1, is_quiet/0]).
-compile({inline,[{scan_f2,7}]}).
@@ -500,7 +501,10 @@ lh(H, _F) -> % cannot happen
repair(In, File) ->
FSz = file_size(File),
- error_logger:info_msg("disk_log: repairing ~tp ...\n", [File]),
+ case is_quiet() of
+ true -> ok;
+ _ -> error_logger:info_msg("disk_log: repairing ~tp ...\n", [File])
+ end,
Tmp = add_ext(File, "TMP"),
{ok, {_Alloc, Out, {0, _}, _FileSize}} = new_int_file(Tmp, none),
scan_f_read(<<>>, In, Out, File, FSz, Tmp, ?MAX_CHUNK_SIZE, 0, 0).
@@ -626,7 +630,7 @@ is_head(Bin) when is_binary(Bin) ->
%% Writes MaxB bytes on each file.
%% Creates a file called Name.idx in the Dir. This
%% file contains the last written FileName as one byte, and
-%% follwing that, the sizes of each file (size 0 number of items).
+%% following that, the sizes of each file (size 0 number of items).
%% On startup, this file is read, and the next available
%% filename is used as first log file.
%% Reports can be browsed with Report Browser Tool (rb), or
@@ -769,8 +773,11 @@ mf_int_chunk(Handle, {FileNo, Pos}, Bin, N) ->
NFileNo = inc(FileNo, Handle#handle.maxF),
case catch int_open(FName, true, read_only, any) of
{error, _Reason} ->
- error_logger:info_msg("disk_log: chunk error. File ~tp missing.\n\n",
- [FName]),
+ case is_quiet() of
+ true -> ok;
+ _ -> error_logger:info_msg("disk_log: chunk error. File ~tp missing.\n\n",
+ [FName])
+ end,
mf_int_chunk(Handle, {NFileNo, 0}, [], N);
{ok, {_Alloc, FdC, _HeadSize, _FileSize}} ->
case chunk(FdC, FName, Pos, Bin, N) of
@@ -797,9 +804,12 @@ mf_int_chunk_read_only(Handle, {FileNo, Pos}, Bin, N) ->
NFileNo = inc(FileNo, Handle#handle.maxF),
case catch int_open(FName, true, read_only, any) of
{error, _Reason} ->
- error_logger:info_msg("disk_log: chunk error. File ~tp missing.\n\n",
- [FName]),
- mf_int_chunk_read_only(Handle, {NFileNo, 0}, [], N);
+ case is_quiet() of
+ true -> ok;
+ _ -> error_logger:info_msg("disk_log: chunk error. File ~tp missing.\n\n",
+ [FName])
+ end,
+ mf_int_chunk_read_only(Handle, {NFileNo, 0}, [], N);
{ok, {_Alloc, FdC, _HeadSize, _FileSize}} ->
case do_chunk_read_only(FdC, FName, Pos, Bin, N) of
{NewFdC, eof} ->
@@ -1416,24 +1426,36 @@ open_truncate(FileName) ->
%%% Functions that access files, and throw on error.
--define(MAX, 16384). % bytes
-define(TIMEOUT, 2000). % ms
%% -> {Reply, cache()}; Reply = ok | Error
-fwrite(#cache{c = []} = FdC, _FN, B, Size) ->
+fwrite(FdC, _FN, _B, 0) ->
+ {ok, FdC}; % avoid starting a timer for empty writes
+fwrite(#cache{fd = Fd, c = C, sz = Sz} = FdC, FileName, B, Size) ->
+ Sz1 = Sz + Size,
+ C1 = cache_append(C, B),
+ if Sz1 > ?MAX_FWRITE_CACHE ->
+ write_cache(Fd, FileName, C1);
+ true ->
+ maybe_start_timer(C),
+ {ok, FdC#cache{sz = Sz1, c = C1}}
+ end.
+
+cache_append([], B) -> B;
+cache_append(C, B) -> [C | B].
+
+%% if the cache was empty, start timer (unless it's already running)
+maybe_start_timer([]) ->
case get(write_cache_timer_is_running) of
- true ->
+ true ->
ok;
- _ ->
+ _ ->
put(write_cache_timer_is_running, true),
erlang:send_after(?TIMEOUT, self(), {self(), write_cache}),
ok
- end,
- {ok, FdC#cache{sz = Size, c = B}};
-fwrite(#cache{sz = Sz, c = C} = FdC, _FN, B, Size) when Sz < ?MAX ->
- {ok, FdC#cache{sz = Sz+Size, c = [C | B]}};
-fwrite(#cache{fd = Fd, c = C}, FileName, B, _Size) ->
- write_cache(Fd, FileName, [C | B]).
+ end;
+maybe_start_timer(_C) ->
+ ok.
fwrite_header(Fd, B, Size) ->
{ok, #cache{fd = Fd, sz = Size, c = B}}.
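
%% A minimal sketch (not part of the diff) of the caching policy that
%% fwrite/4 above implements, written as a pure decision function.
%% 65536 is ?MAX_FWRITE_CACHE and the flush timer fires after ?TIMEOUT
%% (2000 ms); the timer is only armed when data first lands in an empty
%% cache and it is not already running.
fwrite_policy(_CachedSz, 0) ->
    drop;                          % empty writes never touch the cache
fwrite_policy(CachedSz, Sz) when CachedSz + Sz > 65536 ->
    flush_now;                     % cache would exceed ?MAX_FWRITE_CACHE
fwrite_policy(0, _Sz) ->
    cache_and_arm_timer;           % first bytes into an empty cache
fwrite_policy(_CachedSz, _Sz) ->
    cache.                         % cache already non-empty; a flush timer was armed earlier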
@@ -1537,6 +1559,12 @@ fclose(#cache{fd = Fd, c = C}, FileName) ->
_ = write_cache_close(Fd, FileName, C),
file:close(Fd).
+set_quiet(Bool) ->
+ put(quiet, Bool).
+
+is_quiet() ->
+ get(quiet) =:= true.
+
%% -> {Reply, #cache{}}; Reply = ok | Error
write_cache(Fd, _FileName, []) ->
{ok, #cache{fd = Fd}};
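
%% Hedged usage sketch for the new quiet mode: together with the
%% {quiet, boolean()} option added to dlog_option() above, set_quiet/1
%% and is_quiet/0 suppress the "repairing" and "file missing" info
%% reports. The plumbing from disk_log:open/1 down to set_quiet/1 is
%% outside this hunk, so the option list below is an assumed example
%% (log name and file are hypothetical).
{ok, my_log} =
    disk_log:open([{name, my_log},
                   {file, "/tmp/my_log.LOG"},
                   {type, wrap},
                   {size, {65536, 4}},
                   {quiet, true}]).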
diff --git a/lib/kernel/src/disk_log_server.erl b/lib/kernel/src/disk_log_server.erl
index 735f1e5ceb..78c15d0ad8 100644
--- a/lib/kernel/src/disk_log_server.erl
+++ b/lib/kernel/src/disk_log_server.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/disk_log_sup.erl b/lib/kernel/src/disk_log_sup.erl
index c09b3f94d1..db5e3ecb3a 100644
--- a/lib/kernel/src/disk_log_sup.erl
+++ b/lib/kernel/src/disk_log_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/dist_ac.erl b/lib/kernel/src/dist_ac.erl
index f649f33a53..2a5cf0ba92 100644
--- a/lib/kernel/src/dist_ac.erl
+++ b/lib/kernel/src/dist_ac.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -123,7 +123,7 @@ load_application(AppName, DistNodes) ->
gen_server:call(?DIST_AC, {load_application, AppName, DistNodes}, infinity).
takeover_application(AppName, RestartType) ->
- case validRestartType(RestartType) of
+ case valid_restart_type(RestartType) of
true ->
wait_for_sync_dacs(),
Nodes = get_nodes(AppName),
@@ -1514,10 +1514,10 @@ dist_del_node(Appls, Node) ->
Appl#appl{run = NRun}
end, Appls).
-validRestartType(permanent) -> true;
-validRestartType(temporary) -> true;
-validRestartType(transient) -> true;
-validRestartType(_RestartType) -> false.
+valid_restart_type(permanent) -> true;
+valid_restart_type(temporary) -> true;
+valid_restart_type(transient) -> true;
+valid_restart_type(_RestartType) -> false.
dist_mismatch(AppName, Node) ->
error_msg("Distribution mismatch for application \"~p\" on nodes ~p and ~p~n",
diff --git a/lib/kernel/src/dist_util.erl b/lib/kernel/src/dist_util.erl
index c9fc26d62c..ecc022b28d 100644
--- a/lib/kernel/src/dist_util.erl
+++ b/lib/kernel/src/dist_util.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -27,8 +27,10 @@
%%-compile(export_all).
-export([handshake_we_started/1, handshake_other_started/1,
+ strict_order_flags/0,
start_timer/1, setup_timer/2,
reset_timer/1, cancel_timer/1,
+ is_node_name/1, split_node/1, is_allowed/2,
shutdown/3, shutdown/4]).
-import(error_logger,[error_msg/2]).
@@ -74,22 +76,52 @@
ticked = 0
}).
-remove_flag(Flag, Flags) ->
- case Flags band Flag of
- 0 ->
- Flags;
- _ ->
- Flags - Flag
- end.
+dflag2str(?DFLAG_PUBLISHED) ->
+ "PUBLISHED";
+dflag2str(?DFLAG_ATOM_CACHE) ->
+ "ATOM_CACHE";
+dflag2str(?DFLAG_EXTENDED_REFERENCES) ->
+ "EXTENDED_REFERENCES";
+dflag2str(?DFLAG_DIST_MONITOR) ->
+ "DIST_MONITOR";
+dflag2str(?DFLAG_FUN_TAGS) ->
+ "FUN_TAGS";
+dflag2str(?DFLAG_DIST_MONITOR_NAME) ->
+ "DIST_MONITOR_NAME";
+dflag2str(?DFLAG_HIDDEN_ATOM_CACHE) ->
+ "HIDDEN_ATOM_CACHE";
+dflag2str(?DFLAG_NEW_FUN_TAGS) ->
+ "NEW_FUN_TAGS";
+dflag2str(?DFLAG_EXTENDED_PIDS_PORTS) ->
+ "EXTENDED_PIDS_PORTS";
+dflag2str(?DFLAG_EXPORT_PTR_TAG) ->
+ "EXPORT_PTR_TAG";
+dflag2str(?DFLAG_BIT_BINARIES) ->
+ "BIT_BINARIES";
+dflag2str(?DFLAG_NEW_FLOATS) ->
+ "NEW_FLOATS";
+dflag2str(?DFLAG_UNICODE_IO) ->
+ "UNICODE_IO";
+dflag2str(?DFLAG_DIST_HDR_ATOM_CACHE) ->
+ "DIST_HDR_ATOM_CACHE";
+dflag2str(?DFLAG_SMALL_ATOM_TAGS) ->
+ "SMALL_ATOM_TAGS";
+dflag2str(?DFLAG_UTF8_ATOMS) ->
+ "UTF8_ATOMS";
+dflag2str(?DFLAG_MAP_TAG) ->
+ "MAP_TAG";
+dflag2str(?DFLAG_BIG_CREATION) ->
+ "BIG_CREATION";
+dflag2str(?DFLAG_SEND_SENDER) ->
+ "SEND_SENDER";
+dflag2str(?DFLAG_BIG_SEQTRACE_LABELS) ->
+ "BIG_SEQTRACE_LABELS";
+dflag2str(_) ->
+ "UNKNOWN".
+
adjust_flags(ThisFlags, OtherFlags) ->
- case (?DFLAG_PUBLISHED band ThisFlags) band OtherFlags of
- 0 ->
- {remove_flag(?DFLAG_PUBLISHED, ThisFlags),
- remove_flag(?DFLAG_PUBLISHED, OtherFlags)};
- _ ->
- {ThisFlags, OtherFlags}
- end.
+ ThisFlags band OtherFlags.
publish_flag(hidden, _) ->
0;
@@ -101,37 +133,56 @@ publish_flag(_, OtherNode) ->
0
end.
-make_this_flags(RequestType, OtherNode) ->
- publish_flag(RequestType, OtherNode) bor
- %% The parenthesis below makes the compiler generate better code.
- (?DFLAG_EXPORT_PTR_TAG bor
- ?DFLAG_EXTENDED_PIDS_PORTS bor
- ?DFLAG_EXTENDED_REFERENCES bor
- ?DFLAG_DIST_MONITOR bor
- ?DFLAG_FUN_TAGS bor
- ?DFLAG_DIST_MONITOR_NAME bor
- ?DFLAG_HIDDEN_ATOM_CACHE bor
- ?DFLAG_NEW_FUN_TAGS bor
- ?DFLAG_BIT_BINARIES bor
- ?DFLAG_NEW_FLOATS bor
- ?DFLAG_UNICODE_IO bor
- ?DFLAG_DIST_HDR_ATOM_CACHE bor
- ?DFLAG_SMALL_ATOM_TAGS bor
- ?DFLAG_UTF8_ATOMS bor
- ?DFLAG_MAP_TAG).
-
-handshake_other_started(#hs_data{request_type=ReqType}=HSData0) ->
+
+%% Sync with dist.c
+-record(erts_dflags, {
+ default, % flags erts prefers
+ mandatory, % flags erts needs
+ addable, % flags local dist implementation is allowed to add
+ rejectable, % flags local dist implementation is allowed to reject
+ strict_order % flags for features needing strict order delivery
+}).
+
+-spec strict_order_flags() -> integer().
+strict_order_flags() ->
+ EDF = erts_internal:get_dflags(),
+ EDF#erts_dflags.strict_order.
+
+make_this_flags(RequestType, AddFlags, RejectFlags, OtherNode,
+ #erts_dflags{}=EDF) ->
+ case RejectFlags band (bnot EDF#erts_dflags.rejectable) of
+ 0 -> ok;
+ Rerror -> exit({"Rejecting non rejectable flags", Rerror})
+ end,
+ case AddFlags band (bnot EDF#erts_dflags.addable) of
+ 0 -> ok;
+ Aerror -> exit({"Adding non addable flags", Aerror})
+ end,
+ Flgs0 = EDF#erts_dflags.default,
+ Flgs1 = Flgs0 bor publish_flag(RequestType, OtherNode),
+ Flgs2 = Flgs1 bor AddFlags,
+ Flgs2 band (bnot RejectFlags).
+
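%% Rough sketch (flag values are hypothetical, publish_flag/2 omitted)
%% of the flag arithmetic in make_this_flags/5 and adjust_flags/2 below:
%% the implementation's AddFlags are or:ed into the emulator defaults,
%% its RejectFlags are masked out, and the handshake then keeps only
%% the flags both sides offer.
combine_flags_demo(Default, AddFlags, RejectFlags, OtherFlags) ->
    ThisFlags = (Default bor AddFlags) band (bnot RejectFlags),
    ThisFlags band OtherFlags.
%% combine_flags_demo(2#0111, 2#1000, 2#0001, 2#1110) -> 2#1110 (14).
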
+handshake_other_started(#hs_data{request_type=ReqType,
+ add_flags=AddFlgs0,
+ reject_flags=RejFlgs0,
+ require_flags=ReqFlgs0}=HSData0) ->
+ AddFlgs = convert_flags(AddFlgs0),
+ RejFlgs = convert_flags(RejFlgs0),
+ ReqFlgs = convert_flags(ReqFlgs0),
{PreOtherFlags,Node,Version} = recv_name(HSData0),
- PreThisFlags = make_this_flags(ReqType, Node),
- {ThisFlags, OtherFlags} = adjust_flags(PreThisFlags,
- PreOtherFlags),
- HSData = HSData0#hs_data{this_flags=ThisFlags,
- other_flags=OtherFlags,
+ EDF = erts_internal:get_dflags(),
+ PreThisFlags = make_this_flags(ReqType, AddFlgs, RejFlgs, Node, EDF),
+ ChosenFlags = adjust_flags(PreThisFlags, PreOtherFlags),
+ HSData = HSData0#hs_data{this_flags=ChosenFlags,
+ other_flags=ChosenFlags,
other_version=Version,
other_node=Node,
- other_started=true},
- check_dflag_xnc(HSData),
- is_allowed(HSData),
+ other_started=true,
+ add_flags=AddFlgs,
+ reject_flags=RejFlgs,
+ require_flags=ReqFlgs},
+ check_dflags(HSData, EDF),
?debug({"MD5 connection from ~p (V~p)~n",
[Node, HSData#hs_data.other_version]}),
mark_pending(HSData),
@@ -142,48 +193,27 @@ handshake_other_started(#hs_data{request_type=ReqType}=HSData0) ->
ChallengeB = recv_challenge_reply(HSData, ChallengeA, MyCookie),
send_challenge_ack(HSData, gen_digest(ChallengeB, HisCookie)),
?debug({dist_util, self(), accept_connection, Node}),
- connection(HSData).
+ connection(HSData);
+
+handshake_other_started(OldHsData) when element(1,OldHsData) =:= hs_data ->
+ handshake_other_started(convert_old_hsdata(OldHsData)).
-%%
-%% check if connecting node is allowed to connect
-%% with allow-node-scheme
-%%
-is_allowed(#hs_data{other_node = Node,
- allowed = Allowed} = HSData) ->
- case lists:member(Node, Allowed) of
- false when Allowed =/= [] ->
- send_status(HSData, not_allowed),
- error_msg("** Connection attempt from "
- "disallowed node ~w ** ~n", [Node]),
- ?shutdown(Node);
- _ -> true
- end.
%%
-%% Check that both nodes can handle the same types of extended
-%% node containers. If they can not, abort the connection.
+%% Check mandatory flags...
%%
-check_dflag_xnc(#hs_data{other_node = Node,
- other_flags = OtherFlags,
- other_started = OtherStarted} = HSData) ->
- XRFlg = ?DFLAG_EXTENDED_REFERENCES,
- XPPFlg = case erlang:system_info(compat_rel) of
- R when R >= 10 ->
- ?DFLAG_EXTENDED_PIDS_PORTS;
- _ ->
- 0
- end,
- ReqXncFlags = XRFlg bor XPPFlg,
- case OtherFlags band ReqXncFlags =:= ReqXncFlags of
- true ->
- ok;
- false ->
- What = case {OtherFlags band XRFlg =:= XRFlg,
- OtherFlags band XPPFlg =:= XPPFlg} of
- {false, false} -> "references, pids and ports";
- {true, false} -> "pids and ports";
- {false, true} -> "references"
- end,
+check_dflags(#hs_data{other_node = Node,
+ other_flags = OtherFlags,
+ other_started = OtherStarted,
+ require_flags = RequiredFlags} = HSData,
+ #erts_dflags{}=EDF) ->
+
+ Mandatory = (EDF#erts_dflags.mandatory bor RequiredFlags),
+ Missing = check_mandatory(Mandatory, OtherFlags, []),
+ case Missing of
+ [] ->
+ ok;
+ _ ->
case OtherStarted of
true ->
send_status(HSData, not_allowed),
@@ -194,11 +224,26 @@ check_dflag_xnc(#hs_data{other_node = Node,
How = "aborted"
end,
error_msg("** ~w: Connection attempt ~s node ~w ~s "
- "since it cannot handle extended ~s. "
- "**~n", [node(), Dir, Node, How, What]),
- ?shutdown(Node)
+ "since it cannot handle ~p."
+ "**~n", [node(), Dir, Node, How, Missing]),
+ ?shutdown2(Node, {check_dflags_failed, Missing})
end.
+check_mandatory(0, _OtherFlags, Missing) ->
+ Missing;
+check_mandatory(Mandatory, OtherFlags, Missing) ->
+ Left = Mandatory band (Mandatory - 1), % clear lowest set bit
+ DFlag = Mandatory bxor Left, % only lowest set bit
+ NewMissing = case DFlag band OtherFlags of
+ 0 ->
+ %% Mandatory and missing...
+ [dflag2str(DFlag) | Missing];
+ _ ->
+ %% Mandatory and present...
+ Missing
+ end,
+ check_mandatory(Left, OtherFlags, NewMissing).
+
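%% Worked illustration (hypothetical flag word) of the bit arithmetic in
%% check_mandatory/3 above: clearing the lowest set bit and xor:ing it
%% back isolates exactly one distribution flag per recursion step.
lowest_dflag_demo() ->
    Mandatory = 2#1010,
    Left  = Mandatory band (Mandatory - 1),  % 2#1000, lowest set bit cleared
    DFlag = Mandatory bxor Left,             % 2#0010, only the lowest set bit
    {DFlag, Left}.                           % {2, 8}
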
%% No nodedown will be sent if we fail before this process has
%% succeeded to mark the node as pending.
@@ -312,24 +357,48 @@ flush_down() ->
end.
handshake_we_started(#hs_data{request_type=ReqType,
- other_node=Node}=PreHSData) ->
- PreThisFlags = make_this_flags(ReqType, Node),
- HSData = PreHSData#hs_data{this_flags=PreThisFlags},
+ other_node=Node,
+ add_flags=AddFlgs0,
+ reject_flags=RejFlgs0,
+ require_flags=ReqFlgs0}=PreHSData) ->
+ AddFlgs = convert_flags(AddFlgs0),
+ RejFlgs = convert_flags(RejFlgs0),
+ ReqFlgs = convert_flags(ReqFlgs0),
+ EDF = erts_internal:get_dflags(),
+ PreThisFlags = make_this_flags(ReqType, AddFlgs, RejFlgs, Node, EDF),
+ HSData = PreHSData#hs_data{this_flags = PreThisFlags,
+ add_flags = AddFlgs,
+ reject_flags = RejFlgs,
+ require_flags = ReqFlgs},
send_name(HSData),
recv_status(HSData),
{PreOtherFlags,ChallengeA} = recv_challenge(HSData),
- {ThisFlags,OtherFlags} = adjust_flags(PreThisFlags, PreOtherFlags),
- NewHSData = HSData#hs_data{this_flags = ThisFlags,
- other_flags = OtherFlags,
+ ChosenFlags = adjust_flags(PreThisFlags, PreOtherFlags),
+ NewHSData = HSData#hs_data{this_flags = ChosenFlags,
+ other_flags = ChosenFlags,
other_started = false},
- check_dflag_xnc(NewHSData),
+ check_dflags(NewHSData, EDF),
MyChallenge = gen_challenge(),
{MyCookie,HisCookie} = get_cookies(Node),
send_challenge_reply(NewHSData,MyChallenge,
gen_digest(ChallengeA,HisCookie)),
reset_timer(NewHSData#hs_data.timer),
recv_challenge_ack(NewHSData, MyChallenge, MyCookie),
- connection(NewHSData).
+ connection(NewHSData);
+
+handshake_we_started(OldHsData) when element(1,OldHsData) =:= hs_data ->
+ handshake_we_started(convert_old_hsdata(OldHsData)).
+
+convert_old_hsdata(OldHsData) ->
+ OHSDL = tuple_to_list(OldHsData),
+ NoMissing = tuple_size(#hs_data{}) - tuple_size(OldHsData),
+ true = NoMissing > 0,
+ list_to_tuple(OHSDL ++ lists:duplicate(NoMissing, undefined)).
+
+convert_flags(Flags) when is_integer(Flags) ->
+ Flags;
+convert_flags(_Undefined) ->
+ 0.
%% --------------------------------------------------------------
%% The connection has been established.
@@ -344,20 +413,25 @@ connection(#hs_data{other_node = Node,
PType = publish_type(HSData#hs_data.other_flags),
case FPreNodeup(Socket) of
ok ->
- do_setnode(HSData), % Succeeds or exits the process.
+ DHandle = do_setnode(HSData), % Succeeds or exits the process.
Address = FAddress(Socket,Node),
mark_nodeup(HSData,Address),
case FPostNodeup(Socket) of
ok ->
- con_loop(HSData#hs_data.kernel_pid,
- Node,
- Socket,
- Address,
- HSData#hs_data.this_node,
- PType,
- #tick{},
- HSData#hs_data.mf_tick,
- HSData#hs_data.mf_getstat);
+ case HSData#hs_data.f_handshake_complete of
+ undefined -> ok;
+ HsComplete -> HsComplete(Socket, Node, DHandle)
+ end,
+ con_loop({HSData#hs_data.kernel_pid,
+ Node,
+ Socket,
+ PType,
+ DHandle,
+ HSData#hs_data.mf_tick,
+ HSData#hs_data.mf_getstat,
+ HSData#hs_data.mf_setopts,
+ HSData#hs_data.mf_getopts},
+ #tick{});
_ ->
?shutdown2(Node, connection_setup_failed)
end;
@@ -410,18 +484,16 @@ do_setnode(#hs_data{other_node = Node, socket = Socket,
[Node, Port, {publish_type(Flags),
'(', Flags, ')',
Version}]),
- case (catch
- erlang:setnode(Node, Port,
- {Flags, Version, '', ''})) of
- {'EXIT', {system_limit, _}} ->
+ try
+ erlang:setnode(Node, Port, {Flags, Version, '', ''})
+ catch
+ error:system_limit ->
error_msg("** Distribution system limit reached, "
"no table space left for node ~w ** ~n",
[Node]),
?shutdown(Node);
- {'EXIT', Other} ->
- exit(Other);
- _Else ->
- ok
+ error:Other:Stacktrace ->
+ exit({Other, Stacktrace})
end;
_ ->
error_msg("** Distribution connection error, "
@@ -453,29 +525,32 @@ mark_nodeup(#hs_data{kernel_pid = Kernel,
?shutdown(Node)
end.
-con_loop(Kernel, Node, Socket, TcpAddress,
- MyNode, Type, Tick, MFTick, MFGetstat) ->
+getstat(DHandle, _Socket, undefined) ->
+ erlang:dist_get_stat(DHandle);
+getstat(_DHandle, Socket, MFGetstat) ->
+ MFGetstat(Socket).
+
+con_loop({Kernel, Node, Socket, Type, DHandle, MFTick, MFGetstat,
+ MFSetOpts, MFGetOpts}=ConData,
+ Tick) ->
receive
{tcp_closed, Socket} ->
?shutdown2(Node, connection_closed);
{Kernel, disconnect} ->
?shutdown2(Node, disconnected);
{Kernel, aux_tick} ->
- case MFGetstat(Socket) of
+ case getstat(DHandle, Socket, MFGetstat) of
{ok, _, _, PendWrite} ->
- send_tick(Socket, PendWrite, MFTick);
+ send_aux_tick(Type, Socket, PendWrite, MFTick);
_ ->
ignore_it
end,
- con_loop(Kernel, Node, Socket, TcpAddress, MyNode, Type,
- Tick, MFTick, MFGetstat);
+ con_loop(ConData, Tick);
{Kernel, tick} ->
- case send_tick(Socket, Tick, Type,
+ case send_tick(DHandle, Socket, Tick, Type,
MFTick, MFGetstat) of
{ok, NewTick} ->
- con_loop(Kernel, Node, Socket, TcpAddress,
- MyNode, Type, NewTick, MFTick,
- MFGetstat);
+ con_loop(ConData, NewTick);
{error, not_responding} ->
error_msg("** Node ~p not responding **~n"
"** Removing (timedout) connection **~n",
@@ -485,16 +560,27 @@ con_loop(Kernel, Node, Socket, TcpAddress,
?shutdown2(Node, send_net_tick_failed)
end;
{From, get_status} ->
- case MFGetstat(Socket) of
+ case getstat(DHandle, Socket, MFGetstat) of
{ok, Read, Write, _} ->
From ! {self(), get_status, {ok, Read, Write}},
- con_loop(Kernel, Node, Socket, TcpAddress,
- MyNode,
- Type, Tick,
- MFTick, MFGetstat);
+ con_loop(ConData, Tick);
_ ->
?shutdown2(Node, get_status_failed)
- end
+ end;
+ {From, Ref, {setopts, Opts}} ->
+ Ret = case MFSetOpts of
+ undefined -> {error, enotsup};
+ _ -> MFSetOpts(Socket, Opts)
+ end,
+ From ! {Ref, Ret},
+ con_loop(ConData, Tick);
+ {From, Ref, {getopts, Opts}} ->
+ Ret = case MFGetOpts of
+ undefined -> {error, enotsup};
+ _ -> MFGetOpts(Socket, Opts)
+ end,
+ From ! {Ref, Ret},
+ con_loop(ConData, Tick)
end.
@@ -541,19 +627,129 @@ send_challenge_ack(#hs_data{socket = Socket, f_send = FSend},
%% tcp_drv.c which used it to detect simultaneous connection
%% attempts).
%%
-recv_name(#hs_data{socket = Socket, f_recv = Recv}) ->
+recv_name(#hs_data{socket = Socket, f_recv = Recv} = HSData) ->
case Recv(Socket, 0, infinity) of
- {ok,Data} ->
- get_name(Data);
+ {ok,
+ [$n,VersionA, VersionB, Flag1, Flag2, Flag3, Flag4
+ | OtherNode] = Data} ->
+ case is_node_name(OtherNode) of
+ true ->
+ Flags = ?u32(Flag1, Flag2, Flag3, Flag4),
+ Version = ?u16(VersionA,VersionB),
+ is_allowed(HSData, Flags, OtherNode, Version);
+ false ->
+ ?shutdown(Data)
+ end;
_ ->
?shutdown(no_node)
end.
-get_name([$n,VersionA, VersionB, Flag1, Flag2, Flag3, Flag4 | OtherNode]) ->
- {?u32(Flag1, Flag2, Flag3, Flag4), list_to_atom(OtherNode),
- ?u16(VersionA,VersionB)};
-get_name(Data) ->
- ?shutdown(Data).
+is_node_name(OtherNodeName) ->
+ case string:split(OtherNodeName, "@", all) of
+ [Name,Host] ->
+ (not string:is_empty(Name))
+ andalso (not string:is_empty(Host));
+ _ ->
+ false
+ end.
+
+split_node(Node) ->
+ Split = string:split(listify(Node), "@", all),
+ case Split of
+ [Name,Host] ->
+ case string:is_empty(Name) of
+ true ->
+ Split;
+ false ->
+ case string:is_empty(Host) of
+ true ->
+ {name,Name};
+ false ->
+ {node,Name,Host}
+ end
+ end;
+ [Host] ->
+ case string:is_empty(Host) of
+ true ->
+ Split;
+ false ->
+ {host,Host}
+ end
+ end.
+
+%% Check if connecting node is allowed to connect
+%% with allow-node-scheme. An empty allowed list
+%% allows all nodes.
+%%
+is_allowed(#hs_data{allowed = []}, Flags, Node, Version) ->
+ {Flags,list_to_atom(Node),Version};
+is_allowed(#hs_data{allowed = Allowed} = HSData, Flags, Node, Version) ->
+ case is_allowed(Node, Allowed) of
+ true ->
+ {Flags,list_to_atom(Node),Version};
+ false ->
+ send_status(HSData#hs_data{other_node = Node}, not_allowed),
+ error_msg("** Connection attempt from "
+ "disallowed node ~s ** ~n", [Node]),
+ ?shutdown2(Node, {is_allowed, not_allowed})
+ end.
+
+%% The allowed list can contain node names, host names
+%% or names before '@', in atom or list form:
+%% [node@host.example.org, "host.example.org", "node@"].
+%% An empty allowed list allows no nodes.
+%%
+%% Allow a node that matches any entry in the allowed list.
+%% Also allow allowed entries as node to match, not from
+%% this module; here the node has to be a valid name.
+%%
+is_allowed(_Node, []) ->
+ false;
+is_allowed(Node, [Node|_Allowed]) ->
+ %% Just an optimization
+ true;
+is_allowed(Node, [AllowedNode|Allowed]) ->
+ case split_node(AllowedNode) of
+ {node,AllowedName,AllowedHost} ->
+ %% Allowed node name
+ case split_node(Node) of
+ {node,AllowedName,AllowedHost} ->
+ true;
+ _ ->
+ is_allowed(Node, Allowed)
+ end;
+ {host,AllowedHost} ->
+ %% Allowed host name
+ case split_node(Node) of
+ {node,_,AllowedHost} ->
+ %% Matching Host part
+ true;
+ {host,AllowedHost} ->
+ %% Host matches Host
+ true;
+ _ ->
+ is_allowed(Node, Allowed)
+ end;
+ {name,AllowedName} ->
+ %% Allowed name before '@'
+ case split_node(Node) of
+ {node,AllowedName,_} ->
+ %% Matching Name part
+ true;
+ {name,AllowedName} ->
+ %% Name matches Name
+ true;
+ _ ->
+ is_allowed(Node, Allowed)
+ end;
+ _ ->
+ is_allowed(Node, Allowed)
+ end.
+
+listify(Atom) when is_atom(Atom) ->
+ atom_to_list(Atom);
+listify(Node) when is_list(Node) ->
+ Node.
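
%% Hedged usage sketch for the allow-list matching above; node and host
%% names are hypothetical. Entries may be atoms or strings, and
%% split_node/1 classifies each as a full node, a host-only or a
%% name-only entry:
allow_list_demo() ->
    true  = is_allowed("foo@host.example.org", ["host.example.org"]),   % host entry
    true  = is_allowed("foo@host.example.org", ["foo@"]),               % name entry
    false = is_allowed("foo@host.example.org", ['bar@host.example.org']),
    false = is_allowed("foo@other.example.org", ["host.example.org"]),
    ok.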
publish_type(Flags) ->
case Flags band ?DFLAG_PUBLISHED of
@@ -576,13 +772,13 @@ recv_challenge(#hs_data{socket=Socket,other_node=Node,
[Node, Challenge,Version]),
{Flags,Challenge};
_ ->
- ?shutdown(no_node)
+ ?shutdown2(no_node, {recv_challenge_failed, no_node, Ns})
catch
error:badarg ->
- ?shutdown(no_node)
+ ?shutdown2(no_node, {recv_challenge_failed, no_node, Ns})
end;
- _ ->
- ?shutdown(no_node)
+ Other ->
+ ?shutdown2(no_node, {recv_challenge_failed, Other})
end.
@@ -606,10 +802,10 @@ recv_challenge_reply(#hs_data{socket = Socket,
_ ->
error_msg("** Connection attempt from "
"disallowed node ~w ** ~n", [NodeB]),
- ?shutdown(NodeB)
+ ?shutdown2(NodeB, {recv_challenge_reply_failed, bad_cookie})
end;
- _ ->
- ?shutdown(no_node)
+ Other ->
+ ?shutdown2(no_node, {recv_challenge_reply_failed, Other})
end.
recv_challenge_ack(#hs_data{socket = Socket, f_recv = FRecv,
@@ -626,10 +822,10 @@ recv_challenge_ack(#hs_data{socket = Socket, f_recv = FRecv,
_ ->
error_msg("** Connection attempt to "
"disallowed node ~w ** ~n", [NodeB]),
- ?shutdown(NodeB)
+ ?shutdown2(NodeB, {recv_challenge_ack_failed, bad_cookie})
end;
- _ ->
- ?shutdown(NodeB)
+ Other ->
+ ?shutdown2(NodeB, {recv_challenge_ack_failed, Other})
end.
recv_status(#hs_data{kernel_pid = Kernel, socket = Socket,
@@ -639,7 +835,7 @@ recv_status(#hs_data{kernel_pid = Kernel, socket = Socket,
Stat = list_to_atom(StrStat),
?debug({dist_util,self(),recv_status, Node, Stat}),
case Stat of
- not_allowed -> ?shutdown(Node);
+ not_allowed -> ?shutdown2(Node, {recv_status_failed, not_allowed});
nok ->
%% wait to be killed by net_kernel
receive
@@ -656,10 +852,10 @@ recv_status(#hs_data{kernel_pid = Kernel, socket = Socket,
end;
_ -> Stat
end;
- _Error ->
+ Error ->
?debug({dist_util,self(),recv_status_error,
- Node, _Error}),
- ?shutdown(Node)
+ Node, Error}),
+ ?shutdown2(Node, {recv_status_failed, Error})
end.
@@ -695,51 +891,57 @@ send_status(#hs_data{socket = Socket, other_node = Node,
%% The detection time interval is thus, by default, 45s < DT < 75s
-%% A HIDDEN node is always (if not a pending write) ticked if
-%% we haven't read anything as a hidden node only ticks when it receives
-%% a TICK !!
+%% A HIDDEN node is always ticked if we haven't read anything
+%% as a (primitive) hidden node only ticks when it receives a TICK !!
-send_tick(Socket, Tick, Type, MFTick, MFGetstat) ->
+send_tick(DHandle, Socket, Tick, Type, MFTick, MFGetstat) ->
#tick{tick = T0,
read = Read,
write = Write,
- ticked = Ticked} = Tick,
+ ticked = Ticked0} = Tick,
T = T0 + 1,
T1 = T rem 4,
- case MFGetstat(Socket) of
- {ok, Read, _, _} when Ticked =:= T ->
+ case getstat(DHandle, Socket, MFGetstat) of
+ {ok, Read, _, _} when Ticked0 =:= T ->
{error, not_responding};
- {ok, Read, W, Pend} when Type =:= hidden ->
- send_tick(Socket, Pend, MFTick),
- {ok, Tick#tick{write = W + 1,
- tick = T1}};
- {ok, Read, Write, Pend} ->
- send_tick(Socket, Pend, MFTick),
- {ok, Tick#tick{write = Write + 1,
- tick = T1}};
- {ok, R, Write, Pend} ->
- send_tick(Socket, Pend, MFTick),
- {ok, Tick#tick{write = Write + 1,
- read = R,
- tick = T1,
- ticked = T}};
- {ok, Read, W, _} ->
- {ok, Tick#tick{write = W,
- tick = T1}};
- {ok, R, W, _} ->
- {ok, Tick#tick{write = W,
- read = R,
- tick = T1,
- ticked = T}};
+
+ {ok, R, W1, Pend} ->
+ RDiff = R - Read,
+ W2 = case need_to_tick(Type, RDiff, W1-Write, Pend) of
+ true ->
+ MFTick(Socket),
+ W1 + 1;
+ false ->
+ W1
+ end,
+
+ Ticked1 = case RDiff of
+ 0 -> Ticked0;
+ _ -> T
+ end,
+
+ {ok, Tick#tick{write = W2,
+ tick = T1,
+ read = R,
+ ticked = Ticked1}};
+
Error ->
Error
end.
-send_tick(Socket, 0, MFTick) ->
- MFTick(Socket);
-send_tick(_, _Pend, _) ->
- %% Dont send tick if pending write.
- ok.
+need_to_tick(_, _, 0, 0) -> % nothing written and empty send queue
+ true;
+need_to_tick(_, _, 0, false) -> % nothing written and empty send queue
+ true;
+need_to_tick(hidden, 0, _, _) -> % nothing read from hidden
+ true;
+need_to_tick(_, _, _, _) ->
+ false.
+
+send_aux_tick(normal, _, Pend, _) when Pend /= false, Pend /= 0 ->
+ ok; %% Don't send tick if pending write.
+send_aux_tick(_Type, Socket, _Pend, MFTick) ->
+ MFTick(Socket).
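
%% Worked examples (illustrative only) of the tick policy above:
%%   need_to_tick(normal, _RDiff, 0, 0) -> true    nothing written, send queue empty
%%   need_to_tick(hidden, 0, 12, 0)     -> true    nothing read from a hidden peer
%%   need_to_tick(normal, 5, 12, 0)     -> false   traffic in both directions
%% send_aux_tick/4 additionally skips the aux tick towards a normal
%% (visible) node while a write is pending.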
%% ------------------------------------------------------------
%% Connection setup timeout timer.
@@ -758,7 +960,7 @@ setup_timer(Pid, Timeout) ->
setup_timer(Pid, Timeout)
after Timeout ->
?trace("Timer expires ~p, ~p~n",[Pid, Timeout]),
- ?shutdown(timer)
+ ?shutdown2(timer, setup_timer_timeout)
end.
reset_timer(Timer) ->
diff --git a/lib/kernel/src/erl_boot_server.erl b/lib/kernel/src/erl_boot_server.erl
index 4076fab86d..4ac945ce01 100644
--- a/lib/kernel/src/erl_boot_server.erl
+++ b/lib/kernel/src/erl_boot_server.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -58,13 +58,11 @@
-define(single_addr_mask, {255, 255, 255, 255}).
--type ip4_address() :: {0..255,0..255,0..255,0..255}.
-
--spec start(Slaves) -> {'ok', Pid} | {'error', What} when
+-spec start(Slaves) -> {'ok', Pid} | {'error', Reason} when
Slaves :: [Host],
- Host :: atom(),
+ Host :: inet:ip_address() | inet:hostname(),
Pid :: pid(),
- What :: any().
+ Reason :: {'badarg', Slaves}.
start(Slaves) ->
case check_arg(Slaves) of
@@ -74,11 +72,11 @@ start(Slaves) ->
{error, {badarg, Slaves}}
end.
--spec start_link(Slaves) -> {'ok', Pid} | {'error', What} when
+-spec start_link(Slaves) -> {'ok', Pid} | {'error', Reason} when
Slaves :: [Host],
- Host :: atom(),
+ Host :: inet:ip_address() | inet:hostname(),
Pid :: pid(),
- What :: any().
+ Reason :: {'badarg', Slaves}.
start_link(Slaves) ->
case check_arg(Slaves) of
@@ -104,10 +102,10 @@ check_arg([], Result) ->
check_arg(_, _Result) ->
error.
--spec add_slave(Slave) -> 'ok' | {'error', What} when
+-spec add_slave(Slave) -> 'ok' | {'error', Reason} when
Slave :: Host,
- Host :: atom(),
- What :: any().
+ Host :: inet:ip_address() | inet:hostname(),
+ Reason :: {'badarg', Slave}.
add_slave(Slave) ->
case inet:getaddr(Slave, inet) of
@@ -117,10 +115,10 @@ add_slave(Slave) ->
{error, {badarg, Slave}}
end.
--spec delete_slave(Slave) -> 'ok' | {'error', What} when
+-spec delete_slave(Slave) -> 'ok' | {'error', Reason} when
Slave :: Host,
- Host :: atom(),
- What :: any().
+ Host :: inet:ip_address() | inet:hostname(),
+ Reason :: {'badarg', Slave}.
delete_slave(Slave) ->
case inet:getaddr(Slave, inet) of
@@ -130,7 +128,7 @@ delete_slave(Slave) ->
{error, {badarg, Slave}}
end.
--spec add_subnet(Mask :: ip4_address(), Addr :: ip4_address()) ->
+-spec add_subnet(Netmask :: inet:ip_address(), Addr :: inet:ip_address()) ->
'ok' | {'error', any()}.
add_subnet(Mask, Addr) when is_tuple(Mask), is_tuple(Addr) ->
@@ -141,14 +139,15 @@ add_subnet(Mask, Addr) when is_tuple(Mask), is_tuple(Addr) ->
{error, empty_subnet}
end.
--spec delete_subnet(Mask :: ip4_address(), Addr :: ip4_address()) -> 'ok'.
+-spec delete_subnet(Netmask :: inet:ip_address(),
+ Addr :: inet:ip_address()) -> 'ok'.
delete_subnet(Mask, Addr) when is_tuple(Mask), is_tuple(Addr) ->
gen_server:call(boot_server, {delete, {Mask, Addr}}).
-spec which_slaves() -> Slaves when
- Slaves :: [Host],
- Host :: atom().
+ Slaves :: [Slave],
+ Slave :: {Netmask :: inet:ip_address(), Address :: inet:ip_address()}.
which_slaves() ->
gen_server:call(boot_server, which).
@@ -253,9 +252,9 @@ handle_info({udp, U, IP, Port, Data}, S0) ->
"~w is not a valid address ** ~n", [IP]),
{noreply,S0};
{true,_,_} ->
- case catch string:substr(Data, 1, length(?EBOOT_REQUEST)) of
+ case catch string:slice(Data, 0, length(?EBOOT_REQUEST)) of
?EBOOT_REQUEST ->
- Vsn = string:substr(Data, length(?EBOOT_REQUEST)+1, length(Data)),
+ Vsn = string:slice(Data, length(?EBOOT_REQUEST), length(Data)),
error_logger:error_msg("** Illegal boot server connection attempt: "
"client version is ~s ** ~n", [Vsn]);
_ ->
diff --git a/lib/kernel/src/erl_ddll.erl b/lib/kernel/src/erl_ddll.erl
index 6180510bdd..89a02cc762 100644
--- a/lib/kernel/src/erl_ddll.erl
+++ b/lib/kernel/src/erl_ddll.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/erl_distribution.erl b/lib/kernel/src/erl_distribution.erl
index 99db7a8bf0..0bec78e938 100644
--- a/lib/kernel/src/erl_distribution.erl
+++ b/lib/kernel/src/erl_distribution.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,20 +21,47 @@
-behaviour(supervisor).
--export([start_link/0,start_link/1,init/1,start/1,stop/0]).
+-export([start_link/0,start_link/2,init/1,start/1,stop/0]).
-define(DBG,erlang:display([?MODULE,?LINE])).
+%% Called during system start-up.
+
start_link() ->
- case catch start_p() of
- {ok,Args} ->
- start_link(Args);
- _ ->
- ignore
+ do_start_link([{sname,shortnames},{name,longnames}]).
+
+%% Called from net_kernel:start/1 to start distribution after the
+%% system has already started.
+
+start(Args) ->
+ C = {net_sup_dynamic, {?MODULE,start_link,[Args,false]}, permanent,
+ 1000, supervisor, [erl_distribution]},
+ supervisor:start_child(kernel_sup, C).
+
+%% Stop distribution.
+
+stop() ->
+ case supervisor:terminate_child(kernel_sup, net_sup_dynamic) of
+ ok ->
+ supervisor:delete_child(kernel_sup, net_sup_dynamic);
+ Error ->
+ case whereis(net_sup) of
+ Pid when is_pid(Pid) ->
+ %% Dist. started through -sname | -name flags
+ {error, not_allowed};
+ _ ->
+ Error
+ end
end.
-start_link(Args) ->
- supervisor:start_link({local,net_sup},erl_distribution,Args).
+%%%
+%%% Internal helper functions.
+%%%
+
+%% Helper start function.
+
+start_link(Args, CleanHalt) ->
+ supervisor:start_link({local,net_sup}, ?MODULE, [Args,CleanHalt]).
init(NetArgs) ->
Epmd =
@@ -47,31 +74,20 @@ init(NetArgs) ->
permanent,2000,worker,[EpmdMod]}]
end,
Auth = {auth,{auth,start_link,[]},permanent,2000,worker,[auth]},
- Kernel = {net_kernel,{net_kernel,start_link,[NetArgs]},
+ Kernel = {net_kernel,{net_kernel,start_link,NetArgs},
permanent,2000,worker,[net_kernel]},
EarlySpecs = net_kernel:protocol_childspecs(),
{ok,{{one_for_all,0,1}, EarlySpecs ++ Epmd ++ [Auth,Kernel]}}.
-start_p() ->
- sname(),
- lname(),
- false.
-
-sname() ->
- case init:get_argument(sname) of
+do_start_link([{Arg,Flag}|T]) ->
+ case init:get_argument(Arg) of
{ok,[[Name]]} ->
- throw({ok,[list_to_atom(Name),shortnames|ticktime()]});
+ start_link([list_to_atom(Name),Flag|ticktime()], true);
_ ->
- false
- end.
-
-lname() ->
- case init:get_argument(name) of
- {ok,[[Name]]} ->
- throw({ok,[list_to_atom(Name),longnames|ticktime()]});
- _ ->
- false
- end.
+ do_start_link(T)
+ end;
+do_start_link([]) ->
+ ignore.
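
%% Illustration (assumed command line): for a node started with
%% "erl -sname foo", init:get_argument(sname) returns {ok,[["foo"]]},
%% so the loop above reduces to
%%   start_link([foo, shortnames | ticktime()], true)
%% while a plain "erl" falls through both entries and returns ignore.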
ticktime() ->
%% catch, in case the system was started with boot file start_old,
@@ -84,23 +100,3 @@ ticktime() ->
_ ->
[]
end.
-
-start(Args) ->
- C = {net_sup_dynamic, {erl_distribution, start_link, [Args]}, permanent,
- 1000, supervisor, [erl_distribution]},
- supervisor:start_child(kernel_sup, C).
-
-stop() ->
- case supervisor:terminate_child(kernel_sup, net_sup_dynamic) of
- ok ->
- supervisor:delete_child(kernel_sup, net_sup_dynamic);
- Error ->
- case whereis(net_sup) of
- Pid when is_pid(Pid) ->
- %% Dist. started through -sname | -name flags
- {error, not_allowed};
- _ ->
- Error
- end
- end.
-
diff --git a/lib/kernel/src/erl_epmd.erl b/lib/kernel/src/erl_epmd.erl
index c6202dd796..b7e8868911 100644
--- a/lib/kernel/src/erl_epmd.erl
+++ b/lib/kernel/src/erl_epmd.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -29,10 +29,20 @@
-define(port_please_failure2(Term), noop).
-endif.
+-ifndef(erlang_daemon_port).
+-define(erlang_daemon_port, 4369).
+-endif.
+-ifndef(epmd_dist_high).
+-define(epmd_dist_high, 4370).
+-endif.
+-ifndef(epmd_dist_low).
+-define(epmd_dist_low, 4370).
+-endif.
+
%% External exports
-export([start/0, start_link/0, stop/0, port_please/2,
port_please/3, names/0, names/1,
- register_node/2, register_node/3, open/0, open/1, open/2]).
+ register_node/2, register_node/3, address_please/3, open/0, open/1, open/2]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
@@ -53,7 +63,7 @@
start() ->
gen_server:start({local, erl_epmd}, ?MODULE, [], []).
-
+-spec start_link() -> {ok, pid()} | ignore | {error,term()}.
start_link() ->
gen_server:start_link({local, erl_epmd}, ?MODULE, [], []).
@@ -66,9 +76,22 @@ stop() ->
%% return {port, P, Version} | noport
%%
+-spec port_please(Name, Host) -> {ok, Port, Version} | noport when
+ Name :: string(),
+ Host :: inet:ip_address(),
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer().
+
port_please(Node, Host) ->
port_please(Node, Host, infinity).
+-spec port_please(Name, Host, Timeout) -> {ok, Port, Version} | noport when
+ Name :: string(),
+ Host :: inet:ip_address(),
+ Timeout :: non_neg_integer() | infinity,
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer().
+
port_please(Node,HostName, Timeout) when is_atom(HostName) ->
port_please1(Node,atom_to_list(HostName), Timeout);
port_please(Node,HostName, Timeout) when is_list(HostName) ->
@@ -79,17 +102,34 @@ port_please(Node, EpmdAddr, Timeout) ->
port_please1(Node,HostName, Timeout) ->
- case inet:gethostbyname(HostName, inet, Timeout) of
+ Family = case inet_db:res_option(inet6) of
+ true ->
+ inet6;
+ false ->
+ inet
+ end,
+ case inet:gethostbyname(HostName, Family, Timeout) of
{ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} ->
get_port(Node, EpmdAddr, Timeout);
Else ->
Else
end.
+-spec names() -> {ok, [{Name, Port}]} | {error, Reason} when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Reason :: address | file:posix().
+
names() ->
{ok, H} = inet:gethostname(),
names(H).
+-spec names(Host) -> {ok, [{Name, Port}]} | {error, Reason} when
+ Host :: atom() | string() | inet:ip_address(),
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Reason :: address | file:posix().
+
names(HostName) when is_atom(HostName); is_list(HostName) ->
case inet:gethostbyname(HostName) of
{ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} ->
@@ -100,12 +140,40 @@ names(HostName) when is_atom(HostName); is_list(HostName) ->
names(EpmdAddr) ->
get_names(EpmdAddr).
+-spec register_node(Name, Port) -> Result when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Creation :: non_neg_integer(),
+ Result :: {ok, Creation} | {error, already_registered} | term().
register_node(Name, PortNo) ->
- register_node(Name, PortNo, inet).
+ register_node(Name, PortNo, inet).
+
+-spec register_node(Name, Port, Driver) -> Result when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Driver :: inet_tcp | inet6_tcp | inet | inet6,
+ Creation :: non_neg_integer(),
+ Result :: {ok, Creation} | {error, already_registered} | term().
+
+register_node(Name, PortNo, inet_tcp) ->
+ register_node(Name, PortNo, inet);
+register_node(Name, PortNo, inet6_tcp) ->
+ register_node(Name, PortNo, inet6);
register_node(Name, PortNo, Family) ->
gen_server:call(erl_epmd, {register, Name, PortNo, Family}, infinity).
+-spec address_please(Name, Host, AddressFamily) -> Success | {error, term()} when
+ Name :: string(),
+ Host :: string() | inet:ip_address(),
+ AddressFamily :: inet | inet6,
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer(),
+ Success :: {ok, inet:ip_address()} | {ok, inet:ip_address(), Port, Version}.
+
+address_please(_Name, Host, AddressFamily) ->
+ inet:getaddr(Host, AddressFamily).
+
%%%----------------------------------------------------------------------
%%% Callback functions from gen_server
%%%----------------------------------------------------------------------
@@ -403,8 +471,6 @@ select_best_version(L1, _H1, _L2, H2) when L1 > H2 ->
0;
select_best_version(_L1, H1, L2, _H2) when L2 > H1 ->
0;
-select_best_version(_L1, H1, L2, _H2) when L2 > H1 ->
- 0;
select_best_version(_L1, H1, _L2, H2) ->
erlang:min(H1, H2).
diff --git a/lib/kernel/src/erl_epmd.hrl b/lib/kernel/src/erl_epmd.hrl
index f3585fea5e..3efcc81b55 100644
--- a/lib/kernel/src/erl_epmd.hrl
+++ b/lib/kernel/src/erl_epmd.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/erl_reply.erl b/lib/kernel/src/erl_reply.erl
index ba046980f6..e1c4ffe839 100644
--- a/lib/kernel/src/erl_reply.erl
+++ b/lib/kernel/src/erl_reply.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -42,7 +42,7 @@ reply(_) ->
%% convert ip number to tuple
ip_string_to_tuple(Ip) ->
- [Ip1,Ip2,Ip3,Ip4] = string:tokens(Ip,"."),
+ [Ip1,Ip2,Ip3,Ip4] = string:lexemes(Ip,"."),
{list_to_integer(Ip1),
list_to_integer(Ip2),
list_to_integer(Ip3),
diff --git a/lib/kernel/src/erl_signal_handler.erl b/lib/kernel/src/erl_signal_handler.erl
new file mode 100644
index 0000000000..5be905d8ae
--- /dev/null
+++ b/lib/kernel/src/erl_signal_handler.erl
@@ -0,0 +1,66 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(erl_signal_handler).
+-behaviour(gen_event).
+-export([start/0, init/1, format_status/2,
+ handle_event/2, handle_call/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state,{}).
+
+start() ->
+ %% add signal handler
+ case whereis(erl_signal_server) of
+ %% in case of minimal mode
+ undefined -> ok;
+ _ ->
+ gen_event:add_handler(erl_signal_server, erl_signal_handler, [])
+ end.
+
+init(_Args) ->
+ {ok, #state{}}.
+
+handle_event(sigusr1, S) ->
+ erlang:halt("Received SIGUSR1"),
+ {ok, S};
+handle_event(sigquit, S) ->
+ erlang:halt(),
+ {ok, S};
+handle_event(sigterm, S) ->
+ error_logger:info_msg("SIGTERM received - shutting down~n"),
+ ok = init:stop(),
+ {ok, S};
+handle_event(_SignalMsg, S) ->
+ {ok, S}.
+
+handle_info(_Info, S) ->
+ {ok, S}.
+
+handle_call(_Request, S) ->
+ {ok, ok, S}.
+
+format_status(_Opt, [_Pdict,_S]) ->
+ ok.
+
+code_change(_OldVsn, S, _Extra) ->
+ {ok, S}.
+
+terminate(_Args, _S) ->
+ ok.
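
%% Hedged sketch: replacing the default handler above with a custom
%% gen_event callback module (my_signal_handler is hypothetical) and
%% enabling delivery of an additional signal with os:set_signal/2.
ok = os:set_signal(sighup, handle),
ok = gen_event:swap_sup_handler(erl_signal_server,
                                {erl_signal_handler, []},
                                {my_signal_handler, []}).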
diff --git a/lib/kernel/src/error_handler.erl b/lib/kernel/src/error_handler.erl
index 095e1163f7..a89ef83261 100644
--- a/lib/kernel/src/error_handler.erl
+++ b/lib/kernel/src/error_handler.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -106,8 +106,8 @@ crash(M, F, A) ->
crash(Tuple) ->
try erlang:error(undef)
catch
- error:undef ->
- Stk = [Tuple|tl(erlang:get_stacktrace())],
+ error:undef:Stacktrace ->
+ Stk = [Tuple|tl(Stacktrace)],
erlang:raise(error, undef, Stk)
end.
diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl
index eb231fd155..e324be5290 100644
--- a/lib/kernel/src/error_logger.erl
+++ b/lib/kernel/src/error_logger.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,19 +19,22 @@
%%
-module(error_logger).
--export([start/0,start_link/0,format/2,error_msg/1,error_msg/2,error_report/1,
+-include("logger_internal.hrl").
+
+-export([start/0,start_link/0,stop/0,
+ format/2,error_msg/1,error_msg/2,error_report/1,
error_report/2,info_report/1,info_report/2,warning_report/1,
warning_report/2,error_info/1,
info_msg/1,info_msg/2,warning_msg/1,warning_msg/2,
- logfile/1,tty/1,swap_handler/1,
+ logfile/1,tty/1,
add_report_handler/1,add_report_handler/2,
- delete_report_handler/1]).
+ delete_report_handler/1,
+ which_report_handlers/0]).
--export([init/1,
- handle_event/2, handle_call/2, handle_info/2,
- terminate/2]).
+%% logger callbacks
+-export([adding_handler/1, removing_handler/1, log/2]).
--define(buffer_size, 10).
+-export([get_format_depth/0, limit_term/1]).
%%-----------------------------------------------------------------
%% Types used in this file
@@ -41,8 +44,6 @@
| 'info' | 'info_msg' | 'info_report'
| 'warning_msg' | 'warning_report'.
--type state() :: {non_neg_integer(), non_neg_integer(), [term()]}.
-
%%% BIF
-export([warning_map/0]).
@@ -57,26 +58,164 @@ warning_map() ->
%%-----------------------------------------------------------------
--spec start() -> {'ok', pid()} | {'error', any()}.
+%%%-----------------------------------------------------------------
+%%% Start the event manager process under logger_sup, which is part of
+%%% the kernel application's supervision tree.
+-spec start() -> 'ok' | {'error', any()}.
start() ->
- case gen_event:start({local, error_logger}) of
- {ok, Pid} ->
- simple_logger(?buffer_size),
- {ok, Pid};
- Error -> Error
+ case whereis(?MODULE) of
+ undefined ->
+ ErrorLogger =
+ #{id => ?MODULE,
+ start => {?MODULE, start_link, []},
+ restart => transient,
+ shutdown => 2000,
+ type => worker,
+ modules => dynamic},
+ case supervisor:start_child(logger_sup, ErrorLogger) of
+ {ok,Pid} ->
+ ok = logger_handler_watcher:register_handler(?MODULE,Pid);
+ Error ->
+ Error
+ end;
+ _ ->
+ ok
end.
+%%%-----------------------------------------------------------------
+%%% Start callback specified in child specification to supervisor, see start/0
-spec start_link() -> {'ok', pid()} | {'error', any()}.
start_link() ->
- case gen_event:start_link({local, error_logger}) of
- {ok, Pid} ->
- simple_logger(?buffer_size),
- {ok, Pid};
- Error -> Error
+ gen_event:start_link({local, ?MODULE},
+ [{spawn_opt,[{message_queue_data, off_heap}]}]).
+
+%%%-----------------------------------------------------------------
+%%% Stop the event manager
+-spec stop() -> ok.
+stop() ->
+ case whereis(?MODULE) of
+ undefined ->
+ ok;
+ _Pid ->
+ _ = gen_event:stop(?MODULE,{shutdown,stopped},infinity),
+ _ = supervisor:delete_child(logger_sup,?MODULE),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Callbacks for logger
+-spec adding_handler(logger:handler_config()) ->
+ {ok,logger:handler_config()} | {error,term()}.
+adding_handler(#{id:=?MODULE}=Config) ->
+ case start() of
+ ok ->
+ {ok,Config};
+ Error ->
+ Error
end.
+-spec removing_handler(logger:handler_config()) -> ok.
+removing_handler(#{id:=?MODULE}) ->
+ stop(),
+ ok.
+
+-spec log(logger:log_event(),logger:handler_config()) -> ok.
+log(#{level:=Level,msg:=Msg,meta:=Meta},_Config) ->
+ do_log(Level,Msg,Meta).
+
+do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag,type:=Type}}=Meta) ->
+ %% From error_logger:*_report/1,2, or logger call which added
+ %% error_logger data to obtain backwards compatibility with
+ %% error_logger:*_report/1,2
+ Report =
+ case Msg of
+ #{label:=_,report:=R} -> R;
+ _ -> Msg
+ end,
+ notify(Level,Tag,Type,Report,Meta);
+do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag}}=Meta) ->
+ {Format,Args} =
+ case Msg of
+ #{label:=_,format:=F,args:=A} ->
+ %% From error_logger:*_msg/1,2.
+ %% In order to be backwards compatible with handling
+ %% of faulty parameters to error_logger:*_msg/1,2,
+ %% don't use report_cb here.
+ {F,A};
+ _ ->
+ %% From logger call which added error_logger data to
+ %% obtain backwards compatibility with error_logger:*_msg/1,2
+ case maps:get(report_cb,Meta,fun logger:format_report/1) of
+ RCBFun when is_function(RCBFun,1) ->
+ try RCBFun(Msg) of
+ {F,A} when is_list(F), is_list(A) ->
+ {F,A};
+ Other ->
+ {"REPORT_CB ERROR: ~tp; Returned: ~tp",[Msg,Other]}
+ catch C:R ->
+ {"REPORT_CB CRASH: ~tp; Reason: ~tp",[Msg,{C,R}]}
+ end;
+ RCBFun when is_function(RCBFun,2) ->
+ try RCBFun(Msg,#{depth=>get_format_depth(),
+ chars_limit=>unlimited,
+ single_line=>false}) of
+ Chardata when ?IS_STRING(Chardata) ->
+ {"~ts",[Chardata]};
+ Other ->
+ {"REPORT_CB ERROR: ~tp; Returned: ~tp",[Msg,Other]}
+ catch C:R ->
+ {"REPORT_CB CRASH: ~tp; Reason: ~tp",[Msg,{C,R}]}
+ end
+ end
+ end,
+ notify(Level,Tag,Format,Args,Meta);
+do_log(Level,{Format,Args},#{?MODULE:=#{tag:=Tag}}=Meta)
+ when is_list(Format), is_list(Args) ->
+ %% From logger call which added error_logger data to obtain
+ %% backwards compatibility with error_logger:*_msg/1,2
+ notify(Level,Tag,Format,Args,Meta);
+do_log(_Level,_Msg,_Meta) ->
+ %% Ignore the rest - i.e. to get backwards compatibility with
+ %% error_logger, you must use the error_logger API for logging.
+ %% Some modules within OTP go around this by adding an
+ %% error_logger field to its metadata. This is done only to allow
+ %% complete backwards compatibility for log events originating
+ %% from within OTP, while still using the new logger interface.
+ ok.
+
+-spec notify(logger:level(), msg_tag(), any(), any(), map()) -> 'ok'.
+notify(Level,Tag0,FormatOrType0,ArgsOrReport,#{pid:=Pid0,gl:=GL,?MODULE:=My}) ->
+ {Tag,FormatOrType} = maybe_map_warnings(Level,Tag0,FormatOrType0),
+ Pid = case maps:get(emulator,My,false) of
+ true -> emulator;
+ _ -> Pid0
+ end,
+ gen_event:notify(?MODULE,{Tag,GL,{Pid,FormatOrType,ArgsOrReport}}).
+
+%% For backwards compatibility with really old event handlers, check
+%% the warning map and update tag and type.
+maybe_map_warnings(warning,Tag,FormatOrType) ->
+ case error_logger:warning_map() of
+ warning ->
+ {Tag,FormatOrType};
+ Level ->
+ {fix_warning_tag(Level,Tag),fix_warning_type(Level,FormatOrType)}
+ end;
+maybe_map_warnings(_,Tag,FormatOrType) ->
+ {Tag,FormatOrType}.
+
+fix_warning_tag(error,warning_msg) -> error;
+fix_warning_tag(error,warning_report) -> error_report;
+fix_warning_tag(info,warning_msg) -> info_msg;
+fix_warning_tag(info,warning_report) -> info_report;
+fix_warning_tag(_,Tag) -> Tag.
+
+fix_warning_type(error,std_warning) -> std_error;
+fix_warning_type(info,std_warning) -> std_info;
+fix_warning_type(_,Type) -> Type.
+
%%-----------------------------------------------------------------
%% These two simple old functions generate events tagged 'error'
%% Used for simple messages; error or information.
@@ -93,14 +232,18 @@ error_msg(Format) ->
Data :: list().
error_msg(Format, Args) ->
- notify({error, group_leader(), {self(), Format, Args}}).
+ logger:log(error,
+ #{label=>{?MODULE,error_msg},
+ format=>Format,
+ args=>Args},
+ meta(error)).
-spec format(Format, Data) -> 'ok' when
Format :: string(),
Data :: list().
format(Format, Args) ->
- notify({error, group_leader(), {self(), Format, Args}}).
+ error_msg(Format, Args).
%%-----------------------------------------------------------------
%% These functions should be used for error reports. Events
@@ -122,7 +265,10 @@ error_report(Report) ->
Report :: report().
error_report(Type, Report) ->
- notify({error_report, group_leader(), {self(), Type, Report}}).
+ logger:log(error,
+ #{label=>{?MODULE,error_report},
+ report=>Report},
+ meta(error_report,Type)).
%%-----------------------------------------------------------------
%% This function should be used for warning reports.
@@ -144,25 +290,10 @@ warning_report(Report) ->
Report :: report().
warning_report(Type, Report) ->
- {Tag, NType} = case error_logger:warning_map() of
- info ->
- if
- Type =:= std_warning ->
- {info_report, std_info};
- true ->
- {info_report, Type}
- end;
- warning ->
- {warning_report, Type};
- error ->
- if
- Type =:= std_warning ->
- {error_report, std_error};
- true ->
- {error_report, Type}
- end
- end,
- notify({Tag, group_leader(), {self(), NType, Report}}).
+ logger:log(warning,
+ #{label=>{?MODULE,warning_report},
+ report=>Report},
+ meta(warning_report,Type)).
%%-----------------------------------------------------------------
%% This function provides functionality similar to error_msg, but for
@@ -181,15 +312,11 @@ warning_msg(Format) ->
Data :: list().
warning_msg(Format, Args) ->
- Tag = case error_logger:warning_map() of
- warning ->
- warning_msg;
- info ->
- info_msg;
- error ->
- error
- end,
- notify({Tag, group_leader(), {self(), Format, Args}}).
+ logger:log(warning,
+ #{label=>{?MODULE,warning_msg},
+ format=>Format,
+ args=>Args},
+ meta(warning_msg)).
%%-----------------------------------------------------------------
%% This function should be used for information reports. Events
@@ -208,7 +335,10 @@ info_report(Report) ->
Report :: report().
info_report(Type, Report) ->
- notify({info_report, group_leader(), {self(), Type, Report}}).
+ logger:log(notice,
+ #{label=>{?MODULE,info_report},
+ report=>Report},
+ meta(info_report,Type)).
%%-----------------------------------------------------------------
%% This function provides functionality similar to error_msg, but for
@@ -226,7 +356,11 @@ info_msg(Format) ->
Data :: list().
info_msg(Format, Args) ->
- notify({info_msg, group_leader(), {self(), Format, Args}}).
+ logger:log(notice,
+ #{label=>{?MODULE,info_msg},
+ format=>Format,
+ args=>Args},
+ meta(info_msg)).
%%-----------------------------------------------------------------
%% Used by the init process. Events are tagged 'info'.
@@ -234,38 +368,75 @@ info_msg(Format, Args) ->
-spec error_info(Error :: any()) -> 'ok'.
+%% unused?
error_info(Error) ->
- notify({info, group_leader(), {self(), Error, []}}).
-
--spec notify({msg_tag(), pid(), {pid(), any(), any()}}) -> 'ok'.
-
-notify(Msg) ->
- gen_event:notify(error_logger, Msg).
-
--type swap_handler_type() :: 'false' | 'silent' | 'tty' | {'logfile', string()}.
--spec swap_handler(Type :: swap_handler_type()) -> any().
-
-swap_handler(tty) ->
- R = gen_event:swap_handler(error_logger, {error_logger, swap},
- {error_logger_tty_h, []}),
- ok = simple_logger(),
- R;
-swap_handler({logfile, File}) ->
- R = gen_event:swap_handler(error_logger, {error_logger, swap},
- {error_logger_file_h, File}),
- ok = simple_logger(),
- R;
-swap_handler(silent) ->
- _ = gen_event:delete_handler(error_logger, error_logger, delete),
- ok = simple_logger();
-swap_handler(false) ->
- ok. % keep primitive event handler as-is
+ {Format,Args} =
+ case string_p(Error) of
+ true -> {Error,[]};
+ false -> {"~p",[Error]}
+ end,
+ MyMeta = #{tag=>info,type=>Error},
+ logger:log(notice, Format, Args, #{?MODULE=>MyMeta,domain=>[Error]}).
+%%-----------------------------------------------------------------
+%% Create metadata
+meta(Tag) ->
+ meta(Tag,undefined).
+meta(Tag,Type) ->
+ meta(Tag,Type,#{report_cb=>fun report_to_format/1}).
+meta(Tag,undefined,Meta0) ->
+ Meta0#{?MODULE=>#{tag=>Tag}};
+meta(Tag,Type,Meta0) ->
+ maybe_add_domain(Tag,Type,Meta0#{?MODULE=>#{tag=>Tag,type=>Type}}).
+
+%% This is to prevent events of non-standard type from being printed
+%% by the standard logger. Similar to how error_logger_tty_h
+%% discards events of non-standard type.
+maybe_add_domain(error_report,std_error,Meta) -> Meta;
+maybe_add_domain(info_report,std_info,Meta) -> Meta;
+maybe_add_domain(warning_report,std_warning,Meta) -> Meta;
+maybe_add_domain(_,Type,Meta) -> Meta#{domain=>[Type]}.
+
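A hedged sketch of the metadata produced by meta/2 for a user-defined report type; the type my_type is an assumption for illustration only:

meta_example() ->
    %% a non-standard type is put into the domain, so the default handler's
    %% domain filter can discard it, while std_error/std_info/std_warning
    %% reports get no domain entry
    #{error_logger := #{tag := error_report, type := my_type},
      domain := [my_type]} = meta(error_report, my_type),
    ok.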
+%% -----------------------------------------------------------------
+%% Report formatting - i.e. Term => {Format,Args}
+%% This was earlier done in the event handler (error_logger_tty_h, etc)
+%% -----------------------------------------------------------------
+report_to_format(#{label:={?MODULE,_},
+ report:=Report}) when is_map(Report) ->
+ %% logger:format_otp_report does maps:to_list, and for backwards
+ %% compatibility reasons we don't want that.
+ {"~tp\n",[Report]};
+report_to_format(#{label:={?MODULE,_},
+ format:=Format,
+ args:=Args}) ->
+ %% This is not efficient, but needed for backwards compatibility
+ %% when faulty arguments are given to the *_msg functions.
+ try io_lib:scan_format(Format,Args) of
+ _ -> {Format,Args}
+ catch _:_ ->
+ {"ERROR: ~tp - ~tp",[Format,Args]}
+ end;
+report_to_format(Term) ->
+ logger:format_otp_report(Term).
+
+string_p(List) when is_list(List) ->
+ string_p1(lists:flatten(List));
+string_p(_) ->
+ false.
+
+string_p1([]) ->
+ false;
+string_p1(FlatList) ->
+ io_lib:printable_list(FlatList).
+
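A hedged usage sketch of the callback above (not part of the patch); the report contents are arbitrary examples:

report_to_format_example() ->
    %% map reports are printed as a single term rather than key-value lines,
    %% to preserve the old error_logger output
    {"~tp\n", [#{alarm := high}]} =
        report_to_format(#{label => {?MODULE, error_report},
                           report => #{alarm => high}}),
    ok.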
+%% -----------------------------------------------------------------
+%% Stuff directly related to the event manager
+%% -----------------------------------------------------------------
-spec add_report_handler(Handler) -> any() when
Handler :: module().
add_report_handler(Module) when is_atom(Module) ->
- gen_event:add_handler(error_logger, Module, []).
+ add_report_handler(Module, []).
-spec add_report_handler(Handler, Args) -> Result when
Handler :: module(),
@@ -273,24 +444,37 @@ add_report_handler(Module) when is_atom(Module) ->
Result :: gen_event:add_handler_ret().
add_report_handler(Module, Args) when is_atom(Module) ->
- gen_event:add_handler(error_logger, Module, Args).
+ _ = logger:add_handler(?MODULE,?MODULE,#{level=>info,filter_default=>log}),
+ gen_event:add_handler(?MODULE, Module, Args).
-spec delete_report_handler(Handler) -> Result when
Handler :: module(),
Result :: gen_event:del_handler_ret().
delete_report_handler(Module) when is_atom(Module) ->
- gen_event:delete_handler(error_logger, Module, []).
-
-%% Start the lowest level error_logger handler with Buffer.
-
-simple_logger(Buffer_size) when is_integer(Buffer_size) ->
- gen_event:add_handler(error_logger, error_logger, Buffer_size).
-
-%% Start the lowest level error_logger handler without Buffer.
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ Return = gen_event:delete_handler(?MODULE, Module, []),
+ case gen_event:which_handlers(?MODULE) of
+ [] ->
+ %% Don't want a lot of logs here if it's not needed
+ _ = logger:remove_handler(?MODULE),
+ ok;
+ _ ->
+ ok
+ end,
+ Return;
+ _ ->
+ ok
+ end.
-simple_logger() ->
- gen_event:add_handler(error_logger, error_logger, []).
+which_report_handlers() ->
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ gen_event:which_handlers(?MODULE);
+ undefined ->
+ []
+ end.
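A hedged usage sketch; my_report_handler is an assumed gen_event callback module, not something provided by the patch:

install_example() ->
    %% the first legacy handler also installs the error_logger event manager
    %% as a logger handler; removing the last one removes it again
    _ = error_logger:add_report_handler(my_report_handler, []),
    _ = error_logger:delete_report_handler(my_report_handler),
    ok.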
%% Log all errors to File for all eternity
@@ -306,26 +490,35 @@ simple_logger() ->
FilenameReason :: no_log_file.
logfile({open, File}) ->
- case lists:member(error_logger_file_h,
- gen_event:which_handlers(error_logger)) of
+ case lists:member(error_logger_file_h,which_report_handlers()) of
true ->
{error, allready_have_logfile};
_ ->
- gen_event:add_handler(error_logger, error_logger_file_h, File)
+ add_report_handler(error_logger_file_h, File)
end;
logfile(close) ->
- case gen_event:delete_handler(error_logger, error_logger_file_h, normal) of
- {error,Reason} ->
- {error,Reason};
- _ ->
- ok
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ case gen_event:delete_handler(?MODULE, error_logger_file_h, normal) of
+ {error,Reason} ->
+ {error,Reason};
+ _ ->
+ ok
+ end;
+ _ ->
+ {error,module_not_found}
end;
logfile(filename) ->
- case gen_event:call(error_logger, error_logger_file_h, filename) of
- {error,_} ->
- {error, no_log_file};
- Val ->
- Val
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ case gen_event:call(?MODULE, error_logger_file_h, filename) of
+ {error,_} ->
+ {error, no_log_file};
+ Val ->
+ Val
+ end;
+ _ ->
+ {error, no_log_file}
end.
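A hedged sketch of the logfile/1 API with the reworked guards above; the file name is an example only:

logfile_example() ->
    %% {open, File} adds error_logger_file_h via add_report_handler/2
    _ = error_logger:logfile({open, "/tmp/kernel.log"}),
    %% 'filename' returns the current log file or {error, no_log_file}
    _ = error_logger:logfile(filename),
    error_logger:logfile(close).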
%% Possibly turn off all tty printouts, maybe we only want the errors
@@ -335,105 +528,54 @@ logfile(filename) ->
Flag :: boolean().
tty(true) ->
- Hs = gen_event:which_handlers(error_logger),
- case lists:member(error_logger_tty_h, Hs) of
- false ->
- gen_event:add_handler(error_logger, error_logger_tty_h, []);
- true ->
- ignore
- end,
+ _ = case lists:member(error_logger_tty_h, which_report_handlers()) of
+ false ->
+ case logger:get_handler_config(default) of
+ {ok,#{module:=logger_std_h,config:=#{type:=standard_io}}} ->
+ logger:remove_handler_filter(default,
+ error_logger_tty_false);
+ _ ->
+ logger:add_handler(error_logger_tty_true,logger_std_h,
+ #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS(
+ [otp]),
+ formatter=>{?DEFAULT_FORMATTER,
+ ?DEFAULT_FORMAT_CONFIG},
+ config=>#{type=>standard_io}})
+ end;
+ true ->
+ ok
+ end,
ok;
tty(false) ->
- gen_event:delete_handler(error_logger, error_logger_tty_h, []),
+ delete_report_handler(error_logger_tty_h),
+ _ = logger:remove_handler(error_logger_tty_true),
+ _ = case logger:get_handler_config(default) of
+ {ok,#{module:=logger_std_h,config:=#{type:=standard_io}}} ->
+ logger:add_handler_filter(default,error_logger_tty_false,
+ {fun(_,_) -> stop end, ok});
+ _ ->
+ ok
+ end,
ok.
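A hedged sketch of the intended behaviour (assuming the default logger handler is a logger_std_h on standard_io, as checked above):

tty_example() ->
    %% silences the default handler via the error_logger_tty_false filter
    ok = error_logger:tty(false),
    %% removes the filter again (or the error_logger_tty_true handler)
    ok = error_logger:tty(true).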
+%%%-----------------------------------------------------------------
+-spec limit_term(term()) -> term().
+
+limit_term(Term) ->
+ case get_format_depth() of
+ unlimited -> Term;
+ D -> io_lib:limit_term(Term, D)
+ end.
+
+-spec get_format_depth() -> 'unlimited' | pos_integer().
-%%% ---------------------------------------------------
-%%% This is the default error_logger handler.
-%%% ---------------------------------------------------
-
--spec init(term()) -> {'ok', state() | []}.
-
-init(Max) when is_integer(Max) ->
- {ok, {Max, 0, []}};
-%% This one is called if someone took over from us, and now wants to
-%% go back.
-init({go_back, _PostState}) ->
- {ok, {?buffer_size, 0, []}};
-init(_) -> %% Start and just relay to other
- {ok, []}. %% node if node(GLeader) =/= node().
-
--spec handle_event(term(), state()) -> {'ok', state()}.
-
-handle_event({Type, GL, Msg}, State) when node(GL) =/= node() ->
- gen_event:notify({error_logger, node(GL)},{Type, GL, Msg}),
- %% handle_event2({Type, GL, Msg}, State); %% Shall we do something
- {ok, State}; %% at this node too ???
-handle_event({info_report, _, {_, Type, _}}, State) when Type =/= std_info ->
- {ok, State}; %% Ignore other info reports here
-handle_event(Event, State) ->
- handle_event2(Event, State).
-
--spec handle_info(term(), state()) -> {'ok', state()}.
-
-handle_info({emulator, GL, Chars}, State) when node(GL) =/= node() ->
- {error_logger, node(GL)} ! {emulator, GL, add_node(Chars,self())},
- {ok, State};
-handle_info({emulator, GL, Chars}, State) ->
- handle_event2({emulator, GL, Chars}, State);
-handle_info(_, State) ->
- {ok, State}.
-
--spec handle_call(term(), state()) -> {'ok', {'error', 'bad_query'}, state()}.
-
-handle_call(_Query, State) -> {ok, {error, bad_query}, State}.
-
--spec terminate(term(), state()) -> {'error_logger', [term()]}.
-
-terminate(swap, {_, 0, Buff}) ->
- {error_logger, Buff};
-terminate(swap, {_, Lost, Buff}) ->
- Myevent = {info, group_leader(), {self(), {lost_messages, Lost}, []}},
- {error_logger, [tag_event(Myevent)|Buff]};
-terminate(_, _) ->
- {error_logger, []}.
-
-handle_event2(Event, {1, Lost, Buff}) ->
- display(tag_event(Event)),
- {ok, {1, Lost+1, Buff}};
-handle_event2(Event, {N, Lost, Buff}) ->
- Tagged = tag_event(Event),
- display(Tagged),
- {ok, {N-1, Lost, [Tagged|Buff]}};
-handle_event2(_, State) ->
- {ok, State}.
-
-tag_event(Event) ->
- {erlang:localtime(), Event}.
-
-display({Tag,{error,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{error_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{info_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{info,_,{_,Error,_}}}) ->
- display2(Tag,Error,[]);
-display({Tag,{info_msg,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{warning_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{warning_msg,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{emulator,_,Chars}}) ->
- display2(Tag,Chars,[]).
-
-add_node(X, Pid) when is_atom(X) ->
- add_node(atom_to_list(X), Pid);
-add_node(X, Pid) ->
- lists:concat([X,"** at node ",node(Pid)," **~n"]).
-
-%% Can't do io_lib:format
-
-display2(Tag,F,A) ->
- erlang:display({error_logger,Tag,F,A}).
+get_format_depth() ->
+ case application:get_env(kernel, error_logger_format_depth) of
+ {ok, Depth} when is_integer(Depth) ->
+ max(10, Depth);
+ {ok, unlimited} ->
+ unlimited;
+ undefined ->
+ unlimited
+ end.
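A hedged sketch of the depth handling above; the environment value 5 is an arbitrary example:

limit_example(Term) ->
    ok = application:set_env(kernel, error_logger_format_depth, 5),
    %% depths below 10 are raised to 10; 'unlimited' or an unset variable
    %% disables truncation in limit_term/1
    10 = get_format_depth(),
    limit_term(Term).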
diff --git a/lib/kernel/src/erts_debug.erl b/lib/kernel/src/erts_debug.erl
index 39308c0043..1270de4144 100644
--- a/lib/kernel/src/erts_debug.erl
+++ b/lib/kernel/src/erts_debug.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@
%% Low-level debugging support. EXPERIMENTAL!
--export([size/1,df/1,df/2,df/3,ic/1]).
+-export([size/1,df/1,df/2,df/3,dis_to_file/2,ic/1]).
%% This module contains the following *experimental* BIFs:
%% disassemble/1
@@ -32,9 +32,11 @@
%%% BIFs
-export([breakpoint/2, disassemble/1, display/1, dist_ext_to_term/2,
- dump_monitors/1, dump_links/1, flat_size/1,
- get_internal_state/1, instructions/0, lock_counters/1,
- map_info/1, same/2, set_internal_state/2]).
+ flat_size/1, get_internal_state/1, instructions/0,
+ map_info/1, same/2, set_internal_state/2,
+ size_shared/1, copy_shared/1, dirty_cpu/2, dirty_io/2, dirty/3,
+ lcnt_control/1, lcnt_control/2, lcnt_collect/0, lcnt_clear/0,
+ lc_graph/0, lc_graph_to_dot/2, lc_graph_merge/2]).
-spec breakpoint(MFA, Flag) -> non_neg_integer() when
MFA :: {Module :: module(),
@@ -68,22 +70,22 @@ display(_) ->
dist_ext_to_term(_, _) ->
erlang:nif_error(undef).
--spec dump_monitors(Id) -> true when
- Id :: pid() | atom().
+-spec flat_size(Term) -> non_neg_integer() when
+ Term :: term().
-dump_monitors(_) ->
+flat_size(_) ->
erlang:nif_error(undef).
--spec dump_links(Id) -> true when
- Id :: pid() | port() | atom().
+-spec size_shared(Term) -> non_neg_integer() when
+ Term :: term().
-dump_links(_) ->
+size_shared(_) ->
erlang:nif_error(undef).
--spec flat_size(Term) -> non_neg_integer() when
+-spec copy_shared(Term) -> term() when
Term :: term().
-flat_size(_) ->
+copy_shared(_) ->
erlang:nif_error(undef).
-spec get_internal_state(W) -> term() when
@@ -128,12 +130,31 @@ ic(F) when is_function(F) ->
io:format("Total: ~w~n",[lists:sum([C||{_I,C}<-Is])]),
R.
--spec lock_counters(info) -> term();
- (clear) -> ok;
- ({copy_save, boolean()}) -> boolean();
- ({process_locks, boolean()}) -> boolean().
+-spec lcnt_control
+ (copy_save, boolean()) -> ok;
+ (mask, list(atom())) -> ok.
+
+lcnt_control(_Option, _Value) ->
+ erlang:nif_error(undef).
+
+-spec lcnt_control
+ (copy_save) -> boolean();
+ (mask) -> list(atom()).
+
+lcnt_control(_Option) ->
+ erlang:nif_error(undef).
+
+-type lcnt_lock_info() :: {atom(), term(), atom(), term()}.
+
+-spec lcnt_collect() ->
+ list({duration, {non_neg_integer(), non_neg_integer()}} |
+ {locks, list(lcnt_lock_info())}).
-lock_counters(_) ->
+lcnt_collect() ->
+ erlang:nif_error(undef).
+
+-spec lcnt_clear() -> ok.
+lcnt_clear() ->
erlang:nif_error(undef).
-spec same(Term1, Term2) -> boolean() when
@@ -169,6 +190,28 @@ same(_, _) ->
set_internal_state(_, _) ->
erlang:nif_error(undef).
+-spec dirty_cpu(Term1, Term2) -> term() when
+ Term1 :: term(),
+ Term2 :: term().
+
+dirty_cpu(_, _) ->
+ erlang:nif_error(undef).
+
+-spec dirty_io(Term1, Term2) -> term() when
+ Term1 :: term(),
+ Term2 :: term().
+
+dirty_io(_, _) ->
+ erlang:nif_error(undef).
+
+-spec dirty(Term1, Term2, Term3) -> term() when
+ Term1 :: term(),
+ Term2 :: term(),
+ Term3 :: term().
+
+dirty(_, _, _) ->
+ erlang:nif_error(undef).
+
%%% End of BIFs
%% size(Term)
@@ -230,7 +273,7 @@ map_size(Map,Seen0,Sum0) ->
%% is not allowed to leak anywhere. They are only allowed in
%% containers (cons cells and tuples, not maps), in gc and
%% in erts_debug:same/2
- case erts_internal:map_type(Map) of
+ case erts_internal:term_type(Map) of
flatmap ->
Kt = erts_internal:map_to_tuple_keys(Map),
Vs = maps:values(Map),
@@ -323,16 +366,21 @@ df(Mod, Func, Arity) when is_atom(Mod), is_atom(Func) ->
catch _:_ -> {undef,Mod}
end.
-dff(File, Fs) when is_pid(File), is_list(Fs) ->
- lists:foreach(fun(Mfa) ->
- disassemble_function(File, Mfa),
- io:nl(File)
- end, Fs);
-dff(Name, Fs) when is_list(Name) ->
- case file:open(Name, [write]) of
+-spec dis_to_file(module(), file:filename()) -> df_ret().
+
+dis_to_file(Mod, Name) when is_atom(Mod) ->
+ try Mod:module_info(functions) of
+ Fs0 when is_list(Fs0) ->
+ Fs = [{Mod,Func,Arity} || {Func,Arity} <- Fs0],
+ dff(Name, Fs)
+ catch _:_ -> {undef,Mod}
+ end.
+
+dff(Name, Fs) ->
+ case file:open(Name, [write,raw,delayed_write]) of
{ok,F} ->
try
- dff(F, Fs)
+ dff_1(F, Fs)
after
_ = file:close(F)
end;
@@ -340,12 +388,18 @@ dff(Name, Fs) when is_list(Name) ->
{error,{badopen,Reason}}
end.
+dff_1(File, Fs) ->
+ lists:foreach(fun(Mfa) ->
+ disassemble_function(File, Mfa),
+ file:write(File, "\n")
+ end, Fs).
+
disassemble_function(File, {_,_,_}=MFA) ->
cont_dis(File, erts_debug:disassemble(MFA), MFA).
cont_dis(_, false, _) -> ok;
cont_dis(File, {Addr,Str,MFA}, MFA) ->
- io:put_chars(File, binary_to_list(Str)),
+ ok = file:write(File, Str),
cont_dis(File, erts_debug:disassemble(Addr), MFA);
cont_dis(_, {_,_,_}, _) -> ok.
@@ -354,3 +408,90 @@ cont_dis(_, {_,_,_}, _) -> ok.
map_info(_) ->
erlang:nif_error(undef).
+
+%% Create the file "lc_graph.<pid>" with all actual lock dependencies
+%% recorded so far by the VM.
+%% Needs a debug VM or a --enable-lock-checking build; returns 'notsup' otherwise.
+lc_graph() ->
+ erts_debug:set_internal_state(available_internal_state, true),
+ erts_debug:get_internal_state(lc_graph).
+
+%% Convert "lc_graph.<pid>" file to https://www.graphviz.org dot format.
+lc_graph_to_dot(OutFile, InFile) ->
+ {ok, [LL0]} = file:consult(InFile),
+
+ [{"NO LOCK",0} | LL] = LL0,
+ Map = maps:from_list([{Id, Name} || {Name, Id, _, _} <- LL]),
+
+ case file:open(OutFile, [exclusive]) of
+ {ok, Out} ->
+ ok = file:write(Out, "digraph G {\n"),
+
+ [dot_print_lock(Out, Lck, Map) || Lck <- LL],
+
+ ok = file:write(Out, "}\n"),
+ ok = file:close(Out);
+
+ {error,eexist} ->
+ {"File already exists", OutFile}
+ end.
+
+dot_print_lock(Out, {_Name, Id, Lst, _}, Map) ->
+ [dot_print_edge(Out, From, Id, Map) || From <- Lst],
+ ok.
+
+dot_print_edge(_, 0, _, _) ->
+ ignore; % "NO LOCK"
+dot_print_edge(Out, From, To, Map) ->
+ io:format(Out, "~p -> ~p;\n", [maps:get(From,Map), maps:get(To,Map)]).
+
+
+%% Merge several "lc_graph" files into one file.
+lc_graph_merge(OutFile, InFiles) ->
+ LLs = lists:map(fun(InFile) ->
+ {ok, [LL]} = file:consult(InFile),
+ LL
+ end,
+ InFiles),
+
+ Res = lists:foldl(fun(A, B) -> lcg_merge(A, B) end,
+ hd(LLs),
+ tl(LLs)),
+ case file:open(OutFile, [exclusive]) of
+ {ok, Out} ->
+ try
+ lcg_print(Out, Res)
+ after
+ file:close(Out)
+ end,
+ ok;
+ {error, eexist} ->
+ {"File already exists", OutFile}
+ end.
+
+lcg_merge(A, B) ->
+ lists:zipwith(fun(LA, LB) -> lcg_merge_locks(LA, LB) end,
+ A, B).
+
+lcg_merge_locks(L, L) ->
+ L;
+lcg_merge_locks({Name, Id, DA, IA}, {Name, Id, DB, IB}) ->
+ Direct = lists:umerge(DA, DB),
+ Indirect = lists:umerge(IA, IB),
+ {Name, Id, Direct, Indirect -- Direct}.
+
+
+lcg_print(Out, LL) ->
+ io:format(Out, "[", []),
+ lcg_print_locks(Out, LL),
+ io:format(Out, "].\n", []),
+ ok.
+
+lcg_print_locks(Out, [{_,_}=NoLock | Rest]) ->
+ io:format(Out, "~p,\n", [NoLock]),
+ lcg_print_locks(Out, Rest);
+lcg_print_locks(Out, [LastLock]) ->
+ io:format(Out, "~w", [LastLock]);
+lcg_print_locks(Out, [Lock | Rest]) ->
+ io:format(Out, "~w,\n", [Lock]),
+ lcg_print_locks(Out, Rest).
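A hedged sketch of the intended workflow for the lock-graph helpers above; the file names are examples, and a lock-checking emulator is required:

lc_graph_example() ->
    %% dump "lc_graph.<pid>" in the current directory (or get notsup back)
    case lc_graph() of
        notsup ->
            {error, notsup};
        _ ->
            %% merge two dumps and convert the result to Graphviz dot format
            _ = lc_graph_merge("lc_graph.merged",
                               ["lc_graph.11111", "lc_graph.22222"]),
            lc_graph_to_dot("lc_graph.dot", "lc_graph.merged")
    end.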
diff --git a/lib/kernel/src/file.erl b/lib/kernel/src/file.erl
index 1007f04413..1d4e37196c 100644
--- a/lib/kernel/src/file.erl
+++ b/lib/kernel/src/file.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -69,10 +69,10 @@
%% Types that can be used from other modules -- alphabetically ordered.
-export_type([date_time/0, fd/0, file_info/0, filename/0, filename_all/0,
- io_device/0, name/0, name_all/0, posix/0]).
+ io_device/0, mode/0, name/0, name_all/0, posix/0]).
%%% Includes and defines
--include("file.hrl").
+-include("file_int.hrl").
-define(FILE_IO_SERVER_TABLE, file_io_servers).
@@ -101,14 +101,25 @@
-type deep_list() :: [char() | atom() | deep_list()].
-type name() :: string() | atom() | deep_list().
-type name_all() :: string() | atom() | deep_list() | (RawFilename :: binary()).
--type posix() :: 'eacces' | 'eagain' | 'ebadf' | 'ebusy' | 'edquot'
- | 'eexist' | 'efault' | 'efbig' | 'eintr' | 'einval'
- | 'eio' | 'eisdir' | 'eloop' | 'emfile' | 'emlink'
- | 'enametoolong'
- | 'enfile' | 'enodev' | 'enoent' | 'enomem' | 'enospc'
- | 'enotblk' | 'enotdir' | 'enotsup' | 'enxio' | 'eperm'
- | 'epipe' | 'erofs' | 'espipe' | 'esrch' | 'estale'
- | 'exdev'.
+-type posix() ::
+ 'eacces' | 'eagain' |
+ 'ebadf' | 'ebadmsg' | 'ebusy' |
+ 'edeadlk' | 'edeadlock' | 'edquot' |
+ 'eexist' |
+ 'efault' | 'efbig' | 'eftype' |
+ 'eintr' | 'einval' | 'eio' | 'eisdir' |
+ 'eloop' |
+ 'emfile' | 'emlink' | 'emultihop' |
+ 'enametoolong' | 'enfile' |
+ 'enobufs' | 'enodev' | 'enolck' | 'enolink' | 'enoent' |
+ 'enomem' | 'enospc' | 'enosr' | 'enostr' | 'enosys' |
+ 'enotblk' | 'enotdir' | 'enotsup' | 'enxio' |
+ 'eopnotsupp' | 'eoverflow' |
+ 'eperm' | 'epipe' |
+ 'erange' | 'erofs' |
+ 'espipe' | 'esrch' | 'estale' |
+ 'etxtbsy' |
+ 'exdev'.
-type date_time() :: calendar:datetime().
-type posix_file_advise() :: 'normal' | 'sequential' | 'random'
| 'no_reuse' | 'will_need' | 'dont_need'.
@@ -397,25 +408,36 @@ write_file(Name, Bin) ->
Modes :: [mode()],
Reason :: posix() | badarg | terminated | system_limit.
-write_file(Name, Bin, ModeList) when is_list(ModeList) ->
- case make_binary(Bin) of
- B when is_binary(B) ->
- case open(Name, [binary, write |
- lists:delete(binary,
- lists:delete(write, ModeList))]) of
- {ok, Handle} ->
- case write(Handle, B) of
- ok ->
- close(Handle);
- E1 ->
- _ = close(Handle),
- E1
- end;
- E2 ->
- E2
- end;
- E3 ->
- E3
+write_file(Name, IOData, ModeList) when is_list(ModeList) ->
+ case lists:member(raw, ModeList) of
+ true ->
+ %% For backwards compatibility of error messages
+ try iolist_size(IOData) of
+ _Size -> do_write_file(Name, IOData, ModeList)
+ catch
+ error:Error -> {error, Error}
+ end;
+ false ->
+ case make_binary(IOData) of
+ Bin when is_binary(Bin) ->
+ do_write_file(Name, Bin, ModeList);
+ Error ->
+ Error
+ end
+ end.
+
+do_write_file(Name, IOData, ModeList) ->
+ case open(Name, [binary, write | ModeList]) of
+ {ok, Handle} ->
+ case write(Handle, IOData) of
+ ok ->
+ close(Handle);
+ E1 ->
+ _ = close(Handle),
+ E1
+ end;
+ E2 ->
+ E2
end.
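A hedged pair of calls (file names are examples) exercising the two paths introduced above:

write_file_example() ->
    %% with raw the iodata is passed through unflattened; iolist_size/1 is
    %% used only to reproduce the old badarg behaviour for invalid input
    ok = file:write_file("/tmp/raw.txt", ["hello", $\s, <<"world">>], [raw]),
    %% without raw the data is still converted to a binary first
    ok = file:write_file("/tmp/srv.txt", ["hello", $\s, <<"world">>], []).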
%% Obsolete, undocumented, local node only, don't use!.
@@ -443,41 +465,23 @@ raw_write_file_info(Name, #file_info{} = Info) ->
Reason :: posix() | badarg | system_limit.
open(Item, ModeList) when is_list(ModeList) ->
- case lists:member(raw, ModeList) of
- %% Raw file, use ?PRIM_FILE to handle this file
- true ->
+ case {lists:member(raw, ModeList), lists:member(ram, ModeList)} of
+ {false, false} ->
+ %% File server file
Args = [file_name(Item) | ModeList],
case check_args(Args) of
ok ->
[FileName | _] = Args,
- %% We rely on the returned Handle (in {ok, Handle})
- %% being a pid() or a #file_descriptor{}
- ?PRIM_FILE:open(FileName, ModeList);
+ call(open, [FileName, ModeList]);
Error ->
Error
- end;
- false ->
- case lists:member(ram, ModeList) of
- %% RAM file, use ?RAM_FILE to handle this file
- true ->
- case check_args(ModeList) of
- ok ->
- ?RAM_FILE:open(Item, ModeList);
- Error ->
- Error
- end;
- %% File server file
- false ->
- Args = [file_name(Item) | ModeList],
- case check_args(Args) of
- ok ->
- [FileName | _] = Args,
- call(open, [FileName, ModeList]);
- Error ->
- Error
- end
- end
+ end;
+ {true, _Either} ->
+ raw_file_io:open(file_name(Item), ModeList);
+ {false, true} ->
+ ram_file:open(Item, ModeList)
end;
+
%% Old obsolete mode specification in atom or 2-tuple format
open(Item, Mode) ->
open(Item, mode_list(Mode)).
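A hedged sketch of the three dispatch targets in the rewritten open/2; paths and contents are examples:

open_example() ->
    %% raw (with or without ram) goes to raw_file_io
    {ok, Raw} = file:open("/tmp/data.bin", [raw, write, binary]),
    ok = file:close(Raw),
    %% ram files are handled by ram_file, with Item as the initial contents
    {ok, Ram} = file:open(<<"initial contents">>, [ram, read, binary]),
    ok = file:close(Ram),
    %% everything else goes through the file server
    {ok, Srv} = file:open("/tmp/data.txt", [write]),
    file:close(Srv).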
@@ -1227,7 +1231,8 @@ change_time(Name, {{AY, AM, AD}, {AH, AMin, ASec}}=Atime,
%% Send data using sendfile
%%
--define(MAX_CHUNK_SIZE, (1 bsl 20)*20). %% 20 MB, has to fit in primary memory
+%% 1 MB, Windows seems to behave badly if it is much larger than this
+-define(MAX_CHUNK_SIZE, (1 bsl 20)).
-spec sendfile(RawFile, Socket, Offset, Bytes, Opts) ->
{'ok', non_neg_integer()} | {'error', inet:posix() |
@@ -1242,15 +1247,18 @@ sendfile(File, _Sock, _Offet, _Bytes, _Opts) when is_pid(File) ->
sendfile(File, Sock, Offset, Bytes, []) ->
sendfile(File, Sock, Offset, Bytes, ?MAX_CHUNK_SIZE, [], [], []);
sendfile(File, Sock, Offset, Bytes, Opts) ->
- ChunkSize0 = proplists:get_value(chunk_size, Opts, ?MAX_CHUNK_SIZE),
- ChunkSize = if ChunkSize0 > ?MAX_CHUNK_SIZE ->
- ?MAX_CHUNK_SIZE;
- true -> ChunkSize0
- end,
- %% Support for headers, trailers and options has been removed because the
- %% Darwin and BSD API for using it does not play nice with
- %% non-blocking sockets. See unix_efile.c for more info.
- sendfile(File, Sock, Offset, Bytes, ChunkSize, [], [], Opts).
+ try proplists:get_value(chunk_size, Opts, ?MAX_CHUNK_SIZE) of
+ ChunkSize0 when is_integer(ChunkSize0) ->
+ ChunkSize = erlang:min(ChunkSize0, ?MAX_CHUNK_SIZE),
+ %% Support for headers, trailers and options has been removed
+ %% because the Darwin and BSD API for using it does not play nice
+ %% with non-blocking sockets. See unix_efile.c for more info.
+ sendfile(File, Sock, Offset, Bytes, ChunkSize, [], [], Opts);
+ _Other ->
+ {error, badarg}
+ catch
+ error:_ -> {error, badarg}
+ end.
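A hedged call showing the option handling above; RawFile and Sock are assumed to be a raw file handle and a connected socket:

sendfile_example(RawFile, Sock) ->
    %% a chunk_size above 1 MB is capped at ?MAX_CHUNK_SIZE, and a
    %% non-integer value now gives {error, badarg} instead of crashing
    file:sendfile(RawFile, Sock, 0, 0, [{chunk_size, 8 bsl 20}]).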
%% sendfile/2
-spec sendfile(Filename, Socket) ->
@@ -1385,8 +1393,8 @@ eval_stream2({ok,Form,EndLine}, Fd, H, Last, E, Bs0) ->
try erl_eval:exprs(Form, Bs0) of
{value,V,Bs} ->
eval_stream(Fd, H, EndLine, {V}, E, Bs)
- catch Class:Reason ->
- Error = {EndLine,?MODULE,{Class,Reason,erlang:get_stacktrace()}},
+ catch Class:Reason:StackTrace ->
+ Error = {EndLine,?MODULE,{Class,Reason,StackTrace}},
eval_stream(Fd, H, EndLine, Last, [Error|E], Bs0)
end;
eval_stream2({error,What,EndLine}, Fd, H, Last, E, Bs) ->
@@ -1412,7 +1420,7 @@ path_open_first([Path|Rest], Name, Mode, LastError) ->
case open(FileName, Mode) of
{ok, Fd} ->
{ok, Fd, FileName};
- {error, enoent} ->
+ {error, Reason} when Reason =:= enoent; Reason =:= enotdir ->
path_open_first(Rest, Name, Mode, LastError);
Error ->
Error
diff --git a/lib/kernel/src/file_int.hrl b/lib/kernel/src/file_int.hrl
new file mode 100644
index 0000000000..bafc330c04
--- /dev/null
+++ b/lib/kernel/src/file_int.hrl
@@ -0,0 +1,33 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%% Internal definitions for the 'file' module and friends.
+%%
+
+-ifndef(FILE_INTERNAL_HRL_).
+-define(FILE_INTERNAL_HRL_, 1).
+
+-include("file.hrl").
+
+-define(CALL_FD(Fd, Method, Args),
+ apply(Fd#file_descriptor.module, Method, [Fd | Args])).
+
+-endif.
diff --git a/lib/kernel/src/file_io_server.erl b/lib/kernel/src/file_io_server.erl
index deb7b315b1..34d5497a4a 100644
--- a/lib/kernel/src/file_io_server.erl
+++ b/lib/kernel/src/file_io_server.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2015. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -28,7 +28,8 @@
-record(state, {handle,owner,mref,buf,read_mode,unic}).
--define(PRIM_FILE, prim_file).
+-include("file_int.hrl").
+
-define(READ_SIZE_LIST, 128).
-define(READ_SIZE_BINARY, (8*1024)).
@@ -67,8 +68,9 @@ do_start(Spawn, Owner, FileName, ModeList) ->
erlang:dt_restore_tag(Utag),
%% process_flag(trap_exit, true),
case parse_options(ModeList) of
- {ReadMode, UnicodeMode, Opts} ->
- case ?PRIM_FILE:open(FileName, Opts) of
+ {ReadMode, UnicodeMode, Opts0} ->
+ Opts = maybe_add_read_ahead(ReadMode, Opts0),
+ case raw_file_io:open(FileName, [raw | Opts]) of
{error, Reason} = Error ->
Self ! {Ref, Error},
exit(Reason);
@@ -157,6 +159,24 @@ valid_enc({utf32,little}) ->
valid_enc(_Other) ->
{error,badarg}.
+%% Add a small read_ahead buffer if the file is opened for reading only,
+%% in list mode, and no read_ahead option is already given.
+maybe_add_read_ahead(binary, Opts) ->
+ Opts;
+maybe_add_read_ahead(list, Opts) ->
+ P = fun(read_ahead) -> true;
+ ({read_ahead,_}) -> true;
+ (append) -> true;
+ (exclusive) -> true;
+ (write) -> true;
+ (_) -> false
+ end,
+ case lists:any(P, Opts) of
+ false ->
+ [{read_ahead, 4096}|Opts];
+ true ->
+ Opts
+ end.
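A hedged illustration of the helper above (values taken from the clauses themselves):

read_ahead_example() ->
    %% plain read in list mode gets a small read_ahead buffer
    [{read_ahead, 4096}, read] = maybe_add_read_ahead(list, [read]),
    %% binary mode, writing modes, or an explicit read_ahead leave it alone
    [read, read_ahead] = maybe_add_read_ahead(list, [read, read_ahead]),
    [read] = maybe_add_read_ahead(binary, [read]),
    ok.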
server_loop(#state{mref = Mref} = State) ->
receive
@@ -205,7 +225,7 @@ io_reply(From, ReplyAs, Reply) ->
file_request({advise,Offset,Length,Advise},
#state{handle=Handle}=State) ->
- case ?PRIM_FILE:advise(Handle, Offset, Length, Advise) of
+ case ?CALL_FD(Handle, advise, [Offset, Length, Advise]) of
{error,Reason}=Reply ->
{stop,Reason,Reply,State};
Reply ->
@@ -213,7 +233,7 @@ file_request({advise,Offset,Length,Advise},
end;
file_request({allocate, Offset, Length},
#state{handle = Handle} = State) ->
- Reply = ?PRIM_FILE:allocate(Handle, Offset, Length),
+ Reply = ?CALL_FD(Handle, allocate, [Offset, Length]),
{reply, Reply, State};
file_request({pread,At,Sz}, State)
when At =:= cur;
@@ -256,7 +276,7 @@ file_request({pwrite,At,Data},
end;
file_request(datasync,
#state{handle=Handle}=State) ->
- case ?PRIM_FILE:datasync(Handle) of
+ case ?CALL_FD(Handle, datasync, []) of
{error,Reason}=Reply ->
{stop,Reason,Reply,State};
Reply ->
@@ -264,7 +284,7 @@ file_request(datasync,
end;
file_request(sync,
#state{handle=Handle}=State) ->
- case ?PRIM_FILE:sync(Handle) of
+ case ?CALL_FD(Handle, sync, []) of
{error,Reason}=Reply ->
{stop,Reason,Reply,State};
Reply ->
@@ -272,7 +292,7 @@ file_request(sync,
end;
file_request(close,
#state{handle=Handle}=State) ->
- case ?PRIM_FILE:close(Handle) of
+ case ?CALL_FD(Handle, close, []) of
{error,Reason}=Reply ->
{stop,Reason,Reply,State#state{buf= <<>>}};
Reply ->
@@ -288,7 +308,7 @@ file_request({position,At},
end;
file_request(truncate,
#state{handle=Handle}=State) ->
- case ?PRIM_FILE:truncate(Handle) of
+ case ?CALL_FD(Handle, truncate, []) of
{error,Reason}=Reply ->
{stop,Reason,Reply,State#state{buf= <<>>}};
Reply ->
@@ -398,7 +418,7 @@ io_request_loop([Request|Tail],
%%
put_chars(Chars, latin1, #state{handle=Handle, unic=latin1}=State) ->
NewState = State#state{buf = <<>>},
- case ?PRIM_FILE:write(Handle, Chars) of
+ case ?CALL_FD(Handle, write, [Chars]) of
{error,Reason}=Reply ->
{stop,Reason,Reply,NewState};
Reply ->
@@ -408,7 +428,7 @@ put_chars(Chars, InEncoding, #state{handle=Handle, unic=OutEncoding}=State) ->
NewState = State#state{buf = <<>>},
case unicode:characters_to_binary(Chars,InEncoding,OutEncoding) of
Bin when is_binary(Bin) ->
- case ?PRIM_FILE:write(Handle, Bin) of
+ case ?CALL_FD(Handle, write, [Bin]) of
{error,Reason}=Reply ->
{stop,Reason,Reply,NewState};
Reply ->
@@ -422,7 +442,7 @@ put_chars(Chars, InEncoding, #state{handle=Handle, unic=OutEncoding}=State) ->
get_line(S, {<<>>, Cont}, OutEnc,
#state{handle=Handle, read_mode=Mode, unic=InEnc}=State) ->
- case ?PRIM_FILE:read(Handle, read_size(Mode)) of
+ case ?CALL_FD(Handle, read, [read_size(Mode)]) of
{ok,Bin} ->
get_line(S, convert_enc([Cont, Bin], InEnc, OutEnc), OutEnc, State);
eof ->
@@ -472,7 +492,7 @@ get_chars(N, OutEnc,#state{handle=Handle,buf=Buf,read_mode=ReadMode,unic=latin1}
BufSize = byte_size(Buf),
NeedSize = N-BufSize,
Size = erlang:max(NeedSize, ?READ_SIZE_BINARY),
- case ?PRIM_FILE:read(Handle, Size) of
+ case ?CALL_FD(Handle, read, [Size]) of
{ok, B} ->
if BufSize+byte_size(B) < N ->
std_reply(cat(Buf, B, ReadMode,latin1,OutEnc), State);
@@ -504,7 +524,7 @@ get_chars(N, OutEnc,#state{handle=Handle,buf=Buf,read_mode=ReadMode,unic=InEncod
%% Need more, Try to read 4*needed in bytes...
NeedSize = (N - BufCount) * 4,
Size = erlang:max(NeedSize, ?READ_SIZE_BINARY),
- case ?PRIM_FILE:read(Handle, Size) of
+ case ?CALL_FD(Handle, read, [Size]) of
{ok, B} ->
NewBuf = list_to_binary([Buf,B]),
{NewCount,NewSplit} = count_and_find(NewBuf,N,InEncoding),
@@ -544,7 +564,7 @@ get_chars(Mod, Func, XtraArg, OutEnc, #state{buf=Buf}=State) ->
get_chars_empty(Mod, Func, XtraArg, S, latin1,
#state{handle=Handle,read_mode=ReadMode, unic=latin1}=State) ->
- case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of
+ case ?CALL_FD(Handle, read, [read_size(ReadMode)]) of
{ok,Bin} ->
get_chars_apply(Mod, Func, XtraArg, S, latin1, State, Bin);
eof ->
@@ -554,7 +574,7 @@ get_chars_empty(Mod, Func, XtraArg, S, latin1,
end;
get_chars_empty(Mod, Func, XtraArg, S, OutEnc,
#state{handle=Handle,read_mode=ReadMode}=State) ->
- case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of
+ case ?CALL_FD(Handle, read, [read_size(ReadMode)]) of
{ok,Bin} ->
get_chars_apply(Mod, Func, XtraArg, S, OutEnc, State, Bin);
eof ->
@@ -564,7 +584,7 @@ get_chars_empty(Mod, Func, XtraArg, S, OutEnc,
end.
get_chars_notempty(Mod, Func, XtraArg, S, OutEnc,
#state{handle=Handle,read_mode=ReadMode,buf = B}=State) ->
- case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of
+ case ?CALL_FD(Handle, read, [read_size(ReadMode)]) of
{ok,Bin} ->
get_chars_apply(Mod, Func, XtraArg, S, OutEnc, State, list_to_binary([B,Bin]));
eof ->
@@ -918,13 +938,10 @@ cbv({utf32,little},_) ->
%% Compensates the file-descriptor module's position/2 for the number of
%% bytes we have buffered
position(Handle, At, Buf) ->
- ?PRIM_FILE:position(
- Handle,
- case At of
- cur ->
- {cur, -byte_size(Buf)};
- {cur, Offs} ->
- {cur, Offs-byte_size(Buf)};
- _ ->
- At
- end).
+ SeekTo =
+ case At of
+ {cur, Offs} -> {cur, Offs-byte_size(Buf)};
+ cur -> {cur, -byte_size(Buf)};
+ _ -> At
+ end,
+ ?CALL_FD(Handle, position, [SeekTo]).
diff --git a/lib/kernel/src/file_server.erl b/lib/kernel/src/file_server.erl
index 6df6be7d06..29eaa23375 100644
--- a/lib/kernel/src/file_server.erl
+++ b/lib/kernel/src/file_server.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -63,7 +63,7 @@ stop() ->
%%% Callback functions from gen_server
%%%----------------------------------------------------------------------
--type state() :: port(). % Internal type
+-type state() :: term(). % Internal type
%%----------------------------------------------------------------------
%% Func: init/1
@@ -73,18 +73,12 @@ stop() ->
%% {stop, Reason}
%%----------------------------------------------------------------------
--spec init([]) -> {'ok', state()} | {'stop', term()}.
+-spec init([]) -> {'ok', state()}.
init([]) ->
process_flag(trap_exit, true),
- case ?PRIM_FILE:start() of
- {ok, Handle} ->
- ?FILE_IO_SERVER_TABLE =
- ets:new(?FILE_IO_SERVER_TABLE, [named_table]),
- {ok, Handle};
- {error, Reason} ->
- {stop, Reason}
- end.
+ ?FILE_IO_SERVER_TABLE = ets:new(?FILE_IO_SERVER_TABLE, [named_table]),
+ {ok, undefined}.
%%----------------------------------------------------------------------
%% Func: handle_call/3
@@ -101,7 +95,7 @@ init([]) ->
{'reply', 'eof' | 'ok' | {'error', term()} | {'ok', term()}, state()} |
{'stop', 'normal', 'stopped', state()}.
-handle_call({open, Name, ModeList}, {Pid, _Tag} = _From, Handle)
+handle_call({open, Name, ModeList}, {Pid, _Tag} = _From, State)
when is_list(ModeList) ->
Child = ?FILE_IO_SERVER:start_link(Pid, Name, ModeList),
case Child of
@@ -110,78 +104,78 @@ handle_call({open, Name, ModeList}, {Pid, _Tag} = _From, Handle)
_ ->
ok
end,
- {reply, Child, Handle};
+ {reply, Child, State};
-handle_call({open, _Name, _Mode}, _From, Handle) ->
- {reply, {error, einval}, Handle};
+handle_call({open, _Name, _Mode}, _From, State) ->
+ {reply, {error, einval}, State};
-handle_call({read_file, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_file(Name), Handle};
+handle_call({read_file, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:read_file(Name), State};
-handle_call({write_file, Name, Bin}, _From, Handle) ->
- {reply, ?PRIM_FILE:write_file(Name, Bin), Handle};
+handle_call({write_file, Name, Bin}, _From, State) ->
+ {reply, ?PRIM_FILE:write_file(Name, Bin), State};
-handle_call({set_cwd, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:set_cwd(Handle, Name), Handle};
+handle_call({set_cwd, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:set_cwd(Name), State};
-handle_call({delete, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:delete(Handle, Name), Handle};
+handle_call({delete, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:delete(Name), State};
-handle_call({rename, Fr, To}, _From, Handle) ->
- {reply, ?PRIM_FILE:rename(Handle, Fr, To), Handle};
+handle_call({rename, Fr, To}, _From, State) ->
+ {reply, ?PRIM_FILE:rename(Fr, To), State};
-handle_call({make_dir, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:make_dir(Handle, Name), Handle};
+handle_call({make_dir, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:make_dir(Name), State};
-handle_call({del_dir, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:del_dir(Handle, Name), Handle};
+handle_call({del_dir, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:del_dir(Name), State};
-handle_call({list_dir, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:list_dir(Handle, Name), Handle};
-handle_call({list_dir_all, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:list_dir_all(Handle, Name), Handle};
+handle_call({list_dir, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:list_dir(Name), State};
+handle_call({list_dir_all, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:list_dir_all(Name), State};
-handle_call(get_cwd, _From, Handle) ->
- {reply, ?PRIM_FILE:get_cwd(Handle), Handle};
-handle_call({get_cwd}, _From, Handle) ->
- {reply, ?PRIM_FILE:get_cwd(Handle), Handle};
-handle_call({get_cwd, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:get_cwd(Handle, Name), Handle};
+handle_call(get_cwd, _From, State) ->
+ {reply, ?PRIM_FILE:get_cwd(), State};
+handle_call({get_cwd}, _From, State) ->
+ {reply, ?PRIM_FILE:get_cwd(), State};
+handle_call({get_cwd, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:get_cwd(Name), State};
-handle_call({read_file_info, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_file_info(Handle, Name), Handle};
+handle_call({read_file_info, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:read_file_info(Name), State};
-handle_call({read_file_info, Name, Opts}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_file_info(Handle, Name, Opts), Handle};
+handle_call({read_file_info, Name, Opts}, _From, State) ->
+ {reply, ?PRIM_FILE:read_file_info(Name, Opts), State};
-handle_call({altname, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:altname(Handle, Name), Handle};
+handle_call({altname, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:altname(Name), State};
-handle_call({write_file_info, Name, Info}, _From, Handle) ->
- {reply, ?PRIM_FILE:write_file_info(Handle, Name, Info), Handle};
+handle_call({write_file_info, Name, Info}, _From, State) ->
+ {reply, ?PRIM_FILE:write_file_info(Name, Info), State};
-handle_call({write_file_info, Name, Info, Opts}, _From, Handle) ->
- {reply, ?PRIM_FILE:write_file_info(Handle, Name, Info, Opts), Handle};
+handle_call({write_file_info, Name, Info, Opts}, _From, State) ->
+ {reply, ?PRIM_FILE:write_file_info(Name, Info, Opts), State};
-handle_call({read_link_info, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_link_info(Handle, Name), Handle};
+handle_call({read_link_info, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:read_link_info(Name), State};
-handle_call({read_link_info, Name, Opts}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_link_info(Handle, Name, Opts), Handle};
+handle_call({read_link_info, Name, Opts}, _From, State) ->
+ {reply, ?PRIM_FILE:read_link_info(Name, Opts), State};
-handle_call({read_link, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_link(Handle, Name), Handle};
-handle_call({read_link_all, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_link_all(Handle, Name), Handle};
+handle_call({read_link, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:read_link(Name), State};
+handle_call({read_link_all, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:read_link_all(Name), State};
-handle_call({make_link, Old, New}, _From, Handle) ->
- {reply, ?PRIM_FILE:make_link(Handle, Old, New), Handle};
+handle_call({make_link, Old, New}, _From, State) ->
+ {reply, ?PRIM_FILE:make_link(Old, New), State};
-handle_call({make_symlink, Old, New}, _From, Handle) ->
- {reply, ?PRIM_FILE:make_symlink(Handle, Old, New), Handle};
+handle_call({make_symlink, Old, New}, _From, State) ->
+ {reply, ?PRIM_FILE:make_symlink(Old, New), State};
handle_call({copy, SourceName, SourceOpts, DestName, DestOpts, Length},
- _From, Handle) ->
+ _From, State) ->
Reply =
case ?PRIM_FILE:open(SourceName, [read, binary | SourceOpts]) of
{ok, Source} ->
@@ -201,14 +195,14 @@ handle_call({copy, SourceName, SourceOpts, DestName, DestOpts, Length},
{error, _} = Error ->
Error
end,
- {reply, Reply, Handle};
+ {reply, Reply, State};
-handle_call(stop, _From, Handle) ->
- {stop, normal, stopped, Handle};
+handle_call(stop, _From, State) ->
+ {stop, normal, stopped, State};
-handle_call(Request, From, Handle) ->
- error_logger:error_msg("handle_call(~p, ~p, _)", [Request, From]),
- {noreply, Handle}.
+handle_call(Request, From, State) ->
+ error_logger:error_msg("handle_call(~tp, ~tp, _)", [Request, From]),
+ {noreply, State}.
%%----------------------------------------------------------------------
%% Func: handle_cast/2
@@ -220,7 +214,7 @@ handle_call(Request, From, Handle) ->
-spec handle_cast(term(), state()) -> {'noreply', state()}.
handle_cast(Msg, State) ->
- error_logger:error_msg("handle_cast(~p, _)", [Msg]),
+ error_logger:error_msg("handle_cast(~tp, _)", [Msg]),
{noreply, State}.
%%----------------------------------------------------------------------
@@ -231,19 +225,14 @@ handle_cast(Msg, State) ->
%%----------------------------------------------------------------------
-spec handle_info(term(), state()) ->
- {'noreply', state()} | {'stop', 'normal', state()}.
+ {'noreply', state()}.
-handle_info({'EXIT', Pid, _Reason}, Handle) when is_pid(Pid) ->
+handle_info({'EXIT', Pid, _Reason}, State) when is_pid(Pid) ->
ets:delete(?FILE_IO_SERVER_TABLE, Pid),
- {noreply, Handle};
-
-handle_info({'EXIT', Handle, _Reason}, Handle) ->
- error_logger:error_msg("Port controlling ~w terminated in ~w",
- [?FILE_SERVER, ?MODULE]),
- {stop, normal, Handle};
+ {noreply, State};
handle_info(Info, State) ->
- error_logger:error_msg("handle_Info(~p, _)", [Info]),
+ error_logger:error_msg("handle_Info(~tp, _)", [Info]),
{noreply, State}.
%%----------------------------------------------------------------------
@@ -254,8 +243,8 @@ handle_info(Info, State) ->
-spec terminate(term(), state()) -> 'ok'.
-terminate(_Reason, Handle) ->
- ?PRIM_FILE:stop(Handle).
+terminate(_Reason, _State) ->
+ ok.
%%----------------------------------------------------------------------
%% Func: code_change/3
diff --git a/lib/kernel/src/gen_sctp.erl b/lib/kernel/src/gen_sctp.erl
index a47535b2f8..d893d44079 100644
--- a/lib/kernel/src/gen_sctp.erl
+++ b/lib/kernel/src/gen_sctp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -66,7 +66,12 @@
{sctp_set_peer_primary_addr, #sctp_setpeerprim{}} |
{sctp_status, #sctp_status{}} |
{sndbuf, non_neg_integer()} |
- {tos, non_neg_integer()}.
+ {tos, non_neg_integer()} |
+ {tclass, non_neg_integer()} |
+ {ttl, non_neg_integer()} |
+ {recvtos, boolean()} |
+ {recvtclass, boolean()} |
+ {recvttl, boolean()}.
-type option_name() ::
active |
buffer |
@@ -97,7 +102,12 @@
sctp_set_peer_primary_addr |
sctp_status |
sndbuf |
- tos.
+ tos |
+ tclass |
+ ttl |
+ recvtos |
+ recvtclass |
+ recvttl.
-type sctp_socket() :: port().
-export_type([assoc_id/0, option/0, option_name/0, sctp_socket/0]).
@@ -118,14 +128,16 @@ open() ->
| inet:address_family()
| {port,Port}
| {type,SockType}
+ | {netns, file:filename_all()}
+ | {bind_to_device, binary()}
| option(),
IP :: inet:ip_address() | any | loopback,
Port :: inet:port_number(),
SockType :: seqpacket | stream,
Socket :: sctp_socket().
-open(Opts) when is_list(Opts) ->
- Mod = mod(Opts, undefined),
+open(Opts0) when is_list(Opts0) ->
+ {Mod, Opts} = inet:sctp_module(Opts0),
case Mod:open(Opts) of
{error,badarg} ->
erlang:error(badarg, [Opts]);
@@ -363,7 +375,7 @@ send(S, AssocChange, Stream, Data) ->
Socket :: sctp_socket(),
FromIP :: inet:ip_address(),
FromPort :: inet:port_number(),
- AncData :: [#sctp_sndrcvinfo{}],
+ AncData :: [#sctp_sndrcvinfo{} | inet:ancillary_data()],
Data :: binary() | string() | #sctp_sndrcvinfo{}
| #sctp_assoc_change{} | #sctp_paddr_change{}
| #sctp_adaptation_event{},
@@ -380,7 +392,7 @@ recv(S) ->
Timeout :: timeout(),
FromIP :: inet:ip_address(),
FromPort :: inet:port_number(),
- AncData :: [#sctp_sndrcvinfo{}],
+ AncData :: [#sctp_sndrcvinfo{} | inet:ancillary_data()],
Data :: binary() | string() | #sctp_sndrcvinfo{}
| #sctp_assoc_change{} | #sctp_paddr_change{}
| #sctp_adaptation_event{},
@@ -439,38 +451,9 @@ error_string(X) ->
-spec controlling_process(Socket, Pid) -> ok | {error, Reason} when
Socket :: sctp_socket(),
Pid :: pid(),
- Reason :: closed | not_owner | inet:posix().
+ Reason :: closed | not_owner | badarg | inet:posix().
controlling_process(S, Pid) when is_port(S), is_pid(Pid) ->
inet:udp_controlling_process(S, Pid);
controlling_process(S, Pid) ->
erlang:error(badarg, [S,Pid]).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Utilites
-%%
-
-%% Get the SCTP module, but IPv6 address overrides default IPv4
-mod(Address) ->
- case inet_db:sctp_module() of
- inet_sctp when tuple_size(Address) =:= 8 ->
- inet6_sctp;
- Mod ->
- Mod
- end.
-
-%% Get the SCTP module, but option sctp_module|inet|inet6 overrides
-mod([{sctp_module,Mod}|_], _Address) ->
- Mod;
-mod([inet|_], _Address) ->
- inet_sctp;
-mod([inet6|_], _Address) ->
- inet6_sctp;
-mod([{ip, Address}|Opts], _) ->
- mod(Opts, Address);
-mod([{ifaddr, Address}|Opts], _) ->
- mod(Opts, Address);
-mod([_|Opts], Address) ->
- mod(Opts, Address);
-mod([], Address) ->
- mod(Address).
diff --git a/lib/kernel/src/gen_tcp.erl b/lib/kernel/src/gen_tcp.erl
index 8cb2a725e8..7f7833ec23 100644
--- a/lib/kernel/src/gen_tcp.erl
+++ b/lib/kernel/src/gen_tcp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -62,7 +62,14 @@
{show_econnreset, boolean()} |
{sndbuf, non_neg_integer()} |
{tos, non_neg_integer()} |
+ {tclass, non_neg_integer()} |
+ {ttl, non_neg_integer()} |
+ {recvtos, boolean()} |
+ {recvtclass, boolean()} |
+ {recvttl, boolean()} |
{ipv6_v6only, boolean()}.
+-type pktoptions_value() ::
+ {pktoptions, inet:ancillary_data()}.
-type option_name() ::
active |
buffer |
@@ -81,6 +88,7 @@
nodelay |
packet |
packet_size |
+ pktoptions |
priority |
{raw,
Protocol :: non_neg_integer(),
@@ -94,35 +102,45 @@
show_econnreset |
sndbuf |
tos |
+ tclass |
+ ttl |
+ recvtos |
+ recvtclass |
+ recvttl |
+ pktoptions |
ipv6_v6only.
-type connect_option() ::
- {ip, inet:ip_address()} |
+ {ip, inet:socket_address()} |
{fd, Fd :: non_neg_integer()} |
- {ifaddr, inet:ip_address()} |
+ {ifaddr, inet:socket_address()} |
inet:address_family() |
{port, inet:port_number()} |
{tcp_module, module()} |
+ {netns, file:filename_all()} |
+ {bind_to_device, binary()} |
option().
-type listen_option() ::
- {ip, inet:ip_address()} |
+ {ip, inet:socket_address()} |
{fd, Fd :: non_neg_integer()} |
- {ifaddr, inet:ip_address()} |
+ {ifaddr, inet:socket_address()} |
inet:address_family() |
{port, inet:port_number()} |
{backlog, B :: non_neg_integer()} |
{tcp_module, module()} |
+ {netns, file:filename_all()} |
+ {bind_to_device, binary()} |
option().
-type socket() :: port().
-export_type([option/0, option_name/0, connect_option/0, listen_option/0,
- socket/0]).
+ socket/0, pktoptions_value/0]).
%%
%% Connect a socket
%%
-spec connect(Address, Port, Options) -> {ok, Socket} | {error, Reason} when
- Address :: inet:ip_address() | inet:hostname(),
+ Address :: inet:socket_address() | inet:hostname(),
Port :: inet:port_number(),
Options :: [connect_option()],
Socket :: socket(),
@@ -133,7 +151,7 @@ connect(Address, Port, Opts) ->
-spec connect(Address, Port, Options, Timeout) ->
{ok, Socket} | {error, Reason} when
- Address :: inet:ip_address() | inet:hostname(),
+ Address :: inet:socket_address() | inet:hostname(),
Port :: inet:port_number(),
Options :: [connect_option()],
Timeout :: timeout(),
@@ -151,8 +169,8 @@ connect(Address, Port, Opts, Time) ->
Error -> Error
end.
-connect1(Address,Port,Opts,Timer) ->
- Mod = mod(Opts, Address),
+connect1(Address, Port, Opts0, Timer) ->
+ {Mod, Opts} = inet:tcp_module(Opts0, Address),
case Mod:getaddrs(Address,Timer) of
{ok,IPs} ->
case Mod:getserv(Port) of
@@ -185,8 +203,8 @@ try_connect([], _Port, _Opts, _Timer, _Mod, Err) ->
ListenSocket :: socket(),
Reason :: system_limit | inet:posix().
-listen(Port, Opts) ->
- Mod = mod(Opts, undefined),
+listen(Port, Opts0) ->
+ {Mod, Opts} = inet:tcp_module(Opts0),
case Mod:getserv(Port) of
{ok,TP} ->
Mod:listen(TP, Opts);
@@ -320,7 +338,7 @@ unrecv(S, Data) when is_port(S) ->
-spec controlling_process(Socket, Pid) -> ok | {error, Reason} when
Socket :: socket(),
Pid :: pid(),
- Reason :: closed | not_owner | inet:posix().
+ Reason :: closed | not_owner | badarg | inet:posix().
controlling_process(S, NewOwner) ->
case inet_db:lookup_socket(S) of
@@ -335,32 +353,6 @@ controlling_process(S, NewOwner) ->
%%
%% Create a port/socket from a file descriptor
%%
-fdopen(Fd, Opts) ->
- Mod = mod(Opts, undefined),
+fdopen(Fd, Opts0) ->
+ {Mod, Opts} = inet:tcp_module(Opts0),
Mod:fdopen(Fd, Opts).
-
-%% Get the tcp_module, but IPv6 address overrides default IPv4
-mod(Address) ->
- case inet_db:tcp_module() of
- inet_tcp when tuple_size(Address) =:= 8 ->
- inet6_tcp;
- Mod ->
- Mod
- end.
-
-%% Get the tcp_module, but option tcp_module|inet|inet6 overrides
-mod([{tcp_module,Mod}|_], _Address) ->
- Mod;
-mod([inet|_], _Address) ->
- inet_tcp;
-mod([inet6|_], _Address) ->
- inet6_tcp;
-mod([{ip, Address}|Opts], _) ->
- mod(Opts, Address);
-mod([{ifaddr, Address}|Opts], _) ->
- mod(Opts, Address);
-mod([_|Opts], Address) ->
- mod(Opts, Address);
-mod([], Address) ->
- mod(Address).
-
diff --git a/lib/kernel/src/gen_udp.erl b/lib/kernel/src/gen_udp.erl
index 6698d5f0fa..d6e8652e77 100644
--- a/lib/kernel/src/gen_udp.erl
+++ b/lib/kernel/src/gen_udp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -51,6 +51,11 @@
{reuseaddr, boolean()} |
{sndbuf, non_neg_integer()} |
{tos, non_neg_integer()} |
+ {tclass, non_neg_integer()} |
+ {ttl, non_neg_integer()} |
+ {recvtos, boolean()} |
+ {recvtclass, boolean()} |
+ {recvttl, boolean()} |
{ipv6_v6only, boolean()}.
-type option_name() ::
active |
@@ -76,6 +81,12 @@
reuseaddr |
sndbuf |
tos |
+ tclass |
+ ttl |
+ recvtos |
+ recvtclass |
+ recvttl |
+ pktoptions |
ipv6_v6only.
-type socket() :: port().
@@ -92,18 +103,20 @@ open(Port) ->
-spec open(Port, Opts) -> {ok, Socket} | {error, Reason} when
Port :: inet:port_number(),
Opts :: [Option],
- Option :: {ip, inet:ip_address()}
+ Option :: {ip, inet:socket_address()}
| {fd, non_neg_integer()}
- | {ifaddr, inet:ip_address()}
+ | {ifaddr, inet:socket_address()}
| inet:address_family()
| {port, inet:port_number()}
+ | {netns, file:filename_all()}
+ | {bind_to_device, binary()}
| option(),
Socket :: socket(),
Reason :: inet:posix().
-open(Port, Opts) ->
- Mod = mod(Opts, undefined),
- {ok,UP} = Mod:getserv(Port),
+open(Port, Opts0) ->
+ {Mod, Opts} = inet:udp_module(Opts0),
+ {ok, UP} = Mod:getserv(Port),
Mod:open(UP, Opts).
-spec close(Socket) -> ok when
@@ -114,7 +127,7 @@ close(S) ->
-spec send(Socket, Address, Port, Packet) -> ok | {error, Reason} when
Socket :: socket(),
- Address :: inet:ip_address() | inet:hostname(),
+ Address :: inet:socket_address() | inet:hostname(),
Port :: inet:port_number(),
Packet :: iodata(),
Reason :: not_owner | inet:posix().
@@ -145,11 +158,13 @@ send(S, Packet) when is_port(S) ->
end.
-spec recv(Socket, Length) ->
- {ok, {Address, Port, Packet}} | {error, Reason} when
+ {ok, RecvData} | {error, Reason} when
Socket :: socket(),
Length :: non_neg_integer(),
- Address :: inet:ip_address(),
+ RecvData :: {Address, Port, Packet} | {Address, Port, AncData, Packet},
+ Address :: inet:ip_address() | inet:returned_non_ip_address(),
Port :: inet:port_number(),
+ AncData :: inet:ancillary_data(),
Packet :: string() | binary(),
Reason :: not_owner | inet:posix().
@@ -162,12 +177,14 @@ recv(S,Len) when is_port(S), is_integer(Len) ->
end.
-spec recv(Socket, Length, Timeout) ->
- {ok, {Address, Port, Packet}} | {error, Reason} when
+ {ok, RecvData} | {error, Reason} when
Socket :: socket(),
Length :: non_neg_integer(),
Timeout :: timeout(),
- Address :: inet:ip_address(),
+ RecvData :: {Address, Port, Packet} | {Address, Port, AncData, Packet},
+ Address :: inet:ip_address() | inet:returned_non_ip_address(),
Port :: inet:port_number(),
+ AncData :: inet:ancillary_data(),
Packet :: string() | binary(),
Reason :: not_owner | inet:posix().
@@ -195,7 +212,7 @@ connect(S, Address, Port) when is_port(S) ->
-spec controlling_process(Socket, Pid) -> ok | {error, Reason} when
Socket :: socket(),
Pid :: pid(),
- Reason :: closed | not_owner | inet:posix().
+ Reason :: closed | not_owner | badarg | inet:posix().
controlling_process(S, NewOwner) ->
inet:udp_controlling_process(S, NewOwner).
@@ -203,32 +220,6 @@ controlling_process(S, NewOwner) ->
%%
%% Create a port/socket from a file descriptor
%%
-fdopen(Fd, Opts) ->
- Mod = mod(Opts, undefined),
+fdopen(Fd, Opts0) ->
+ {Mod,Opts} = inet:udp_module(Opts0),
Mod:fdopen(Fd, Opts).
-
-
-%% Get the udp_module, but IPv6 address overrides default IPv4
-mod(Address) ->
- case inet_db:udp_module() of
- inet_udp when tuple_size(Address) =:= 8 ->
- inet6_udp;
- Mod ->
- Mod
- end.
-
-%% Get the udp_module, but option udp_module|inet|inet6 overrides
-mod([{udp_module,Mod}|_], _Address) ->
- Mod;
-mod([inet|_], _Address) ->
- inet_udp;
-mod([inet6|_], _Address) ->
- inet6_udp;
-mod([{ip, Address}|Opts], _) ->
- mod(Opts, Address);
-mod([{ifaddr, Address}|Opts], _) ->
- mod(Opts, Address);
-mod([_|Opts], Address) ->
- mod(Opts, Address);
-mod([], Address) ->
- mod(Address).
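A minimal usage sketch of the socket options and ancillary data introduced in the gen_udp type specs above; the concrete option values and the shape of AncData shown here are illustrative assumptions, not part of the patch:

    %% Request TOS/TTL ancillary data on a passive UDP socket.
    {ok, S} = gen_udp:open(0, [binary, {active, false},
                               {recvtos, true}, {recvttl, true}]),
    %% With the recv* options enabled, recv/3 may return the four-tuple
    %% form {Address, Port, AncData, Packet}, where AncData is a list
    %% such as [{tos,0},{ttl,64}].
    {ok, {_Addr, _Port, _AncData, _Packet}} = gen_udp:recv(S, 0, 5000),
    ok = gen_udp:close(S).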
diff --git a/lib/kernel/src/global.erl b/lib/kernel/src/global.erl
index 2be1efaf24..a38522eb5c 100644
--- a/lib/kernel/src/global.erl
+++ b/lib/kernel/src/global.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -58,14 +58,18 @@
%%% In certain places in the server, calling io:format hangs everything,
%%% so we'd better use erlang:display/1.
%%% my_tracer is used in testsuites
--define(trace(_), ok).
+%% uncomment this if tracing is wanted
+%%-define(DEBUG, true).
+-ifdef(DEBUG).
+-define(trace(T), erlang:display({format, node(), cs(), T})).
+ cs() ->
+ {_Big, Small, Tiny} = erlang:timestamp(),
+ (Small rem 100) * 100 + (Tiny div 10000).
%-define(trace(T), (catch my_tracer ! {node(), {line,?LINE}, T})).
-
-%-define(trace(T), erlang:display({format, node(), cs(), T})).
-%cs() ->
-% {_Big, Small, Tiny} = now(),
-% (Small rem 100) * 100 + (Tiny div 10000).
+-else.
+-define(trace(_), ok).
+-endif.
%% These are the protocol versions:
%% Vsn 1 is the original protocol.
@@ -258,7 +262,7 @@ check_dupname(Name, Pid) ->
{ok, allow} ->
true;
_ ->
- S = "global: ~w registered under several names: ~w\n",
+ S = "global: ~w registered under several names: ~tw\n",
Names = [Name | [Name1 || {_Pid, Name1} <- PidNames]],
error_logger:error_msg(S, [Pid, Names]),
false
@@ -443,7 +447,8 @@ info() ->
init([]) ->
process_flag(trap_exit, true),
_ = ets:new(global_locks, [set, named_table, protected]),
- _ = ets:new(global_names, [set, named_table, protected]),
+ _ = ets:new(global_names, [set, named_table, protected,
+ {read_concurrency, true}]),
_ = ets:new(global_names_ext, [set, named_table, protected]),
_ = ets:new(global_pid_names, [bag, named_table, protected]),
@@ -459,17 +464,17 @@ init([]) ->
no_trace
end,
+ Ca = case init:get_argument(connect_all) of
+ {ok, [["false"]]} ->
+ false;
+ _ ->
+ true
+ end,
S = #state{the_locker = start_the_locker(DoTrace),
trace = T0,
- the_registrar = start_the_registrar()},
- S1 = trace_message(S, {init, node()}, []),
-
- case init:get_argument(connect_all) of
- {ok, [["false"]]} ->
- {ok, S1#state{connect_all = false}};
- _ ->
- {ok, S1#state{connect_all = true}}
- end.
+ the_registrar = start_the_registrar(),
+ connect_all = Ca},
+ {ok, trace_message(S, {init, node()}, [])}.
%%-----------------------------------------------------------------
%% Connection algorithm
@@ -654,7 +659,7 @@ handle_call(stop, _From, S) ->
handle_call(Request, From, S) ->
error_logger:warning_msg("The global_name_server "
"received an unexpected message:\n"
- "handle_call(~p, ~p, _)\n",
+ "handle_call(~tp, ~tp, _)\n",
[Request, From]),
{noreply, S}.
@@ -823,7 +828,7 @@ handle_cast({async_del_lock, _ResourceId, _Pid}, S) ->
handle_cast(Request, S) ->
error_logger:warning_msg("The global_name_server "
"received an unexpected message:\n"
- "handle_cast(~p, _)\n", [Request]),
+ "handle_cast(~tp, _)\n", [Request]),
{noreply, S}.
%%========================================================================
@@ -950,7 +955,7 @@ handle_info({'DOWN', MonitorRef, process, _Pid, _Info}, S0) ->
handle_info(Message, S) ->
error_logger:warning_msg("The global_name_server "
"received an unexpected message:\n"
- "handle_info(~p, _)\n", [Message]),
+ "handle_info(~tp, _)\n", [Message]),
{noreply, S}.
@@ -1944,13 +1949,13 @@ exchange_names([{Name, Pid, Method} | Tail], Node, Ops, Res) ->
exchange_names(Tail, Node, [Op | Ops], [Op | Res]);
{badrpc, Badrpc} ->
error_logger:info_msg("global: badrpc ~w received when "
- "conflicting name ~w was found\n",
+ "conflicting name ~tw was found\n",
[Badrpc, Name]),
Op = {insert, {Name, Pid, Method}},
exchange_names(Tail, Node, [Op | Ops], Res);
Else ->
error_logger:info_msg("global: Resolve method ~w for "
- "conflicting name ~w returned ~w\n",
+ "conflicting name ~tw returned ~tw\n",
[Method, Name, Else]),
Op = {delete, Name},
exchange_names(Tail, Node, [Op | Ops], [Op | Res])
@@ -1979,7 +1984,7 @@ minmax(P1,P2) ->
Pid2 :: pid().
random_exit_name(Name, Pid, Pid2) ->
{Min, Max} = minmax(Pid, Pid2),
- error_logger:info_msg("global: Name conflict terminating ~w\n",
+ error_logger:info_msg("global: Name conflict terminating ~tw\n",
[{Name, Max}]),
exit(Max, kill),
Min.
@@ -2068,23 +2073,17 @@ get_known() ->
gen_server:call(global_name_server, get_known, infinity).
random_sleep(Times) ->
- case (Times rem 10) of
- 0 -> erase(random_seed);
- _ -> ok
- end,
- case get(random_seed) of
- undefined ->
- _ = random:seed(erlang:phash2([erlang:node()]),
- erlang:monotonic_time(),
- erlang:unique_integer()),
- ok;
- _ -> ok
- end,
+ _ = case Times rem 10 of
+ 0 ->
+ _ = rand:seed(exsplus);
+ _ ->
+ ok
+ end,
%% First time 1/4 seconds, then doubling each time up to 8 seconds max.
Tmax = if Times > 5 -> 8000;
true -> ((1 bsl Times) * 1000) div 8
end,
- T = random:uniform(Tmax),
+ T = rand:uniform(Tmax),
?trace({random_sleep, {me,self()}, {times,Times}, {t,T}, {tmax,Tmax}}),
receive after T -> ok end.
@@ -2201,7 +2200,7 @@ unexpected_message({'EXIT', _Pid, _Reason}, _What) ->
ok;
unexpected_message(Message, What) ->
error_logger:warning_msg("The global_name_server ~w process "
- "received an unexpected message:\n~p\n",
+ "received an unexpected message:\n~tp\n",
[What, Message]).
%%% Utilities
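The connect_all setting that init/1 now reads once at start-up is the same flag given on the erl command line; an illustrative invocation that makes this global_name_server start with connect_all = false:

    erl -sname n1 -connect_all false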
diff --git a/lib/kernel/src/global_group.erl b/lib/kernel/src/global_group.erl
index 848df13c39..f5ead2a4c5 100644
--- a/lib/kernel/src/global_group.erl
+++ b/lib/kernel/src/global_group.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -273,7 +273,7 @@ init([]) ->
{ok, #state{publish_type = PT, group_publish_type = PubTpGrp,
sync_state = synced, group_name = DefGroupName,
no_contact = lists:sort(DefNodes),
- other_grps = DefOther}}
+ other_grps = DefOther, connect_all = Ca}}
end.
@@ -692,7 +692,7 @@ handle_cast({registered_names, User}, S) ->
handle_cast({registered_names_res, Result, Pid, From}, S) ->
% io:format(">>>>> registered_names_res Result ~p~n",[Result]),
unlink(Pid),
- exit(Pid, normal),
+ Pid ! kill,
Wait = get(registered_names),
NewWait = lists:delete({Pid, From},Wait),
put(registered_names, NewWait),
@@ -718,7 +718,7 @@ handle_cast({send_res, Result, Name, Msg, Pid, From}, S) ->
ToPid ! Msg
end,
unlink(Pid),
- exit(Pid, normal),
+ Pid ! kill,
Wait = get(send),
NewWait = lists:delete({Pid, From, Name, Msg},Wait),
put(send, NewWait),
@@ -748,7 +748,7 @@ handle_cast({find_name_res, Result, Pid, From}, S) ->
% io:format(">>>>> find_name_res Result ~p~n",[Result]),
% io:format(">>>>> find_name_res get() ~p~n",[get()]),
unlink(Pid),
- exit(Pid, normal),
+ Pid ! kill,
Wait = get(whereis_name),
NewWait = lists:delete({Pid, From},Wait),
put(whereis_name, NewWait),
diff --git a/lib/kernel/src/global_search.erl b/lib/kernel/src/global_search.erl
index 9429295bdb..11b70113e2 100644
--- a/lib/kernel/src/global_search.erl
+++ b/lib/kernel/src/global_search.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/group.erl b/lib/kernel/src/group.erl
index ea0734e0c9..5625ae6eb7 100644
--- a/lib/kernel/src/group.erl
+++ b/lib/kernel/src/group.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -33,7 +33,7 @@ start(Drv, Shell, Options) ->
server(Drv, Shell, Options) ->
process_flag(trap_exit, true),
edlin:init(),
- put(line_buffer, proplists:get_value(line_buffer, Options, [])),
+ put(line_buffer, proplists:get_value(line_buffer, Options, group_history:load())),
put(read_mode, list),
put(user_drv, Drv),
put(expand_fun,
@@ -114,7 +114,7 @@ server_loop(Drv, Shell, Buf0) ->
{io_request,From,ReplyAs,Req} when is_pid(From) ->
%% This io_request may cause a transition to a couple of
%% selective receive loops elsewhere in this module.
- Buf = io_request(Req, From, ReplyAs, Drv, Buf0),
+ Buf = io_request(Req, From, ReplyAs, Drv, Shell, Buf0),
server_loop(Drv, Shell, Buf);
{reply,{{From,ReplyAs},Reply}} ->
io_reply(From, ReplyAs, Reply),
@@ -135,7 +135,7 @@ server_loop(Drv, Shell, Buf0) ->
exit(R);
%% We want to throw away any term that we don't handle (standard
%% practice in receive loops), but not any {Drv,_} tuples which are
- %% handled in io_request/5.
+ %% handled in io_request/6.
NotDrvTuple when (not is_tuple(NotDrvTuple)) orelse
(tuple_size(NotDrvTuple) =/= 2) orelse
(element(1, NotDrvTuple) =/= Drv) ->
@@ -177,8 +177,8 @@ set_unicode_state(Drv,Bool) ->
end.
-io_request(Req, From, ReplyAs, Drv, Buf0) ->
- case io_request(Req, Drv, {From,ReplyAs}, Buf0) of
+io_request(Req, From, ReplyAs, Drv, Shell, Buf0) ->
+ case io_request(Req, Drv, Shell, {From,ReplyAs}, Buf0) of
{ok,Reply,Buf} ->
io_reply(From, ReplyAs, Reply),
Buf;
@@ -208,7 +208,7 @@ io_request(Req, From, ReplyAs, Drv, Buf0) ->
%%
%% These put requests have to be synchronous to the driver as otherwise
%% there is no guarantee that the data has actually been printed.
-io_request({put_chars,unicode,Chars}, Drv, From, Buf) ->
+io_request({put_chars,unicode,Chars}, Drv, _Shell, From, Buf) ->
case catch unicode:characters_to_binary(Chars,utf8) of
Binary when is_binary(Binary) ->
send_drv(Drv, {put_chars_sync, unicode, Binary, {From,ok}}),
@@ -216,7 +216,7 @@ io_request({put_chars,unicode,Chars}, Drv, From, Buf) ->
_ ->
{error,{error,{put_chars, unicode,Chars}},Buf}
end;
-io_request({put_chars,unicode,M,F,As}, Drv, From, Buf) ->
+io_request({put_chars,unicode,M,F,As}, Drv, _Shell, From, Buf) ->
case catch apply(M, F, As) of
Binary when is_binary(Binary) ->
send_drv(Drv, {put_chars_sync, unicode, Binary, {From,ok}}),
@@ -230,12 +230,12 @@ io_request({put_chars,unicode,M,F,As}, Drv, From, Buf) ->
{error,{error,F},Buf}
end
end;
-io_request({put_chars,latin1,Binary}, Drv, From, Buf) when is_binary(Binary) ->
+io_request({put_chars,latin1,Binary}, Drv, _Shell, From, Buf) when is_binary(Binary) ->
send_drv(Drv, {put_chars_sync, unicode,
unicode:characters_to_binary(Binary,latin1),
{From,ok}}),
{noreply,Buf};
-io_request({put_chars,latin1,Chars}, Drv, From, Buf) ->
+io_request({put_chars,latin1,Chars}, Drv, _Shell, From, Buf) ->
case catch unicode:characters_to_binary(Chars,latin1) of
Binary when is_binary(Binary) ->
send_drv(Drv, {put_chars_sync, unicode, Binary, {From,ok}}),
@@ -243,7 +243,7 @@ io_request({put_chars,latin1,Chars}, Drv, From, Buf) ->
_ ->
{error,{error,{put_chars,latin1,Chars}},Buf}
end;
-io_request({put_chars,latin1,M,F,As}, Drv, From, Buf) ->
+io_request({put_chars,latin1,M,F,As}, Drv, _Shell, From, Buf) ->
case catch apply(M, F, As) of
Binary when is_binary(Binary) ->
send_drv(Drv, {put_chars_sync, unicode,
@@ -260,30 +260,30 @@ io_request({put_chars,latin1,M,F,As}, Drv, From, Buf) ->
end
end;
-io_request({get_chars,Encoding,Prompt,N}, Drv, _From, Buf) ->
- get_chars(Prompt, io_lib, collect_chars, N, Drv, Buf, Encoding);
-io_request({get_line,Encoding,Prompt}, Drv, _From, Buf) ->
- get_chars(Prompt, io_lib, collect_line, [], Drv, Buf, Encoding);
-io_request({get_until,Encoding, Prompt,M,F,As}, Drv, _From, Buf) ->
- get_chars(Prompt, io_lib, get_until, {M,F,As}, Drv, Buf, Encoding);
-io_request({get_password,_Encoding},Drv,_From,Buf) ->
- get_password_chars(Drv, Buf);
-io_request({setopts,Opts}, Drv, _From, Buf) when is_list(Opts) ->
+io_request({get_chars,Encoding,Prompt,N}, Drv, Shell, _From, Buf) ->
+ get_chars_n(Prompt, io_lib, collect_chars, N, Drv, Shell, Buf, Encoding);
+io_request({get_line,Encoding,Prompt}, Drv, Shell, _From, Buf) ->
+ get_chars_line(Prompt, io_lib, collect_line, [], Drv, Shell, Buf, Encoding);
+io_request({get_until,Encoding, Prompt,M,F,As}, Drv, Shell, _From, Buf) ->
+ get_chars_line(Prompt, io_lib, get_until, {M,F,As}, Drv, Shell, Buf, Encoding);
+io_request({get_password,_Encoding},Drv,Shell,_From,Buf) ->
+ get_password_chars(Drv, Shell, Buf);
+io_request({setopts,Opts}, Drv, _Shell, _From, Buf) when is_list(Opts) ->
setopts(Opts, Drv, Buf);
-io_request(getopts, Drv, _From, Buf) ->
+io_request(getopts, Drv, _Shell, _From, Buf) ->
getopts(Drv, Buf);
-io_request({requests,Reqs}, Drv, From, Buf) ->
- io_requests(Reqs, {ok,ok,Buf}, From, Drv);
+io_request({requests,Reqs}, Drv, Shell, From, Buf) ->
+ io_requests(Reqs, {ok,ok,Buf}, From, Drv, Shell);
%% New in R12
-io_request({get_geometry,columns},Drv,_From,Buf) ->
+io_request({get_geometry,columns},Drv,_Shell,_From,Buf) ->
case get_tty_geometry(Drv) of
{W,_H} ->
{ok,W,Buf};
_ ->
{error,{error,enotsup},Buf}
end;
-io_request({get_geometry,rows},Drv,_From,Buf) ->
+io_request({get_geometry,rows},Drv,_Shell,_From,Buf) ->
case get_tty_geometry(Drv) of
{_W,H} ->
{ok,H,Buf};
@@ -292,40 +292,40 @@ io_request({get_geometry,rows},Drv,_From,Buf) ->
end;
%% BC with pre-R13
-io_request({put_chars,Chars}, Drv, From, Buf) ->
- io_request({put_chars,latin1,Chars}, Drv, From, Buf);
-io_request({put_chars,M,F,As}, Drv, From, Buf) ->
- io_request({put_chars,latin1,M,F,As}, Drv, From, Buf);
-io_request({get_chars,Prompt,N}, Drv, From, Buf) ->
- io_request({get_chars,latin1,Prompt,N}, Drv, From, Buf);
-io_request({get_line,Prompt}, Drv, From, Buf) ->
- io_request({get_line,latin1,Prompt}, Drv, From, Buf);
-io_request({get_until, Prompt,M,F,As}, Drv, From, Buf) ->
- io_request({get_until,latin1, Prompt,M,F,As}, Drv, From, Buf);
-io_request(get_password,Drv,From,Buf) ->
- io_request({get_password,latin1},Drv,From,Buf);
-
-
-
-io_request(_, _Drv, _From, Buf) ->
+io_request({put_chars,Chars}, Drv, Shell, From, Buf) ->
+ io_request({put_chars,latin1,Chars}, Drv, Shell, From, Buf);
+io_request({put_chars,M,F,As}, Drv, Shell, From, Buf) ->
+ io_request({put_chars,latin1,M,F,As}, Drv, Shell, From, Buf);
+io_request({get_chars,Prompt,N}, Drv, Shell, From, Buf) ->
+ io_request({get_chars,latin1,Prompt,N}, Drv, Shell, From, Buf);
+io_request({get_line,Prompt}, Drv, Shell, From, Buf) ->
+ io_request({get_line,latin1,Prompt}, Drv, Shell, From, Buf);
+io_request({get_until, Prompt,M,F,As}, Drv, Shell, From, Buf) ->
+ io_request({get_until,latin1, Prompt,M,F,As}, Drv, Shell, From, Buf);
+io_request(get_password,Drv,Shell,From,Buf) ->
+ io_request({get_password,latin1},Drv,Shell,From,Buf);
+
+
+
+io_request(_, _Drv, _Shell, _From, Buf) ->
{error,{error,request},Buf}.
-%% Status = io_requests(RequestList, PrevStat, From, Drv)
+%% Status = io_requests(RequestList, PrevStat, From, Drv, Shell)
%% Process a list of output requests as long as
%% the previous status is 'ok' or noreply.
%%
%% We use undefined as the From for all but the last request
%% in order to discard acknowledgements from those requests.
%%
-io_requests([R|Rs], {noreply,Buf}, From, Drv) ->
+io_requests([R|Rs], {noreply,Buf}, From, Drv, Shell) ->
ReqFrom = if Rs =:= [] -> From; true -> undefined end,
- io_requests(Rs, io_request(R, Drv, ReqFrom, Buf), From, Drv);
-io_requests([R|Rs], {ok,ok,Buf}, From, Drv) ->
+ io_requests(Rs, io_request(R, Drv, Shell, ReqFrom, Buf), From, Drv, Shell);
+io_requests([R|Rs], {ok,ok,Buf}, From, Drv, Shell) ->
ReqFrom = if Rs =:= [] -> From; true -> undefined end,
- io_requests(Rs, io_request(R, Drv, ReqFrom, Buf), From, Drv);
-io_requests([_|_], Error, _From, _Drv) ->
+ io_requests(Rs, io_request(R, Drv, Shell, ReqFrom, Buf), From, Drv, Shell);
+io_requests([_|_], Error, _From, _Drv, _Shell) ->
Error;
-io_requests([], Stat, _From, _) ->
+io_requests([], Stat, _From, _, _Shell) ->
Stat.
%% io_reply(From, ReplyAs, Reply)
@@ -333,7 +333,7 @@ io_requests([], Stat, _From, _) ->
%% The ACK contains the return value.
io_reply(undefined, _ReplyAs, _Reply) ->
- %% Ignore these replies as they are generated from io_requests/4.
+ %% Ignore these replies as they are generated from io_requests/5.
ok;
io_reply(From, ReplyAs, Reply) ->
From ! {io_reply,ReplyAs,Reply},
@@ -434,7 +434,7 @@ getopts(Drv,Buf) ->
{ok,[Exp,Echo,Bin,Uni],Buf}.
-%% get_chars(Prompt, Module, Function, XtraArgument, Drv, Buffer)
+%% get_chars_*(Prompt, Module, Function, XtraArgument, Drv, Buffer)
%% Gets characters from the input Drv until the applied function
%% returns {stop,Result,Rest}. Does not block output until input has been
%% received.
@@ -442,8 +442,8 @@ getopts(Drv,Buf) ->
%% {Result,NewSaveBuffer}
%% {error,What,NewSaveBuffer}
-get_password_chars(Drv,Buf) ->
- case get_password_line(Buf, Drv) of
+get_password_chars(Drv,Shell,Buf) ->
+ case get_password_line(Buf, Drv, Shell) of
{done, Line, Buf1} ->
{ok, Line, Buf1};
interrupted ->
@@ -452,36 +452,62 @@ get_password_chars(Drv,Buf) ->
{exit, terminated}
end.
-get_chars(Prompt, M, F, Xa, Drv, Buf, Encoding) ->
+get_chars_n(Prompt, M, F, Xa, Drv, Shell, Buf, Encoding) ->
Pbs = prompt_bytes(Prompt, Encoding),
- get_chars_loop(Pbs, M, F, Xa, Drv, Buf, start, Encoding).
+ case get(echo) of
+ true ->
+ get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf, start, Encoding);
+ false ->
+ get_chars_n_loop(Pbs, M, F, Xa, Drv, Shell, Buf, start, Encoding)
+ end.
+
+get_chars_line(Prompt, M, F, Xa, Drv, Shell, Buf, Encoding) ->
+ Pbs = prompt_bytes(Prompt, Encoding),
+ get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf, start, Encoding).
-get_chars_loop(Pbs, M, F, Xa, Drv, Buf0, State, Encoding) ->
- Result = case get(echo) of
+get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf0, State, Encoding) ->
+ Result = case get(echo) of
true ->
- get_line(Buf0, Pbs, Drv, Encoding);
+ get_line(Buf0, Pbs, Drv, Shell, Encoding);
false ->
% get_line_echo_off only deals with lists
% and does not need encoding...
- get_line_echo_off(Buf0, Pbs, Drv)
+ get_line_echo_off(Buf0, Pbs, Drv, Shell)
end,
case Result of
- {done,Line,Buf1} ->
- get_chars_apply(Pbs, M, F, Xa, Drv, Buf1, State, Line, Encoding);
+ {done,Line,Buf} ->
+ get_chars_apply(Pbs, M, F, Xa, Drv, Shell, Buf, State, Line, Encoding);
interrupted ->
{error,{error,interrupted},[]};
terminated ->
{exit,terminated}
end.
-get_chars_apply(Pbs, M, F, Xa, Drv, Buf, State0, Line, Encoding) ->
+get_chars_apply(Pbs, M, F, Xa, Drv, Shell, Buf, State0, Line, Encoding) ->
case catch M:F(State0, cast(Line,get(read_mode), Encoding), Encoding, Xa) of
- {stop,Result,Rest} ->
- {ok,Result,append(Rest, Buf, Encoding)};
- {'EXIT',_} ->
- {error,{error,err_func(M, F, Xa)},[]};
- State1 ->
- get_chars_loop(Pbs, M, F, Xa, Drv, Buf, State1, Encoding)
+ {stop,Result,Rest} ->
+ {ok,Result,append(Rest, Buf, Encoding)};
+ {'EXIT',_} ->
+ {error,{error,err_func(M, F, Xa)},[]};
+ State1 ->
+ get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf, State1, Encoding)
+ end.
+
+get_chars_n_loop(Pbs, M, F, Xa, Drv, Shell, Buf0, State, Encoding) ->
+ try M:F(State, cast(Buf0, get(read_mode), Encoding), Encoding, Xa) of
+ {stop,Result,Rest} ->
+ {ok, Result, Rest};
+ State1 ->
+ case get_chars_echo_off(Pbs, Drv, Shell) of
+ interrupted ->
+ {error,{error,interrupted},[]};
+ terminated ->
+ {exit,terminated};
+ Buf ->
+ get_chars_n_loop(Pbs, M, F, Xa, Drv, Shell, Buf, State1, Encoding)
+ end
+ catch _:_ ->
+ {error,{error,err_func(M, F, Xa)},[]}
end.
%% Convert error code to make it look as before
@@ -497,24 +523,24 @@ err_func(_, F, _) ->
%% {done,LineChars,RestChars}
%% interrupted
-get_line(Chars, Pbs, Drv, Encoding) ->
+get_line(Chars, Pbs, Drv, Shell, Encoding) ->
{more_chars,Cont,Rs} = edlin:start(Pbs),
send_drv_reqs(Drv, Rs),
- get_line1(edlin:edit_line(Chars, Cont), Drv, new_stack(get(line_buffer)),
+ get_line1(edlin:edit_line(Chars, Cont), Drv, Shell, new_stack(get(line_buffer)),
Encoding).
-get_line1({done,Line,Rest,Rs}, Drv, Ls, _Encoding) ->
+get_line1({done,Line,Rest,Rs}, Drv, _Shell, Ls, _Encoding) ->
send_drv_reqs(Drv, Rs),
save_line_buffer(Line, get_lines(Ls)),
{done,Line,Rest};
-get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding)
+get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Shell, Ls0, Encoding)
when ((Mode =:= none) and (Char =:= $\^P))
or ((Mode =:= meta_left_sq_bracket) and (Char =:= $A)) ->
send_drv_reqs(Drv, Rs),
case up_stack(save_line(Ls0, edlin:current_line(Cont))) of
{none,_Ls} ->
send_drv(Drv, beep),
- get_line1(edlin:edit_line(Cs, Cont), Drv, Ls0, Encoding);
+ get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls0, Encoding);
{Lcs,Ls} ->
send_drv_reqs(Drv, edlin:erase_line(Cont)),
{more_chars,Ncont,Nrs} = edlin:start(edlin:prompt(Cont)),
@@ -522,16 +548,17 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding)
get_line1(edlin:edit_line1(lists:sublist(Lcs, 1, length(Lcs)-1),
Ncont),
Drv,
+ Shell,
Ls, Encoding)
end;
-get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding)
+get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Shell, Ls0, Encoding)
when ((Mode =:= none) and (Char =:= $\^N))
or ((Mode =:= meta_left_sq_bracket) and (Char =:= $B)) ->
send_drv_reqs(Drv, Rs),
case down_stack(save_line(Ls0, edlin:current_line(Cont))) of
{none,_Ls} ->
send_drv(Drv, beep),
- get_line1(edlin:edit_line(Cs, Cont), Drv, Ls0, Encoding);
+ get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls0, Encoding);
{Lcs,Ls} ->
send_drv_reqs(Drv, edlin:erase_line(Cont)),
{more_chars,Ncont,Nrs} = edlin:start(edlin:prompt(Cont)),
@@ -539,6 +566,7 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding)
get_line1(edlin:edit_line1(lists:sublist(Lcs, 1, length(Lcs)-1),
Ncont),
Drv,
+ Shell,
Ls, Encoding)
end;
%% ^R = backward search, ^S = forward search.
@@ -551,7 +579,7 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding)
%% new modes: search, search_quit, search_found. These are added to
%% the regular ones (none, meta_left_sq_bracket) and handle special
%% cases of history search.
-get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls, Encoding)
+get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Shell, Ls, Encoding)
when ((Mode =:= none) and (Char =:= $\^R)) ->
send_drv_reqs(Drv, Rs),
%% drop current line, move to search mode. We store the current
@@ -561,8 +589,8 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls, Encoding)
Pbs = prompt_bytes("(search)`': ", Encoding),
{more_chars,Ncont,Nrs} = edlin:start(Pbs, search),
send_drv_reqs(Drv, Nrs),
- get_line1(edlin:edit_line1(Cs, Ncont), Drv, Ls, Encoding);
-get_line1({expand, Before, Cs0, Cont,Rs}, Drv, Ls0, Encoding) ->
+ get_line1(edlin:edit_line1(Cs, Ncont), Drv, Shell, Ls, Encoding);
+get_line1({expand, Before, Cs0, Cont,Rs}, Drv, Shell, Ls0, Encoding) ->
send_drv_reqs(Drv, Rs),
ExpandFun = get(expand_fun),
{Found, Add, Matches} = ExpandFun(Before),
@@ -577,37 +605,37 @@ get_line1({expand, Before, Cs0, Cont,Rs}, Drv, Ls0, Encoding) ->
send_drv(Drv, {put_chars, unicode, unicode:characters_to_binary(MatchStr,unicode)}),
[$\^L | Cs1]
end,
- get_line1(edlin:edit_line(Cs, Cont), Drv, Ls0, Encoding);
-get_line1({undefined,_Char,Cs,Cont,Rs}, Drv, Ls, Encoding) ->
+ get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls0, Encoding);
+get_line1({undefined,_Char,Cs,Cont,Rs}, Drv, Shell, Ls, Encoding) ->
send_drv_reqs(Drv, Rs),
send_drv(Drv, beep),
- get_line1(edlin:edit_line(Cs, Cont), Drv, Ls, Encoding);
+ get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls, Encoding);
%% The search item was found and accepted (new line entered on the exact
%% result found)
-get_line1({_What,Cont={line,_Prompt,_Chars,search_found},Rs}, Drv, Ls0, Encoding) ->
+get_line1({_What,Cont={line,_Prompt,_Chars,search_found},Rs}, Drv, Shell, Ls0, Encoding) ->
Line = edlin:current_line(Cont),
%% this may create duplicate entries.
Ls = save_line(new_stack(get_lines(Ls0)), Line),
- get_line1({done, Line, "", Rs}, Drv, Ls, Encoding);
+ get_line1({done, Line, "", Rs}, Drv, Shell, Ls, Encoding);
%% The search mode has been exited, but the user wants to remain in line
%% editing mode wherever that was, but editing the search result.
-get_line1({What,Cont={line,_Prompt,_Chars,search_quit},Rs}, Drv, Ls, Encoding) ->
+get_line1({What,Cont={line,_Prompt,_Chars,search_quit},Rs}, Drv, Shell, Ls, Encoding) ->
Line = edlin:current_chars(Cont),
%% Load back the old prompt with the correct line number.
case get(search_quit_prompt) of
undefined -> % should not happen. Fallback.
LsFallback = save_line(new_stack(get_lines(Ls)), Line),
- get_line1({done, "\n", Line, Rs}, Drv, LsFallback, Encoding);
+ get_line1({done, "\n", Line, Rs}, Drv, Shell, LsFallback, Encoding);
Prompt -> % redraw the line and keep going with the same stack position
NCont = {line,Prompt,{lists:reverse(Line),[]},none},
send_drv_reqs(Drv, Rs),
send_drv_reqs(Drv, edlin:erase_line(Cont)),
send_drv_reqs(Drv, edlin:redraw_line(NCont)),
- get_line1({What, NCont ,[]}, Drv, pad_stack(Ls), Encoding)
+ get_line1({What, NCont ,[]}, Drv, Shell, pad_stack(Ls), Encoding)
end;
%% Search mode is entered.
get_line1({What,{line,Prompt,{RevCmd0,_Aft},search},Rs},
- Drv, Ls0, Encoding) ->
+ Drv, Shell, Ls0, Encoding) ->
send_drv_reqs(Drv, Rs),
%% Figure out search direction. ^S and ^R are returned through edlin
%% whenever we received a search while being already in search mode.
@@ -629,61 +657,90 @@ get_line1({What,{line,Prompt,{RevCmd0,_Aft},search},Rs},
{Ls2, {RevCmd, "': "++Line}}
end,
Cont = {line,Prompt,NewStack,search},
- more_data(What, Cont, Drv, Ls, Encoding);
-get_line1({What,Cont0,Rs}, Drv, Ls, Encoding) ->
+ more_data(What, Cont, Drv, Shell, Ls, Encoding);
+get_line1({What,Cont0,Rs}, Drv, Shell, Ls, Encoding) ->
send_drv_reqs(Drv, Rs),
- more_data(What, Cont0, Drv, Ls, Encoding).
+ more_data(What, Cont0, Drv, Shell, Ls, Encoding).
-more_data(What, Cont0, Drv, Ls, Encoding) ->
+more_data(What, Cont0, Drv, Shell, Ls, Encoding) ->
receive
{Drv,{data,Cs}} ->
- get_line1(edlin:edit_line(Cs, Cont0), Drv, Ls, Encoding);
+ get_line1(edlin:edit_line(Cs, Cont0), Drv, Shell, Ls, Encoding);
{Drv,eof} ->
- get_line1(edlin:edit_line(eof, Cont0), Drv, Ls, Encoding);
+ get_line1(edlin:edit_line(eof, Cont0), Drv, Shell, Ls, Encoding);
{io_request,From,ReplyAs,Req} when is_pid(From) ->
{more_chars,Cont,_More} = edlin:edit_line([], Cont0),
send_drv_reqs(Drv, edlin:erase_line(Cont)),
- io_request(Req, From, ReplyAs, Drv, []), %WRONG!!!
+ io_request(Req, From, ReplyAs, Drv, Shell, []), %WRONG!!!
send_drv_reqs(Drv, edlin:redraw_line(Cont)),
- get_line1({more_chars,Cont,[]}, Drv, Ls, Encoding);
+ get_line1({more_chars,Cont,[]}, Drv, Shell, Ls, Encoding);
{reply,{{From,ReplyAs},Reply}} ->
%% We take care of replies from puts here as well
io_reply(From, ReplyAs, Reply),
- more_data(What, Cont0, Drv, Ls, Encoding);
+ more_data(What, Cont0, Drv, Shell, Ls, Encoding);
{'EXIT',Drv,interrupt} ->
interrupted;
{'EXIT',Drv,_} ->
- terminated
+ terminated;
+ {'EXIT',Shell,R} ->
+ exit(R)
after
get_line_timeout(What)->
- get_line1(edlin:edit_line([], Cont0), Drv, Ls, Encoding)
+ get_line1(edlin:edit_line([], Cont0), Drv, Shell, Ls, Encoding)
end.
-get_line_echo_off(Chars, Pbs, Drv) ->
+get_line_echo_off(Chars, Pbs, Drv, Shell) ->
send_drv_reqs(Drv, [{put_chars, unicode,Pbs}]),
- get_line_echo_off1(edit_line(Chars,[]), Drv).
+ get_line_echo_off1(edit_line(Chars,[]), Drv, Shell).
-get_line_echo_off1({Chars,[]}, Drv) ->
+get_line_echo_off1({Chars,[]}, Drv, Shell) ->
receive
{Drv,{data,Cs}} ->
- get_line_echo_off1(edit_line(Cs, Chars), Drv);
+ get_line_echo_off1(edit_line(Cs, Chars), Drv, Shell);
{Drv,eof} ->
- get_line_echo_off1(edit_line(eof, Chars), Drv);
+ get_line_echo_off1(edit_line(eof, Chars), Drv, Shell);
{io_request,From,ReplyAs,Req} when is_pid(From) ->
- io_request(Req, From, ReplyAs, Drv, []),
- get_line_echo_off1({Chars,[]}, Drv);
+ io_request(Req, From, ReplyAs, Drv, Shell, []),
+ get_line_echo_off1({Chars,[]}, Drv, Shell);
{reply,{{From,ReplyAs},Reply}} when From =/= undefined ->
%% We take care of replies from puts here as well
io_reply(From, ReplyAs, Reply),
- get_line_echo_off1({Chars,[]},Drv);
+ get_line_echo_off1({Chars,[]},Drv, Shell);
{'EXIT',Drv,interrupt} ->
interrupted;
{'EXIT',Drv,_} ->
- terminated
+ terminated;
+ {'EXIT',Shell,R} ->
+ exit(R)
end;
-get_line_echo_off1({Chars,Rest}, _Drv) ->
+get_line_echo_off1({Chars,Rest}, _Drv, _Shell) ->
{done,lists:reverse(Chars),case Rest of done -> []; _ -> Rest end}.
+get_chars_echo_off(Pbs, Drv, Shell) ->
+ send_drv_reqs(Drv, [{put_chars, unicode,Pbs}]),
+ get_chars_echo_off1(Drv, Shell).
+
+get_chars_echo_off1(Drv, Shell) ->
+ receive
+ {Drv, {data, Cs}} ->
+ Cs;
+ {Drv, eof} ->
+ eof;
+ {io_request,From,ReplyAs,Req} when is_pid(From) ->
+ io_request(Req, From, ReplyAs, Drv, Shell, []),
+ get_chars_echo_off1(Drv, Shell);
+ {reply,{{From,ReplyAs},Reply}} when From =/= undefined ->
+ %% We take care of replies from puts here as well
+ io_reply(From, ReplyAs, Reply),
+ get_chars_echo_off1(Drv, Shell);
+ {'EXIT',Drv,interrupt} ->
+ interrupted;
+ {'EXIT',Drv,_} ->
+ terminated;
+ {'EXIT',Shell,R} ->
+ exit(R)
+ end.
+
%% We support line editing for the ICANON mode except the following
%% line editing characters, which already have another meaning in
%% echo-on mode (See Advanced Programming in the Unix Environment, 2nd ed,
@@ -783,6 +840,7 @@ save_line_buffer("\n", Lines) ->
save_line_buffer(Line, [Line|_Lines]=Lines) ->
save_line_buffer(Lines);
save_line_buffer(Line, Lines) ->
+ group_history:add(Line),
save_line_buffer([Line|Lines]).
save_line_buffer(Lines) ->
@@ -792,9 +850,9 @@ search_up_stack(Stack, Substr) ->
case up_stack(Stack) of
{none,NewStack} -> {none,NewStack};
{L, NewStack} ->
- case string:str(L, Substr) of
- 0 -> search_up_stack(NewStack, Substr);
- _ -> {string:strip(L,right,$\n), NewStack}
+ case string:find(L, Substr) of
+ nomatch -> search_up_stack(NewStack, Substr);
+ _ -> {string:trim(L, trailing, "$\n"), NewStack}
end
end.
@@ -802,39 +860,41 @@ search_down_stack(Stack, Substr) ->
case down_stack(Stack) of
{none,NewStack} -> {none,NewStack};
{L, NewStack} ->
- case string:str(L, Substr) of
- 0 -> search_down_stack(NewStack, Substr);
- _ -> {string:strip(L,right,$\n), NewStack}
+ case string:find(L, Substr) of
+ nomatch -> search_down_stack(NewStack, Substr);
+ _ -> {string:trim(L, trailing, "$\n"), NewStack}
end
end.
%% This is get_line without line editing (except for backspace) and
%% without echo.
-get_password_line(Chars, Drv) ->
- get_password1(edit_password(Chars,[]),Drv).
+get_password_line(Chars, Drv, Shell) ->
+ get_password1(edit_password(Chars,[]),Drv,Shell).
-get_password1({Chars,[]}, Drv) ->
+get_password1({Chars,[]}, Drv, Shell) ->
receive
{Drv,{data,Cs}} ->
- get_password1(edit_password(Cs,Chars),Drv);
+ get_password1(edit_password(Cs,Chars),Drv,Shell);
{io_request,From,ReplyAs,Req} when is_pid(From) ->
%send_drv_reqs(Drv, [{delete_chars, -length(Pbs)}]),
- io_request(Req, From, ReplyAs, Drv, []), %WRONG!!!
+ io_request(Req, From, ReplyAs, Drv, Shell, []), %WRONG!!!
%% I guess the reason the above line is wrong is that Buf is
%% set to []. But do we expect anything but plain output?
- get_password1({Chars, []}, Drv);
+ get_password1({Chars, []}, Drv, Shell);
{reply,{{From,ReplyAs},Reply}} ->
%% We take care of replies from puts here as well
io_reply(From, ReplyAs, Reply),
- get_password1({Chars, []},Drv);
+ get_password1({Chars, []},Drv, Shell);
{'EXIT',Drv,interrupt} ->
interrupted;
{'EXIT',Drv,_} ->
- terminated
+ terminated;
+ {'EXIT',Shell,R} ->
+ exit(R)
end;
-get_password1({Chars,Rest},Drv) ->
+get_password1({Chars,Rest},Drv,_Shell) ->
send_drv_reqs(Drv,[{put_chars, unicode, "\n"}]),
{done,lists:reverse(Chars),case Rest of done -> []; _ -> Rest end}.
diff --git a/lib/kernel/src/group_history.erl b/lib/kernel/src/group_history.erl
new file mode 100644
index 0000000000..9745848992
--- /dev/null
+++ b/lib/kernel/src/group_history.erl
@@ -0,0 +1,341 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(group_history).
+-export([load/0, add/1]).
+
+%% Make a minimal size that should encompass a set of lines and then make
+%% a file rotation for N files of this size.
+-define(DEFAULT_HISTORY_FILE, "erlang-shell-log").
+-define(MAX_HISTORY_FILES, 10).
+-define(DEFAULT_SIZE, 1024*512). % 512 kB total default
+-define(DEFAULT_STATUS, disabled).
+-define(MIN_HISTORY_SIZE, (50*1024)). % 50 kB, in bytes
+-define(DEFAULT_DROP, []).
+-define(DISK_LOG_FORMAT, internal). % since we want repairs
+-define(LOG_NAME, '$#group_history').
+-define(VSN, {0,1,0}).
+
+%%%%%%%%%%%%%%
+%%% PUBLIC %%%
+%%%%%%%%%%%%%%
+
+%% @doc Loads the shell history from disk. This function should only be
+%% called from group:server/3 to inject the stored history into the
+%% previous commands stack.
+-spec load() -> [string()].
+load() ->
+ wait_for_kernel_safe_sup(),
+ case history_status() of
+ enabled ->
+ case open_log() of
+ {ok, ?LOG_NAME} ->
+ read_full_log(?LOG_NAME);
+ {repaired, ?LOG_NAME, {recovered, Good}, {badbytes, Bad}} ->
+ report_repairs(?LOG_NAME, Good, Bad),
+ read_full_log(?LOG_NAME);
+ {error, {need_repair, _FileName}} ->
+ repair_log(?LOG_NAME);
+ {error, {arg_mismatch, repair, true, false}} ->
+ repair_log(?LOG_NAME);
+ {error, {name_already_open, _}} ->
+ show_rename_warning(),
+ read_full_log(?LOG_NAME);
+ {error, {size_mismatch, Current, New}} ->
+ show_size_warning(Current, New),
+ resize_log(?LOG_NAME, Current, New),
+ load();
+ {error, {invalid_header, {vsn, Version}}} ->
+ upgrade_version(?LOG_NAME, Version),
+ load();
+ {error, Reason} ->
+ handle_open_error(Reason),
+ disable_history(),
+ []
+ end;
+ _ ->
+ []
+ end.
+
+%% @doc Adds a log line to the Erlang shell history log, if configured to do so.
+-spec add(iodata()) -> ok.
+add(Line) -> add(Line, history_status()).
+
+add(Line, enabled) ->
+ case lists:member(Line, to_drop()) of
+ false ->
+ case disk_log:log(?LOG_NAME, Line) of
+ ok ->
+ ok;
+ {error, no_such_log} ->
+ _ = open_log(), % a wild attempt we hope works!
+ disk_log:log(?LOG_NAME, Line);
+ {error, _Other} ->
+ % just ignore, we're too late
+ ok
+ end;
+ true ->
+ ok
+ end;
+add(_Line, disabled) ->
+ ok.
+
+%%%%%%%%%%%%%%%
+%%% PRIVATE %%%
+%%%%%%%%%%%%%%%
+
+%% Because loading the shell happens really damn early, processes we depend on
+%% might not be there yet. Luckily, the load function is called from the shell
+%% after a new process has been spawned, so we can block in here
+wait_for_kernel_safe_sup() ->
+ case whereis(kernel_safe_sup) of
+ undefined ->
+ timer:sleep(50),
+ wait_for_kernel_safe_sup();
+ _ ->
+ ok
+ end.
+
+%% Repair the log out of band
+repair_log(Name) ->
+ Opts = lists:keydelete(size, 1, log_options()),
+ case disk_log:open(Opts) of
+ {repaired, ?LOG_NAME, {recovered, Good}, {badbytes, Bad}} ->
+ report_repairs(?LOG_NAME, Good, Bad);
+ _ ->
+ ok
+ end,
+ _ = disk_log:close(Name),
+ load().
+
+%% Return whether the shell history is enabled or not
+-spec history_status() -> enabled | disabled.
+history_status() ->
+ case is_user() orelse application:get_env(kernel, shell_history) of
+ true -> disabled; % don't run for user proc
+ {ok, enabled} -> enabled;
+ undefined -> ?DEFAULT_STATUS;
+ _ -> disabled
+ end.
+
+%% Return whether the user process is running this
+-spec is_user() -> boolean().
+is_user() ->
+ case process_info(self(), registered_name) of
+ {registered_name, user} -> true;
+ _ -> false
+ end.
+
+%% Open a disk_log file while ensuring the required path is there.
+open_log() ->
+ Opts = log_options(),
+ _ = ensure_path(Opts),
+ disk_log:open(Opts).
+
+%% Return logger options
+log_options() ->
+ Path = find_path(),
+ File = filename:join([Path, ?DEFAULT_HISTORY_FILE]),
+ Size = find_wrap_values(),
+ [{name, ?LOG_NAME},
+ {file, File},
+ {repair, true},
+ {format, internal},
+ {type, wrap},
+ {size, Size},
+ {distributed, []},
+ {notify, false},
+ {head, {vsn, ?VSN}},
+ {quiet, true},
+ {mode, read_write}].
+
+-spec ensure_path([{file, string()} | {atom(), _}, ...]) -> ok | {error, term()}.
+ensure_path(Opts) ->
+ {file, Path} = lists:keyfind(file, 1, Opts),
+ filelib:ensure_dir(Path).
+
+%% @private Reads the logs from an already open file. Treats a closed file
+%% as an error and returns an empty list to avoid crash loops in the shell.
+-spec read_full_log(term()) -> [string()].
+read_full_log(Name) ->
+ case disk_log:chunk(Name, start) of
+ {error, no_such_log} ->
+ show_unexpected_close_warning(),
+ [];
+ eof ->
+ [];
+ {Cont, Logs} ->
+ lists:reverse(maybe_drop_header(Logs) ++ read_full_log(Name, Cont))
+ end.
+
+read_full_log(Name, Cont) ->
+ case disk_log:chunk(Name, Cont) of
+ {error, no_such_log} ->
+ show_unexpected_close_warning(),
+ [];
+ eof ->
+ [];
+ {NextCont, Logs} ->
+ maybe_drop_header(Logs) ++ read_full_log(Name, NextCont)
+ end.
+
+maybe_drop_header([{vsn, _} | Rest]) -> Rest;
+maybe_drop_header(Logs) -> Logs.
+
+-spec handle_open_error(_) -> ok.
+handle_open_error({arg_mismatch, OptName, CurrentVal, NewVal}) ->
+ show('$#erlang-history-arg-mismatch',
+ "Log file argument ~p changed value from ~p to ~p "
+ "and cannot be automatically updated. Please clear the "
+ "history files and try again.~n",
+ [OptName, CurrentVal, NewVal]);
+handle_open_error({not_a_log_file, FileName}) ->
+ show_invalid_file_warning(FileName);
+handle_open_error({invalid_index_file, FileName}) ->
+ show_invalid_file_warning(FileName);
+handle_open_error({invalid_header, Term}) ->
+ show('$#erlang-history-invalid-header',
+ "Shell history expects to be able to use the log files "
+ "which currently have unknown headers (~p) and may belong to "
+ "another mechanism. History logging will be "
+ "disabled.~n",
+ [Term]);
+handle_open_error({file_error, FileName, Reason}) ->
+ show('$#erlang-history-file-error',
+ "Error handling File ~ts. Reason: ~p~n"
+ "History logging will be disabled.~n",
+ [FileName, Reason]);
+handle_open_error(Err) ->
+ show_unexpected_warning({disk_log, open, 1}, Err).
+
+find_wrap_values() ->
+ ConfSize = case application:get_env(kernel, shell_history_file_bytes) of
+ undefined -> ?DEFAULT_SIZE;
+ {ok, S} -> S
+ end,
+ SizePerFile = max(?MIN_HISTORY_SIZE, ConfSize div ?MAX_HISTORY_FILES),
+ FileCount = if SizePerFile > ?MIN_HISTORY_SIZE ->
+ ?MAX_HISTORY_FILES
+ ; SizePerFile =< ?MIN_HISTORY_SIZE ->
+ max(1, ConfSize div SizePerFile)
+ end,
+ {SizePerFile, FileCount}.
+
+report_repairs(_, _, 0) ->
+ %% just a regular close repair
+ ok;
+report_repairs(_, Good, Bad) ->
+ show('$#erlang-history-report-repairs',
+ "The shell history log file was corrupted and was repaired. "
+ "~p bytes were recovered and ~p were lost.~n", [Good, Bad]).
+
+resize_log(Name, _OldSize, NewSize) ->
+ show('$#erlang-history-resize-attempt',
+ "Attempting to resize the log history file to ~p...", [NewSize]),
+ Opts = lists:keydelete(size, 1, log_options()),
+ _ = case disk_log:open(Opts) of
+ {error, {need_repair, _}} ->
+ _ = repair_log(Name),
+ disk_log:open(Opts);
+ _ ->
+ ok
+ end,
+ case disk_log:change_size(Name, NewSize) of
+ ok ->
+ show('$#erlang-history-resize-result',
+ "ok~n", []);
+ {error, {new_size_too_small, _, _}} ->
+ show('$#erlang-history-resize-result',
+ "failed (new size is too small)~n", []),
+ disable_history();
+ {error, Reason} ->
+ show('$#erlang-history-resize-result',
+ "failed (~p)~n", [Reason]),
+ disable_history()
+ end.
+
+upgrade_version(_Name, Unsupported) ->
+ %% We only know of one version and can't support a newer one
+ show('$#erlang-history-upgrade',
+ "The version for the shell logs found on disk (~p) is "
+ "not supported by the current version (~p)~n",
+ [Unsupported, ?VSN]),
+ disable_history().
+
+disable_history() ->
+ show('$#erlang-history-disable', "Disabling shell history logging.~n", []),
+ application:set_env(kernel, shell_history, force_disabled).
+
+find_path() ->
+ case application:get_env(kernel, shell_history_path) of
+ undefined -> filename:basedir(user_cache, "erlang-history");
+ {ok, Path} -> Path
+ end.
+
+to_drop() ->
+ case application:get_env(kernel, shell_history_drop) of
+ undefined ->
+ application:set_env(kernel, shell_history_drop, ?DEFAULT_DROP),
+ ?DEFAULT_DROP;
+ {ok, V} when is_list(V) -> [Ln++"\n" || Ln <- V];
+ {ok, _} -> ?DEFAULT_DROP
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Output functions %%%
+%%%%%%%%%%%%%%%%%%%%%%%%
+show_rename_warning() ->
+ show('$#erlang-history-rename-warn',
+ "A history file with a different path has already "
+ "been started for the shell of this node. The old "
+ "name will keep being used for this session.~n",
+ []).
+
+show_invalid_file_warning(FileName) ->
+ show('$#erlang-history-invalid-file',
+ "Shell history expects to be able to use the file ~ts "
+ "which currently exists and is not a file usable for "
+ "history logging purposes. History logging will be "
+ "disabled.~n", [FileName]).
+
+show_unexpected_warning({M,F,A}, Term) ->
+ show('$#erlang-history-unexpected-return',
+ "unexpected return value from ~p:~p/~p: ~p~n"
+ "shell history will be disabled for this session.~n",
+ [M,F,A,Term]).
+
+show_unexpected_close_warning() ->
+ show('$#erlang-history-unexpected-close',
+         "The shell log file has mysteriously closed. Ignoring "
+ "currently unread history.~n", []).
+
+show_size_warning(_Current, _New) ->
+ show('$#erlang-history-size',
+ "The configured log history file size is different from "
+ "the size of the log file on disk.~n", []).
+
+show(Key, Format, Args) ->
+ case get(Key) of
+ undefined ->
+ io:format(standard_error, Format, Args),
+ put(Key, true),
+ ok;
+ true ->
+ ok
+ end.
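The new module is driven entirely by kernel application parameters; a sketch of a sys.config entry enabling it, using the keys read above (the values are examples, not defaults defined by this patch):

    [{kernel, [
        {shell_history, enabled},
        {shell_history_file_bytes, 1048576},
        {shell_history_path, "/home/user/.cache/erlang-history"},
        {shell_history_drop, ["q().", "halt()."]}
    ]}].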
diff --git a/lib/kernel/src/heart.erl b/lib/kernel/src/heart.erl
index eea78aabdf..8fa48d56fb 100644
--- a/lib/kernel/src/heart.erl
+++ b/lib/kernel/src/heart.erl
@@ -198,16 +198,11 @@ start_portprogram() ->
end.
get_heart_timeouts() ->
- HeartOpts = case os:getenv("HEART_BEAT_TIMEOUT") of
- false -> "";
- H when is_list(H) ->
- "-ht " ++ H
- end,
- HeartOpts ++ case os:getenv("HEART_BEAT_BOOT_DELAY") of
- false -> "";
- W when is_list(W) ->
- " -wt " ++ W
- end.
+ case os:getenv("HEART_BEAT_TIMEOUT") of
+ false -> "";
+ H when is_list(H) ->
+ "-ht " ++ H
+ end.
check_start_heart() ->
case init:get_argument(heart) of
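After this change only HEART_BEAT_TIMEOUT is forwarded to the heart port program (as a -ht option); HEART_BEAT_BOOT_DELAY is no longer turned into a -wt option. An illustrative invocation, assuming a POSIX shell:

    HEART_BEAT_TIMEOUT=30 erl -heart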
diff --git a/lib/kernel/src/hipe_ext_format.hrl b/lib/kernel/src/hipe_ext_format.hrl
index 102cb49a2b..05c678fdec 100644
--- a/lib/kernel/src/hipe_ext_format.hrl
+++ b/lib/kernel/src/hipe_ext_format.hrl
@@ -1,3 +1,15 @@
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+
%% hipe_x86_ext_format.hrl
%% Definitions for unified external object format
%% Currently: sparc, x86, amd64
diff --git a/lib/kernel/src/hipe_unified_loader.erl b/lib/kernel/src/hipe_unified_loader.erl
index ddbbc548dd..5704cc79c2 100644
--- a/lib/kernel/src/hipe_unified_loader.erl
+++ b/lib/kernel/src/hipe_unified_loader.erl
@@ -1,4 +1,15 @@
%% -*- erlang-indent-level: 2 -*-
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
%% =======================================================================
%% Filename : hipe_unified_loader.erl
%% Module : hipe_unified_loader
@@ -41,10 +52,11 @@
% I think the real solution would be to let BIF erlang:load_module/2 redirect all
% hipe calls to the module and thereby remove post_beam_load.
+% SVERK: Can we remove -compile(no_native) now when post_beam_load is gone?
+
-export([chunk_name/1,
%% Only the code and code_server modules may call the entries below!
load_native_code/3,
- post_beam_load/2,
load_module/4,
load/3]).
@@ -101,41 +113,23 @@ word_size(Architecture) ->
load_native_code(_Mod, _Bin, undefined) ->
no_native;
load_native_code(Mod, Bin, Architecture) when is_atom(Mod), is_binary(Bin) ->
- %% patch_to_emu(Mod),
case code:get_chunk(Bin, chunk_name(Architecture)) of
undefined -> no_native;
NativeCode when is_binary(NativeCode) ->
- erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, block_normal),
try
- OldReferencesToPatch = patch_to_emu_step1(Mod),
- case load_module(Mod, NativeCode, Bin, OldReferencesToPatch,
- Architecture) of
+ put(hipe_patch_closures, false),
+ case load_common(Mod, NativeCode, Bin, Architecture) of
bad_crc -> no_native;
Result -> Result
end
after
- erlang:system_flag(multi_scheduling, unblock)
+ erlang:system_flag(multi_scheduling, unblock_normal)
end
end.
%%========================================================================
--spec post_beam_load(atom(), hipe_architecture()) -> 'ok'.
-
-%% does nothing on a hipe-disabled system
-post_beam_load(_Mod, undefined) ->
- ok;
-post_beam_load(Mod, _) when is_atom(Mod) ->
- erlang:system_flag(multi_scheduling, block),
- try
- patch_to_emu(Mod)
- after
- erlang:system_flag(multi_scheduling, unblock)
- end,
- ok.
-
-%%========================================================================
-
version_check(Version, Mod) when is_atom(Mod) ->
Ver = ?VERSION_STRING(),
case Version < Ver of
@@ -151,21 +145,14 @@ version_check(Version, Mod) when is_atom(Mod) ->
'bad_crc' | {'module', Mod} when Mod :: atom().
load_module(Mod, Bin, Beam, Architecture) ->
- erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, block_normal),
try
- load_module_nosmp(Mod, Bin, Beam, Architecture)
+ put(hipe_patch_closures, false),
+ load_common(Mod, Bin, Beam, Architecture)
after
- erlang:system_flag(multi_scheduling, unblock)
+ erlang:system_flag(multi_scheduling, unblock_normal)
end.
-load_module_nosmp(Mod, Bin, Beam, Architecture) ->
- load_module(Mod, Bin, Beam, [], Architecture).
-
-load_module(Mod, Bin, Beam, OldReferencesToPatch, Architecture) ->
- ?debug_msg("************ Loading Module ~w ************\n",[Mod]),
- %% Loading a whole module, let the BEAM loader patch closures.
- put(hipe_patch_closures, false),
- load_common(Mod, Bin, Beam, OldReferencesToPatch, Architecture).
%%========================================================================
@@ -173,22 +160,19 @@ load_module(Mod, Bin, Beam, OldReferencesToPatch, Architecture) ->
'bad_crc' | {'module', Mod} when Mod :: atom().
load(Mod, Bin, Architecture) ->
- erlang:system_flag(multi_scheduling, block),
+ erlang:system_flag(multi_scheduling, block_normal),
try
- load_nosmp(Mod, Bin, Architecture)
+ ?debug_msg("********* Loading funs in module ~w *********\n",[Mod]),
+ %% Loading just some functions in a module; patch closures separately.
+ put(hipe_patch_closures, true),
+ load_common(Mod, Bin, [], Architecture)
after
- erlang:system_flag(multi_scheduling, unblock)
+ erlang:system_flag(multi_scheduling, unblock_normal)
end.
-load_nosmp(Mod, Bin, Architecture) ->
- ?debug_msg("********* Loading funs in module ~w *********\n",[Mod]),
- %% Loading just some functions in a module; patch closures separately.
- put(hipe_patch_closures, true),
- load_common(Mod, Bin, [], [], Architecture).
-
%%------------------------------------------------------------------------
-load_common(Mod, Bin, Beam, OldReferencesToPatch, Architecture) ->
+load_common(Mod, Bin, Beam, Architecture) ->
%% Unpack the binary.
[{Version, CheckSum},
ConstAlign, ConstSize, ConstMap, LabelMap, ExportMap,
@@ -215,29 +199,31 @@ load_common(Mod, Bin, Beam, OldReferencesToPatch, Architecture) ->
put(closures_to_patch, []),
WordSize = word_size(Architecture),
WriteWord = write_word_fun(WordSize),
+ LoaderState = hipe_bifs:alloc_loader_state(Mod),
+ put(hipe_loader_state, LoaderState),
%% Create data segment
{ConstAddr,ConstMap2} =
- create_data_segment(ConstAlign, ConstSize, ConstMap, WriteWord),
+ create_data_segment(ConstAlign, ConstSize, ConstMap, WriteWord,
+ LoaderState),
%% Find callees for which we may need trampolines.
CalleeMFAs = find_callee_mfas(Refs, Architecture),
%% Write the code to memory.
{CodeAddress,Trampolines} =
- enter_code(CodeSize, CodeBinary, CalleeMFAs, Mod, Beam),
+ enter_code(CodeSize, CodeBinary, CalleeMFAs, LoaderState),
%% Construct CalleeMFA-to-trampoline mapping.
TrampolineMap = mk_trampoline_map(CalleeMFAs, Trampolines,
Architecture),
%% Patch references to code labels in data seg.
ok = patch_consts(LabelMap, ConstAddr, CodeAddress, WriteWord),
+
%% Find out which functions are being loaded (and where).
- %% Note: Addresses are sorted descending.
- {MFAs,Addresses} = exports(ExportMap, CodeAddress),
- %% Remove references to old versions of the module.
- ReferencesToPatch = get_refs_from(MFAs, []),
- %% io:format("References to patch: ~w~n", [ReferencesToPatch]),
- ok = remove_refs_from(MFAs),
+ %% Note: FunDefs are sorted descending address order.
+ FunDefs = exports(ExportMap, CodeAddress),
+
%% Patch all dynamic references in the code.
%% Function calls, Atoms, Constants, System calls
- ok = patch(Refs, CodeAddress, ConstMap2, Addresses, TrampolineMap),
+
+ ok = patch(Refs, CodeAddress, ConstMap2, FunDefs, TrampolineMap),
%% Tell the system where the loaded funs are.
%% (patches the BEAM code to redirect to native.)
@@ -250,25 +236,23 @@ load_common(Mod, Bin, Beam, OldReferencesToPatch, Architecture) ->
lists:foreach(fun({FE, DestAddress}) ->
hipe_bifs:set_native_address_in_fe(FE, DestAddress)
end, erase(closures_to_patch)),
- export_funs(Addresses),
- ok;
+ set_beam_call_traps(FunDefs),
+ export_funs(FunDefs),
+ ok = hipe_bifs:commit_patch_load(LoaderState),
+ ok;
BeamBinary when is_binary(BeamBinary) ->
%% Find all closures in the code.
[] = erase(closures_to_patch), %Clean up, assertion.
ClosurePatches = find_closure_patches(Refs),
AddressesOfClosuresToPatch =
- calculate_addresses(ClosurePatches, CodeAddress, Addresses),
- export_funs(Addresses),
- export_funs(Mod, MD5, BeamBinary,
- Addresses, AddressesOfClosuresToPatch)
+ calculate_addresses(ClosurePatches, CodeAddress, FunDefs),
+ export_funs(FunDefs),
+ make_beam_stub(Mod, LoaderState, MD5, BeamBinary, FunDefs,
+ AddressesOfClosuresToPatch)
end,
- %% Redirect references to the old module to the new module's BEAM stub.
- patch_to_emu_step2(OldReferencesToPatch),
- %% Patch referring functions to call the new function
- %% The call to export_funs/1 above updated the native addresses
- %% for the targets, so passing 'Addresses' is not needed.
- redirect(ReferencesToPatch),
+
%% Final clean up.
+ _ = erase(hipe_loader_state),
_ = erase(hipe_patch_closures),
_ = erase(hipe_assert_code_area),
?debug_msg("****************Loader Finished****************\n", []),
@@ -291,6 +275,7 @@ needs_trampolines(Architecture) ->
arm -> true;
powerpc -> true;
ppc64 -> true;
+ amd64 -> true;
_ -> false
end.
@@ -371,31 +356,31 @@ trampoline_map_lookup(Primop, Map) ->
is_exported :: boolean()}).
exports(ExportMap, BaseAddress) ->
- exports(ExportMap, BaseAddress, [], []).
+ exports(ExportMap, BaseAddress, []).
-exports([Offset,M,F,A,IsClosure,IsExported|Rest], BaseAddress, MFAs, Addresses) ->
+exports([Offset,M,F,A,IsClosure,IsExported|Rest], BaseAddress, FunDefs) ->
case IsExported andalso erlang:is_builtin(M, F, A) of
true ->
- exports(Rest, BaseAddress, MFAs, Addresses);
+ exports(Rest, BaseAddress, FunDefs);
_false ->
MFA = {M,F,A},
Address = BaseAddress + Offset,
FunDef = #fundef{address=Address, mfa=MFA, is_closure=IsClosure,
is_exported=IsExported},
- exports(Rest, BaseAddress, [MFA|MFAs], [FunDef|Addresses])
+ exports(Rest, BaseAddress, [FunDef|FunDefs])
end;
-exports([], _, MFAs, Addresses) ->
- {MFAs, Addresses}.
+exports([], _, FunDefs) ->
+ FunDefs.
mod({M,_F,_A}) -> M.
%%------------------------------------------------------------------------
-calculate_addresses(PatchOffsets, Base, Addresses) ->
+calculate_addresses(PatchOffsets, Base, FunDefs) ->
RemoteOrLocal = local, % closure code refs are local
[{Data,
offsets_to_addresses(Offsets, Base),
- get_native_address(DestMFA, Addresses, RemoteOrLocal)} ||
+ get_native_address(DestMFA, FunDefs, RemoteOrLocal)} ||
{{DestMFA,_,_}=Data,Offsets} <- PatchOffsets].
offsets_to_addresses(Os, Base) ->
@@ -424,9 +409,9 @@ find_closure_refs([], Refs) ->
%%------------------------------------------------------------------------
-export_funs([FunDef | Addresses]) ->
+set_beam_call_traps([FunDef | FunDefs]) ->
#fundef{address=Address, mfa=MFA, is_closure=IsClosure,
- is_exported=IsExported} = FunDef,
+ is_exported=_IsExported} = FunDef,
?IF_DEBUG({M,F,A} = MFA, no_debug),
?IF_DEBUG(
case IsClosure of
@@ -437,21 +422,38 @@ export_funs([FunDef | Addresses]) ->
?debug_msg("LINKING: ~w:~w/~w to closure (0x~.16b)\n",
[M,F,A, Address])
end, no_debug),
- hipe_bifs:set_funinfo_native_address(MFA, Address, IsExported),
hipe_bifs:set_native_address(MFA, Address, IsClosure),
- export_funs(Addresses);
+ set_beam_call_traps(FunDefs);
+set_beam_call_traps([]) ->
+ ok.
+
+export_funs([FunDef | FunDefs]) ->
+ #fundef{address=Address, mfa=MFA, is_closure=_IsClosure,
+ is_exported=IsExported} = FunDef,
+ ?IF_DEBUG({M,F,A} = MFA, no_debug),
+ ?IF_DEBUG(
+ case _IsClosure of
+ false ->
+ ?debug_msg("LINKING: ~w:~w/~w to (0x~.16b)\n",
+ [M,F,A, Address]);
+ true ->
+ ?debug_msg("LINKING: ~w:~w/~w to closure (0x~.16b)\n",
+ [M,F,A, Address])
+ end, no_debug),
+ hipe_bifs:set_funinfo_native_address(MFA, Address, IsExported),
+ export_funs(FunDefs);
export_funs([]) ->
ok.
-export_funs(Mod, MD5, Beam, Addresses, ClosuresToPatch) ->
- Fs = [{F,A,Address} || #fundef{address=Address, mfa={_M,F,A}} <- Addresses],
- Mod = code:make_stub_module(Mod, Beam, {Fs,ClosuresToPatch,MD5}),
+make_beam_stub(Mod, LoaderState, MD5, Beam, FunDefs, ClosuresToPatch) ->
+ Fs = [{F,A,Address} || #fundef{address=Address, mfa={_M,F,A}} <- FunDefs],
+ Mod = code:make_stub_module(LoaderState, Beam, {Fs,ClosuresToPatch,MD5}),
ok.
%%========================================================================
%% Patching
%% @spec patch(refs(), BaseAddress::integer(), ConstAndZone::term(),
-%% Addresses::term(), TrampolineMap::term()) -> 'ok'.
+%% FunDefs::term(), TrampolineMap::term()) -> 'ok'
%% @type refs()=[{RefType::integer(), Reflist::reflist()} | refs()]
%%
%% @type reflist()= [{Data::term(), Offsets::offests()}|reflist()]
@@ -463,39 +465,39 @@ export_funs(Mod, MD5, Beam, Addresses, ClosuresToPatch) ->
%% (we use this to look up the address of a referred function only once).
%%
-patch([{Type,SortedRefs}|Rest], CodeAddress, ConstMap2, Addresses, TrampolineMap) ->
+patch([{Type,SortedRefs}|Rest], CodeAddress, ConstMap2, FunDefs, TrampolineMap) ->
?debug_msg("Patching ~w at [~w+offset] with ~w\n",
[Type,CodeAddress,SortedRefs]),
case ?EXT2PATCH_TYPE(Type) of
call_local ->
- patch_call(SortedRefs, CodeAddress, Addresses, 'local', TrampolineMap);
+ patch_call(SortedRefs, CodeAddress, FunDefs, 'local', TrampolineMap);
call_remote ->
- patch_call(SortedRefs, CodeAddress, Addresses, 'remote', TrampolineMap);
+ patch_call(SortedRefs, CodeAddress, FunDefs, 'remote', TrampolineMap);
Other ->
- patch_all(Other, SortedRefs, CodeAddress, {ConstMap2,CodeAddress}, Addresses)
+ patch_all(Other, SortedRefs, CodeAddress, {ConstMap2,CodeAddress}, FunDefs)
end,
- patch(Rest, CodeAddress, ConstMap2, Addresses, TrampolineMap);
+ patch(Rest, CodeAddress, ConstMap2, FunDefs, TrampolineMap);
patch([], _, _, _, _) -> ok.
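For orientation, the refs() term that patch/5 walks groups patch sites first by type code and then by destination, as the @type comments above describe. A minimal sketch with invented type codes, MFAs and offsets (the real integer codes come from the HiPE external format and are decoded by ?EXT2PATCH_TYPE):

    Refs = [{1, [{{lists,sort,1}, [16#20, 16#64]}]},  %% say, a call_remote group: Data is the DestMFA
            {3, [{ok, [16#7c]}]}].                    %% say, a load_atom group: Data is the atom
    %% consumed as: patch(Refs, CodeAddress, ConstMap2, FunDefs, TrampolineMap)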
%%----------------------------------------------------------------
%% Handle a 'call_local' or 'call_remote' patch.
%%
-patch_call([{DestMFA,Offsets}|SortedRefs], BaseAddress, Addresses, RemoteOrLocal, TrampolineMap) ->
+patch_call([{DestMFA,Offsets}|SortedRefs], BaseAddress, FunDefs, RemoteOrLocal, TrampolineMap) ->
case bif_address(DestMFA) of
false ->
- %% Previous code used mfa_to_address(DestMFA, Addresses)
+ %% Previous code used mfa_to_address(DestMFA, FunDefs)
%% here for local calls. That is wrong because even local
- %% destinations may not be present in Addresses: they may
+ %% destinations may not be present in FunDefs: they may
%% not have been compiled yet, or they may be BEAM-only
%% functions (e.g. module_info).
- DestAddress = get_native_address(DestMFA, Addresses, RemoteOrLocal),
+ DestAddress = get_native_address(DestMFA, FunDefs, RemoteOrLocal),
Trampoline = trampoline_map_get(DestMFA, TrampolineMap),
- patch_mfa_call_list(Offsets, BaseAddress, DestMFA, DestAddress, Addresses, RemoteOrLocal, Trampoline);
+ patch_mfa_call_list(Offsets, BaseAddress, DestMFA, DestAddress, FunDefs, RemoteOrLocal, Trampoline);
BifAddress when is_integer(BifAddress) ->
Trampoline = trampoline_map_lookup(DestMFA, TrampolineMap),
patch_bif_call_list(Offsets, BaseAddress, BifAddress, Trampoline)
end,
- patch_call(SortedRefs, BaseAddress, Addresses, RemoteOrLocal, TrampolineMap);
+ patch_call(SortedRefs, BaseAddress, FunDefs, RemoteOrLocal, TrampolineMap);
patch_call([], _, _, _, _) ->
ok.
@@ -506,49 +508,48 @@ patch_bif_call_list([Offset|Offsets], BaseAddress, BifAddress, Trampoline) ->
patch_bif_call_list(Offsets, BaseAddress, BifAddress, Trampoline);
patch_bif_call_list([], _, _, _) -> ok.
-patch_mfa_call_list([Offset|Offsets], BaseAddress, DestMFA, DestAddress, Addresses, RemoteOrLocal, Trampoline) ->
+patch_mfa_call_list([Offset|Offsets], BaseAddress, DestMFA, DestAddress, FunDefs, RemoteOrLocal, Trampoline) ->
CallAddress = BaseAddress+Offset,
- add_ref(DestMFA, CallAddress, Addresses, 'call', Trampoline, RemoteOrLocal),
+ add_ref(DestMFA, CallAddress, FunDefs, 'call', Trampoline, RemoteOrLocal),
?ASSERT(assert_local_patch(CallAddress)),
patch_call_insn(CallAddress, DestAddress, Trampoline),
- patch_mfa_call_list(Offsets, BaseAddress, DestMFA, DestAddress, Addresses, RemoteOrLocal, Trampoline);
+ patch_mfa_call_list(Offsets, BaseAddress, DestMFA, DestAddress, FunDefs, RemoteOrLocal, Trampoline);
patch_mfa_call_list([], _, _, _, _, _, _) -> ok.
patch_call_insn(CallAddress, DestAddress, Trampoline) ->
- %% This assertion is false when we're called from redirect/2.
- %% ?ASSERT(assert_local_patch(CallAddress)),
+ ?ASSERT(assert_local_patch(CallAddress)),
hipe_bifs:patch_call(CallAddress, DestAddress, Trampoline).
%% ____________________________________________________________________
%%
-patch_all(Type, [{Dest,Offsets}|Rest], BaseAddress, ConstAndZone, Addresses)->
- patch_all_offsets(Type, Dest, Offsets, BaseAddress, ConstAndZone, Addresses),
- patch_all(Type, Rest, BaseAddress, ConstAndZone, Addresses);
+patch_all(Type, [{Dest,Offsets}|Rest], BaseAddress, ConstAndZone, FunDefs)->
+ patch_all_offsets(Type, Dest, Offsets, BaseAddress, ConstAndZone, FunDefs),
+ patch_all(Type, Rest, BaseAddress, ConstAndZone, FunDefs);
patch_all(_, [], _, _, _) -> ok.
patch_all_offsets(Type, Data, [Offset|Offsets], BaseAddress,
- ConstAndZone, Addresses) ->
+ ConstAndZone, FunDefs) ->
?debug_msg("Patching ~w at [~w+~w] with ~w\n",
[Type,BaseAddress,Offset, Data]),
Address = BaseAddress + Offset,
- patch_offset(Type, Data, Address, ConstAndZone, Addresses),
+ patch_offset(Type, Data, Address, ConstAndZone, FunDefs),
?debug_msg("Patching done\n",[]),
- patch_all_offsets(Type, Data, Offsets, BaseAddress, ConstAndZone, Addresses);
+ patch_all_offsets(Type, Data, Offsets, BaseAddress, ConstAndZone, FunDefs);
patch_all_offsets(_, _, [], _, _, _) -> ok.
%%----------------------------------------------------------------
%% Handle any patch type except 'call_local' or 'call_remote'.
%%
-patch_offset(Type, Data, Address, ConstAndZone, Addresses) ->
+patch_offset(Type, Data, Address, ConstAndZone, FunDefs) ->
case Type of
load_address ->
- patch_load_address(Data, Address, ConstAndZone, Addresses);
+ patch_load_address(Data, Address, ConstAndZone, FunDefs);
load_atom ->
Atom = Data,
patch_atom(Address, Atom);
sdesc ->
- patch_sdesc(Data, Address, ConstAndZone, Addresses);
+ patch_sdesc(Data, Address, ConstAndZone, FunDefs);
x86_abs_pcrel ->
patch_instr(Address, Data, x86_abs_pcrel)
%% _ ->
@@ -561,37 +562,38 @@ patch_atom(Address, Atom) ->
patch_instr(Address, hipe_bifs:atom_to_word(Atom), atom).
patch_sdesc(?STACK_DESC(SymExnRA, FSize, Arity, Live),
- Address, {_ConstMap2,CodeAddress}, _Addresses) ->
+ Address, {_ConstMap2,CodeAddress}, FunDefs) ->
ExnRA =
case SymExnRA of
[] -> 0; % No catch
LabelOffset -> CodeAddress + LabelOffset
end,
?ASSERT(assert_local_patch(Address)),
- DBG_MFA = ?IF_DEBUG(address_to_mfa_lth(Address, _Addresses), {undefined,undefined,0}),
- hipe_bifs:enter_sdesc({Address, ExnRA, FSize, Arity, Live, DBG_MFA}).
+ MFA = address_to_mfa_lth(Address, FunDefs),
+ hipe_bifs:enter_sdesc({Address, ExnRA, FSize, Arity, Live, MFA},
+ get(hipe_loader_state)).
%%----------------------------------------------------------------
%% Handle a 'load_address'-type patch.
%%
-patch_load_address(Data, Address, ConstAndZone, Addresses) ->
+patch_load_address(Data, Address, ConstAndZone, FunDefs) ->
case Data of
{local_function,DestMFA} ->
- patch_load_mfa(Address, DestMFA, Addresses, 'local');
+ patch_load_mfa(Address, DestMFA, FunDefs, 'local');
{remote_function,DestMFA} ->
- patch_load_mfa(Address, DestMFA, Addresses, 'remote');
+ patch_load_mfa(Address, DestMFA, FunDefs, 'remote');
{constant,Name} ->
{ConstMap2,_CodeAddress} = ConstAndZone,
ConstAddress = find_const(Name, ConstMap2),
patch_instr(Address, ConstAddress, constant);
{closure,{DestMFA,Uniq,Index}} ->
- patch_closure(DestMFA, Uniq, Index, Address, Addresses);
+ patch_closure(DestMFA, Uniq, Index, Address, FunDefs);
{c_const,CConst} ->
patch_instr(Address, bif_address(CConst), c_const)
end.
-patch_closure(DestMFA, Uniq, Index, Address, Addresses) ->
+patch_closure(DestMFA, Uniq, Index, Address, FunDefs) ->
case get(hipe_patch_closures) of
false ->
[]; % This is taken care of when registering the module.
@@ -602,7 +604,7 @@ patch_closure(DestMFA, Uniq, Index, Address, Addresses) ->
%% address into the fun entry to ensure that the native code cannot
%% be called until it has been completely fixed up.
RemoteOrLocal = local, % closure code refs are local
- DestAddress = get_native_address(DestMFA, Addresses, RemoteOrLocal),
+ DestAddress = get_native_address(DestMFA, FunDefs, RemoteOrLocal),
BEAMAddress = hipe_bifs:fun_to_address(DestMFA),
FE = hipe_bifs:get_fe(mod(DestMFA), {Uniq, Index, BEAMAddress}),
put(closures_to_patch, [{FE,DestAddress}|get(closures_to_patch)]),
@@ -616,17 +618,17 @@ patch_closure(DestMFA, Uniq, Index, Address, Addresses) ->
%% Patch an instruction loading the address of an MFA.
%% RemoteOrLocal ::= 'remote' | 'local'
%%
-patch_load_mfa(CodeAddress, DestMFA, Addresses, RemoteOrLocal) ->
+patch_load_mfa(CodeAddress, DestMFA, FunDefs, RemoteOrLocal) ->
+ ?ASSERT(assert_local_patch(CodeAddress)),
DestAddress =
case bif_address(DestMFA) of
false ->
- NativeAddress = get_native_address(DestMFA, Addresses, RemoteOrLocal),
- add_ref(DestMFA, CodeAddress, Addresses, 'load_mfa', [], RemoteOrLocal),
+ NativeAddress = get_native_address(DestMFA, FunDefs, RemoteOrLocal),
+ add_ref(DestMFA, CodeAddress, FunDefs, 'load_mfa', [], RemoteOrLocal),
NativeAddress;
BifAddress when is_integer(BifAddress) ->
BifAddress
end,
- ?ASSERT(assert_local_patch(CodeAddress)),
patch_instr(CodeAddress, DestAddress, 'load_mfa').
%%----------------------------------------------------------------
@@ -702,9 +704,9 @@ bif_address(Name) when is_atom(Name) ->
%% memory, and produces a ConstMap2 mapping each constant's ConstNo to
%% its runtime address, tagged if the constant is a term.
%%
-create_data_segment(DataAlign, DataSize, DataList, WriteWord) ->
+create_data_segment(DataAlign, DataSize, DataList, WriteWord, LoaderState) ->
%%io:format("create_data_segment: \nDataAlign: ~p\nDataSize: ~p\nDataList: ~p\n",[DataAlign,DataSize,DataList]),
- DataAddress = hipe_bifs:alloc_data(DataAlign, DataSize),
+ DataAddress = hipe_bifs:alloc_data(DataAlign, DataSize, LoaderState),
enter_data(DataList, [], DataAddress, DataSize, WriteWord).
enter_data(List, ConstMap2, DataAddress, DataSize, WriteWord) ->
@@ -772,7 +774,7 @@ find_const(ConstNo, []) ->
%%----------------------------------------------------------------
%% Record that the code at address 'Address' has a reference
%% of type 'RefType' ('call' or 'load_mfa') to 'CalleeMFA'.
-%% 'Addresses' must be an address-descending list from exports/2.
+%% 'FunDefs' must be an address-descending list from exports/2.
%%
%% If 'RefType' is 'call', then 'Trampoline' may be the address
%% of a stub branching to 'CalleeMFA', where the stub is reachable
@@ -781,34 +783,29 @@ find_const(ConstNo, []) ->
%% RemoteOrLocal ::= 'remote' | 'local'.
%%
-%%
-%% -record(ref, {caller_mfa, address, ref_type, trampoline, remote_or_local}).
-%%
+add_ref(CalleeMFA, Address, FunDefs, RefType, Trampoline, RemoteOrLocal) ->
+ CallerMFA = address_to_mfa_lth(Address, FunDefs),
+ case RemoteOrLocal of
+ local ->
+ %% assert that the callee and caller are from the same module
+ {M,_,_} = CalleeMFA,
+ {M,_,_} = CallerMFA,
+ ok;
+ remote ->
+ hipe_bifs:add_ref(CalleeMFA, {CallerMFA,Address,RefType,Trampoline,
+ get(hipe_loader_state)})
+ end.
-add_ref(CalleeMFA, Address, Addresses, RefType, Trampoline, RemoteOrLocal) ->
- CallerMFA = address_to_mfa_lth(Address, Addresses),
- %% just a sanity assertion below
- true = case RemoteOrLocal of
- local ->
- {M1,_,_} = CalleeMFA,
- {M2,_,_} = CallerMFA,
- M1 =:= M2;
- remote ->
- true
- end,
- %% io:format("Adding ref ~w\n",[{CallerMFA, CalleeMFA, Address, RefType}]),
- hipe_bifs:add_ref(CalleeMFA, {CallerMFA,Address,RefType,Trampoline,RemoteOrLocal}).
-
-% For FunDefs sorted from low to high addresses
+%% For FunDefs sorted from low to high addresses
address_to_mfa_lth(Address, FunDefs) ->
- case address_to_mfa_lth(Address, FunDefs, false) of
- false ->
- ?error_msg("Local adddress not found ~w\n",[Address]),
- exit({?MODULE, local_address_not_found});
- MFA ->
- MFA
- end.
-
+ case address_to_mfa_lth(Address, FunDefs, false) of
+ false ->
+ ?error_msg("Local adddress not found ~w\n",[Address]),
+ exit({?MODULE, local_address_not_found});
+ MFA ->
+ MFA
+ end.
+
address_to_mfa_lth(Address, [#fundef{address=Adr, mfa=MFA}|Rest], Prev) ->
if Address < Adr ->
Prev;
@@ -818,107 +815,33 @@ address_to_mfa_lth(Address, [#fundef{address=Adr, mfa=MFA}|Rest], Prev) ->
address_to_mfa_lth(_Address, [], Prev) ->
Prev.
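A small illustration of the low-to-high lookup above, with invented addresses and the #fundef{} record definition from this module in scope (only the address and mfa fields matter here): the last function whose start address does not exceed the queried address is chosen.

    FunDefs = [#fundef{address = 16#1000, mfa = {m,f,1}},
               #fundef{address = 16#1080, mfa = {m,g,2}}],
    {m,f,1} = address_to_mfa_lth(16#1040, FunDefs),
    {m,g,2} = address_to_mfa_lth(16#10c0, FunDefs).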
-% For FunDefs sorted from high to low addresses
+%% For FunDefs sorted from high to low addresses
%% address_to_mfa_htl(Address, [#fundef{address=Adr, mfa=MFA}|_Rest]) when Address >= Adr -> MFA;
%% address_to_mfa_htl(Address, [_ | Rest]) -> address_to_mfa_htl(Address, Rest);
%% address_to_mfa_htl(Address, []) ->
%% ?error_msg("Local adddress not found ~w\n",[Address]),
%% exit({?MODULE, local_address_not_found}).
-%%----------------------------------------------------------------
-%% Change callers of the given module to instead trap to BEAM.
-%% load_native_code/3 calls this just before loading native code.
-%%
-patch_to_emu(Mod) ->
- patch_to_emu_step2(patch_to_emu_step1(Mod)).
-
-%% Step 1 must occur before the loading of native code updates
-%% references information or creates a new BEAM stub module.
-patch_to_emu_step1(Mod) ->
- case is_loaded(Mod) of
- true ->
- %% Get exported functions
- MFAs = [{Mod,Fun,Arity} || {Fun,Arity} <- Mod:module_info(exports)],
- %% get_refs_from/2 only finds references from compiled static
- %% call sites to the module, but some native address entries
- %% were added as the result of dynamic apply calls. We must
- %% purge them too, but we have no explicit record of them.
- %% Therefore invalidate all native addresses for the module.
- hipe_bifs:invalidate_funinfo_native_addresses(MFAs),
- %% Find all call sites that call these MFAs. As a side-effect,
- %% create native stubs for any MFAs that are referred.
- ReferencesToPatch = get_refs_from(MFAs, []),
- ok = remove_refs_from(MFAs),
- ReferencesToPatch;
- false ->
- %% The first time we load the module, no redirection needs to be done.
- []
- end.
-
-%% Step 2 must occur after the new BEAM stub module is created.
-patch_to_emu_step2(ReferencesToPatch) ->
- redirect(ReferencesToPatch).
-
--spec is_loaded(Module::atom()) -> boolean().
-%% @doc Checks whether a module is loaded or not.
-is_loaded(M) when is_atom(M) ->
- try hipe_bifs:fun_to_address({M,module_info,0}) of
- I when is_integer(I) -> true
- catch _:_ -> false
- end.
-
-%%--------------------------------------------------------------------
-%% Given a list of MFAs, tag them with their referred_from references.
-%% The resulting {MFA,Refs} list is later passed to redirect/1, once
-%% the MFAs have been bound to (possibly new) native-code addresses.
-%%
-get_refs_from(MFAs, []) ->
- mark_referred_from(MFAs),
- MFAs.
-
-mark_referred_from(MFAs) ->
- lists:foreach(fun(MFA) -> hipe_bifs:mark_referred_from(MFA) end, MFAs).
-
-%%--------------------------------------------------------------------
-%% Given a list of MFAs with referred_from references, update their
-%% callers to refer to their new native-code addresses.
-%%
-%% The {MFA,Refs} list must come from get_refs_from/2.
-%%
-redirect(MFAs) ->
- lists:foreach(fun(MFA) -> hipe_bifs:redirect_referred_from(MFA) end, MFAs).
-
-%%--------------------------------------------------------------------
-%% Given a list of MFAs, remove all referred_from references having
-%% any of them as CallerMFA.
-%%
-%% This is the only place using refers_to. Whenever a reference is
-%% added from CallerMFA to CalleeMFA, CallerMFA is added to CalleeMFA's
-%% referred_from list, and CalleeMFA is added to CallerMFA's refers_to
-%% list. The refers_to list is used here to find the CalleeMFAs whose
-%% referred_from lists should be updated.
-%%
-remove_refs_from(MFAs) ->
- lists:foreach(fun(MFA) -> hipe_bifs:remove_refs_from(MFA) end, MFAs).
%%--------------------------------------------------------------------
%% To find the native code of an MFA we need to look in 3 places:
-%% 1. If it is compiled now look in the Addresses data structure.
+%% 1. If it is compiled now look in the FunDefs data structure.
%% 2. Then look in native_addresses from module info.
%% 3. Then (the function might have been compiled on its own) look in
%% hipe_funinfo
%% If all else fails create a native stub for the MFA
-get_native_address(MFA, Addresses, RemoteOrLocal) ->
- case mfa_to_address(MFA, Addresses, RemoteOrLocal) of
+get_native_address(MFA, FunDefs, RemoteOrLocal) ->
+ case mfa_to_address(MFA, FunDefs, RemoteOrLocal) of
Adr when is_integer(Adr) -> Adr;
false ->
- IsRemote =
case RemoteOrLocal of
- remote -> true;
- local -> false
- end,
- hipe_bifs:find_na_or_make_stub(MFA, IsRemote)
+ remote ->
+ hipe_bifs:find_na_or_make_stub(MFA);
+ local ->
+ ?error_msg("Local function ~p not found\n",[MFA]),
+ exit({function_not_found,MFA})
+ end
end.
mfa_to_address(MFA, [#fundef{address=Adr, mfa=MFA,
@@ -958,12 +881,10 @@ assert_local_patch(Address) when is_integer(Address) ->
%% ____________________________________________________________________
%%
-%% Beam: nil() | binary() (used as a flag)
-
-enter_code(CodeSize, CodeBinary, CalleeMFAs, Mod, Beam) ->
+enter_code(CodeSize, CodeBinary, CalleeMFAs, LoaderState) ->
true = byte_size(CodeBinary) =:= CodeSize,
- hipe_bifs:update_code_size(Mod, Beam, CodeSize),
- {CodeAddress,Trampolines} = hipe_bifs:enter_code(CodeBinary, CalleeMFAs),
+ {CodeAddress,Trampolines} = hipe_bifs:enter_code(CodeBinary, CalleeMFAs,
+ LoaderState),
?init_assert_patch(CodeAddress, byte_size(CodeBinary)),
{CodeAddress,Trampolines}.
diff --git a/lib/kernel/src/inet.erl b/lib/kernel/src/inet.erl
index c1ae99ea24..9f22eb6aaa 100644
--- a/lib/kernel/src/inet.erl
+++ b/lib/kernel/src/inet.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -34,9 +34,11 @@
ip/1, stats/0, options/0,
pushf/3, popf/1, close/1, gethostname/0, gethostname/1,
parse_ipv4_address/1, parse_ipv6_address/1, parse_ipv4strict_address/1,
- parse_ipv6strict_address/1, parse_address/1, parse_strict_address/1, ntoa/1]).
+ parse_ipv6strict_address/1, parse_address/1, parse_strict_address/1,
+ ntoa/1, ipv4_mapped_ipv6_address/1]).
-export([connect_options/2, listen_options/2, udp_options/2, sctp_options/2]).
+-export([udp_module/1, tcp_module/1, tcp_module/2, sctp_module/1]).
-export([i/0, i/1, i/2]).
@@ -71,10 +73,11 @@
%% timer interface
-export([start_timer/1, timeout/1, timeout/2, stop_timer/1]).
--export_type([address_family/0, hostent/0, hostname/0, ip4_address/0,
- ip6_address/0, ip_address/0, posix/0, socket/0,
- port_number/0]).
-
+-export_type([address_family/0, socket_protocol/0, hostent/0, hostname/0, ip4_address/0,
+ ip6_address/0, ip_address/0, port_number/0,
+ local_address/0, socket_address/0, returned_non_ip_address/0,
+ socket_setopt/0, socket_getopt/0, ancillary_data/0,
+ posix/0, socket/0, stat_option/0]).
%% imports
-import(lists, [append/1, duplicate/2, filter/2, foldl/3]).
@@ -97,7 +100,25 @@
0..65535,0..65535,0..65535,0..65535}.
-type ip_address() :: ip4_address() | ip6_address().
-type port_number() :: 0..65535.
--type posix() :: exbadport | exbadseq | file:posix().
+-type local_address() :: {local, File :: binary() | string()}.
+-type returned_non_ip_address() ::
+ {local, binary()} |
+ {unspec, <<>>} |
+ {undefined, any()}.
+-type posix() ::
+ 'eaddrinuse' | 'eaddrnotavail' | 'eafnosupport' | 'ealready' |
+ 'econnaborted' | 'econnrefused' | 'econnreset' |
+ 'edestaddrreq' |
+ 'ehostdown' | 'ehostunreach' |
+ 'einprogress' | 'eisconn' |
+ 'emsgsize' |
+ 'enetdown' | 'enetunreach' |
+ 'enopkg' | 'enoprotoopt' | 'enotconn' | 'enotty' | 'enotsock' |
+ 'eproto' | 'eprotonosupport' | 'eprototype' |
+ 'esocktnosupport' |
+ 'etimedout' |
+ 'ewouldblock' |
+ 'exbadport' | 'exbadseq' | file:posix().
-type socket() :: port().
-type socket_setopt() ::
@@ -133,16 +154,31 @@
'running' | 'multicast' | 'loopback']} |
{'hwaddr', ether_address()}.
--type address_family() :: 'inet' | 'inet6'.
+-type getifaddrs_ifopts() ::
+ [Ifopt :: {flags, Flags :: [up | broadcast | loopback |
+ pointtopoint | running | multicast]} |
+ {addr, Addr :: ip_address()} |
+ {netmask, Netmask :: ip_address()} |
+ {broadaddr, Broadaddr :: ip_address()} |
+ {dstaddr, Dstaddr :: ip_address()} |
+ {hwaddr, Hwaddr :: [byte()]}].
+
+-type address_family() :: 'inet' | 'inet6' | 'local'.
-type socket_protocol() :: 'tcp' | 'udp' | 'sctp'.
-type socket_type() :: 'stream' | 'dgram' | 'seqpacket'.
+-type socket_address() ::
+ ip_address() | 'any' | 'loopback' | local_address().
-type stat_option() ::
'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' |
'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend'.
+-type ancillary_data() ::
+ [ {'tos', byte()} | {'tclass', byte()} | {'ttl', byte()} ].
+
%%% ---------------------------------
--spec get_rc() -> [{Par :: any(), Val :: any()}].
+-spec get_rc() -> [{Par :: atom(), Val :: any()} |
+ {Par :: atom(), Val1 :: any(), Val2 :: any()}].
get_rc() ->
inet_db:get_rc().
@@ -160,26 +196,33 @@ close(Socket) ->
end.
--spec peername(Socket) -> {ok, {Address, Port}} | {error, posix()} when
- Socket :: socket(),
- Address :: ip_address(),
- Port :: non_neg_integer().
+-spec peername(Socket :: socket()) ->
+ {ok,
+ {ip_address(), port_number()} |
+ returned_non_ip_address()} |
+ {error, posix()}.
peername(Socket) ->
prim_inet:peername(Socket).
--spec setpeername(Socket :: socket(), Address :: {ip_address(), port_number()}) ->
- 'ok' | {'error', any()}.
+-spec setpeername(
+ Socket :: socket(),
+ Address ::
+ {ip_address() | 'any' | 'loopback',
+ port_number()} |
+ socket_address()) ->
+ 'ok' | {'error', any()}.
setpeername(Socket, {IP,Port}) ->
prim_inet:setpeername(Socket, {IP,Port});
setpeername(Socket, undefined) ->
prim_inet:setpeername(Socket, undefined).
--spec peernames(Socket) -> {ok, [{Address, Port}]} | {error, posix()} when
- Socket :: socket(),
- Address :: ip_address(),
- Port :: non_neg_integer().
+-spec peernames(Socket :: socket()) ->
+ {ok,
+ [{ip_address(), port_number()} |
+ returned_non_ip_address()]} |
+ {error, posix()}.
peernames(Socket) ->
prim_inet:peernames(Socket).
@@ -195,15 +238,21 @@ peernames(Socket, Assoc) ->
prim_inet:peernames(Socket, Assoc).
--spec sockname(Socket) -> {ok, {Address, Port}} | {error, posix()} when
- Socket :: socket(),
- Address :: ip_address(),
- Port :: non_neg_integer().
+-spec sockname(Socket :: socket()) ->
+ {ok,
+ {ip_address(), port_number()} |
+ returned_non_ip_address()} |
+ {error, posix()}.
sockname(Socket) ->
prim_inet:sockname(Socket).
--spec setsockname(Socket :: socket(), Address :: {ip_address(), port_number()}) ->
+-spec setsockname(
+ Socket :: socket(),
+ Address ::
+ {ip_address() | 'any' | 'loopback',
+ port_number()} |
+ socket_address()) ->
'ok' | {'error', any()}.
setsockname(Socket, {IP,Port}) ->
@@ -211,10 +260,11 @@ setsockname(Socket, {IP,Port}) ->
setsockname(Socket, undefined) ->
prim_inet:setsockname(Socket, undefined).
--spec socknames(Socket) -> {ok, [{Address, Port}]} | {error, posix()} when
- Socket :: socket(),
- Address :: ip_address(),
- Port :: non_neg_integer().
+-spec socknames(Socket :: socket()) ->
+ {ok,
+ [{ip_address(), port_number()} |
+ returned_non_ip_address()]} |
+ {error, posix()}.
socknames(Socket) ->
prim_inet:socknames(Socket).
@@ -264,7 +314,7 @@ setopts(Socket, Opts) ->
{'ok', OptionValues} | {'error', posix()} when
Socket :: socket(),
Options :: [socket_getopt()],
- OptionValues :: [socket_setopt()].
+ OptionValues :: [socket_setopt() | gen_tcp:pktoptions_value()].
getopts(Socket, Opts) ->
case prim_inet:getopts(Socket, Opts) of
@@ -280,32 +330,32 @@ getopts(Socket, Opts) ->
Other
end.
--spec getifaddrs(Socket :: socket()) ->
- {'ok', [string()]} | {'error', posix()}.
-
+-spec getifaddrs(
+ [Option :: {netns, Namespace :: file:filename_all()}]
+ | socket()) ->
+ {'ok', [{Ifname :: string(),
+ Ifopts :: getifaddrs_ifopts()}]}
+ | {'error', posix()}.
+getifaddrs(Opts) when is_list(Opts) ->
+ withsocket(fun(S) -> prim_inet:getifaddrs(S) end, Opts);
getifaddrs(Socket) ->
prim_inet:getifaddrs(Socket).
--spec getifaddrs() -> {ok, Iflist} | {error, posix()} when
- Iflist :: [{Ifname,[Ifopt]}],
- Ifname :: string(),
- Ifopt :: {flags,[Flag]} | {addr,Addr} | {netmask,Netmask}
- | {broadaddr,Broadaddr} | {dstaddr,Dstaddr}
- | {hwaddr,Hwaddr},
- Flag :: up | broadcast | loopback | pointtopoint
- | running | multicast,
- Addr :: ip_address(),
- Netmask :: ip_address(),
- Broadaddr :: ip_address(),
- Dstaddr :: ip_address(),
- Hwaddr :: [byte()].
-
+-spec getifaddrs() ->
+ {'ok', [{Ifname :: string(),
+ Ifopts :: getifaddrs_ifopts()}]}
+ | {'error', posix()}.
getifaddrs() ->
withsocket(fun(S) -> prim_inet:getifaddrs(S) end).
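With the new list-accepting clauses, the interface query functions can be pointed at another network namespace per call instead of requiring a pre-opened socket. A sketch, assuming a Linux system where the namespace file exists (the path is made up):

    {ok, IfList} = inet:getifaddrs([{netns, "/var/run/netns/example"}]),
    [{_Ifname, Ifopts} | _] = IfList,
    proplists:get_value(addr, Ifopts).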
--spec getiflist(Socket :: socket()) ->
- {'ok', [string()]} | {'error', posix()}.
+-spec getiflist(
+ [Option :: {netns, Namespace :: file:filename_all()}]
+ | socket()) ->
+ {'ok', [string()]} | {'error', posix()}.
+
+getiflist(Opts) when is_list(Opts) ->
+ withsocket(fun(S) -> prim_inet:getiflist(S) end, Opts);
getiflist(Socket) ->
prim_inet:getiflist(Socket).
@@ -322,11 +372,19 @@ getiflist() ->
ifget(Socket, Name, Opts) ->
prim_inet:ifget(Socket, Name, Opts).
--spec ifget(Name :: string() | atom(), Opts :: [if_getopt()]) ->
+-spec ifget(
+ Name :: string() | atom(),
+ Opts :: [if_getopt() |
+ {netns, Namespace :: file:filename_all()}]) ->
{'ok', [if_getopt_result()]} | {'error', posix()}.
ifget(Name, Opts) ->
- withsocket(fun(S) -> prim_inet:ifget(S, Name, Opts) end).
+ {NSOpts,IFOpts} =
+ lists:partition(
+ fun ({netns,_}) -> true;
+ (_) -> false
+ end, Opts),
+ withsocket(fun(S) -> prim_inet:ifget(S, Name, IFOpts) end, NSOpts).
-spec ifset(Socket :: socket(),
Name :: string() | atom(),
@@ -336,11 +394,19 @@ ifget(Name, Opts) ->
ifset(Socket, Name, Opts) ->
prim_inet:ifset(Socket, Name, Opts).
--spec ifset(Name :: string() | atom(), Opts :: [if_setopt()]) ->
+-spec ifset(
+ Name :: string() | atom(),
+ Opts :: [if_setopt() |
+ {netns, Namespace :: file:filename_all()}]) ->
'ok' | {'error', posix()}.
ifset(Name, Opts) ->
- withsocket(fun(S) -> prim_inet:ifset(S, Name, Opts) end).
+ {NSOpts,IFOpts} =
+ lists:partition(
+ fun ({netns,_}) -> true;
+ (_) -> false
+ end, Opts),
+ withsocket(fun(S) -> prim_inet:ifset(S, Name, IFOpts) end, NSOpts).
-spec getif() ->
{'ok', [{ip_address(), ip_address() | 'undefined', ip_address()}]} |
@@ -350,10 +416,14 @@ getif() ->
withsocket(fun(S) -> getif(S) end).
%% backwards compatible getif
--spec getif(Socket :: socket()) ->
+-spec getif(
+ [Option :: {netns, Namespace :: file:filename_all()}]
+ | socket()) ->
{'ok', [{ip_address(), ip_address() | 'undefined', ip_address()}]} |
{'error', posix()}.
+getif(Opts) when is_list(Opts) ->
+ withsocket(fun(S) -> getif(S) end, Opts);
getif(Socket) ->
case prim_inet:getiflist(Socket) of
{ok, IfList} ->
@@ -374,7 +444,10 @@ getif(Socket) ->
end.
withsocket(Fun) ->
- case inet_udp:open(0,[]) of
+ withsocket(Fun, []).
+%%
+withsocket(Fun, Opts) ->
+ case inet_udp:open(0, Opts) of
{ok,Socket} ->
Res = Fun(Socket),
inet_udp:close(Socket),
@@ -439,7 +512,12 @@ getstat(Socket,What) ->
Hostent :: hostent().
gethostbyname(Name) ->
- gethostbyname_tm(Name, inet, false).
+ case inet_db:res_option(inet6) of
+ true ->
+ gethostbyname_tm(Name, inet6, false);
+ false ->
+ gethostbyname_tm(Name, inet, false)
+ end.
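gethostbyname/1 now consults the resolver option inet6 instead of always resolving in the inet family. A sketch, assuming the option was enabled through the usual inetrc mechanism (for example an ERL_INETRC file containing {inet6, true}) and that the hostent record from kernel/include/inet.hrl is in scope; what the lookup actually returns still depends on the local name service:

    true = inet_db:res_option(inet6),
    {ok, #hostent{h_addrtype = inet6}} = inet:gethostbyname("localhost").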
-spec gethostbyname(Hostname, Family) ->
{ok, Hostent} | {error, posix()} when
@@ -646,10 +724,18 @@ parse_address(Addr) ->
parse_strict_address(Addr) ->
inet_parse:strict_address(Addr).
+-spec ipv4_mapped_ipv6_address(ip_address()) -> ip_address().
+ipv4_mapped_ipv6_address({D1,D2,D3,D4})
+ when (D1 bor D2 bor D3 bor D4) < 256 ->
+ {0,0,0,0,0,16#ffff,(D1 bsl 8) bor D2,(D3 bsl 8) bor D4};
+ipv4_mapped_ipv6_address({D1,D2,D3,D4,D5,D6,D7,D8})
+ when (D1 bor D2 bor D3 bor D4 bor D5 bor D6 bor D7 bor D8) < 65536 ->
+ {D7 bsr 8,D7 band 255,D8 bsr 8,D8 band 255}.
+
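The two clauses above are inverses on the mapped range, and note that the 8-tuple clause only looks at the last two segments. A quick check of the arithmetic in the shell:

    1> inet:ipv4_mapped_ipv6_address({127,0,0,1}).
    {0,0,0,0,0,65535,32512,1}
    2> inet:ipv4_mapped_ipv6_address({0,0,0,0,0,16#ffff,16#7f00,1}).
    {127,0,0,1}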
%% Return a list of available options
options() ->
[
- tos, priority, reuseaddr, keepalive, dontroute, linger,
+ tos, tclass, priority, reuseaddr, keepalive, dontroute, linger,
broadcast, sndbuf, recbuf, nodelay, ipv6_v6only,
buffer, header, active, packet, deliver, mode,
multicast_if, multicast_ttl, multicast_loop,
@@ -670,13 +756,14 @@ stats() ->
%% Available options for tcp:connect
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
connect_options() ->
- [tos, priority, reuseaddr, keepalive, linger, sndbuf, recbuf, nodelay,
+ [tos, tclass, priority, reuseaddr, keepalive, linger, sndbuf, recbuf, nodelay,
+ recvtos, recvtclass, ttl, recvttl,
header, active, packet, packet_size, buffer, mode, deliver, line_delimiter,
exit_on_close, high_watermark, low_watermark, high_msgq_watermark,
low_msgq_watermark, send_timeout, send_timeout_close, delay_send, raw,
- show_econnreset].
+ show_econnreset, bind_to_device].
-connect_options(Opts, Family) ->
+connect_options(Opts, Mod) ->
BaseOpts =
case application:get_env(kernel, inet_default_connect_options) of
{ok,List} when is_list(List) ->
@@ -693,7 +780,7 @@ connect_options(Opts, Family) ->
{ok, R} ->
{ok, R#connect_opts {
opts = lists:reverse(R#connect_opts.opts),
- ifaddr = translate_ip(R#connect_opts.ifaddr, Family)
+ ifaddr = Mod:translate_ip(R#connect_opts.ifaddr)
}};
Error -> Error
end.
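connect_options/2 (and the listen and UDP variants below) now receive the protocol module rather than a bare address family, and delegate translation of the special bind addresses to that module's translate_ip/1. inet6_tcp:translate_ip/1 is added later in this diff; the IPv4 and local modules are assumed to carry the analogous callback:

    inet6_tcp:translate_ip(any),      %% {0,0,0,0,0,0,0,0}, via inet:translate_ip(any, inet6)
    inet6_tcp:translate_ip(loopback). %% {0,0,0,0,0,0,0,1}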
@@ -708,9 +795,6 @@ con_opt([Opt | Opts], #connect_opts{} = R, As) ->
{fd,Fd} -> con_opt(Opts, R#connect_opts { fd = Fd }, As);
binary -> con_add(mode, binary, R, Opts, As);
list -> con_add(mode, list, R, Opts, As);
- {tcp_module,_} -> con_opt(Opts, R, As);
- inet -> con_opt(Opts, R, As);
- inet6 -> con_opt(Opts, R, As);
{netns,NS} ->
BinNS = filename2binary(NS),
case prim_inet:is_sockopt_val(netns, BinNS) of
@@ -741,13 +825,14 @@ con_add(Name, Val, #connect_opts{} = R, Opts, AllOpts) ->
%% Available options for tcp:listen
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
listen_options() ->
- [tos, priority, reuseaddr, keepalive, linger, sndbuf, recbuf, nodelay,
+ [tos, tclass, priority, reuseaddr, keepalive, linger, sndbuf, recbuf, nodelay,
+ recvtos, recvtclass, ttl, recvttl,
header, active, packet, buffer, mode, deliver, backlog, ipv6_v6only,
exit_on_close, high_watermark, low_watermark, high_msgq_watermark,
low_msgq_watermark, send_timeout, send_timeout_close, delay_send,
- packet_size, raw, show_econnreset].
+ packet_size, raw, show_econnreset, bind_to_device].
-listen_options(Opts, Family) ->
+listen_options(Opts, Mod) ->
BaseOpts =
case application:get_env(kernel, inet_default_listen_options) of
{ok,List} when is_list(List) ->
@@ -764,7 +849,7 @@ listen_options(Opts, Family) ->
{ok, R} ->
{ok, R#listen_opts {
opts = lists:reverse(R#listen_opts.opts),
- ifaddr = translate_ip(R#listen_opts.ifaddr, Family)
+ ifaddr = Mod:translate_ip(R#listen_opts.ifaddr)
}};
Error -> Error
end.
@@ -780,9 +865,6 @@ list_opt([Opt | Opts], #listen_opts{} = R, As) ->
{backlog,BL} -> list_opt(Opts, R#listen_opts { backlog = BL }, As);
binary -> list_add(mode, binary, R, Opts, As);
list -> list_add(mode, list, R, Opts, As);
- {tcp_module,_} -> list_opt(Opts, R, As);
- inet -> list_opt(Opts, R, As);
- inet6 -> list_opt(Opts, R, As);
{netns,NS} ->
BinNS = filename2binary(NS),
case prim_inet:is_sockopt_val(netns, BinNS) of
@@ -807,23 +889,36 @@ list_add(Name, Val, #listen_opts{} = R, Opts, As) ->
Error -> Error
end.
+tcp_module(Opts) ->
+ tcp_module_1(Opts, undefined).
+
+tcp_module(Opts, Addr) ->
+ Address = {undefined,Addr},
+ %% Address has to be a 2-tuple but the first element is ignored
+ tcp_module_1(Opts, Address).
+
+tcp_module_1(Opts, Address) ->
+ mod(
+ Opts, tcp_module, Address,
+ #{inet => inet_tcp, inet6 => inet6_tcp, local => local_tcp}).
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Available options for udp:open
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
udp_options() ->
- [tos, priority, reuseaddr, sndbuf, recbuf, header, active, buffer, mode,
- deliver, ipv6_v6only,
+ [tos, tclass, priority, reuseaddr, sndbuf, recbuf, header, active, buffer, mode,
+ recvtos, recvtclass, ttl, recvttl, deliver, ipv6_v6only,
broadcast, dontroute, multicast_if, multicast_ttl, multicast_loop,
add_membership, drop_membership, read_packets,raw,
- high_msgq_watermark, low_msgq_watermark].
+ high_msgq_watermark, low_msgq_watermark, bind_to_device].
-udp_options(Opts, Family) ->
+udp_options(Opts, Mod) ->
case udp_opt(Opts, #udp_opts { }, udp_options()) of
{ok, R} ->
{ok, R#udp_opts {
opts = lists:reverse(R#udp_opts.opts),
- ifaddr = translate_ip(R#udp_opts.ifaddr, Family)
+ ifaddr = Mod:translate_ip(R#udp_opts.ifaddr)
}};
Error -> Error
end.
@@ -838,9 +933,6 @@ udp_opt([Opt | Opts], #udp_opts{} = R, As) ->
{fd,Fd} -> udp_opt(Opts, R#udp_opts { fd = Fd }, As);
binary -> udp_add(mode, binary, R, Opts, As);
list -> udp_add(mode, list, R, Opts, As);
- {udp_module,_} -> udp_opt(Opts, R, As);
- inet -> udp_opt(Opts, R, As);
- inet6 -> udp_opt(Opts, R, As);
{netns,NS} ->
BinNS = filename2binary(NS),
case prim_inet:is_sockopt_val(netns, BinNS) of
@@ -865,6 +957,11 @@ udp_add(Name, Val, #udp_opts{} = R, Opts, As) ->
Error -> Error
end.
+udp_module(Opts) ->
+ mod(
+ Opts, udp_module, undefined,
+ #{inet => inet_udp, inet6 => inet6_udp, local => local_udp}).
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Available options for sctp:open
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -880,8 +977,11 @@ udp_add(Name, Val, #udp_opts{} = R, Opts, As) ->
% (*) passing of open FDs ("fdopen") is not supported.
sctp_options() ->
[ % The following are generic inet options supported for SCTP sockets:
- mode, active, buffer, tos, priority, dontroute, reuseaddr, linger, sndbuf,
- recbuf, ipv6_v6only, high_msgq_watermark, low_msgq_watermark,
+ mode, active, buffer, tos, tclass, ttl,
+ priority, dontroute, reuseaddr, linger,
+ recvtos, recvtclass, recvttl,
+ sndbuf, recbuf, ipv6_v6only, high_msgq_watermark, low_msgq_watermark,
+ bind_to_device,
% Other options are SCTP-specific (though they may be similar to their
% TCP and UDP counter-parts):
@@ -921,9 +1021,6 @@ sctp_opt([Opt|Opts], Mod, #sctp_opts{} = R, As) ->
sctp_opt(Opts, Mod, R#sctp_opts{type=Type}, As);
binary -> sctp_opt (Opts, Mod, R, As, mode, binary);
list -> sctp_opt (Opts, Mod, R, As, mode, list);
- {sctp_module,_} -> sctp_opt (Opts, Mod, R, As); % Done with
- inet -> sctp_opt (Opts, Mod, R, As); % Done with
- inet6 -> sctp_opt (Opts, Mod, R, As); % Done with
{netns,NS} ->
BinNS = filename2binary(NS),
case prim_inet:is_sockopt_val(netns, BinNS) of
@@ -965,6 +1062,11 @@ sctp_opt_ifaddr(Opts, Mod, #sctp_opts{ifaddr=IfAddr}=R, As, Addr) ->
_ -> [IP,IfAddr]
end}, As).
+sctp_module(Opts) ->
+ mod(
+ Opts, sctp_module, undefined,
+ #{inet => inet_sctp, inet6 => inet6_sctp}).
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Util to check and insert option in option list
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1016,13 +1118,59 @@ binary2filename(Bin) ->
Bin
end.
-
translate_ip(any, inet) -> {0,0,0,0};
translate_ip(loopback, inet) -> {127,0,0,1};
translate_ip(any, inet6) -> {0,0,0,0,0,0,0,0};
translate_ip(loopback, inet6) -> {0,0,0,0,0,0,0,1};
translate_ip(IP, _) -> IP.
+mod(Opts, Tag, Address, Map) ->
+ mod(Opts, Tag, Address, Map, undefined, []).
+%%
+mod([{Tag, M}|Opts], Tag, Address, Map, Mod, Acc) ->
+ mod(Opts, Tag, Address, Map, Mod, Acc, M);
+mod([{T, _} = Opt|Opts], Tag, _Address, Map, Mod, Acc)
+ when T =:= ip; T =:= ifaddr->
+ mod(Opts, Tag, Opt, Map, Mod, [Opt|Acc]);
+mod([Family|Opts], Tag, Address, Map, Mod, Acc) when is_atom(Family) ->
+ case Map of
+ #{Family := M} ->
+ mod(Opts, Tag, Address, Map, Mod, Acc, M);
+ #{} ->
+ mod(Opts, Tag, Address, Map, Mod, [Family|Acc])
+ end;
+mod([Opt|Opts], Tag, Address, Map, Mod, Acc) ->
+ mod(Opts, Tag, Address, Map, Mod, [Opt|Acc]);
+mod([], Tag, Address, Map, undefined, Acc) ->
+ {case Address of
+ {_, {local, _}} ->
+ case Map of
+ #{local := Mod} ->
+ Mod;
+ #{} ->
+ inet_db:Tag()
+ end;
+ {_, IP} when tuple_size(IP) =:= 8 ->
+ #{inet := IPv4Mod} = Map,
+ %% Get the mod, but IPv6 address overrides default IPv4
+ case inet_db:Tag() of
+ IPv4Mod ->
+ #{inet6 := IPv6Mod} = Map,
+ IPv6Mod;
+ Mod ->
+ Mod
+ end;
+ _ ->
+ inet_db:Tag()
+ end, lists:reverse(Acc)};
+mod([], _Tag, _Address, _Map, Mod, Acc) ->
+ {Mod, lists:reverse(Acc)}.
+%%
+mod(Opts, Tag, Address, Map, undefined, Acc, M) ->
+ mod(Opts, Tag, Address, Map, M, Acc);
+mod(Opts, Tag, Address, Map, Mod, Acc, _M) ->
+ mod(Opts, Tag, Address, Map, Mod, Acc).
+
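mod/4 is the common helper behind tcp_module/1, udp_module/1 and sctp_module/1: an explicit {tcp_module,M} option or a known family atom selects a module directly, a {local,_} bind address picks the local_* module, and otherwise the inet_db default applies, upgraded to the IPv6 module when an 8-tuple bind address is given. Two internal-call sketches that follow from the clauses above (the socket path is made up):

    {inet6_tcp, [{nodelay, true}]} =
        inet:tcp_module([inet6, {nodelay, true}]),
    {local_tcp, [{ifaddr, {local, "/tmp/example.sock"}}]} =
        inet:tcp_module([{ifaddr, {local, "/tmp/example.sock"}}]).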
getaddrs_tm({A,B,C,D} = IP, Fam, _) ->
%% Only "syntactic" validation and check of family.
@@ -1157,9 +1305,7 @@ gethostbyname_string(Name, Type)
inet ->
inet_parse:ipv4_address(Name);
inet6 ->
- %% XXX should we really translate IPv4 addresses here
- %% even if we do not know if this host can do IPv6?
- inet_parse:ipv6_address(Name)
+ inet_parse:ipv6strict_address(Name)
end of
{ok,IP} ->
{ok,make_hostent(Name, [IP], [], Type)};
@@ -1230,7 +1376,17 @@ gethostbyaddr_tm_native(Addr, Timer, Opts) ->
end.
-spec open(Fd_or_OpenOpts :: integer() | list(),
- Addr :: ip_address(),
+ Addr ::
+ socket_address() |
+ {ip_address() | 'any' | 'loopback', % Unofficial
+ port_number()} |
+ {inet, % Unofficial
+ {ip4_address() | 'any' | 'loopback',
+ port_number()}} |
+ {inet6, % Unofficial
+ {ip6_address() | 'any' | 'loopback',
+ port_number()}} |
+ undefined, % Internal - no bind()
Port :: port_number(),
Opts :: [socket_setopt()],
Protocol :: socket_protocol(),
@@ -1249,13 +1405,12 @@ open(FdO, Addr, Port, Opts, Protocol, Family, Type, Module)
case prim_inet:open(Protocol, Family, Type, OpenOpts) of
{ok,S} ->
case prim_inet:setopts(S, Opts) of
+ ok when Addr =:= undefined ->
+ inet_db:register_socket(S, Module),
+ {ok,S};
ok ->
- case if is_list(Addr) ->
- bindx(S, Addr, Port);
- true ->
- prim_inet:bind(S, Addr, Port)
- end of
- {ok, _} ->
+ case bind(S, Addr, Port) of
+ {ok, _} ->
inet_db:register_socket(S, Module),
{ok,S};
Error ->
@@ -1273,6 +1428,11 @@ open(Fd, Addr, Port, Opts, Protocol, Family, Type, Module)
when is_integer(Fd) ->
fdopen(Fd, Addr, Port, Opts, Protocol, Family, Type, Module).
+bind(S, Addr, Port) when is_list(Addr) ->
+ bindx(S, Addr, Port);
+bind(S, Addr, Port) ->
+ prim_inet:bind(S, Addr, Port).
+
bindx(S, [Addr], Port0) ->
{IP, Port} = set_bindx_port(Addr, Port0),
prim_inet:bind(S, IP, Port);
@@ -1313,34 +1473,36 @@ fdopen(Fd, Opts, Protocol, Family, Type, Module) ->
fdopen(Fd, any, 0, Opts, Protocol, Family, Type, Module).
fdopen(Fd, Addr, Port, Opts, Protocol, Family, Type, Module) ->
- IsAnyAddr = (Addr == {0,0,0,0} orelse Addr == {0,0,0,0,0,0,0,0}
- orelse Addr == any),
- Bound = Port == 0 andalso IsAnyAddr,
+ Bound =
+ %% We do not do any binding if default port+addr options
+ %% were given, in order to keep backwards compatibility
+ %% with pre Erlang/OTP 17
+ case Addr of
+ {0,0,0,0} when Port =:= 0 -> true;
+ {0,0,0,0,0,0,0,0} when Port =:= 0 -> true;
+ any when Port =:= 0 -> true;
+ _ -> false
+ end,
case prim_inet:fdopen(Protocol, Family, Type, Fd, Bound) of
{ok, S} ->
case prim_inet:setopts(S, Opts) of
+ ok
+ when Addr =:= undefined;
+ Bound ->
+ inet_db:register_socket(S, Module),
+ {ok, S};
ok ->
- case if
- Bound ->
- %% We do not do any binding if default
- %% port+addr options where given in order
- %% to keep backwards compatability with
- %% pre Erlang/TOP 17
- {ok, ok};
- is_list(Addr) ->
- bindx(S, Addr, Port);
- true ->
- prim_inet:bind(S, Addr, Port)
- end of
- {ok, _} ->
- inet_db:register_socket(S, Module),
- {ok, S};
- Error ->
- prim_inet:close(S),
- Error
+ case bind(S, Addr, Port) of
+ {ok, _} ->
+ inet_db:register_socket(S, Module),
+ {ok, S};
+ Error ->
+ prim_inet:close(S),
+ Error
end;
Error ->
- prim_inet:close(S), Error
+ prim_inet:close(S),
+ Error
end;
Error -> Error
end.
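The rewritten fdopen/8 keeps the pre-OTP-17 behaviour of leaving an already-open descriptor unbound when only the default address and port are given; summarising the Bound case analysis above (192.0.2.1 is a documentation address):

    %% fdopen(Fd, any, 0, ...)               -> Bound = true,  no bind is attempted
    %% fdopen(Fd, {0,0,0,0}, 0, ...)         -> Bound = true,  no bind is attempted
    %% fdopen(Fd, {0,0,0,0,0,0,0,0}, 0, ...) -> Bound = true,  no bind is attempted
    %% fdopen(Fd, {192,0,2,1}, 8080, ...)    -> Bound = false, bind/3 runs after setopts
    %%                                          (bindx/3 if Addr is a list of addresses)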
@@ -1349,11 +1511,14 @@ fdopen(Fd, Addr, Port, Opts, Protocol, Family, Type, Module) ->
%% socket stat
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-spec i() -> ok.
i() -> i(tcp), i(udp), i(sctp).
+-spec i(socket_protocol()) -> ok.
i(Proto) -> i(Proto, [port, module, recv, sent, owner,
local_address, foreign_address, state, type]).
+-spec i(socket_protocol(), [atom()]) -> ok.
i(tcp, Fs) ->
ii(tcp_sockets(), Fs, tcp);
i(udp, Fs) ->
diff --git a/lib/kernel/src/inet6_sctp.erl b/lib/kernel/src/inet6_sctp.erl
index 5934c269fa..a5503f6f54 100644
--- a/lib/kernel/src/inet6_sctp.erl
+++ b/lib/kernel/src/inet6_sctp.erl
@@ -1,8 +1,8 @@
%%
%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2007-2011. All Rights Reserved.
-%%
+%%
+%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
+%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
@@ -14,13 +14,12 @@
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-%%
+%%
%% %CopyrightEnd%
%%
%% SCTP protocol contribution by Leonid Timochouk and Serge Aleynikov.
%% See also: $ERL_TOP/lib/kernel/AUTHORS
%%
-%%
-module(inet6_sctp).
%% This module provides functions for communicating with
@@ -31,6 +30,7 @@
-include("inet_sctp.hrl").
-include("inet_int.hrl").
+-define(PROTO, sctp).
-define(FAMILY, inet6).
-export([getserv/1,getaddr/1,getaddr/2,translate_ip/1]).
-export([open/1,close/1,listen/2,peeloff/2,connect/5]).
@@ -39,25 +39,19 @@
getserv(Port) when is_integer(Port) -> {ok, Port};
-getserv(Name) when is_atom(Name) ->
- inet:getservbyname(Name, sctp);
-getserv(_) ->
- {error,einval}.
-
-getaddr(Address) ->
- inet:getaddr(Address, ?FAMILY).
-getaddr(Address, Timer) ->
- inet:getaddr_tm(Address, ?FAMILY, Timer).
+getserv(Name) when is_atom(Name) -> inet:getservbyname(Name, ?PROTO);
+getserv(_) -> {error,einval}.
-translate_ip(IP) ->
- inet:translate_ip(IP, ?FAMILY).
+getaddr(Address) -> inet:getaddr(Address, ?FAMILY).
+getaddr(Address, Timer) -> inet:getaddr_tm(Address, ?FAMILY, Timer).
+translate_ip(IP) -> inet:translate_ip(IP, ?FAMILY).
open(Opts) ->
case inet:sctp_options(Opts, ?MODULE) of
{ok,#sctp_opts{fd=Fd,ifaddr=Addr,port=Port,type=Type,opts=SOs}} ->
- inet:open(Fd, Addr, Port, SOs, sctp, ?FAMILY, Type, ?MODULE);
+ inet:open(Fd, Addr, Port, SOs, ?PROTO, ?FAMILY, Type, ?MODULE);
Error -> Error
end.
diff --git a/lib/kernel/src/inet6_tcp.erl b/lib/kernel/src/inet6_tcp.erl
index 1978307b3c..347b8b9a1b 100644
--- a/lib/kernel/src/inet6_tcp.erl
+++ b/lib/kernel/src/inet6_tcp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -25,13 +25,18 @@
-export([controlling_process/2]).
-export([fdopen/2]).
--export([family/0, mask/2, parse_address/1]).
+-export([family/0, mask/2, parse_address/1]). % inet_tcp_dist
-export([getserv/1, getaddr/1, getaddr/2, getaddrs/1, getaddrs/2]).
+-export([translate_ip/1]).
-include("inet_int.hrl").
+-define(FAMILY, inet6).
+-define(PROTO, tcp).
+-define(TYPE, stream).
+
%% my address family
-family() -> inet6.
+family() -> ?FAMILY.
%% Apply netmask on address
mask({M1,M2,M3,M4,M5,M6,M7,M8}, {IP1,IP2,IP3,IP4,IP5,IP6,IP7,IP8}) ->
@@ -50,15 +55,18 @@ parse_address(Host) ->
%% inet_tcp port lookup
getserv(Port) when is_integer(Port) -> {ok, Port};
-getserv(Name) when is_atom(Name) -> inet:getservbyname(Name,tcp).
+getserv(Name) when is_atom(Name) -> inet:getservbyname(Name, ?PROTO).
%% inet_tcp address lookup
-getaddr(Address) -> inet:getaddr(Address, inet6).
-getaddr(Address,Timer) -> inet:getaddr_tm(Address, inet6, Timer).
+getaddr(Address) -> inet:getaddr(Address, ?FAMILY).
+getaddr(Address, Timer) -> inet:getaddr_tm(Address, ?FAMILY, Timer).
%% inet_tcp address lookup
-getaddrs(Address) -> inet:getaddrs(Address, inet6).
-getaddrs(Address,Timer) -> inet:getaddrs_tm(Address,inet6,Timer).
+getaddrs(Address) -> inet:getaddrs(Address, ?FAMILY).
+getaddrs(Address, Timer) -> inet:getaddrs_tm(Address, ?FAMILY, Timer).
+
+%% Translate special this-side addresses (any, loopback), as inet_udp does
+translate_ip(IP) -> inet:translate_ip(IP, ?FAMILY).
%%
%% Send data on a socket
@@ -73,11 +81,6 @@ recv(Socket, Length) -> prim_inet:recv(Socket, Length).
recv(Socket, Length, Timeout) -> prim_inet:recv(Socket, Length, Timeout).
unrecv(Socket, Data) -> prim_inet:unrecv(Socket, Data).
-%%
-%% Close a socket (async)
-%%
-close(Socket) ->
- inet:tcp_close(Socket).
%%
%% Shutdown one end of a socket
@@ -86,6 +89,12 @@ shutdown(Socket, How) ->
prim_inet:shutdown(Socket, How).
%%
+%% Close a socket (async)
+%%
+close(Socket) ->
+ inet:tcp_close(Socket).
+
+%%
%% Set controlling process
%% FIXME: move messages to new owner!!!
%%
@@ -100,24 +109,28 @@ connect(Address, Port, Opts) ->
connect(Address, Port, Opts, infinity) ->
do_connect(Address, Port, Opts, infinity);
-connect(Address, Port, Opts, Timeout) when is_integer(Timeout),
- Timeout >= 0 ->
+connect(Address, Port, Opts, Timeout)
+ when is_integer(Timeout), Timeout >= 0 ->
do_connect(Address, Port, Opts, Timeout).
-do_connect(Addr = {A,B,C,D,E,F,G,H}, Port, Opts, Time) when
- ?ip6(A,B,C,D,E,F,G,H), ?port(Port) ->
- case inet:connect_options(Opts, inet6) of
+do_connect(Addr = {A,B,C,D,E,F,G,H}, Port, Opts, Time)
+ when ?ip6(A,B,C,D,E,F,G,H), ?port(Port) ->
+ case inet:connect_options(Opts, ?MODULE) of
{error, Reason} -> exit(Reason);
- {ok, #connect_opts{fd=Fd,
- ifaddr=BAddr={Ab,Bb,Cb,Db,Eb,Fb,Gb,Hb},
- port=BPort,
- opts=SockOpts}}
+ {ok,
+ #connect_opts{
+ fd = Fd,
+ ifaddr = BAddr = {Ab,Bb,Cb,Db,Eb,Fb,Gb,Hb},
+ port = BPort,
+ opts = SockOpts}}
when ?ip6(Ab,Bb,Cb,Db,Eb,Fb,Gb,Hb), ?port(BPort) ->
- case inet:open(Fd,BAddr,BPort,SockOpts,tcp,inet6,stream,?MODULE) of
+ case inet:open(
+ Fd, BAddr, BPort, SockOpts,
+ ?PROTO, ?FAMILY, ?TYPE, ?MODULE) of
{ok, S} ->
case prim_inet:connect(S, Addr, Port, Time) of
- ok -> {ok,S};
- Error -> prim_inet:close(S), Error
+ ok -> {ok,S};
+ Error -> prim_inet:close(S), Error
end;
Error -> Error
end;
@@ -128,14 +141,18 @@ do_connect(Addr = {A,B,C,D,E,F,G,H}, Port, Opts, Time) when
%% Listen
%%
listen(Port, Opts) ->
- case inet:listen_options([{port,Port} | Opts], inet6) of
+ case inet:listen_options([{port,Port} | Opts], ?MODULE) of
{error, Reason} -> exit(Reason);
- {ok, #listen_opts{fd=Fd,
- ifaddr=BAddr={A,B,C,D,E,F,G,H},
- port=BPort,
- opts=SockOpts}=R}
+ {ok,
+ #listen_opts{
+ fd = Fd,
+ ifaddr = BAddr = {A,B,C,D,E,F,G,H},
+ port = BPort,
+ opts = SockOpts} = R}
when ?ip6(A,B,C,D,E,F,G,H), ?port(BPort) ->
- case inet:open(Fd,BAddr,BPort,SockOpts,tcp,inet6,stream,?MODULE) of
+ case inet:open(
+ Fd, BAddr, BPort, SockOpts,
+ ?PROTO, ?FAMILY, ?TYPE, ?MODULE) of
{ok, S} ->
case prim_inet:listen(S, R#listen_opts.backlog) of
ok -> {ok, S};
@@ -150,24 +167,25 @@ listen(Port, Opts) ->
%% Accept
%%
accept(L) ->
- case prim_inet:accept(L) of
+ case prim_inet:accept(L, accept_family_opts()) of
{ok, S} ->
inet_db:register_socket(S, ?MODULE),
{ok,S};
Error -> Error
end.
-
-accept(L,Timeout) ->
- case prim_inet:accept(L,Timeout) of
+
+accept(L, Timeout) ->
+ case prim_inet:accept(L, Timeout, accept_family_opts()) of
{ok, S} ->
inet_db:register_socket(S, ?MODULE),
{ok,S};
Error -> Error
end.
-
+
+accept_family_opts() -> [tclass, recvtclass].
+
%%
%% Create a port/socket from a file descriptor
%%
fdopen(Fd, Opts) ->
- inet:fdopen(Fd, Opts, tcp, inet6, stream, ?MODULE).
-
+ inet:fdopen(Fd, Opts, ?PROTO, ?FAMILY, ?TYPE, ?MODULE).
diff --git a/lib/kernel/src/inet6_tcp_dist.erl b/lib/kernel/src/inet6_tcp_dist.erl
index 3ab7f269bb..9b6c2745d5 100644
--- a/lib/kernel/src/inet6_tcp_dist.erl
+++ b/lib/kernel/src/inet6_tcp_dist.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@
-export([listen/1, accept/1, accept_connection/5,
setup/5, close/1, select/1, is_node_name/1]).
+-export([setopts/2, getopts/2]).
%% ------------------------------------------------------------
%% Select this protocol based on node name
@@ -72,3 +73,9 @@ close(Socket) ->
is_node_name(Node) when is_atom(Node) ->
inet_tcp_dist:is_node_name(Node).
+
+setopts(S, Opts) ->
+ inet_tcp_dist:setopts(S, Opts).
+
+getopts(S, Opts) ->
+ inet_tcp_dist:getopts(S, Opts).
diff --git a/lib/kernel/src/inet6_udp.erl b/lib/kernel/src/inet6_udp.erl
index 61c74bf14f..71db0357cd 100644
--- a/lib/kernel/src/inet6_udp.erl
+++ b/lib/kernel/src/inet6_udp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -24,29 +24,44 @@
-export([controlling_process/2]).
-export([fdopen/2]).
--export([getserv/1, getaddr/1, getaddr/2]).
+-export([getserv/1, getaddr/1, getaddr/2, translate_ip/1]).
-include("inet_int.hrl").
+-define(FAMILY, inet6).
+-define(PROTO, udp).
+-define(TYPE, dgram).
+
+
%% inet_udp port lookup
getserv(Port) when is_integer(Port) -> {ok, Port};
-getserv(Name) when is_atom(Name) -> inet:getservbyname(Name,udp).
+getserv(Name) when is_atom(Name) -> inet:getservbyname(Name, ?PROTO).
%% inet_udp address lookup
-getaddr(Address) -> inet:getaddr(Address, inet6).
-getaddr(Address,Timer) -> inet:getaddr(Address, inet6, Timer).
+getaddr(Address) -> inet:getaddr(Address, ?FAMILY).
+getaddr(Address, Timer) -> inet:getaddr(Address, ?FAMILY, Timer).
+
+%% Translate special this-side addresses (any, loopback), as inet_udp does
+translate_ip(IP) -> inet:translate_ip(IP, ?FAMILY).
+-spec open(_) -> {ok, inet:socket()} | {error, atom()}.
open(Port) -> open(Port, []).
+-spec open(_, _) -> {ok, inet:socket()} | {error, atom()}.
open(Port, Opts) ->
- case inet:udp_options([{port,Port} | Opts], inet6) of
+ case inet:udp_options(
+ [{port,Port} | Opts],
+ ?MODULE) of
{error, Reason} -> exit(Reason);
- {ok, #udp_opts{fd=Fd,
- ifaddr=BAddr={A,B,C,D,E,F,G,H},
- port=BPort,
- opts=SockOpts}}
+ {ok,
+ #udp_opts{
+ fd = Fd,
+ ifaddr = BAddr = {A,B,C,D,E,F,G,H},
+ port = BPort,
+ opts = SockOpts}}
when ?ip6(A,B,C,D,E,F,G,H), ?port(BPort) ->
- inet:open(Fd,BAddr,BPort,SockOpts,udp,inet6,dgram,?MODULE);
+ inet:open(
+ Fd, BAddr, BPort, SockOpts, ?PROTO, ?FAMILY, ?TYPE, ?MODULE);
{ok, _} -> exit(badarg)
end.
@@ -61,12 +76,13 @@ connect(S, Addr = {A,B,C,D,E,F,G,H}, P)
when ?ip6(A,B,C,D,E,F,G,H), ?port(P) ->
prim_inet:connect(S, Addr, P).
-recv(S,Len) ->
+recv(S, Len) ->
prim_inet:recvfrom(S, Len).
-recv(S,Len,Time) ->
+recv(S, Len, Time) ->
prim_inet:recvfrom(S, Len, Time).
+-spec close(inet:socket()) -> ok.
close(S) ->
inet:udp_close(S).
@@ -85,4 +101,4 @@ controlling_process(Socket, NewOwner) ->
%% Create a port/socket from a file descriptor
%%
fdopen(Fd, Opts) ->
- inet:fdopen(Fd, Opts, udp, inet6, dgram, ?MODULE).
+ inet:fdopen(Fd, Opts, ?PROTO, ?FAMILY, ?TYPE, ?MODULE).
diff --git a/lib/kernel/src/inet_boot.hrl b/lib/kernel/src/inet_boot.hrl
index ec0d4064e5..adeda604e6 100644
--- a/lib/kernel/src/inet_boot.hrl
+++ b/lib/kernel/src/inet_boot.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/inet_config.erl b/lib/kernel/src/inet_config.erl
index 803fae846e..9f76360b8b 100644
--- a/lib/kernel/src/inet_config.erl
+++ b/lib/kernel/src/inet_config.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -188,9 +188,6 @@ do_load_resolv({win32,Type}, longnames) ->
win32_load_from_registry(Type),
inet_db:set_lookup([native]);
-do_load_resolv({ose,_}, _) ->
- inet_db:set_lookup([file]);
-
do_load_resolv(_, _) ->
inet_db:set_lookup([native]).
@@ -372,7 +369,7 @@ win32_load1(Reg,Type,HFileKey) ->
end.
win32_split_line(Line,nt) -> inet_parse:split_line(Line);
-win32_split_line(Line,windows) -> string:tokens(Line, ",").
+win32_split_line(Line,windows) -> string:lexemes(Line, ",").
win32_get_strings(Reg, Names) ->
win32_get_strings(Reg, Names, []).
diff --git a/lib/kernel/src/inet_config.hrl b/lib/kernel/src/inet_config.hrl
index 7faae8d127..b22ee0f598 100644
--- a/lib/kernel/src/inet_config.hrl
+++ b/lib/kernel/src/inet_config.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/inet_db.erl b/lib/kernel/src/inet_db.erl
index 108a803610..6cbb6ac2da 100644
--- a/lib/kernel/src/inet_db.erl
+++ b/lib/kernel/src/inet_db.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -1206,7 +1206,8 @@ handle_set_file(Option, Fname, TagTm, TagInfo, ParseFun, From,
File = filename:flatten(Fname),
ets:insert(Db, {res_optname(Option), File}),
ets:insert(Db, {TagInfo, undefined}),
- ets:insert(Db, {TagTm, 0}),
+ TimeZero = - (?RES_FILE_UPDATE_TM + 1), % Early enough
+ ets:insert(Db, {TagTm, TimeZero}),
{reply,ok,State};
true ->
File = filename:flatten(Fname),
@@ -1378,7 +1379,8 @@ cache_rr(_Db, Cache, RR) ->
ets:insert(Cache, RR).
times() ->
- erlang:convert_time_unit(erlang:monotonic_time() - erlang:system_info(start_time),native,seconds).
+ erlang:convert_time_unit(erlang:monotonic_time() - erlang:system_info(start_time),
+ native, second).
%% lookup and remove old entries
diff --git a/lib/kernel/src/inet_dns.erl b/lib/kernel/src/inet_dns.erl
index f344b26228..f1f58bc872 100644
--- a/lib/kernel/src/inet_dns.erl
+++ b/lib/kernel/src/inet_dns.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@
-export([decode/1, encode/1]).
--import(lists, [reverse/1, reverse/2, nthtail/2]).
+-import(lists, [reverse/1]).
-include("inet_int.hrl").
-include("inet_dns.hrl").
@@ -473,7 +473,7 @@ decode_data(<<Order:16,Preference:16,Data0/binary>>, _, ?S_NAPTR, Buffer) ->
{Data2,Services} = decode_string(Data1),
{Data,Regexp} = decode_characters(Data2, utf8),
Replacement = decode_domain(Data, Buffer),
- {Order,Preference,string:to_lower(Flags),string:to_lower(Services),
+ {Order,Preference,string:lowercase(Flags),string:lowercase(Services),
Regexp,Replacement};
%% ?S_OPT falls through to default
decode_data(Data, _, ?S_TXT, _) ->
diff --git a/lib/kernel/src/inet_dns.hrl b/lib/kernel/src/inet_dns.hrl
index d1b01bb9c4..07226bbf5c 100644
--- a/lib/kernel/src/inet_dns.hrl
+++ b/lib/kernel/src/inet_dns.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/inet_dns_record_adts.pl b/lib/kernel/src/inet_dns_record_adts.pl
index 6d719d836e..c89d837098 100644
--- a/lib/kernel/src/inet_dns_record_adts.pl
+++ b/lib/kernel/src/inet_dns_record_adts.pl
@@ -2,7 +2,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2009-2011. All Rights Reserved.
+# Copyright Ericsson AB 2009-2016. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/inet_gethost_native.erl b/lib/kernel/src/inet_gethost_native.erl
index 53294810af..9e76c08365 100644
--- a/lib/kernel/src/inet_gethost_native.erl
+++ b/lib/kernel/src/inet_gethost_native.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/inet_hosts.erl b/lib/kernel/src/inet_hosts.erl
index e8457fd9d6..fc653bf0d3 100644
--- a/lib/kernel/src/inet_hosts.erl
+++ b/lib/kernel/src/inet_hosts.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -72,9 +72,6 @@ gethostbyname(Name, Type, Byname, Byaddr) ->
gethostbyaddr({A,B,C,D}=IP) when ?ip(A,B,C,D) ->
gethostbyaddr(IP, inet);
-%% ipv4 only ipv6 address
-gethostbyaddr({0,0,0,0,0,16#ffff=F,G,H}) when ?ip6(0,0,0,0,0,F,G,H) ->
- gethostbyaddr({G bsr 8, G band 255, H bsr 8, H band 255});
gethostbyaddr({A,B,C,D,E,F,G,H}=IP) when ?ip6(A,B,C,D,E,F,G,H) ->
gethostbyaddr(IP, inet6);
gethostbyaddr(Addr) when is_list(Addr) ->
diff --git a/lib/kernel/src/inet_int.hrl b/lib/kernel/src/inet_int.hrl
index e7c6cf8ae2..f6525d7261 100644
--- a/lib/kernel/src/inet_int.hrl
+++ b/lib/kernel/src/inet_int.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -25,10 +25,13 @@
%%
%% family codes to open
+-define(INET_AF_UNSPEC, 0).
-define(INET_AF_INET, 1).
-define(INET_AF_INET6, 2).
-define(INET_AF_ANY, 3). % Fake for ANY in any address family
-define(INET_AF_LOOPBACK, 4). % Fake for LOOPBACK in any address family
+-define(INET_AF_LOCAL, 5). % For Unix Domain address family
+-define(INET_AF_UNDEFINED, 6). % For any unknown address family
%% type codes to open and gettype - INET_REQ_GETTYPE
-define(INET_TYPE_STREAM, 1).
@@ -97,6 +100,8 @@
-define(TCP_REQ_RECV, 42).
-define(TCP_REQ_UNRECV, 43).
-define(TCP_REQ_SHUTDOWN, 44).
+-define(TCP_REQ_SENDFILE, 45).
+
%% UDP and SCTP requests
-define(PACKET_REQ_RECV, 60).
%%-define(SCTP_REQ_LISTEN, 61). MERGED
@@ -150,6 +155,14 @@
-define(INET_LOPT_NETNS, 38).
-define(INET_LOPT_TCP_SHOW_ECONNRESET, 39).
-define(INET_LOPT_LINE_DELIM, 40).
+-define(INET_OPT_TCLASS, 41).
+-define(INET_OPT_BIND_TO_DEVICE, 42).
+-define(INET_OPT_RECVTOS, 43).
+-define(INET_OPT_RECVTCLASS, 44).
+-define(INET_OPT_PKTOPTIONS, 45).
+-define(INET_OPT_TTL, 46).
+-define(INET_OPT_RECVTTL, 47).
+-define(TCP_OPT_NOPUSH, 48).
% Specific SCTP options: separate range:
-define(SCTP_OPT_RTOINFO, 100).
-define(SCTP_OPT_ASSOCINFO, 101).
@@ -314,6 +327,12 @@
[((X) bsr 24) band 16#ff, ((X) bsr 16) band 16#ff,
((X) bsr 8) band 16#ff, (X) band 16#ff]).
+-define(int64(X),
+ [((X) bsr 56) band 16#ff, ((X) bsr 48) band 16#ff,
+ ((X) bsr 40) band 16#ff, ((X) bsr 32) band 16#ff,
+ ((X) bsr 24) band 16#ff, ((X) bsr 16) band 16#ff,
+ ((X) bsr 8) band 16#ff, (X) band 16#ff]).
+
-define(intAID(X), % For SCTP AssocID
?int32(X)).
@@ -378,7 +397,7 @@
{
ifaddr = any, %% bind to interface address
port = 0, %% bind to port (default is dynamic port)
- fd = -1, %% fd >= 0 => already bound
+ fd = -1, %% fd >= 0 => already bound
opts = [] %% [{active,true}] added in inet:connect_options
}).
diff --git a/lib/kernel/src/inet_parse.erl b/lib/kernel/src/inet_parse.erl
index 877745ed55..e9685c6554 100644
--- a/lib/kernel/src/inet_parse.erl
+++ b/lib/kernel/src/inet_parse.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -95,7 +95,7 @@ hosts(Fname,File) ->
%% interface with a %if suffix. These kind of
%% addresses maybe need to be gracefully handled
%% throughout inet* and inet_drv.
- case string:tokens(Address, "%") of
+ case string:lexemes(Address, "%") of
[Addr,_] ->
{ok,_} = address(Addr),
skip;
@@ -407,7 +407,7 @@ is_dom1([C | Cs]) when C >= $a, C =< $z -> is_dom_ldh(Cs);
is_dom1([C | Cs]) when C >= $A, C =< $Z -> is_dom_ldh(Cs);
is_dom1([C | Cs]) when C >= $0, C =< $9 ->
case is_dom_ldh(Cs) of
- true -> is_dom2(string:tokens([C | Cs],"."));
+ true -> is_dom2(string:lexemes([C | Cs],"."));
false -> false
end;
is_dom1(_) -> false.
@@ -644,8 +644,12 @@ ipv6_addr(Cs) ->
ipv6_addr(hex(Cs), [], 0).
%% Before "::"
+ipv6_addr({Cs0,"%"++Cs1}, A, N) when N == 7 ->
+ ipv6_addr_scope(Cs1, [hex_to_int(Cs0)|A], [], N+1, []);
ipv6_addr({Cs0,[]}, A, N) when N == 7 ->
ipv6_addr_done([hex_to_int(Cs0)|A]);
+ipv6_addr({Cs0,"::%"++Cs1}, A, N) when N =< 6 ->
+ ipv6_addr_scope(Cs1, [hex_to_int(Cs0)|A], [], N+1, []);
ipv6_addr({Cs0,"::"}, A, N) when N =< 6 ->
ipv6_addr_done([hex_to_int(Cs0)|A], [], N+1);
ipv6_addr({Cs0,"::"++Cs1}, A, N) when N =< 5 ->
@@ -658,6 +662,8 @@ ipv6_addr(_, _, _) ->
erlang:error(badarg).
%% After "::"
+ipv6_addr({Cs0,"%"++Cs1}, A, B, N) when N =< 6 ->
+ ipv6_addr_scope(Cs1, A, [hex_to_int(Cs0)|B], N+1, []);
ipv6_addr({Cs0,[]}, A, B, N) when N =< 6 ->
ipv6_addr_done(A, [hex_to_int(Cs0)|B], N+1);
ipv6_addr({Cs0,":"++Cs1}, A, B, N) when N =< 5 ->
@@ -667,6 +673,43 @@ ipv6_addr({Cs0,"."++_=Cs1}, A, B, N) when N =< 5 ->
ipv6_addr(_, _, _, _) ->
erlang:error(badarg).
+%% After "%"
+ipv6_addr_scope([], Ar, Br, N, Sr) ->
+ ScopeId =
+ case lists:reverse(Sr) of
+ %% Empty scope id
+ "" -> 0;
+ %% Scope id starts with 0
+ "0"++S -> dec16(S);
+ _ -> 0
+ end,
+ %% Suggested formats for scope id parsing:
+ %% "" -> "0"
+ %% "0" -> Scope id 0
+ %% "1" - "9", "10" - "99" -> "0"++S
+ %% "0"++DecimalScopeId -> decimal scope id
+ %% "25"++PercentEncoded -> Percent encoded interface name
+ %% S -> Interface name (Unicode?)
+ %% Missing: translation from interface name into integer scope id.
+ %% XXX: scope id is actually 32 bit, but we only have room for
+ %% 16 bit in the second address word - ignore or fix (how)?
+ ipv6_addr_scope(ScopeId, Ar, Br, N);
+ipv6_addr_scope([C|Cs], Ar, Br, N, Sr) ->
+ ipv6_addr_scope(Cs, Ar, Br, N, [C|Sr]).
+%%
+ipv6_addr_scope(ScopeId, [P], Br, N)
+ when N =< 7, P =:= 16#fe80;
+ N =< 7, P =:= 16#ff02 ->
+ %% Optimized special case
+ ipv6_addr_done([ScopeId,P], Br, N+1);
+ipv6_addr_scope(ScopeId, Ar, Br, N) ->
+ case lists:reverse(Br++dup(8-N, 0, Ar)) of
+ [P,0|Xs] when P =:= 16#fe80; P =:= 16#ff02 ->
+ list_to_tuple([P,ScopeId|Xs]);
+ _ ->
+ erlang:error(badarg)
+ end.
+
ipv6_addr_done(Ar, Br, N, {D1,D2,D3,D4}) ->
ipv6_addr_done(Ar, [((D3 bsl 8) bor D4),((D1 bsl 8) bor D2)|Br], N+2).
@@ -690,6 +733,19 @@ hex(Cs, [_|_]=R, _) when is_list(Cs) ->
hex(_, _, _) ->
erlang:error(badarg).
+%% Parse a reverse decimal integer string, empty is 0
+dec16(Cs) -> dec16(Cs, 0).
+%%
+dec16([], I) -> I;
+dec16([C|Cs], I) when C >= $0, C =< $9 ->
+ case 10*I + (C - $0) of
+ J when 16#ffff < J ->
+ erlang:error(badarg);
+ J ->
+ dec16(Cs, J)
+ end;
+dec16(_, _) -> erlang:error(badarg).
+
%% Hex string to integer
hex_to_int(Cs) -> erlang:list_to_integer(Cs, 16).
@@ -701,9 +757,9 @@ dup(N, E, L) when is_integer(N), N >= 1 ->
-%% Convert IPv4 adress to ascii
-%% Convert IPv6 / IPV4 adress to ascii (plain format)
-ntoa({A,B,C,D}) ->
+%% Convert IPv4 address to ascii
+%% Convert IPv6 / IPV4 address to ascii (plain format)
+ntoa({A,B,C,D}) when (A band B band C band D band (bnot 16#ff)) =:= 0 ->
integer_to_list(A) ++ "." ++ integer_to_list(B) ++ "." ++
integer_to_list(C) ++ "." ++ integer_to_list(D);
%% ANY
@@ -711,13 +767,25 @@ ntoa({0,0,0,0,0,0,0,0}) -> "::";
%% LOOPBACK
ntoa({0,0,0,0,0,0,0,1}) -> "::1";
%% IPV4 ipv6 host address
-ntoa({0,0,0,0,0,0,A,B}) -> "::" ++ dig_to_dec(A) ++ "." ++ dig_to_dec(B);
+ntoa({0,0,0,0,0,0,A,B}) when (A band B band (bnot 16#ffff)) =:= 0 ->
+ "::" ++ dig_to_dec(A) ++ "." ++ dig_to_dec(B);
%% IPV4 non ipv6 host address
-ntoa({0,0,0,0,0,16#ffff,A,B}) ->
- "::FFFF:" ++ dig_to_dec(A) ++ "." ++ dig_to_dec(B);
-ntoa({_,_,_,_,_,_,_,_}=T) ->
- %% Find longest sequence of zeros, at least 2, to replace with "::"
- ntoa(tuple_to_list(T), []);
+ntoa({0,0,0,0,0,16#ffff,A,B}) when (A band B band (bnot 16#ffff)) =:= 0 ->
+ "::ffff:" ++ dig_to_dec(A) ++ "." ++ dig_to_dec(B);
+ntoa({A,B,C,D,E,F,G,H})
+ when (A band B band C band D band E band F band G band H band
+ (bnot 16#ffff)) =:= 0 ->
+ if
+ A =:= 16#fe80, B =/= 0;
+ A =:= 16#ff02, B =/= 0 ->
+ %% Find longest sequence of zeros, at least 2,
+ %% to replace with "::"
+ ntoa([A,0,C,D,E,F,G,H], []) ++ "%0" ++ integer_to_list(B);
+ true ->
+ %% Find longest sequence of zeros, at least 2,
+ %% to replace with "::"
+ ntoa([A,B,C,D,E,F,G,H], [])
+ end;
ntoa(_) ->
{error, einval}.
@@ -780,9 +848,19 @@ dig_to_dec(X) ->
integer_to_list((X bsr 8) band 16#ff) ++ "." ++
integer_to_list(X band 16#ff).
-%% Convert a integer to hex string
-dig_to_hex(X) ->
- erlang:integer_to_list(X, 16).
+%% Convert a integer to hex string (lowercase)
+dig_to_hex(0) -> "0";
+dig_to_hex(X) when is_integer(X), 0 < X ->
+ dig_to_hex(X, "").
+%%
+dig_to_hex(0, Acc) -> Acc;
+dig_to_hex(X, Acc) ->
+ dig_to_hex(
+ X bsr 4,
+ [case X band 15 of
+ D when D < 10 -> D + $0;
+ D -> D - 10 + $a
+ end|Acc]).
%%
%% Count number of '.' in a name
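The parser hunks above move from the deprecated string:tokens/2 to string:lexemes/2. A minimal sketch of the two splits the patch performs (the module and function names are illustrative assumptions, not part of inet_parse):

-module(lexemes_example).
-export([split_scope/1, label_count/1]).

%% Split an "Addr%Interface" hosts entry on '%', as the patched hosts/2 does.
split_scope(Address) ->
    string:lexemes(Address, "%").

%% Count the dot-separated labels of a domain name, as is_dom1/1 now does.
label_count(Name) ->
    length(string:lexemes(Name, ".")).

For example, split_scope("fe80::1%eth0") gives ["fe80::1","eth0"] and label_count("www.erlang.org") gives 3.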
diff --git a/lib/kernel/src/inet_res.erl b/lib/kernel/src/inet_res.erl
index e6988ac79b..6454802b04 100644
--- a/lib/kernel/src/inet_res.erl
+++ b/lib/kernel/src/inet_res.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -349,9 +349,6 @@ gethostbyaddr_tm({A,B,C,D} = IP, Timer) when ?ip(A,B,C,D) ->
{ok, HEnt} -> {ok, HEnt};
_ -> res_gethostbyaddr(dn_in_addr_arpa(A,B,C,D), IP, Timer)
end;
-%% ipv4 only ipv6 address
-gethostbyaddr_tm({0,0,0,0,0,16#ffff,G,H},Timer) when is_integer(G+H) ->
- gethostbyaddr_tm({G div 256, G rem 256, H div 256, H rem 256},Timer);
gethostbyaddr_tm({A,B,C,D,E,F,G,H} = IP, Timer) when ?ip6(A,B,C,D,E,F,G,H) ->
inet_db:res_update_conf(),
case inet_db:gethostbyaddr(IP) of
@@ -431,28 +428,7 @@ gethostbyname(Name,Family,Timeout) ->
gethostbyname_tm(Name,inet,Timer) ->
getbyname_tm(Name,?S_A,Timer);
gethostbyname_tm(Name,inet6,Timer) ->
- case getbyname_tm(Name,?S_AAAA,Timer) of
- {ok,HEnt} -> {ok,HEnt};
- {error,nxdomain} ->
- case getbyname_tm(Name, ?S_A,Timer) of
- {ok, HEnt} ->
- %% rewrite to a ipv4 only ipv6 address
- {ok,
- HEnt#hostent {
- h_addrtype = inet6,
- h_length = 16,
- h_addr_list =
- lists:map(
- fun({A,B,C,D}) ->
- {0,0,0,0,0,16#ffff,A*256+B,C*256+D}
- end, HEnt#hostent.h_addr_list)
- }};
- Error ->
- Error
- end;
- Error ->
- Error
- end;
+ getbyname_tm(Name,?S_AAAA,Timer);
gethostbyname_tm(_Name, _Family, _Timer) ->
{error, einval}.
@@ -859,15 +835,17 @@ query_ns(S0, Id, Buffer, IP, Port, Timer, Retry, I,
{ok,S} ->
Timeout =
inet:timeout( (Tm * (1 bsl I)) div Retry, Timer),
- {S,
case query_udp(
S, Id, Buffer, IP, Port, Timeout, Verbose) of
{ok,#dns_rec{header=H}} when H#dns_header.tc ->
TcpTimeout = inet:timeout(Tm*5, Timer),
- query_tcp(
- TcpTimeout, Id, Buffer, IP, Port, Verbose);
- Reply -> Reply
- end};
+ {S, query_tcp(
+ TcpTimeout, Id, Buffer, IP, Port, Verbose)};
+ {error, econnrefused} = Err ->
+ ok = udp_close(S),
+ {#sock{}, Err};
+ Reply -> {S, Reply}
+ end;
Error ->
{S0,Error}
end
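The removed gethostbyname_tm/3 clause used to fall back from AAAA to A lookups by rewriting each IPv4 address into an IPv4-mapped IPv6 tuple; after this patch an inet6 query returns only genuine AAAA data. A sketch of the mapping the deleted code applied (module and function name are illustrative):

-module(mapped_ipv6_example).
-export([to_mapped/1]).

%% Build the ::ffff:A.B.C.D tuple that the removed fallback produced
%% from a plain IPv4 address tuple.
to_mapped({A,B,C,D}) when A band 16#ff =:= A, B band 16#ff =:= B,
                          C band 16#ff =:= C, D band 16#ff =:= D ->
    {0,0,0,0,0,16#ffff,A*256+B,C*256+D}.

to_mapped({127,0,0,1}) evaluates to {0,0,0,0,0,65535,32512,1}, i.e. ::ffff:127.0.0.1.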
diff --git a/lib/kernel/src/inet_res.hrl b/lib/kernel/src/inet_res.hrl
index c77fe30e7a..774b4074a5 100644
--- a/lib/kernel/src/inet_res.hrl
+++ b/lib/kernel/src/inet_res.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/inet_sctp.erl b/lib/kernel/src/inet_sctp.erl
index 88c8d24143..8569cacb29 100644
--- a/lib/kernel/src/inet_sctp.erl
+++ b/lib/kernel/src/inet_sctp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -30,6 +30,7 @@
-include("inet_sctp.hrl").
-include("inet_int.hrl").
+-define(PROTO, sctp).
-define(FAMILY, inet).
-export([getserv/1,getaddr/1,getaddr/2,translate_ip/1]).
-export([open/1,close/1,listen/2,peeloff/2,connect/5]).
@@ -38,25 +39,19 @@
getserv(Port) when is_integer(Port) -> {ok, Port};
-getserv(Name) when is_atom(Name) ->
- inet:getservbyname(Name, sctp);
-getserv(_) ->
- {error,einval}.
+getserv(Name) when is_atom(Name) -> inet:getservbyname(Name, ?PROTO);
+getserv(_) -> {error,einval}.
-getaddr(Address) ->
- inet:getaddr(Address, ?FAMILY).
-getaddr(Address, Timer) ->
- inet:getaddr_tm(Address, ?FAMILY, Timer).
-
-translate_ip(IP) ->
- inet:translate_ip(IP, ?FAMILY).
+getaddr(Address) -> inet:getaddr(Address, ?FAMILY).
+getaddr(Address, Timer) -> inet:getaddr_tm(Address, ?FAMILY, Timer).
+translate_ip(IP) -> inet:translate_ip(IP, ?FAMILY).
open(Opts) ->
case inet:sctp_options(Opts, ?MODULE) of
{ok,#sctp_opts{fd=Fd,ifaddr=Addr,port=Port,type=Type,opts=SOs}} ->
- inet:open(Fd, Addr, Port, SOs, sctp, ?FAMILY, Type, ?MODULE);
+ inet:open(Fd, Addr, Port, SOs, ?PROTO, ?FAMILY, Type, ?MODULE);
Error -> Error
end.
diff --git a/lib/kernel/src/inet_tcp.erl b/lib/kernel/src/inet_tcp.erl
index f551af9709..f1e3116856 100644
--- a/lib/kernel/src/inet_tcp.erl
+++ b/lib/kernel/src/inet_tcp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -27,13 +27,18 @@
-export([controlling_process/2]).
-export([fdopen/2]).
--export([family/0, mask/2, parse_address/1]).
+-export([family/0, mask/2, parse_address/1]). % inet_tcp_dist
-export([getserv/1, getaddr/1, getaddr/2, getaddrs/1, getaddrs/2]).
+-export([translate_ip/1]).
-include("inet_int.hrl").
+-define(FAMILY, inet).
+-define(PROTO, tcp).
+-define(TYPE, stream).
+
%% my address family
-family() -> inet.
+family() -> ?FAMILY.
%% Apply netmask on address
mask({M1,M2,M3,M4}, {IP1,IP2,IP3,IP4}) ->
@@ -48,16 +53,19 @@ parse_address(Host) ->
%% inet_tcp port lookup
getserv(Port) when is_integer(Port) -> {ok, Port};
-getserv(Name) when is_atom(Name) -> inet:getservbyname(Name,tcp).
+getserv(Name) when is_atom(Name) -> inet:getservbyname(Name, ?PROTO).
%% inet_tcp address lookup
-getaddr(Address) -> inet:getaddr(Address, inet).
-getaddr(Address,Timer) -> inet:getaddr_tm(Address, inet, Timer).
+getaddr(Address) -> inet:getaddr(Address, ?FAMILY).
+getaddr(Address, Timer) -> inet:getaddr_tm(Address, ?FAMILY, Timer).
%% inet_tcp address lookup
-getaddrs(Address) -> inet:getaddrs(Address, inet).
-getaddrs(Address,Timer) -> inet:getaddrs_tm(Address,inet,Timer).
-
+getaddrs(Address) -> inet:getaddrs(Address, ?FAMILY).
+getaddrs(Address, Timer) -> inet:getaddrs_tm(Address, ?FAMILY, Timer).
+
+%% inet_tcp special this side addresses
+translate_ip(IP) -> inet:translate_ip(IP, ?FAMILY).
+
%%
%% Send data on a socket
%%
@@ -77,7 +85,7 @@ unrecv(Socket, Data) -> prim_inet:unrecv(Socket, Data).
%%
shutdown(Socket, How) ->
prim_inet:shutdown(Socket, How).
-
+
%%
%% Close a socket (async)
%%
@@ -88,7 +96,7 @@ close(Socket) ->
%% Set controlling process
%%
controlling_process(Socket, NewOwner) ->
- inet:tcp_controlling_process(Socket, NewOwner).
+ inet:tcp_controlling_process(Socket, NewOwner).
%%
%% Connect
@@ -98,23 +106,28 @@ connect(Address, Port, Opts) ->
connect(Address, Port, Opts, infinity) ->
do_connect(Address, Port, Opts, infinity);
-connect(Address, Port, Opts, Timeout) when is_integer(Timeout),
- Timeout >= 0 ->
+connect(Address, Port, Opts, Timeout)
+ when is_integer(Timeout), Timeout >= 0 ->
do_connect(Address, Port, Opts, Timeout).
-do_connect({A,B,C,D}, Port, Opts, Time) when ?ip(A,B,C,D), ?port(Port) ->
- case inet:connect_options(Opts, inet) of
+do_connect(Addr = {A,B,C,D}, Port, Opts, Time)
+ when ?ip(A,B,C,D), ?port(Port) ->
+ case inet:connect_options(Opts, ?MODULE) of
{error, Reason} -> exit(Reason);
- {ok, #connect_opts{fd=Fd,
- ifaddr=BAddr={Ab,Bb,Cb,Db},
- port=BPort,
- opts=SockOpts}}
+ {ok,
+ #connect_opts{
+ fd = Fd,
+ ifaddr = BAddr = {Ab,Bb,Cb,Db},
+ port = BPort,
+ opts = SockOpts}}
when ?ip(Ab,Bb,Cb,Db), ?port(BPort) ->
- case inet:open(Fd,BAddr,BPort,SockOpts,tcp,inet,stream,?MODULE) of
+ case inet:open(
+ Fd, BAddr, BPort, SockOpts,
+ ?PROTO, ?FAMILY, ?TYPE, ?MODULE) of
{ok, S} ->
- case prim_inet:connect(S, {A,B,C,D}, Port, Time) of
- ok -> {ok,S};
- Error -> prim_inet:close(S), Error
+ case prim_inet:connect(S, Addr, Port, Time) of
+ ok -> {ok,S};
+ Error -> prim_inet:close(S), Error
end;
Error -> Error
end;
@@ -125,14 +138,18 @@ do_connect({A,B,C,D}, Port, Opts, Time) when ?ip(A,B,C,D), ?port(Port) ->
%% Listen
%%
listen(Port, Opts) ->
- case inet:listen_options([{port,Port} | Opts], inet) of
- {error,Reason} -> exit(Reason);
- {ok, #listen_opts{fd=Fd,
- ifaddr=BAddr={A,B,C,D},
- port=BPort,
- opts=SockOpts}=R}
+ case inet:listen_options([{port,Port} | Opts], ?MODULE) of
+ {error, Reason} -> exit(Reason);
+ {ok,
+ #listen_opts{
+ fd = Fd,
+ ifaddr = BAddr = {A,B,C,D},
+ port = BPort,
+ opts = SockOpts} = R}
when ?ip(A,B,C,D), ?port(BPort) ->
- case inet:open(Fd,BAddr,BPort,SockOpts,tcp,inet,stream,?MODULE) of
+ case inet:open(
+ Fd, BAddr, BPort, SockOpts,
+ ?PROTO, ?FAMILY, ?TYPE, ?MODULE) of
{ok, S} ->
case prim_inet:listen(S, R#listen_opts.backlog) of
ok -> {ok, S};
@@ -146,23 +163,26 @@ listen(Port, Opts) ->
%%
%% Accept
%%
-accept(L) ->
- case prim_inet:accept(L) of
+accept(L) ->
+ case prim_inet:accept(L, accept_family_opts()) of
{ok, S} ->
inet_db:register_socket(S, ?MODULE),
{ok,S};
Error -> Error
end.
-
-accept(L,Timeout) ->
- case prim_inet:accept(L,Timeout) of
+
+accept(L, Timeout) ->
+ case prim_inet:accept(L, Timeout, accept_family_opts()) of
{ok, S} ->
inet_db:register_socket(S, ?MODULE),
{ok,S};
Error -> Error
end.
+
+accept_family_opts() -> [tos, ttl, recvtos, recvttl].
+
%%
%% Create a port/socket from a file descriptor
%%
fdopen(Fd, Opts) ->
- inet:fdopen(Fd, Opts, tcp, inet, stream, ?MODULE).
+ inet:fdopen(Fd, Opts, ?PROTO, ?FAMILY, ?TYPE, ?MODULE).
diff --git a/lib/kernel/src/inet_tcp_dist.erl b/lib/kernel/src/inet_tcp_dist.erl
index 64b28bb49b..c37212b0f9 100644
--- a/lib/kernel/src/inet_tcp_dist.erl
+++ b/lib/kernel/src/inet_tcp_dist.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2015. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -24,13 +24,16 @@
-export([listen/1, accept/1, accept_connection/5,
setup/5, close/1, select/1, is_node_name/1]).
+%% Optional
+-export([setopts/2, getopts/2]).
+
%% Generalized dist API
-export([gen_listen/2, gen_accept/2, gen_accept_connection/6,
gen_setup/6, gen_select/2]).
%% internal exports
--export([accept_loop/3,do_accept/7,do_setup/7,getstat/1]).
+-export([accept_loop/3,do_accept/7,do_setup/7,getstat/1,tick/2]).
-import(error_logger,[error_msg/2]).
@@ -73,7 +76,8 @@ gen_listen(Driver, Name) ->
{ok, Socket} ->
TcpAddress = get_tcp_address(Driver, Socket),
{_,Port} = TcpAddress#net_address.address,
- case erl_epmd:register_node(Name, Port) of
+ ErlEpmd = net_kernel:epmd_module(),
+ case ErlEpmd:register_node(Name, Port, Driver) of
{ok, Creation} ->
{ok, {Socket, TcpAddress, Creation}};
Error ->
@@ -214,8 +218,10 @@ do_accept(Driver, Kernel, AcceptPid, Socket, MyNode, Allowed, SetupTime) ->
inet:getll(S)
end,
f_address = fun(S, Node) -> get_remote_id(Driver, S, Node) end,
- mf_tick = fun(S) -> tick(Driver, S) end,
- mf_getstat = fun ?MODULE:getstat/1
+ mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end,
+ mf_getstat = fun ?MODULE:getstat/1,
+ mf_setopts = fun ?MODULE:setopts/2,
+ mf_getopts = fun ?MODULE:getopts/2
},
dist_util:handshake_other_started(HSData);
{false,IP} ->
@@ -277,69 +283,22 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
?trace("~p~n",[{inet_tcp_dist,self(),setup,Node}]),
[Name, Address] = splitnode(Driver, Node, LongOrShortNames),
AddressFamily = Driver:family(),
- case inet:getaddr(Address, AddressFamily) of
+ ErlEpmd = net_kernel:epmd_module(),
+ {ARMod, ARFun} = get_address_resolver(ErlEpmd),
+ Timer = dist_util:start_timer(SetupTime),
+ case ARMod:ARFun(Name, Address, AddressFamily) of
+ {ok, Ip, TcpPort, Version} ->
+ ?trace("address_please(~p) -> version ~p~n",
+ [Node,Version]),
+ do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer);
{ok, Ip} ->
- Timer = dist_util:start_timer(SetupTime),
- case erl_epmd:port_please(Name, Ip) of
+ case ErlEpmd:port_please(Name, Ip) of
{port, TcpPort, Version} ->
?trace("port_please(~p) -> version ~p~n",
[Node,Version]),
- dist_util:reset_timer(Timer),
- case
- Driver:connect(
- Ip, TcpPort,
- connect_options([{active, false}, {packet, 2}]))
- of
- {ok, Socket} ->
- HSData = #hs_data{
- kernel_pid = Kernel,
- other_node = Node,
- this_node = MyNode,
- socket = Socket,
- timer = Timer,
- this_flags = 0,
- other_version = Version,
- f_send = fun Driver:send/2,
- f_recv = fun Driver:recv/3,
- f_setopts_pre_nodeup =
- fun(S) ->
- inet:setopts
- (S,
- [{active, false},
- {packet, 4},
- nodelay()])
- end,
- f_setopts_post_nodeup =
- fun(S) ->
- inet:setopts
- (S,
- [{active, true},
- {deliver, port},
- {packet, 4},
- nodelay()])
- end,
- f_getll = fun inet:getll/1,
- f_address =
- fun(_,_) ->
- #net_address{
- address = {Ip,TcpPort},
- host = Address,
- protocol = tcp,
- family = AddressFamily}
- end,
- mf_tick = fun(S) -> tick(Driver, S) end,
- mf_getstat = fun ?MODULE:getstat/1,
- request_type = Type
- },
- dist_util:handshake_we_started(HSData);
- _ ->
- %% Other Node may have closed since
- %% port_please !
- ?trace("other node (~p) "
- "closed since port_please.~n",
- [Node]),
- ?shutdown(Node)
- end;
+ do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer);
_ ->
?trace("port_please (~p) "
"failed.~n", [Node]),
@@ -351,6 +310,71 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
?shutdown(Node)
end.
+%%
+%% Actual setup of connection
+%%
+do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer) ->
+ dist_util:reset_timer(Timer),
+ case
+ Driver:connect(
+ Ip, TcpPort,
+ connect_options([{active, false}, {packet, 2}]))
+ of
+ {ok, Socket} ->
+ HSData = #hs_data{
+ kernel_pid = Kernel,
+ other_node = Node,
+ this_node = MyNode,
+ socket = Socket,
+ timer = Timer,
+ this_flags = 0,
+ other_version = Version,
+ f_send = fun Driver:send/2,
+ f_recv = fun Driver:recv/3,
+ f_setopts_pre_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, false},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_setopts_post_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, true},
+ {deliver, port},
+ {packet, 4},
+ nodelay()])
+ end,
+
+ f_getll = fun inet:getll/1,
+ f_address =
+ fun(_,_) ->
+ #net_address{
+ address = {Ip,TcpPort},
+ host = Address,
+ protocol = tcp,
+ family = AddressFamily}
+ end,
+ mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end,
+ mf_getstat = fun ?MODULE:getstat/1,
+ request_type = Type,
+ mf_setopts = fun ?MODULE:setopts/2,
+ mf_getopts = fun ?MODULE:getopts/2
+ },
+ dist_util:handshake_we_started(HSData);
+ _ ->
+ %% Other Node may have closed since
+ %% discovery !
+ ?trace("other node (~p) "
+ "closed since discovery (port_please).~n",
+ [Node]),
+ ?shutdown(Node)
+ end.
+
connect_options(Opts) ->
case application:get_env(kernel, inet_dist_connect_options) of
{ok,ConnectOpts} ->
@@ -380,14 +404,14 @@ splitnode(Driver, Node, LongOrShortNames) ->
error_msg("** System running to use "
"fully qualified "
"hostnames **~n"
- "** Hostname ~s is illegal **~n",
+ "** Hostname ~ts is illegal **~n",
[Host]),
?shutdown(Node)
end;
L when length(L) > 1, LongOrShortNames =:= shortnames ->
error_msg("** System NOT running to use fully qualified "
"hostnames **~n"
- "** Hostname ~s is illegal **~n",
+ "** Hostname ~ts is illegal **~n",
[Host]),
?shutdown(Node);
_ ->
@@ -420,6 +444,16 @@ get_tcp_address(Driver, Socket) ->
}.
%% ------------------------------------------------------------
+%% Determine if EPMD module supports address resolving. Default
+%% is to use inet:getaddr/2.
+%% ------------------------------------------------------------
+get_address_resolver(EpmdModule) ->
+ case erlang:function_exported(EpmdModule, address_please, 3) of
+ true -> {EpmdModule, address_please};
+ _ -> {erl_epmd, address_please}
+ end.
+
+%% ------------------------------------------------------------
%% Do only accept new connection attempts from nodes at our
%% own LAN, if the check_ip environment parameter is true.
%% ------------------------------------------------------------
@@ -490,3 +524,12 @@ split_stat([], R, W, P) ->
{ok, R, W, P}.
+setopts(S, Opts) ->
+ case [Opt || {K,_}=Opt <- Opts,
+ K =:= active orelse K =:= deliver orelse K =:= packet] of
+ [] -> inet:setopts(S,Opts);
+ Opts1 -> {error, {badopts,Opts1}}
+ end.
+
+getopts(S, Opts) ->
+ inet:getopts(S, Opts).
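do_setup/7 now asks net_kernel:epmd_module() for the EPMD callback module and, via get_address_resolver/1, uses an optional address_please/3 export when present. A heavily hedged sketch of such a module (the module name is an assumption; a full replacement for erl_epmd also needs its other callbacks such as register_node and port_please, and is selected with the -epmd_module flag):

-module(my_epmd_resolver).
-export([address_please/3]).

%% Optional callback probed for with erlang:function_exported/3 above.
%% Returning {ok, Ip} makes inet_tcp_dist continue with port_please/2;
%% returning {ok, Ip, TcpPort, Version} lets it skip the port lookup.
address_please(_Name, Host, AddressFamily) ->
    inet:getaddr(Host, AddressFamily).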
diff --git a/lib/kernel/src/inet_udp.erl b/lib/kernel/src/inet_udp.erl
index 5b2e5120c9..1e624b9e90 100644
--- a/lib/kernel/src/inet_udp.erl
+++ b/lib/kernel/src/inet_udp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -24,21 +24,26 @@
-export([controlling_process/2]).
-export([fdopen/2]).
--export([getserv/1, getaddr/1, getaddr/2]).
+-export([getserv/1, getaddr/1, getaddr/2, translate_ip/1]).
-include("inet_int.hrl").
+-define(FAMILY, inet).
+-define(PROTO, udp).
+-define(TYPE, dgram).
-define(RECBUF, (8*1024)).
-
%% inet_udp port lookup
getserv(Port) when is_integer(Port) -> {ok, Port};
-getserv(Name) when is_atom(Name) -> inet:getservbyname(Name,udp).
+getserv(Name) when is_atom(Name) -> inet:getservbyname(Name, ?PROTO).
%% inet_udp address lookup
-getaddr(Address) -> inet:getaddr(Address, inet).
-getaddr(Address,Timer) -> inet:getaddr_tm(Address, inet, Timer).
+getaddr(Address) -> inet:getaddr(Address, ?FAMILY).
+getaddr(Address, Timer) -> inet:getaddr_tm(Address, ?FAMILY, Timer).
+
+%% inet_udp special this side addresses
+translate_ip(IP) -> inet:translate_ip(IP, ?FAMILY).
-spec open(_) -> {ok, inet:socket()} | {error, atom()}.
open(Port) -> open(Port, []).
@@ -47,33 +52,38 @@ open(Port) -> open(Port, []).
open(Port, Opts) ->
case inet:udp_options(
[{port,Port}, {recbuf, ?RECBUF} | Opts],
- inet) of
+ ?MODULE) of
{error, Reason} -> exit(Reason);
- {ok, #udp_opts{fd=Fd,
- ifaddr=BAddr={A,B,C,D},
- port=BPort,
- opts=SockOpts}} when ?ip(A,B,C,D), ?port(BPort) ->
- inet:open(Fd,BAddr,BPort,SockOpts,udp,inet,dgram,?MODULE);
+ {ok,
+ #udp_opts{
+ fd = Fd,
+ ifaddr = BAddr = {A,B,C,D},
+ port = BPort,
+ opts = SockOpts}}
+ when ?ip(A,B,C,D), ?port(BPort) ->
+ inet:open(
+ Fd, BAddr, BPort, SockOpts, ?PROTO, ?FAMILY, ?TYPE, ?MODULE);
{ok, _} -> exit(badarg)
end.
-send(S,{A,B,C,D},P,Data) when ?ip(A,B,C,D), ?port(P) ->
- prim_inet:sendto(S, {A,B,C,D}, P, Data).
+send(S, {A,B,C,D} = Addr, P, Data)
+ when ?ip(A,B,C,D), ?port(P) ->
+ prim_inet:sendto(S, Addr, P, Data).
send(S, Data) ->
prim_inet:sendto(S, {0,0,0,0}, 0, Data).
-connect(S, {A,B,C,D}, P) when ?ip(A,B,C,D), ?port(P) ->
- prim_inet:connect(S, {A,B,C,D}, P).
+connect(S, Addr = {A,B,C,D}, P)
+ when ?ip(A,B,C,D), ?port(P) ->
+ prim_inet:connect(S, Addr, P).
-recv(S,Len) ->
+recv(S, Len) ->
prim_inet:recvfrom(S, Len).
-recv(S,Len,Time) ->
+recv(S, Len, Time) ->
prim_inet:recvfrom(S, Len, Time).
-spec close(inet:socket()) -> ok.
-
close(S) ->
inet:udp_close(S).
@@ -92,9 +102,9 @@ controlling_process(Socket, NewOwner) ->
%% Create a port/socket from a file descriptor
%%
fdopen(Fd, Opts) ->
- inet:fdopen(Fd,
- optuniquify([{recbuf, ?RECBUF} | Opts]),
- udp, inet, dgram, ?MODULE).
+ inet:fdopen(
+ Fd, optuniquify([{recbuf, ?RECBUF} | Opts]),
+ ?PROTO, ?FAMILY, ?TYPE, ?MODULE).
%% Remove all duplicate options from an option list.
@@ -103,7 +113,7 @@ fdopen(Fd, Opts) ->
%% Here's how:
%% Reverse the list.
%% For each head option go through the tail and remove
-%% all occurences of the same option from the tail.
+%% all occurrences of the same option from the tail.
%% Store that head option and iterate using the new tail.
%% Return the list of stored head options.
optuniquify(List) ->
@@ -112,8 +122,8 @@ optuniquify(List) ->
optuniquify([], Result) ->
Result;
optuniquify([Opt | Tail], Result) ->
- %% Remove all occurences of Opt in Tail,
- %% prepend Opt to Result,
+ %% Remove all occurrences of Opt in Tail,
+ %% prepend Opt to Result,
%% then iterate back here.
optuniquify(Opt, Tail, [], Result).
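The comment above spells out the optuniquify algorithm: reverse the option list, drop later duplicates of each option, and thereby keep only the last occurrence of every option in its original relative order. A standalone sketch of that idea (key/1 and the option shapes it handles are assumptions made for the example):

-module(optuniq_example).
-export([uniq/1]).

%% Keep the last occurrence of each option, preserving the survivors'
%% relative order, in the spirit of optuniquify/1 above.
uniq(Opts) ->
    uniq(lists:reverse(Opts), []).

uniq([], Acc) ->
    Acc;
uniq([Opt | Tail], Acc) ->
    K = key(Opt),
    uniq([O || O <- Tail, key(O) =/= K], [Opt | Acc]).

key({K, _}) -> K;
key(K) when is_atom(K) -> K.

uniq([{recbuf,8192}, {active,true}, {recbuf,4096}]) evaluates to [{active,true}, {recbuf,4096}].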
diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src
index 419dc0a2fc..fe073621c8 100644
--- a/lib/kernel/src/kernel.app.src
+++ b/lib/kernel/src/kernel.app.src
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2015. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -34,6 +34,7 @@
erl_boot_server,
erl_distribution,
erl_reply,
+ erl_signal_handler,
error_handler,
error_logger,
file,
@@ -43,6 +44,7 @@
global_group,
global_search,
group,
+ group_history,
heart,
hipe_unified_loader,
inet6_tcp,
@@ -55,6 +57,21 @@
inet_tcp_dist,
kernel,
kernel_config,
+ kernel_refc,
+ local_tcp,
+ local_udp,
+ logger,
+ logger_backend,
+ logger_config,
+ logger_disk_log_h,
+ logger_filters,
+ logger_formatter,
+ logger_h_common,
+ logger_handler_watcher,
+ logger_server,
+ logger_simple_h,
+ logger_std_h,
+ logger_sup,
net,
net_adm,
net_kernel,
@@ -84,6 +101,13 @@
inet_udp,
inet_sctp,
pg2,
+ raw_file_io,
+ raw_file_io_compressed,
+ raw_file_io_deflate,
+ raw_file_io_delayed,
+ raw_file_io_inflate,
+ raw_file_io_list,
+ raw_file_io_raw,
seq_trace,
standard_error,
wrap_log_reader]},
@@ -103,7 +127,11 @@
heart,
init,
kernel_config,
+ kernel_refc,
kernel_sup,
+ logger,
+ logger_handler_watcher,
+ logger_sup,
net_kernel,
net_sup,
rex,
@@ -114,8 +142,10 @@
inet_db,
pg2]},
{applications, []},
- {env, [{error_logger, tty}]},
+ {env, [{logger_level, notice},
+ {logger_sasl_compatible, false}
+ ]},
{mod, {kernel, []}},
- {runtime_dependencies, ["erts-7.3", "stdlib-2.6", "sasl-2.6"]}
+ {runtime_dependencies, ["erts-10.1", "stdlib-3.5", "sasl-3.0"]}
]
}.
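With the error_logger application parameter gone, the kernel env now carries logger_level and logger_sasl_compatible, and handlers are installed from the logger parameter by logger:add_handlers(kernel) during kernel start. A hedged sys.config sketch (file name, level, and handler options are illustrative and vary between OTP versions):

%% sys.config
[{kernel,
  [{logger_level, info},
   {logger_sasl_compatible, false},
   {logger,
    [{handler, default, logger_std_h,
      #{config => #{type => {file, "log/erlang.log"}}}}]}]}].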
diff --git a/lib/kernel/src/kernel.appup.src b/lib/kernel/src/kernel.appup.src
index cc9e6f771a..0c0435e051 100644
--- a/lib/kernel/src/kernel.appup.src
+++ b/lib/kernel/src/kernel.appup.src
@@ -1,7 +1,7 @@
%% -*- erlang -*-
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2015. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -18,9 +18,13 @@
%% %CopyrightEnd%
{"%VSN%",
%% Up from - max one major revision back
- [{<<"4\\.[0-2](\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-18.*
- {<<"3\\.[0-2](\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-17
+ [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0
+ {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+
+ {<<"6\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-21.0
+ {<<"6\\.1(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-21.1
%% Down to - max one major revision back
- [{<<"4\\.[0-2](\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-18.*
- {<<"3\\.[0-2](\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-17
+ [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0
+ {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+
+ {<<"6\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-21.0
+ {<<"6\\.1(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-21.1
}.
diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl
index 5f33f25a0d..c68d04e279 100644
--- a/lib/kernel/src/kernel.erl
+++ b/lib/kernel/src/kernel.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -30,16 +30,13 @@
%%% Callback functions for the kernel application.
%%%-----------------------------------------------------------------
start(_, []) ->
+ %% Set up the logger and configure the kernel logger environment
+ ok = logger:internal_init_logger(),
case supervisor:start_link({local, kernel_sup}, kernel, []) of
{ok, Pid} ->
- Type = get_error_logger_type(),
- case error_logger:swap_handler(Type) of
- ok -> {ok, Pid, []};
- Error ->
- %% Not necessary since the node will crash anyway:
- exit(Pid, shutdown),
- Error
- end;
+ ok = erl_signal_handler:start(),
+ ok = logger:add_handlers(kernel),
+ {ok, Pid, []};
Error -> Error
end.
@@ -54,16 +51,6 @@ config_change(Changed, New, Removed) ->
do_global_groups_change(Changed, New, Removed),
ok.
-get_error_logger_type() ->
- case application:get_env(kernel, error_logger) of
- {ok, tty} -> tty;
- {ok, {file, File}} when is_list(File) -> {logfile, File};
- {ok, false} -> false;
- {ok, silent} -> silent;
- undefined -> tty; % default value
- {ok, Bad} -> exit({bad_config, {kernel, {error_logger, Bad}}})
- end.
-
%%%-----------------------------------------------------------------
%%% The process structure in kernel is as shown in the figure.
%%%
@@ -92,60 +79,128 @@ get_error_logger_type() ->
%%%-----------------------------------------------------------------
init([]) ->
- SupFlags = {one_for_all, 0, 1},
-
- Config = {kernel_config,
- {kernel_config, start_link, []},
- permanent, 2000, worker, [kernel_config]},
- Code = {code_server,
- {code, start_link, get_code_args()},
- permanent, 2000, worker, [code]},
- File = {file_server_2,
- {file_server, start_link, []},
- permanent, 2000, worker,
- [file, file_server, file_io_server, prim_file]},
- StdError = {standard_error,
- {standard_error, start_link, []},
- temporary, 2000, supervisor, [user_sup]},
- User = {user,
- {user_sup, start, []},
- temporary, 2000, supervisor, [user_sup]},
-
+ SupFlags = #{strategy => one_for_all,
+ intensity => 0,
+ period => 1},
+
+ Config = #{id => kernel_config,
+ start => {kernel_config, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [kernel_config]},
+
+ RefC = #{id => kernel_refc,
+ start => {kernel_refc, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [kernel_refc]},
+
+ Code = #{id => code_server,
+ start => {code, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [code]},
+
+ File = #{id => file_server_2,
+ start => {file_server, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [file, file_server, file_io_server, prim_file]},
+
+ StdError = #{id => standard_error,
+ start => {standard_error, start_link, []},
+ restart => temporary,
+ shutdown => 2000,
+ type => supervisor,
+ modules => [user_sup]},
+
+ User = #{id => user,
+ start => {user_sup, start, []},
+ restart => temporary,
+ shutdown => 2000,
+ type => supervisor,
+ modules => [user_sup]},
+
+ SafeSup = #{id => kernel_safe_sup,
+ start =>{supervisor, start_link, [{local, kernel_safe_sup}, ?MODULE, safe]},
+ restart => permanent,
+ shutdown => infinity,
+ type => supervisor,
+ modules => [?MODULE]},
+
+
+ LoggerSup = #{id => logger_sup,
+ start => {logger_sup, start_link, []},
+ restart => permanent,
+ shutdown => infinity,
+ type => supervisor,
+ modules => [logger_sup]},
+
case init:get_argument(mode) of
- {ok, [["minimal"]]} ->
- SafeSupervisor = {kernel_safe_sup,
- {supervisor, start_link,
- [{local, kernel_safe_sup}, ?MODULE, safe]},
- permanent, infinity, supervisor, [?MODULE]},
- {ok, {SupFlags,
- [Code, File, StdError, User,
- Config, SafeSupervisor]}};
- _ ->
- Rpc = {rex, {rpc, start_link, []},
- permanent, 2000, worker, [rpc]},
- Global = {global_name_server, {global, start_link, []},
- permanent, 2000, worker, [global]},
- Glo_grp = {global_group, {global_group,start_link,[]},
- permanent, 2000, worker, [global_group]},
- InetDb = {inet_db, {inet_db, start_link, []},
- permanent, 2000, worker, [inet_db]},
- NetSup = {net_sup, {erl_distribution, start_link, []},
- permanent, infinity, supervisor,[erl_distribution]},
- DistAC = start_dist_ac(),
-
- Timer = start_timer(),
-
- SafeSupervisor = {kernel_safe_sup,
- {supervisor, start_link,
- [{local, kernel_safe_sup}, ?MODULE, safe]},
- permanent, infinity, supervisor, [?MODULE]},
- {ok, {SupFlags,
- [Code, Rpc, Global, InetDb | DistAC] ++
- [NetSup, Glo_grp, File,
- StdError, User, Config, SafeSupervisor] ++ Timer}}
+ {ok, [["minimal"]]} ->
+ {ok, {SupFlags,
+ [Code, File, StdError, User, LoggerSup, Config, RefC, SafeSup]}};
+ _ ->
+ Rpc = #{id => rex,
+ start => {rpc, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [rpc]},
+
+ Global = #{id => global_name_server,
+ start => {global, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [global]},
+
+ GlGroup = #{id => global_group,
+ start => {global_group,start_link,[]},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [global_group]},
+
+ InetDb = #{id => inet_db,
+ start => {inet_db, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [inet_db]},
+
+ NetSup = #{id => net_sup,
+ start => {erl_distribution, start_link, []},
+ restart => permanent,
+ shutdown => infinity,
+ type => supervisor,
+ modules => [erl_distribution]},
+
+ SigSrv = #{id => erl_signal_server,
+ start => {gen_event, start_link, [{local, erl_signal_server}]},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => dynamic},
+
+ DistAC = start_dist_ac(),
+
+ Timer = start_timer(),
+
+ {ok, {SupFlags,
+ [Code, Rpc, Global, InetDb | DistAC] ++
+ [NetSup, GlGroup, File, SigSrv,
+ StdError, User, Config, RefC, SafeSup, LoggerSup] ++ Timer}}
end;
init(safe) ->
- SupFlags = {one_for_one, 4, 3600},
+ SupFlags = #{strategy => one_for_one,
+ intensity => 4,
+ period => 3600},
+
Boot = start_boot_server(),
DiskLog = start_disk_log(),
Pg2 = start_pg2(),
@@ -158,67 +213,86 @@ init(safe) ->
{ok, {SupFlags, Boot ++ DiskLog ++ Pg2}}.
-get_code_args() ->
- case init:get_argument(nostick) of
- {ok, [[]]} -> [[nostick]];
- _ -> []
- end.
-
start_dist_ac() ->
- Spec = [{dist_ac,{dist_ac,start_link,[]},permanent,2000,worker,[dist_ac]}],
+ Spec = [#{id => dist_ac,
+ start => {dist_ac,start_link,[]},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [dist_ac]}],
case application:get_env(kernel, start_dist_ac) of
- {ok, true} -> Spec;
- {ok, false} -> [];
- undefined ->
- case application:get_env(kernel, distributed) of
- {ok, _} -> Spec;
- _ -> []
- end
+ {ok, true} -> Spec;
+ {ok, false} -> [];
+ undefined ->
+ case application:get_env(kernel, distributed) of
+ {ok, _} -> Spec;
+ _ -> []
+ end
end.
start_boot_server() ->
case application:get_env(kernel, start_boot_server) of
- {ok, true} ->
- Args = get_boot_args(),
- [{boot_server, {erl_boot_server, start_link, [Args]}, permanent,
- 1000, worker, [erl_boot_server]}];
- _ ->
- []
+ {ok, true} ->
+ Args = get_boot_args(),
+ [#{id => boot_server,
+ start => {erl_boot_server, start_link, [Args]},
+ restart => permanent,
+ shutdown => 1000,
+ type => worker,
+ modules => [erl_boot_server]}];
+ _ ->
+ []
end.
get_boot_args() ->
case application:get_env(kernel, boot_server_slaves) of
- {ok, Slaves} -> Slaves;
- _ -> []
+ {ok, Slaves} -> Slaves;
+ _ -> []
end.
start_disk_log() ->
case application:get_env(kernel, start_disk_log) of
- {ok, true} ->
- [{disk_log_server,
- {disk_log_server, start_link, []},
- permanent, 2000, worker, [disk_log_server]},
- {disk_log_sup, {disk_log_sup, start_link, []}, permanent,
- 1000, supervisor, [disk_log_sup]}];
- _ ->
- []
+ {ok, true} ->
+ [#{id => disk_log_server,
+ start => {disk_log_server, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [disk_log_server]},
+ #{id => disk_log_sup,
+ start => {disk_log_sup, start_link, []},
+ restart => permanent,
+ shutdown => 1000,
+ type => supervisor,
+ modules => [disk_log_sup]}];
+ _ ->
+ []
end.
start_pg2() ->
case application:get_env(kernel, start_pg2) of
- {ok, true} ->
- [{pg2, {pg2, start_link, []}, permanent, 1000, worker, [pg2]}];
- _ ->
- []
+ {ok, true} ->
+ [#{id => pg2,
+ start => {pg2, start_link, []},
+ restart => permanent,
+ shutdown => 1000,
+ type => worker,
+ modules => [pg2]}];
+ _ ->
+ []
end.
start_timer() ->
case application:get_env(kernel, start_timer) of
- {ok, true} ->
- [{timer_server, {timer, start_link, []}, permanent, 1000, worker,
- [timer]}];
- _ ->
- []
+ {ok, true} ->
+ [#{id => timer_server,
+ start => {timer, start_link, []},
+ restart => permanent,
+ shutdown => 1000,
+ type => worker,
+ modules => [timer]}];
+ _ ->
+ []
end.
%%-----------------------------------------------------------------
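init/1 above rewrites every child specification from the old six-element tuples into child spec maps. A minimal hedged sketch of the same map form in an ordinary supervisor (my_sup and my_worker are illustrative names):

-module(my_sup).
-behaviour(supervisor).
-export([start_link/0, init/1]).

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

init([]) ->
    SupFlags = #{strategy => one_for_all, intensity => 0, period => 1},
    Child = #{id => my_worker,
              start => {my_worker, start_link, []},
              restart => permanent,
              shutdown => 2000,
              type => worker,
              modules => [my_worker]},
    {ok, {SupFlags, [Child]}}.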
diff --git a/lib/kernel/src/kernel_config.erl b/lib/kernel/src/kernel_config.erl
index c65728aa53..691a266c2d 100644
--- a/lib/kernel/src/kernel_config.erl
+++ b/lib/kernel/src/kernel_config.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -30,11 +30,8 @@
%%%-----------------------------------------------------------------
%%% This module implements a process that configures the kernel
%%% application.
-%%% Its purpose is that in the init phase add an error_logger
-%%% and when it dies (when the kernel application dies) deleting the
-%%% previously installed error_logger.
-%%% Also, this process waits for other nodes at startup, if
-%%% specified.
+%%% Its purpose is to wait for other nodes at startup during the init phase,
+%%% if specified.
%%%-----------------------------------------------------------------
start_link() -> gen_server:start_link(kernel_config, [], []).
diff --git a/lib/kernel/src/kernel_refc.erl b/lib/kernel/src/kernel_refc.erl
new file mode 100644
index 0000000000..8e04ff99d8
--- /dev/null
+++ b/lib/kernel/src/kernel_refc.erl
@@ -0,0 +1,139 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(kernel_refc).
+
+-behaviour(gen_server).
+
+%% External exports
+-export([start_link/0, scheduler_wall_time/1]).
+%% Internal exports
+-export([init/1, handle_info/2, terminate/2]).
+-export([handle_call/3, handle_cast/2, code_change/3]).
+
+%%%-----------------------------------------------------------------
+%%% This module implements a process that handles reference counters for
+%%% various erts or other kernel resources that need reference counting.
+%%%
+%%% Should not be documented nor used directly by user applications.
+%%%-----------------------------------------------------------------
+start_link() ->
+ gen_server:start_link({local,kernel_refc}, kernel_refc, [], []).
+
+-spec scheduler_wall_time(boolean()) -> boolean().
+scheduler_wall_time(Bool) ->
+ gen_server:call(kernel_refc, {scheduler_wall_time, self(), Bool}, infinity).
+
+%%-----------------------------------------------------------------
+%% Callback functions from gen_server
+%%-----------------------------------------------------------------
+
+-spec init([]) -> {'ok', map()}.
+
+init([]) ->
+ resource(scheduler_wall_time, false),
+ {ok, #{scheduler_wall_time=>#{}}}.
+
+-spec handle_call(term(), term(), State) -> {'reply', term(), State}.
+handle_call({What, Who, false}, _From, State) ->
+ {Reply, Cnt} = do_stop(What, maps:get(What, State), Who),
+ {reply, Reply, State#{What:=Cnt}};
+handle_call({What, Who, true}, _From, State) ->
+ {Reply, Cnt} = do_start(What, maps:get(What, State), Who),
+ {reply, Reply, State#{What:=Cnt}};
+handle_call(_, _From, State) ->
+ {reply, badarg, State}.
+
+-spec handle_cast(term(), State) -> {'noreply', State}.
+handle_cast(_, State) ->
+ {noreply, State}.
+
+-spec handle_info(term(), State) -> {'noreply', State}.
+handle_info({'DOWN', _Ref, process, Pid, _}, State) ->
+ Cleanup = fun(Resource, Cnts) ->
+ cleanup(Resource, Cnts, Pid)
+ end,
+ {noreply, maps:map(Cleanup, State)};
+handle_info(_, State) ->
+ {noreply, State}.
+
+-spec terminate(term(), term()) -> 'ok'.
+terminate(_Reason, _State) ->
+ ok.
+
+-spec code_change(term(), State, term()) -> {'ok', State}.
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%-----------------------------------------------------------------
+%% Internal functions
+%%-----------------------------------------------------------------
+
+do_start(Resource, Cnt, Pid) ->
+ case maps:get(Pid, Cnt, undefined) of
+ undefined ->
+ Ref = erlang:monitor(process, Pid),
+ case any(Cnt) of
+ true ->
+ {true, Cnt#{Pid=>{1, Ref}}};
+ false ->
+ resource(Resource, true),
+ {false, Cnt#{Pid=>{1, Ref}}}
+ end;
+ {N, Ref} ->
+ {true, Cnt#{Pid=>{N+1, Ref}}}
+ end.
+
+do_stop(Resource, Cnt0, Pid) ->
+ case maps:get(Pid, Cnt0, undefined) of
+ undefined ->
+ {any(Cnt0), Cnt0};
+ {1, Ref} ->
+ erlang:demonitor(Ref, [flush]),
+ Cnt = maps:remove(Pid, Cnt0),
+ case any(Cnt) of
+ true ->
+ {true, Cnt};
+ false ->
+ resource(Resource, false),
+ {true, Cnt}
+ end;
+ {N, Ref} ->
+ {true, Cnt0#{Pid=>{N-1, Ref}}}
+ end.
+
+cleanup(Resource, Cnt0, Pid) ->
+ case maps:is_key(Pid, Cnt0) of
+ true ->
+ Cnt = maps:remove(Pid, Cnt0),
+ case any(Cnt) of
+ true ->
+ Cnt;
+ false ->
+ resource(Resource, false),
+ Cnt
+ end;
+ false ->
+ Cnt0
+ end.
+
+any(Cnt) -> maps:size(Cnt) > 0.
+
+resource(scheduler_wall_time, Enable) ->
+ _ = erts_internal:scheduler_wall_time(Enable).
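kernel_refc counts users of a resource per pid, monitoring a pid on its first acquisition and demonitoring with [flush] on its last release so that a stale 'DOWN' message cannot arrive afterwards. A sketch of just that bookkeeping, detached from the gen_server (module and function names are illustrative):

-module(refc_example).
-export([acquire/2, release/2]).

%% The map holds Pid => {Count, MonitorRef}, as in kernel_refc's state.
acquire(Pid, Cnt) ->
    case maps:find(Pid, Cnt) of
        error ->
            Ref = erlang:monitor(process, Pid),
            Cnt#{Pid => {1, Ref}};
        {ok, {N, Ref}} ->
            Cnt#{Pid => {N + 1, Ref}}
    end.

release(Pid, Cnt) ->
    case maps:find(Pid, Cnt) of
        {ok, {1, Ref}} ->
            erlang:demonitor(Ref, [flush]),   %% discard any pending 'DOWN'
            maps:remove(Pid, Cnt);
        {ok, {N, Ref}} ->
            Cnt#{Pid => {N - 1, Ref}};
        error ->
            Cnt
    end.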
diff --git a/lib/kernel/src/local_tcp.erl b/lib/kernel/src/local_tcp.erl
new file mode 100644
index 0000000000..90e0fa2162
--- /dev/null
+++ b/lib/kernel/src/local_tcp.erl
@@ -0,0 +1,178 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(local_tcp).
+
+%% Socket server for TCP/IP
+
+-export([connect/3, connect/4, listen/2, accept/1, accept/2, close/1]).
+-export([send/2, send/3, recv/2, recv/3, unrecv/2]).
+-export([shutdown/2]).
+-export([controlling_process/2]).
+-export([fdopen/2]).
+
+-export([getserv/1, getaddr/1, getaddr/2, getaddrs/1, getaddrs/2]).
+-export([translate_ip/1]).
+
+-include("inet_int.hrl").
+
+-define(FAMILY, local).
+-define(PROTO, tcp).
+-define(TYPE, stream).
+
+%% port lookup
+getserv(0) -> {ok, 0}.
+
+%% no address lookup
+getaddr({?FAMILY, _} = Address) -> {ok, Address}.
+getaddr({?FAMILY, _} = Address, _Timer) -> {ok, Address}.
+
+%% no address lookup
+getaddrs({?FAMILY, _} = Address) -> {ok, [Address]}.
+getaddrs({?FAMILY, _} = Address, _Timer) -> {ok, [Address]}.
+
+%% special this side addresses
+translate_ip(IP) -> IP.
+
+%%
+%% Send data on a socket
+%%
+send(Socket, Packet, Opts) -> prim_inet:send(Socket, Packet, Opts).
+send(Socket, Packet) -> prim_inet:send(Socket, Packet, []).
+
+%%
+%% Receive data from a socket (inactive only)
+%%
+recv(Socket, Length) -> prim_inet:recv(Socket, Length).
+recv(Socket, Length, Timeout) -> prim_inet:recv(Socket, Length, Timeout).
+
+unrecv(Socket, Data) -> prim_inet:unrecv(Socket, Data).
+
+%%
+%% Shutdown one end of a socket
+%%
+shutdown(Socket, How) ->
+ prim_inet:shutdown(Socket, How).
+
+%%
+%% Close a socket (async)
+%%
+close(Socket) ->
+ inet:tcp_close(Socket).
+
+%%
+%% Set controlling process
+%% FIXME: move messages to new owner!!!
+%%
+controlling_process(Socket, NewOwner) ->
+ inet:tcp_controlling_process(Socket, NewOwner).
+
+%%
+%% Connect
+%%
+connect(Address, Port, Opts) ->
+ do_connect(Address, Port, Opts, infinity).
+%%
+connect(Address, Port, Opts, infinity) ->
+ do_connect(Address, Port, Opts, infinity);
+connect(Address, Port, Opts, Timeout)
+ when is_integer(Timeout), Timeout >= 0 ->
+ do_connect(Address, Port, Opts, Timeout).
+
+do_connect(Addr = {?FAMILY, _}, 0, Opts, Time) ->
+ case inet:connect_options(Opts, ?MODULE) of
+ {error, Reason} -> exit(Reason);
+ {ok,
+ #connect_opts{
+ fd = Fd,
+ ifaddr = BAddr,
+ port = 0,
+ opts = SockOpts}}
+ when tuple_size(BAddr) =:= 2, element(1, BAddr) =:= ?FAMILY;
+ BAddr =:= any ->
+ case inet:open(
+ Fd,
+ case BAddr of
+ any ->
+ undefined;
+ _ ->
+ BAddr
+ end,
+ 0, SockOpts, ?PROTO, ?FAMILY, ?TYPE, ?MODULE) of
+ {ok, S} ->
+ case prim_inet:connect(S, Addr, 0, Time) of
+ ok -> {ok,S};
+ Error -> prim_inet:close(S), Error
+ end;
+ Error -> Error
+ end;
+ {ok, _} -> exit(badarg)
+ end.
+
+%%
+%% Listen
+%%
+listen(0, Opts) ->
+ case inet:listen_options([{port,0} | Opts], ?MODULE) of
+ {error, Reason} -> exit(Reason);
+ {ok,
+ #listen_opts{
+ fd = Fd,
+ ifaddr = BAddr,
+ port = 0,
+ opts = SockOpts} = R}
+ when tuple_size(BAddr) =:= 2, element(1, BAddr) =:= ?FAMILY;
+ BAddr =:= any ->
+ case inet:open(
+ Fd, BAddr, 0, SockOpts,
+ ?PROTO, ?FAMILY, ?TYPE, ?MODULE) of
+ {ok, S} ->
+ case prim_inet:listen(S, R#listen_opts.backlog) of
+ ok -> {ok, S};
+ Error -> prim_inet:close(S), Error
+ end;
+ Error -> Error
+ end;
+ {ok, _} -> exit(badarg)
+ end.
+
+%%
+%% Accept
+%%
+accept(L) ->
+ case prim_inet:accept(L) of
+ {ok, S} ->
+ inet_db:register_socket(S, ?MODULE),
+ {ok,S};
+ Error -> Error
+ end.
+%%
+accept(L, Timeout) ->
+ case prim_inet:accept(L, Timeout) of
+ {ok, S} ->
+ inet_db:register_socket(S, ?MODULE),
+ {ok,S};
+ Error -> Error
+ end.
+
+%%
+%% Create a port/socket from a file descriptor
+%%
+fdopen(Fd, Opts) ->
+ inet:open(Fd, undefined, 0, Opts, ?PROTO, ?FAMILY, ?TYPE, ?MODULE).
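local_tcp backs the local (Unix domain socket) address family, where addresses are {local, Path} tuples and the port is always 0. A hedged end-to-end sketch through the public gen_tcp API (the socket path is an assumption, and exact option handling differs slightly between OTP versions):

-module(uds_example).
-export([run/0]).

run() ->
    Path = "/tmp/uds_example.sock",
    _ = file:delete(Path),                     %% the path must not exist yet
    {ok, L} = gen_tcp:listen(0, [{ifaddr, {local, Path}},
                                 binary, {active, false}]),
    {ok, C} = gen_tcp:connect({local, Path}, 0, [binary, {active, false}]),
    {ok, A} = gen_tcp:accept(L),
    ok = gen_tcp:send(C, <<"ping">>),
    {ok, <<"ping">>} = gen_tcp:recv(A, 4),
    [ok = gen_tcp:close(S) || S <- [A, C, L]],
    file:delete(Path).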
diff --git a/lib/kernel/src/local_udp.erl b/lib/kernel/src/local_udp.erl
new file mode 100644
index 0000000000..481a8c4910
--- /dev/null
+++ b/lib/kernel/src/local_udp.erl
@@ -0,0 +1,106 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(local_udp).
+
+-export([open/1, open/2, close/1]).
+-export([send/2, send/4, recv/2, recv/3, connect/3]).
+-export([controlling_process/2]).
+-export([fdopen/2]).
+
+-export([getserv/1, getaddr/1, getaddr/2, translate_ip/1]).
+
+-include("inet_int.hrl").
+
+-define(FAMILY, local).
+-define(PROTO, udp).
+-define(TYPE, dgram).
+
+
+%% port lookup
+getserv(0) -> {ok, 0}.
+
+%% no address lookup
+getaddr({?FAMILY, _} = Address) -> {ok, Address}.
+getaddr({?FAMILY, _} = Address, _Timer) -> {ok, Address}.
+
+%% special this side addresses
+translate_ip(IP) -> IP.
+
+open(0) -> open(0, []).
+%%
+open(0, Opts) ->
+ case inet:udp_options(
+ [{port,0} | Opts],
+ ?MODULE) of
+ {error, Reason} -> exit(Reason);
+ {ok,
+ #udp_opts{
+ fd = Fd,
+ ifaddr = BAddr,
+ port = 0,
+ opts = SockOpts}}
+ when tuple_size(BAddr) =:= 2, element(1, BAddr) =:= ?FAMILY;
+ BAddr =:= any ->
+ inet:open(
+ Fd,
+ case BAddr of
+ any ->
+ undefined;
+ _ ->
+ BAddr
+ end,
+ 0, SockOpts, ?PROTO, ?FAMILY, ?TYPE, ?MODULE);
+ {ok, _} -> exit(badarg)
+ end.
+
+send(S, Addr = {?FAMILY,_}, 0, Data) ->
+ prim_inet:sendto(S, Addr, 0, Data).
+%%
+send(S, Data) ->
+ prim_inet:sendto(S, {?FAMILY,<<>>}, 0, Data).
+
+connect(S, Addr = {?FAMILY,_}, 0) ->
+ prim_inet:connect(S, Addr, 0).
+
+recv(S, Len) ->
+ prim_inet:recvfrom(S, Len).
+%%
+recv(S, Len, Time) ->
+ prim_inet:recvfrom(S, Len, Time).
+
+close(S) ->
+ inet:udp_close(S).
+
+%%
+%% Set controlling process:
+%% 1) First sync socket into a known state
+%% 2) Move all messages onto the new owner's message queue
+%% 3) Commit the owner
+%% 4) Wait for ack of new Owner (since socket does some link and unlink)
+%%
+
+controlling_process(Socket, NewOwner) ->
+ inet:udp_controlling_process(Socket, NewOwner).
+
+%%
+%% Create a port/socket from a file descriptor
+%%
+fdopen(Fd, Opts) ->
+ inet:fdopen(Fd, Opts, ?PROTO, ?FAMILY, ?TYPE, ?MODULE).
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl
new file mode 100644
index 0000000000..6762998d4f
--- /dev/null
+++ b/lib/kernel/src/logger.erl
@@ -0,0 +1,948 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger).
+
+%% Log interface
+-export([emergency/1,emergency/2,emergency/3,
+ alert/1,alert/2,alert/3,
+ critical/1,critical/2,critical/3,
+ error/1,error/2,error/3,
+ warning/1,warning/2,warning/3,
+ notice/1,notice/2,notice/3,
+ info/1,info/2,info/3,
+ debug/1,debug/2,debug/3]).
+-export([log/2,log/3,log/4]).
+
+%% Called by macro
+-export([allow/2,macro_log/3,macro_log/4,macro_log/5,add_default_metadata/1]).
+
+%% Configuration
+-export([add_handler/3, remove_handler/1,
+ add_primary_filter/2, add_handler_filter/3,
+ remove_primary_filter/1, remove_handler_filter/2,
+ set_module_level/2,
+ unset_module_level/1, unset_module_level/0,
+ set_application_level/2, unset_application_level/1,
+ get_module_level/0, get_module_level/1,
+ set_primary_config/1, set_primary_config/2,
+ set_handler_config/2, set_handler_config/3,
+ update_primary_config/1,
+ update_handler_config/2, update_handler_config/3,
+ update_formatter_config/2, update_formatter_config/3,
+ get_primary_config/0, get_handler_config/1,
+ get_handler_config/0, get_handler_ids/0, get_config/0,
+ add_handlers/1]).
+
+%% Private configuration
+-export([internal_init_logger/0]).
+
+%% Misc
+-export([compare_levels/2]).
+-export([set_process_metadata/1, update_process_metadata/1,
+ unset_process_metadata/0, get_process_metadata/0]).
+
+%% Basic report formatting
+-export([format_report/1, format_otp_report/1]).
+
+-export([internal_log/2,filter_stacktrace/2]).
+
+-include("logger_internal.hrl").
+-include("logger.hrl").
+
+%%%-----------------------------------------------------------------
+%%% Types
+-type log_event() :: #{level:=level(),
+ msg:={io:format(),[term()]} |
+ {report,report()} |
+ {string,unicode:chardata()},
+ meta:=metadata()}.
+-type level() :: emergency | alert | critical | error |
+ warning | notice | info | debug.
+-type report() :: map() | [{atom(),term()}].
+-type report_cb() :: fun((report()) -> {io:format(),[term()]}) |
+ fun((report(),report_cb_config()) -> unicode:chardata()).
+-type report_cb_config() :: #{depth := pos_integer() | unlimited,
+ chars_limit := pos_integer() | unlimited,
+ single_line := boolean()}.
+-type msg_fun() :: fun((term()) -> {io:format(),[term()]} |
+ report() |
+ unicode:chardata()).
+-type metadata() :: #{pid => pid(),
+ gl => pid(),
+ time => timestamp(),
+ mfa => {module(),atom(),non_neg_integer()},
+ file => file:filename(),
+ line => non_neg_integer(),
+ domain => [atom()],
+ report_cb => report_cb(),
+ atom() => term()}.
+-type location() :: #{mfa := {module(),atom(),non_neg_integer()},
+ file := file:filename(),
+ line := non_neg_integer()}.
+-type handler_id() :: atom().
+-type filter_id() :: atom().
+-type filter() :: {fun((log_event(),filter_arg()) ->
+ filter_return()),filter_arg()}.
+-type filter_arg() :: term().
+-type filter_return() :: stop | ignore | log_event().
+-type primary_config() :: #{level => level() | all | none,
+ filter_default => log | stop,
+ filters => [{filter_id(),filter()}]}.
+-type handler_config() :: #{id => handler_id(),
+ config => term(),
+ level => level() | all | none,
+ module => module(),
+ filter_default => log | stop,
+ filters => [{filter_id(),filter()}],
+ formatter => {module(),formatter_config()}}.
+-type timestamp() :: integer().
+-type formatter_config() :: #{atom() => term()}.
+
+-type config_handler() :: {handler, handler_id(), module(), handler_config()}.
+
+-type config_logger() :: [{handler,default,undefined} |
+ config_handler() |
+ {filters,log | stop,[{filter_id(),filter()}]} |
+ {module_level,level(),[module()]}].
+
+-export_type([log_event/0,
+ level/0,
+ report/0,
+ report_cb/0,
+ report_cb_config/0,
+ msg_fun/0,
+ metadata/0,
+ primary_config/0,
+ handler_config/0,
+ handler_id/0,
+ filter_id/0,
+ filter/0,
+ filter_arg/0,
+ filter_return/0,
+ config_handler/0,
+ formatter_config/0]).
+
+%%%-----------------------------------------------------------------
+%%% API
+emergency(X) ->
+ log(emergency,X).
+emergency(X,Y) ->
+ log(emergency,X,Y).
+emergency(X,Y,Z) ->
+ log(emergency,X,Y,Z).
+
+alert(X) ->
+ log(alert,X).
+alert(X,Y) ->
+ log(alert,X,Y).
+alert(X,Y,Z) ->
+ log(alert,X,Y,Z).
+
+critical(X) ->
+ log(critical,X).
+critical(X,Y) ->
+ log(critical,X,Y).
+critical(X,Y,Z) ->
+ log(critical,X,Y,Z).
+
+error(X) ->
+ log(error,X).
+error(X,Y) ->
+ log(error,X,Y).
+error(X,Y,Z) ->
+ log(error,X,Y,Z).
+
+warning(X) ->
+ log(warning,X).
+warning(X,Y) ->
+ log(warning,X,Y).
+warning(X,Y,Z) ->
+ log(warning,X,Y,Z).
+
+notice(X) ->
+ log(notice,X).
+notice(X,Y) ->
+ log(notice,X,Y).
+notice(X,Y,Z) ->
+ log(notice,X,Y,Z).
+
+info(X) ->
+ log(info,X).
+info(X,Y) ->
+ log(info,X,Y).
+info(X,Y,Z) ->
+ log(info,X,Y,Z).
+
+debug(X) ->
+ log(debug,X).
+debug(X,Y) ->
+ log(debug,X,Y).
+debug(X,Y,Z) ->
+ log(debug,X,Y,Z).
+
+-spec log(Level,StringOrReport) -> ok when
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report().
+log(Level, StringOrReport) ->
+ do_log(Level,StringOrReport,#{}).
+
+-spec log(Level,StringOrReport,Metadata) -> ok when
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report(),
+ Metadata :: metadata();
+ (Level,Format,Args) -> ok when
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()];
+ (Level,Fun,FunArgs) -> ok when
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term().
+log(Level, StringOrReport, Metadata)
+ when is_map(Metadata), not is_function(StringOrReport) ->
+ do_log(Level,StringOrReport,Metadata);
+log(Level, FunOrFormat, Args) ->
+ do_log(Level,{FunOrFormat,Args},#{}).
+
+-spec log(Level,Format, Args, Metadata) -> ok when
+ Level :: level(),
+ Format :: io:format(),
+ Args :: [term()],
+ Metadata :: metadata();
+ (Level,Fun,FunArgs,Metadata) -> ok when
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term(),
+ Metadata :: metadata().
+log(Level, FunOrFormat, Args, Metadata) ->
+ do_log(Level,{FunOrFormat,Args},Metadata).
+
+-spec allow(Level,Module) -> boolean() when
+ Level :: level(),
+ Module :: module().
+allow(Level,Module) when ?IS_LEVEL(Level), is_atom(Module) ->
+ logger_config:allow(?LOGGER_TABLE,Level,Module).
+
+
+-spec macro_log(Location,Level,StringOrReport) -> ok when
+ Location :: location(),
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report().
+macro_log(Location,Level,StringOrReport) ->
+ log_allowed(Location,Level,StringOrReport,#{}).
+
+-spec macro_log(Location,Level,StringOrReport,Meta) -> ok when
+ Location :: location(),
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report(),
+ Meta :: metadata();
+ (Location,Level,Format,Args) -> ok when
+ Location :: location(),
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()];
+ (Location,Level,Fun,FunArgs) -> ok when
+ Location :: location(),
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term().
+macro_log(Location,Level,StringOrReport,Meta)
+ when is_map(Meta), not is_function(StringOrReport) ->
+ log_allowed(Location,Level,StringOrReport,Meta);
+macro_log(Location,Level,FunOrFormat,Args) ->
+ log_allowed(Location,Level,{FunOrFormat,Args},#{}).
+
+-spec macro_log(Location,Level,Format,Args,Meta) -> ok when
+ Location :: location(),
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()],
+ Meta :: metadata();
+ (Location,Level,Fun,FunArgs,Meta) -> ok when
+ Location :: location(),
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term(),
+ Meta :: metadata().
+macro_log(Location,Level,FunOrFormat,Args,Meta) ->
+ log_allowed(Location,Level,{FunOrFormat,Args},Meta).
+
+-spec format_otp_report(Report) -> FormatArgs when
+ Report :: report(),
+ FormatArgs :: {io:format(),[term()]}.
+format_otp_report(#{label:=_,report:=Report}) ->
+ format_report(Report);
+format_otp_report(Report) ->
+ format_report(Report).
+
+-spec format_report(Report) -> FormatArgs when
+ Report :: report(),
+ FormatArgs :: {io:format(),[term()]}.
+format_report(Report) when is_map(Report) ->
+ format_report(maps:to_list(Report));
+format_report(Report) when is_list(Report) ->
+ case lists:flatten(Report) of
+ [] ->
+ {"~tp",[[]]};
+ FlatList ->
+ case string_p1(FlatList) of
+ true ->
+ {"~ts",[FlatList]};
+ false ->
+ format_term_list(Report,[],[])
+ end
+ end;
+format_report(Report) ->
+ {"~tp",[Report]}.
+
+format_term_list([{Tag,Data}|T],Format,Args) ->
+ PorS = case string_p(Data) of
+ true -> "s";
+ false -> "p"
+ end,
+ format_term_list(T,[" ~tp: ~t"++PorS|Format],[Data,Tag|Args]);
+format_term_list([Data|T],Format,Args) ->
+ format_term_list(T,[" ~tp"|Format],[Data|Args]);
+format_term_list([],Format,Args) ->
+ {lists:flatten(lists:join($\n,lists:reverse(Format))),lists:reverse(Args)}.
+
+string_p(List) when is_list(List) ->
+ string_p1(lists:flatten(List));
+string_p(_) ->
+ false.
+
+string_p1([]) ->
+ false;
+string_p1(FlatList) ->
+ io_lib:printable_unicode_list(FlatList).
+
+internal_log(Level,Term) when is_atom(Level) ->
+ erlang:display_string("Logger - "++ atom_to_list(Level) ++ ": "),
+ erlang:display(Term).
+
+%%%-----------------------------------------------------------------
+%%% Configuration
+-spec add_primary_filter(FilterId,Filter) -> ok | {error,term()} when
+ FilterId :: filter_id(),
+ Filter :: filter().
+add_primary_filter(FilterId,Filter) ->
+ logger_server:add_filter(primary,{FilterId,Filter}).
+
+-spec add_handler_filter(HandlerId,FilterId,Filter) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FilterId :: filter_id(),
+ Filter :: filter().
+add_handler_filter(HandlerId,FilterId,Filter) ->
+ logger_server:add_filter(HandlerId,{FilterId,Filter}).
+
+
+-spec remove_primary_filter(FilterId) -> ok | {error,term()} when
+ FilterId :: filter_id().
+remove_primary_filter(FilterId) ->
+ logger_server:remove_filter(primary,FilterId).
+
+-spec remove_handler_filter(HandlerId,FilterId) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FilterId :: filter_id().
+remove_handler_filter(HandlerId,FilterId) ->
+ logger_server:remove_filter(HandlerId,FilterId).
+
+-spec add_handler(HandlerId,Module,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Module :: module(),
+ Config :: handler_config().
+add_handler(HandlerId,Module,Config) ->
+ logger_server:add_handler(HandlerId,Module,Config).
+
+-spec remove_handler(HandlerId) -> ok | {error,term()} when
+ HandlerId :: handler_id().
+remove_handler(HandlerId) ->
+ logger_server:remove_handler(HandlerId).
+
+-spec set_primary_config(level,Level) -> ok | {error,term()} when
+ Level :: level() | all | none;
+ (filter_default,FilterDefault) -> ok | {error,term()} when
+ FilterDefault :: log | stop;
+ (filters,Filters) -> ok | {error,term()} when
+ Filters :: [{filter_id(),filter()}].
+set_primary_config(Key,Value) ->
+ logger_server:set_config(primary,Key,Value).
+
+-spec set_primary_config(Config) -> ok | {error,term()} when
+ Config :: primary_config().
+set_primary_config(Config) ->
+ logger_server:set_config(primary,Config).
+
+-spec set_handler_config(HandlerId,level,Level) -> Return when
+ HandlerId :: handler_id(),
+ Level :: level() | all | none,
+ Return :: ok | {error,term()};
+ (HandlerId,filter_default,FilterDefault) -> Return when
+ HandlerId :: handler_id(),
+ FilterDefault :: log | stop,
+ Return :: ok | {error,term()};
+ (HandlerId,filters,Filters) -> Return when
+ HandlerId :: handler_id(),
+ Filters :: [{filter_id(),filter()}],
+ Return :: ok | {error,term()};
+ (HandlerId,formatter,Formatter) -> Return when
+ HandlerId :: handler_id(),
+ Formatter :: {module(), formatter_config()},
+ Return :: ok | {error,term()};
+ (HandlerId,config,Config) -> Return when
+ HandlerId :: handler_id(),
+ Config :: term(),
+ Return :: ok | {error,term()}.
+set_handler_config(HandlerId,Key,Value) ->
+ logger_server:set_config(HandlerId,Key,Value).
+
+-spec set_handler_config(HandlerId,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Config :: handler_config().
+set_handler_config(HandlerId,Config) ->
+ logger_server:set_config(HandlerId,Config).
+
+-spec update_primary_config(Config) -> ok | {error,term()} when
+ Config :: primary_config().
+update_primary_config(Config) ->
+ logger_server:update_config(primary,Config).
+
+-spec update_handler_config(HandlerId,level,Level) -> Return when
+ HandlerId :: handler_id(),
+ Level :: level() | all | none,
+ Return :: ok | {error,term()};
+ (HandlerId,filter_default,FilterDefault) -> Return when
+ HandlerId :: handler_id(),
+ FilterDefault :: log | stop,
+ Return :: ok | {error,term()};
+ (HandlerId,filters,Filters) -> Return when
+ HandlerId :: handler_id(),
+ Filters :: [{filter_id(),filter()}],
+ Return :: ok | {error,term()};
+ (HandlerId,formatter,Formatter) -> Return when
+ HandlerId :: handler_id(),
+ Formatter :: {module(), formatter_config()},
+ Return :: ok | {error,term()};
+ (HandlerId,config,Config) -> Return when
+ HandlerId :: handler_id(),
+ Config :: term(),
+ Return :: ok | {error,term()}.
+update_handler_config(HandlerId,Key,Value) ->
+ logger_server:update_config(HandlerId,Key,Value).
+
+-spec update_handler_config(HandlerId,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Config :: handler_config().
+update_handler_config(HandlerId,Config) ->
+ logger_server:update_config(HandlerId,Config).
+
+-spec get_primary_config() -> Config when
+ Config :: primary_config().
+get_primary_config() ->
+ {ok,Config} = logger_config:get(?LOGGER_TABLE,primary),
+ maps:remove(handlers,Config).
+
+-spec get_handler_config(HandlerId) -> {ok,Config} | {error,term()} when
+ HandlerId :: handler_id(),
+ Config :: handler_config().
+get_handler_config(HandlerId) ->
+ case logger_config:get(?LOGGER_TABLE,HandlerId) of
+ {ok,#{module:=Module}=Config} ->
+ {ok,try Module:filter_config(Config)
+ catch _:_ -> Config
+ end};
+ Error ->
+ Error
+ end.
+
+-spec get_handler_config() -> [Config] when
+ Config :: handler_config().
+get_handler_config() ->
+ [begin
+ {ok,Config} = get_handler_config(HandlerId),
+ Config
+ end || HandlerId <- get_handler_ids()].
+
+-spec get_handler_ids() -> [HandlerId] when
+ HandlerId :: handler_id().
+get_handler_ids() ->
+ {ok,#{handlers:=HandlerIds}} = logger_config:get(?LOGGER_TABLE,primary),
+ HandlerIds.
+
+-spec update_formatter_config(HandlerId,FormatterConfig) ->
+ ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FormatterConfig :: formatter_config().
+update_formatter_config(HandlerId,FormatterConfig) ->
+ logger_server:update_formatter_config(HandlerId,FormatterConfig).
+
+-spec update_formatter_config(HandlerId,Key,Value) ->
+ ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Key :: atom(),
+ Value :: term().
+update_formatter_config(HandlerId,Key,Value) ->
+ logger_server:update_formatter_config(HandlerId,#{Key=>Value}).
+
+-spec set_module_level(Modules,Level) -> ok | {error,term()} when
+ Modules :: [module()] | module(),
+ Level :: level() | all | none.
+set_module_level(Module,Level) when is_atom(Module) ->
+ set_module_level([Module],Level);
+set_module_level(Modules,Level) ->
+ logger_server:set_module_level(Modules,Level).
+
+-spec unset_module_level(Modules) -> ok when
+ Modules :: [module()] | module().
+unset_module_level(Module) when is_atom(Module) ->
+ unset_module_level([Module]);
+unset_module_level(Modules) ->
+ logger_server:unset_module_level(Modules).
+
+-spec unset_module_level() -> ok.
+unset_module_level() ->
+ logger_server:unset_module_level().
+
+-spec set_application_level(Application,Level) -> ok | {error, not_loaded} when
+ Application :: atom(),
+ Level :: level() | all | none.
+set_application_level(App,Level) ->
+ case application:get_key(App, modules) of
+ {ok, Modules} ->
+ set_module_level(Modules, Level);
+ undefined ->
+ {error, {not_loaded, App}}
+ end.
+
+-spec unset_application_level(Application) -> ok | {error, not_loaded} when
+ Application :: atom().
+unset_application_level(App) ->
+ case application:get_key(App, modules) of
+ {ok, Modules} ->
+ unset_module_level(Modules);
+ undefined ->
+ {error, {not_loaded, App}}
+ end.
+
+-spec get_module_level(Modules) -> [{Module,Level}] when
+ Modules :: [Module] | Module,
+ Module :: module(),
+ Level :: level() | all | none.
+get_module_level(Module) when is_atom(Module) ->
+ get_module_level([Module]);
+get_module_level(Modules) when is_list(Modules) ->
+ [{M,L} || {M,L} <- get_module_level(),
+ lists:member(M,Modules)].
+
+-spec get_module_level() -> [{Module,Level}] when
+ Module :: module(),
+ Level :: level() | all | none.
+get_module_level() ->
+ logger_config:get_module_level(?LOGGER_TABLE).
+
+%%%-----------------------------------------------------------------
+%%% Misc
+-spec compare_levels(Level1,Level2) -> eq | gt | lt when
+ Level1 :: level(),
+ Level2 :: level().
+compare_levels(Level,Level) when ?IS_LEVEL(Level) ->
+ eq;
+compare_levels(Level1,Level2) when ?IS_LEVEL(Level1), ?IS_LEVEL(Level2) ->
+ Int1 = logger_config:level_to_int(Level1),
+ Int2 = logger_config:level_to_int(Level2),
+ if Int1 < Int2 -> gt;
+ true -> lt
+ end;
+compare_levels(Level1,Level2) ->
+ erlang:error(badarg,[Level1,Level2]).
+
+-spec set_process_metadata(Meta) -> ok when
+ Meta :: metadata().
+set_process_metadata(Meta) when is_map(Meta) ->
+ _ = put(?LOGGER_META_KEY,Meta),
+ ok;
+set_process_metadata(Meta) ->
+ erlang:error(badarg,[Meta]).
+
+-spec update_process_metadata(Meta) -> ok when
+ Meta :: metadata().
+update_process_metadata(Meta) when is_map(Meta) ->
+ case get_process_metadata() of
+ undefined ->
+ set_process_metadata(Meta);
+ Meta0 when is_map(Meta0) ->
+ set_process_metadata(maps:merge(Meta0,Meta)),
+ ok
+ end;
+update_process_metadata(Meta) ->
+ erlang:error(badarg,[Meta]).
+
+-spec get_process_metadata() -> Meta | undefined when
+ Meta :: metadata().
+get_process_metadata() ->
+ get(?LOGGER_META_KEY).
+
+-spec unset_process_metadata() -> ok.
+unset_process_metadata() ->
+ _ = erase(?LOGGER_META_KEY),
+ ok.
+
+-spec get_config() -> #{primary=>primary_config(),
+ handlers=>[handler_config()],
+ module_levels=>[{module(),level() | all | none}]}.
+get_config() ->
+ #{primary=>get_primary_config(),
+ handlers=>get_handler_config(),
+ module_levels=>lists:keysort(1,get_module_level())}.
+
+-spec internal_init_logger() -> ok | {error,term()}.
+%% This function is responsible for the basic configuration of Logger.
+%% It is done before add_handlers because we want the Logger
+%% settings to take effect before the kernel supervisor
+%% tree is started.
+internal_init_logger() ->
+ try
+ Env = get_logger_env(kernel),
+ check_logger_config(kernel,Env),
+ ok = logger:set_primary_config(level, get_logger_level()),
+ ok = logger:set_primary_config(filter_default,
+ get_primary_filter_default(Env)),
+
+ [case logger:add_primary_filter(Id, Filter) of
+ ok -> ok;
+ {error, Reason} -> throw(Reason)
+ end || {Id, Filter} <- get_primary_filters(Env)],
+
+ [case logger:set_module_level(Modules, Level) of
+ ok -> ok;
+ {error, Reason} -> throw(Reason)
+ end || {module_level, Level, Modules} <- Env],
+
+ case logger:set_handler_config(simple,filters,
+ get_default_handler_filters()) of
+ ok -> ok;
+ {error,{not_found,simple}} -> ok
+ end,
+
+ init_kernel_handlers(Env)
+ catch throw:Reason ->
+ ?LOG_ERROR("Invalid logger config: ~p", [Reason]),
+ {error, {bad_config, {kernel, Reason}}}
+ end.
+
+-spec init_kernel_handlers(config_logger()) -> ok | {error,term()}.
+%% Set up the kernel environment variables to be correct.
+%% The actual handlers are started by a call to add_handlers.
+init_kernel_handlers(Env) ->
+ try
+ case get_logger_type(Env) of
+ {ok,silent} ->
+ ok = logger:remove_handler(simple);
+ {ok,false} ->
+ ok;
+ {ok,Type} ->
+ init_default_config(Type,Env)
+ end
+ catch throw:Reason ->
+ ?LOG_ERROR("Invalid default handler config: ~p", [Reason]),
+ {error, {bad_config, {kernel, Reason}}}
+ end.
+
+-spec add_handlers(Application) -> ok | {error,term()} when
+ Application :: atom();
+ (HandlerConfig) -> ok | {error,term()} when
+ HandlerConfig :: [config_handler()].
+%% This function is responsible for resolving the handler config
+%% and then starting the correct handlers. This is done after the
+%% kernel supervisor tree has been started, as it needs logger_sup.
+add_handlers(App) when is_atom(App) ->
+ add_handlers(App,get_logger_env(App));
+add_handlers(HandlerConfig) ->
+ add_handlers(application:get_application(),HandlerConfig).
+
+add_handlers(App,HandlerConfig) ->
+ try
+ check_logger_config(App,HandlerConfig),
+ DefaultAdded =
+ lists:foldl(
+ fun({handler, default = Id, Module, Config}, _)
+ when not is_map_key(filters, Config) ->
+ %% The default handler should have a couple of extra filters
+ %% set on it by default.
+ DefConfig = #{ filter_default => stop,
+ filters => get_default_handler_filters()},
+ setup_handler(Id, Module, maps:merge(DefConfig,Config)),
+ true;
+ ({handler, Id, Module, Config}, Default) ->
+ setup_handler(Id, Module, Config),
+ Default orelse Id == default;
+ (_,Default) -> Default
+ end, false, HandlerConfig),
+    %% If a default handler was added we try to remove the simple handler.
+    %% If the simple handler exists it will replay its log events
+ %% to the handler(s) added in the fold above.
+ [case logger:remove_handler(simple) of
+ ok -> ok;
+ {error,{not_found,simple}} -> ok
+ end || DefaultAdded],
+ ok
+ catch throw:Reason0 ->
+ Reason =
+ case App of
+ undefined -> Reason0;
+ _ -> {App,Reason0}
+ end,
+ ?LOG_ERROR("Invalid logger handler config: ~p", [Reason]),
+ {error, {bad_config, {handler, Reason}}}
+ end.
+
+setup_handler(Id, Module, Config) ->
+ case logger:add_handler(Id, Module, Config) of
+ ok -> ok;
+ {error, Reason} -> throw(Reason)
+ end.
+
+check_logger_config(_,[]) ->
+ ok;
+check_logger_config(App,[{handler,_,_,_}|Env]) ->
+ check_logger_config(App,Env);
+check_logger_config(kernel,[{handler,default,undefined}|Env]) ->
+ check_logger_config(kernel,Env);
+check_logger_config(kernel,[{filters,_,_}|Env]) ->
+ check_logger_config(kernel,Env);
+check_logger_config(kernel,[{module_level,_,_}|Env]) ->
+ check_logger_config(kernel,Env);
+check_logger_config(_,Bad) ->
+ throw(Bad).
+
+-spec get_logger_type(config_logger()) ->
+ {ok, standard_io | false | silent |
+ {file, file:name_all()} |
+ {file, file:name_all(), [file:mode()]}}.
+get_logger_type(Env) ->
+ case application:get_env(kernel, error_logger) of
+ {ok, tty} ->
+ {ok, standard_io};
+ {ok, {file, File}} when is_list(File) ->
+ {ok, {file, File}};
+ {ok, false} ->
+ {ok, false};
+ {ok, silent} ->
+ {ok, silent};
+ undefined ->
+ case lists:member({handler,default,undefined}, Env) of
+ true ->
+ {ok, false};
+ false ->
+ {ok, standard_io} % default value
+ end;
+ {ok, Bad} ->
+ throw({error_logger, Bad})
+ end.
+
+get_logger_level() ->
+ case application:get_env(kernel,logger_level,info) of
+ Level when ?IS_LEVEL(Level); Level=:=all; Level=:=none ->
+ Level;
+ Level ->
+ throw({logger_level, Level})
+ end.
+
+get_primary_filter_default(Env) ->
+ case lists:keyfind(filters,1,Env) of
+ {filters,Default,_} ->
+ Default;
+ false ->
+ log
+ end.
+
+get_primary_filters(Env) ->
+ case [F || F={filters,_,_} <- Env] of
+ [{filters,_,Filters}] ->
+ case lists:all(fun({_,_}) -> true; (_) -> false end,Filters) of
+ true -> Filters;
+ false -> throw({invalid_filters,Filters})
+ end;
+ [] -> [];
+ _ -> throw({multiple_filters,Env})
+ end.
+
+%% This function looks at the kernel logger environment
+%% and updates it so that the default handler is configured correctly.
+init_default_config(Type,Env) when Type==standard_io;
+ Type==standard_error;
+ element(1,Type)==file ->
+ DefaultFormatter = #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}},
+ DefaultConfig = DefaultFormatter#{config=>#{type=>Type}},
+ NewLoggerEnv =
+ case lists:keyfind(default, 2, Env) of
+ {handler, default, logger_std_h, Config} ->
+                %% Only add the logger_std_h config if it has not been
+                %% configured by the user AND the default handler is
+                %% still logger_std_h.
+ lists:keyreplace(default, 2, Env,
+ {handler, default, logger_std_h,
+ maps:merge(DefaultConfig,Config)});
+ {handler, default, Module,Config} ->
+ %% Add default formatter. The point of this
+ %% is to get the expected formatter config
+ %% for the default handler, since this
+ %% differs from the default values that
+ %% logger_formatter itself adds.
+ lists:keyreplace(default, 2, Env,
+ {handler, default, Module,
+ maps:merge(DefaultFormatter,Config)});
+ _ ->
+ %% Nothing has been configured, use default
+ [{handler, default, logger_std_h, DefaultConfig} | Env]
+ end,
+ application:set_env(kernel, logger, NewLoggerEnv, [{timeout,infinity}]).
+
+get_default_handler_filters() ->
+ case application:get_env(kernel, logger_sasl_compatible, false) of
+ true ->
+ ?DEFAULT_HANDLER_FILTERS([otp]);
+ false ->
+ ?DEFAULT_HANDLER_FILTERS([otp,sasl])
+ end.
+
+get_logger_env(App) ->
+ application:get_env(App, logger, []).
+
+%%%-----------------------------------------------------------------
+%%% Internal
+do_log(Level,Msg,#{mfa:={Module,_,_}}=Meta) ->
+ case logger_config:allow(?LOGGER_TABLE,Level,Module) of
+ true ->
+ log_allowed(#{},Level,Msg,Meta);
+ false ->
+ ok
+ end;
+do_log(Level,Msg,Meta) ->
+ case logger_config:allow(?LOGGER_TABLE,Level) of
+ true ->
+ log_allowed(#{},Level,Msg,Meta);
+ false ->
+ ok
+ end.
+
+-spec log_allowed(Location,Level,Msg,Meta) -> ok when
+ Location :: location() | #{},
+ Level :: level(),
+ Msg :: {msg_fun(),term()} |
+ {io:format(),[term()]} |
+ report() |
+ unicode:chardata(),
+ Meta :: metadata().
+log_allowed(Location,Level,{Fun,FunArgs},Meta) when is_function(Fun,1) ->
+ try Fun(FunArgs) of
+ Msg={Format,Args} when is_list(Format), is_list(Args) ->
+ log_allowed(Location,Level,Msg,Meta);
+ Report when ?IS_REPORT(Report) ->
+ log_allowed(Location,Level,Report,Meta);
+ String when ?IS_STRING(String) ->
+ log_allowed(Location,Level,String,Meta);
+ Other ->
+ log_allowed(Location,Level,
+ {"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{Fun,FunArgs},Other]},
+ Meta)
+ catch C:R ->
+ log_allowed(Location,Level,
+ {"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{Fun,FunArgs},{C,R}]},
+ Meta)
+ end;
+log_allowed(Location,Level,Msg,Meta0) when is_map(Meta0) ->
+ %% Metadata priorities are:
+ %% Location (added in API macros) - will be overwritten by process
+ %% metadata (set by set_process_metadata/1), which in turn will be
+ %% overwritten by the metadata given as argument in the log call
+ %% (function or macro).
+ Meta = add_default_metadata(
+ maps:merge(Location,maps:merge(proc_meta(),Meta0))),
+ case node(maps:get(gl,Meta)) of
+ Node when Node=/=node() ->
+ log_remote(Node,Level,Msg,Meta),
+ do_log_allowed(Level,Msg,Meta);
+ _ ->
+ do_log_allowed(Level,Msg,Meta)
+ end.
+
+do_log_allowed(Level,{Format,Args}=Msg,Meta)
+ when ?IS_LEVEL(Level),
+ is_list(Format),
+ is_list(Args),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>Msg,meta=>Meta},tid());
+do_log_allowed(Level,Report,Meta)
+ when ?IS_LEVEL(Level),
+ ?IS_REPORT(Report),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>{report,Report},meta=>Meta},
+ tid());
+do_log_allowed(Level,String,Meta)
+ when ?IS_LEVEL(Level),
+ ?IS_STRING(String),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>{string,String},meta=>Meta},
+ tid()).
+tid() ->
+ ets:whereis(?LOGGER_TABLE).
+
+log_remote(Node,Level,{Format,Args},Meta) ->
+ log_remote(Node,{log,Level,Format,Args,Meta});
+log_remote(Node,Level,Msg,Meta) ->
+ log_remote(Node,{log,Level,Msg,Meta}).
+
+log_remote(Node,Request) ->
+ {logger,Node} ! Request,
+ ok.
+
+add_default_metadata(Meta) ->
+ add_default_metadata([pid,gl,time],Meta).
+
+add_default_metadata([Key|Keys],Meta) ->
+ case maps:is_key(Key,Meta) of
+ true ->
+ add_default_metadata(Keys,Meta);
+ false ->
+ add_default_metadata(Keys,Meta#{Key=>default(Key)})
+ end;
+add_default_metadata([],Meta) ->
+ Meta.
+
+proc_meta() ->
+ case get_process_metadata() of
+ ProcMeta when is_map(ProcMeta) -> ProcMeta;
+ _ -> #{}
+ end.
+
+default(pid) -> self();
+default(gl) -> group_leader();
+default(time) -> erlang:system_time(microsecond).
+
+%% Remove everything up to and including this module from the stacktrace.
+filter_stacktrace(Module,[{Module,_,_,_}|_]) ->
+ [];
+filter_stacktrace(Module,[H|T]) ->
+ [H|filter_stacktrace(Module,T)];
+filter_stacktrace(_,[]) ->
+ [].
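
To make the API surface above concrete, here is a minimal usage sketch; the module name, handler-independent values, and report keys are illustrative only and not part of this change. It exercises the shapes that log/2,3,4 and the ?LOG_* macros eventually route into do_log/3: a format string with arguments, a report map with a report_cb, per-process metadata, and a macro call that adds location metadata.

    -module(logger_usage_example).
    -export([demo/0]).

    -include_lib("kernel/include/logger.hrl").

    demo() ->
        %% Format string with arguments
        logger:error("no such file: ~ts", ["/tmp/missing"]),
        %% Report map; report_cb in the metadata controls its formatting
        logger:notice(#{event => user_login, user => "alice"},
                      #{report_cb => fun(#{user := U}) -> {"login: ~ts", [U]} end}),
        %% Per-process metadata is merged into every subsequent event
        logger:set_process_metadata(#{domain => [my_app]}),
        %% The ?LOG_* macros also add mfa/file/line location metadata
        ?LOG_INFO("starting"),
        ok.
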
diff --git a/lib/kernel/src/logger_backend.erl b/lib/kernel/src/logger_backend.erl
new file mode 100644
index 0000000000..432c671afd
--- /dev/null
+++ b/lib/kernel/src/logger_backend.erl
@@ -0,0 +1,133 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_backend).
+
+-export([log_allowed/2]).
+
+-include("logger_internal.hrl").
+
+-define(OWN_KEYS,[level,filters,filter_default,handlers]).
+
+%%%-----------------------------------------------------------------
+%%% The default logger backend
+log_allowed(Log, Tid) ->
+ {ok,Config} = logger_config:get(Tid,primary),
+ Filters = maps:get(filters,Config,[]),
+ case apply_filters(primary,Log,Filters,Config) of
+ stop ->
+ ok;
+ Log1 ->
+ Handlers = maps:get(handlers,Config,[]),
+ call_handlers(Log1,Handlers,Tid)
+ end,
+ ok.
+
+call_handlers(#{level:=Level}=Log,[Id|Handlers],Tid) ->
+ case logger_config:get(Tid,Id,Level) of
+ {ok,#{module:=Module}=Config} ->
+ Filters = maps:get(filters,Config,[]),
+ case apply_filters(Id,Log,Filters,Config) of
+ stop ->
+ ok;
+ Log1 ->
+ Config1 = maps:without(?OWN_KEYS,Config),
+ try Module:log(Log1,Config1)
+ catch C:R:S ->
+ case logger:remove_handler(Id) of
+ ok ->
+ logger:internal_log(
+ error,{removed_failing_handler,Id}),
+ ?LOG_INTERNAL(
+ debug,
+ [{logger,removed_failing_handler},
+ {handler,{Id,Module}},
+ {log_event,Log1},
+ {config,Config1},
+ {reason,{C,R,filter_stacktrace(S)}}]);
+ {error,{not_found,_}} ->
+ %% Probably already removed by other client
+ %% Don't report again
+ ok;
+ {error,Reason} ->
+ ?LOG_INTERNAL(
+ debug,
+ [{logger,remove_handler_failed},
+ {reason,Reason}])
+ end
+ end
+ end;
+ _ ->
+ ok
+ end,
+ call_handlers(Log,Handlers,Tid);
+call_handlers(_Log,[],_Tid) ->
+ ok.
+
+apply_filters(Owner,Log,Filters,Config) ->
+ case do_apply_filters(Owner,Log,Filters,ignore) of
+ stop ->
+ stop;
+ ignore ->
+ case maps:get(filter_default,Config) of
+ log ->
+ Log;
+ stop ->
+ stop
+ end;
+ Log1 ->
+ Log1
+ end.
+
+do_apply_filters(Owner,Log,[{_Id,{FilterFun,FilterArgs}}=Filter|Filters],State) ->
+ try FilterFun(Log,FilterArgs) of
+ stop ->
+ stop;
+ ignore ->
+ do_apply_filters(Owner,Log,Filters,State);
+ Log1=#{level:=Level,msg:=Msg,meta:=Meta}
+ when is_atom(Level), ?IS_MSG(Msg), is_map(Meta) ->
+ do_apply_filters(Owner,Log1,Filters,log);
+ Bad ->
+ handle_filter_failed(Filter,Owner,Log,{bad_return_value,Bad})
+ catch C:R:S ->
+ handle_filter_failed(Filter,Owner,Log,{C,R,filter_stacktrace(S)})
+ end;
+do_apply_filters(_Owner,_Log,[],ignore) ->
+ ignore;
+do_apply_filters(_Owner,Log,[],log) ->
+ Log.
+
+handle_filter_failed({Id,_}=Filter,Owner,Log,Reason) ->
+ case logger_server:remove_filter(Owner,Id) of
+ ok ->
+ logger:internal_log(error,{removed_failing_filter,Id}),
+ ?LOG_INTERNAL(debug,
+ [{logger,removed_failing_filter},
+ {filter,Filter},
+ {owner,Owner},
+ {log_event,Log},
+ {reason,Reason}]);
+ _ ->
+ ok
+ end,
+ ignore.
+
+filter_stacktrace(Stacktrace) ->
+ logger:filter_stacktrace(?MODULE,Stacktrace).
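
The backend above treats each filter as {Fun,Arg}, where Fun(LogEvent,Arg) returns stop, ignore, or a (possibly rewritten) log event, and filter_default decides the fate of events that every filter ignored. Below is a small sketch of installing one built-in and one hand-written filter; the filter ids, handler id, and domain value are examples only.

    install_example_filters() ->
        %% Built-in filter: stop supervisor/application progress reports
        ok = logger:add_primary_filter(stop_progress,
                                       {fun logger_filters:progress/2, stop}),
        %% Hand-written filter on the default handler: pass events tagged
        %% with a given domain and leave the rest to filter_default
        ok = logger:add_handler_filter(default, only_my_app,
               {fun(#{meta := #{domain := [my_app|_]}} = LogEvent, _Arg) ->
                        LogEvent;
                   (_LogEvent, _Arg) ->
                        ignore
                end, []}).
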
diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl
new file mode 100644
index 0000000000..6bfe658552
--- /dev/null
+++ b/lib/kernel/src/logger_config.erl
@@ -0,0 +1,150 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_config).
+
+-export([new/1,delete/2,
+ exist/2,
+ allow/2,allow/3,
+ get/2, get/3,
+ create/3, set/3,
+ set_module_level/3,unset_module_level/2,
+ get_module_level/1,cache_module_level/2,
+ level_to_int/1]).
+
+-include("logger_internal.hrl").
+
+new(Name) ->
+ _ = ets:new(Name,[set,protected,named_table,{write_concurrency,true}]),
+ ets:whereis(Name).
+
+delete(Tid,Id) ->
+ ets:delete(Tid,table_key(Id)).
+
+allow(Tid,Level,Module) ->
+ LevelInt = level_to_int(Level),
+ case ets:lookup(Tid,Module) of
+ [{Module,{ModLevel,cached}}] when is_integer(ModLevel),
+ LevelInt =< ModLevel ->
+ true;
+ [{Module,ModLevel}] when is_integer(ModLevel),
+ LevelInt =< ModLevel ->
+ true;
+ [] ->
+ logger_server:cache_module_level(Module),
+ allow(Tid,Level);
+ _ ->
+ false
+ end.
+
+allow(Tid,Level) ->
+ GlobalLevelInt = ets:lookup_element(Tid,?PRIMARY_KEY,2),
+ level_to_int(Level) =< GlobalLevelInt.
+
+exist(Tid,What) ->
+ ets:member(Tid,table_key(What)).
+
+get(Tid,What) ->
+ case ets:lookup(Tid,table_key(What)) of
+ [{_,_,Config}] ->
+ {ok,Config};
+ [] ->
+ {error,{not_found,What}}
+ end.
+
+get(Tid,What,Level) ->
+ MS = [{{table_key(What),'$1','$2'},
+ [{'>=','$1',level_to_int(Level)}],
+ ['$2']}],
+ case ets:select(Tid,MS) of
+ [] -> error;
+ [Data] -> {ok,Data}
+ end.
+
+create(Tid,What,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ ets:insert(Tid,{table_key(What),LevelInt,Config}).
+
+set(Tid,What,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ %% Should do this only if the level has actually changed. Possibly
+ %% overwrite instead of delete?
+ case What of
+ primary ->
+ _ = ets:select_delete(Tid,[{{'_',{'$1',cached}},
+ [{'=/=','$1',LevelInt}],
+ [true]}]),
+ ok;
+ _ ->
+ ok
+ end,
+ ets:update_element(Tid,table_key(What),[{2,LevelInt},{3,Config}]),
+ ok.
+
+set_module_level(Tid,Modules,Level) ->
+ LevelInt = level_to_int(Level),
+ [ets:insert(Tid,{Module,LevelInt}) || Module <- Modules],
+ ok.
+
+%% should possibly overwrite instead of delete?
+unset_module_level(Tid,all) ->
+ MS = [{{'$1','$2'},[{is_atom,'$1'},{is_integer,'$2'}],[true]}],
+ _ = ets:select_delete(Tid,MS),
+ ok;
+unset_module_level(Tid,Modules) ->
+ [ets:delete(Tid,Module) || Module <- Modules],
+ ok.
+
+get_module_level(Tid) ->
+ MS = [{{'$1','$2'},[{is_atom,'$1'},{is_integer,'$2'}],[{{'$1','$2'}}]}],
+ Modules = ets:select(Tid,MS),
+ lists:sort([{M,int_to_level(L)} || {M,L} <- Modules]).
+
+cache_module_level(Tid,Module) ->
+ GlobalLevelInt = ets:lookup_element(Tid,?PRIMARY_KEY,2),
+ ets:insert_new(Tid,{Module,{GlobalLevelInt,cached}}),
+ ok.
+
+level_to_int(none) -> ?LOG_NONE;
+level_to_int(emergency) -> ?EMERGENCY;
+level_to_int(alert) -> ?ALERT;
+level_to_int(critical) -> ?CRITICAL;
+level_to_int(error) -> ?ERROR;
+level_to_int(warning) -> ?WARNING;
+level_to_int(notice) -> ?NOTICE;
+level_to_int(info) -> ?INFO;
+level_to_int(debug) -> ?DEBUG;
+level_to_int(all) -> ?LOG_ALL.
+
+int_to_level(?LOG_NONE) -> none;
+int_to_level(?EMERGENCY) -> emergency;
+int_to_level(?ALERT) -> alert;
+int_to_level(?CRITICAL) -> critical;
+int_to_level(?ERROR) -> error;
+int_to_level(?WARNING) -> warning;
+int_to_level(?NOTICE) -> notice;
+int_to_level(?INFO) -> info;
+int_to_level(?DEBUG) -> debug;
+int_to_level(?LOG_ALL) -> all.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+
+table_key(primary) -> ?PRIMARY_KEY;
+table_key(HandlerId) -> {?HANDLER_KEY,HandlerId}.
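
Because level_to_int/1 maps more severe levels to smaller integers, allow/2,3 admits an event when its integer is less than or equal to the configured one; logger:compare_levels/2 exposes the same ordering. A short sketch follows (the module name my_app_worker is made up):

    level_ordering_demo() ->
        gt = logger:compare_levels(error, info),   %% error is more severe than info
        lt = logger:compare_levels(debug, notice), %% debug is less severe than notice
        %% Raise the verbosity of one module without touching the primary level
        ok = logger:set_module_level(my_app_worker, debug),
        [{my_app_worker,debug}] = logger:get_module_level(my_app_worker),
        ok = logger:unset_module_level(my_app_worker).
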
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
new file mode 100644
index 0000000000..2a81458ec8
--- /dev/null
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -0,0 +1,740 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_disk_log_h).
+
+-behaviour(gen_server).
+
+-include("logger.hrl").
+-include("logger_internal.hrl").
+-include("logger_h_common.hrl").
+
+%%% API
+-export([start_link/3, info/1, filesync/1, reset/1]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% logger callbacks
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
+
+%% handler internal
+-export([log_handler_info/4]).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Start a disk_log handler process and link to caller.
+%%% This function is called by the kernel supervisor when this
+%%% handler process gets added (as a result of calling logger:add_handler/3).
+-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
+ Name :: atom(),
+ Config :: logger:handler_config(),
+ HandlerState :: map(),
+ Pid :: pid(),
+ Reason :: term().
+
+start_link(Name, Config, HandlerState) ->
+ proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
+
+%%%-----------------------------------------------------------------
+%%%
+-spec filesync(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+filesync(Name) when is_atom(Name) ->
+ try
+ gen_server:call(?name_to_reg_name(?MODULE,Name),
+ disk_log_sync, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+filesync(Name) ->
+ {error,{badarg,{filesync,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec info(Name) -> Info | {error,Reason} when
+ Name :: atom(),
+ Info :: term(),
+ Reason :: handler_busy | {badarg,term()}.
+
+info(Name) when is_atom(Name) ->
+ try
+ gen_server:call(?name_to_reg_name(?MODULE,Name),
+ info, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+info(Name) ->
+ {error,{badarg,{info,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec reset(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+reset(Name) when is_atom(Name) ->
+ try
+ gen_server:call(?name_to_reg_name(?MODULE,Name),
+ reset, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+reset(Name) ->
+ {error,{badarg,{reset,[Name]}}}.
+
+
+%%%===================================================================
+%%% logger callbacks
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(#{id:=Name}=Config) ->
+ case check_config(adding, Config) of
+ {ok, #{config:=HConfig}=Config1} ->
+ %% create initial handler state by merging defaults with config
+ HState = maps:merge(get_init_state(), HConfig),
+ case logger_h_common:overload_levels_ok(HState) of
+ true ->
+ start(Name, Config1, HState);
+ false ->
+ #{sync_mode_qlen := SMQL,
+ drop_mode_qlen := DMQL,
+ flush_qlen := FQL} = HState,
+ {error,{invalid_levels,{SMQL,DMQL,FQL}}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
+ WriteOnce = maps:with([type,file,max_no_files,max_no_bytes],OldHConfig),
+ ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig),
+ NewHConfig0 = maps:get(config, NewConfig, #{}),
+ Default =
+ case SetOrUpdate of
+ set ->
+ %% Do not reset write-once fields to defaults
+ maps:merge(get_default_config(),WriteOnce);
+ update ->
+ OldHConfig
+ end,
+
+    %% Allow (accidentally) included read-only fields - just overwrite them.
+ NewHConfig = maps:merge(maps:merge(Default,NewHConfig0),ReadOnly),
+
+ %% But fail if write-once fields are changed
+ case maps:with([type,file,max_no_files,max_no_bytes],NewHConfig) of
+ WriteOnce ->
+ changing_config1(maps:get(handler_pid,OldHConfig),
+ OldConfig,
+ NewConfig#{config=>NewHConfig});
+ Other ->
+ {Old,New} = logger_server:diff_maps(WriteOnce,Other),
+ {error,{illegal_config_change,#{config=>Old},#{config=>New}}}
+ end.
+
+changing_config1(HPid, OldConfig, NewConfig) ->
+ case check_config(changing, NewConfig) of
+ Result = {ok,NewConfig1} ->
+ try gen_server:call(HPid, {change_config,OldConfig,NewConfig1},
+ ?DEFAULT_CALL_TIMEOUT) of
+ ok -> Result;
+ Error -> Error
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+ Error ->
+ Error
+ end.
+
+check_config(adding, #{id:=Name}=Config) ->
+ %% merge handler specific config data
+ HConfig1 = maps:get(config, Config, #{}),
+ HConfig2 = maps:merge(get_default_config(), HConfig1),
+ HConfig3 = merge_default_logopts(Name, HConfig2),
+ case check_h_config(maps:to_list(HConfig3)) of
+ ok ->
+ {ok,Config#{config=>HConfig3}};
+ Error ->
+ Error
+ end;
+check_config(changing, Config) ->
+ HConfig = maps:get(config, Config, #{}),
+ case check_h_config(maps:to_list(HConfig)) of
+ ok -> {ok,Config};
+ Error -> Error
+ end.
+
+merge_default_logopts(Name, HConfig) ->
+ Type = maps:get(type, HConfig, wrap),
+ {DefaultNoFiles,DefaultNoBytes} =
+ case Type of
+ halt -> {undefined,infinity};
+ _wrap -> {10,1048576}
+ end,
+ {ok,Dir} = file:get_cwd(),
+ Defaults = #{file => filename:join(Dir,Name),
+ max_no_files => DefaultNoFiles,
+ max_no_bytes => DefaultNoBytes,
+ type => Type},
+ maps:merge(Defaults, HConfig).
+
+check_h_config([{file,File}|Config]) when is_list(File) ->
+ check_h_config(Config);
+check_h_config([{max_no_files,undefined}|Config]) ->
+ check_h_config(Config);
+check_h_config([{max_no_files,N}|Config]) when is_integer(N), N>0 ->
+ check_h_config(Config);
+check_h_config([{max_no_bytes,infinity}|Config]) ->
+ check_h_config(Config);
+check_h_config([{max_no_bytes,N}|Config]) when is_integer(N), N>0 ->
+ check_h_config(Config);
+check_h_config([{type,Type}|Config]) when Type==wrap; Type==halt ->
+ check_h_config(Config);
+check_h_config([Other | Config]) ->
+ case logger_h_common:check_common_config(Other) of
+ valid ->
+ check_h_config(Config);
+ invalid ->
+ {error,{invalid_config,?MODULE,Other}}
+ end;
+check_h_config([]) ->
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(#{id:=Name}) ->
+ stop(Name).
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config = #{id := Name,
+ config := #{handler_pid := HPid,
+ mode_tab := ModeTab}}) ->
+ %% if the handler has crashed, we must drop this event
+ %% and hope the handler restarts so we can try again
+ true = is_process_alive(HPid),
+ Bin = logger_h_common:log_to_binary(LogEvent, Config),
+ logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(#{config:=HConfig}=Config) ->
+ Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}.
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([Name,
+ Config = #{config := HConfig = #{file:=File,
+ type:=Type,
+ max_no_bytes:=MNB,
+ max_no_files:=MNF}},
+ State = #{dl_sync_int := DLSyncInt}]) ->
+
+ RegName = ?name_to_reg_name(?MODULE,Name),
+ register(RegName, self()),
+ process_flag(trap_exit, true),
+ process_flag(message_queue_data, off_heap),
+
+ ?init_test_hooks(),
+ ?start_observation(Name),
+
+ LogOpts = #{file=>File, type=>Type, max_no_bytes=>MNB, max_no_files=>MNF},
+ case open_disk_log(Name, File, Type, MNB, MNF) of
+ ok ->
+ try ets:new(Name, [public]) of
+ ModeTab ->
+ ?set_mode(ModeTab, async),
+ T0 = ?timestamp(),
+ State1 =
+ ?merge_with_stats(State#{
+ id => Name,
+ mode_tab => ModeTab,
+ mode => async,
+ dl_sync => DLSyncInt,
+ log_opts => LogOpts,
+ last_qlen => 0,
+ last_log_ts => T0,
+ burst_win_ts => T0,
+ burst_msg_count => 0,
+ last_op => sync,
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}),
+ Config1 =
+ Config#{config => HConfig#{handler_pid => self(),
+ mode_tab => ModeTab}},
+ proc_lib:init_ack({ok,self(),Config1}),
+ gen_server:cast(self(), repeated_disk_log_sync),
+ case logger_h_common:unset_restart_flag(Name, ?MODULE) of
+ true ->
+ %% inform about restart
+ gen_server:cast(self(), {log_handler_info,
+ "Handler ~p restarted",
+ [Name]});
+ false ->
+ %% initial start
+ ok
+ end,
+ gen_server:enter_loop(?MODULE, [], State1)
+ catch
+ _:Error ->
+ unregister(RegName),
+ logger_h_common:error_notify({open_disk_log,Name,Error}),
+ proc_lib:init_ack(Error)
+ end;
+ Error ->
+ unregister(RegName),
+ logger_h_common:error_notify({open_disk_log,Name,Error}),
+ proc_lib:init_ack(Error)
+ end.
+
+%% This is the synchronous log event.
+handle_call({log, Bin}, _From, State) ->
+ {Result,State1} = do_log(Bin, call, State),
+ %% Result == ok | dropped
+ {reply, Result, State1};
+
+handle_call(disk_log_sync, _From, State = #{id := Name}) ->
+ State1 = #{prev_sync_result := Result} = disk_log_sync(Name, State),
+ {reply, Result, State1};
+
+handle_call({change_config,_OldConfig,NewConfig}, _From,
+ State = #{filesync_repeat_interval := FSyncInt0}) ->
+ HConfig = maps:get(config, NewConfig, #{}),
+ State1 = #{sync_mode_qlen := SMQL,
+ drop_mode_qlen := DMQL,
+ flush_qlen := FQL} = maps:merge(State, HConfig),
+ case logger_h_common:overload_levels_ok(State1) of
+ true ->
+ _ =
+ case maps:get(filesync_repeat_interval, HConfig, undefined) of
+ undefined ->
+ ok;
+ no_repeat ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined));
+ FSyncInt0 ->
+ ok;
+ _FSyncInt1 ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined)),
+ _ = gen_server:cast(self(), repeated_disk_log_sync)
+ end,
+ {reply, ok, State1};
+ false ->
+ {reply, {error,{invalid_levels,{SMQL,DMQL,FQL}}}, State}
+ end;
+
+handle_call(info, _From, State) ->
+ {reply, State, State};
+
+handle_call(reset, _From, State) ->
+ State1 = ?merge_with_stats(State),
+ {reply, ok, State1#{last_qlen => 0,
+ last_log_ts => ?timestamp(),
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}};
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State}.
+
+
+%% This is the asynchronous log event.
+handle_cast({log, Bin}, State) ->
+ {_,State1} = do_log(Bin, cast, State),
+ {noreply, State1};
+
+handle_cast({log_handler_info, Format, Args}, State = #{id:=Name}) ->
+ log_handler_info(Name, Format, Args, State),
+ {noreply, State};
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisecond value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% event, the repeat operation must be active!
+handle_cast(repeated_disk_log_sync,
+ State = #{id := Name,
+ filesync_repeat_interval := FSyncInt,
+ last_op := LastOp}) ->
+ State1 =
+ if is_integer(FSyncInt) ->
+ %% only do filesync if something has been
+ %% written since last time we checked
+ NewState = if LastOp == sync ->
+ State;
+ true ->
+ disk_log_sync(Name, State)
+ end,
+ {ok,TRef} =
+ timer:apply_after(FSyncInt, gen_server,cast,
+ [self(),repeated_disk_log_sync]),
+ NewState#{rep_sync_tref => TRef, last_op => sync};
+ true ->
+ State
+ end,
+ {noreply,State1}.
+
+%% The disk log owner must handle status messages from disk_log.
+handle_info({disk_log, _Node, _Log, {wrap,_NoLostItems}}, State) ->
+ {noreply, State};
+handle_info({disk_log, _Node, Log, Info = {truncated,_NoLostItems}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+handle_info({disk_log, _Node, Log, Info = {blocked_log,_Items}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+handle_info({disk_log, _Node, Log, full},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(full, PrevInfo, {disk_log,Name,Log,full}),
+ {noreply, State#{prev_disk_log_info => full}};
+handle_info({disk_log, _Node, Log, Info = {error_status,_Status}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+
+handle_info({'EXIT',_Pid,_Why}, State = #{id := _Name}) ->
+ {noreply, State};
+
+handle_info(_, State) ->
+ {noreply, State}.
+
+terminate(Reason, State = #{id := Name}) ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
+ undefined)),
+ _ = close_disk_log(Name, normal),
+ ok = logger_h_common:stop_or_restart(Name, Reason, State),
+ unregister(?name_to_reg_name(?MODULE, Name)),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
+%%%-----------------------------------------------------------------
+%%%
+get_default_config() ->
+ #{sync_mode_qlen => ?SYNC_MODE_QLEN,
+ drop_mode_qlen => ?DROP_MODE_QLEN,
+ flush_qlen => ?FLUSH_QLEN,
+ burst_limit_enable => ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+
+get_init_state() ->
+ #{dl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN}.
+
+%%%-----------------------------------------------------------------
+%%% Add a disk_log handler to the logger.
+%%% This starts a dedicated handler process which should always
+%%% exist if the handler is registered with logger (and should not
+%%% exist if the handler is not registered).
+%%%
+%%% Config is the logger:handler_config() map. Handler specific parameters
+%%% should be provided with a sub map associated with a key named
+%%% 'config', e.g:
+%%%
+%%%    Config = #{config => #{sync_mode_qlen => 50}}
+%%%
+%%% The 'config' sub map will also contain parameters for configuring
+%%% the disk_log:
+%%%
+%%% Config = #{config => #{file => file:filename(),
+%%% max_no_bytes => integer(),
+%%% max_no_files => integer(),
+%%% type => wrap | halt}}.
+%%%
+%%% If type == halt, then max_no_files is ignored.
+%%%
+%%% The disk_log handler process is linked to logger_sup, which is
+%%% part of the kernel application's supervision tree.
+start(Name, Config, HandlerState) ->
+ LoggerDLH =
+ #{id => Name,
+ start => {?MODULE, start_link, [Name,Config,HandlerState]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, LoggerDLH) of
+ {ok,Pid,Config1} ->
+ ok = logger_handler_watcher:register_handler(Name,Pid),
+ {ok,Config1};
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Stop and remove the handler.
+stop(Name) ->
+ case whereis(?name_to_reg_name(?MODULE,Name)) of
+ undefined ->
+ ok;
+ Pid ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ %% And we don't need to do supervisor:delete_child, since
+ %% the restart type is temporary, which means that the
+ %% child specification is automatically removed from the
+ %% supervisor when the process dies.
+ _ = gen_server:call(Pid, stop),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Logging and overload control.
+-define(update_dl_sync(C, Interval),
+ if C == 0 -> Interval;
+ true -> C-1 end).
+
+%% check for overload on each event (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox
+%% before LogWindowSize events have been handled.
+do_log(Bin, CallOrCast, State = #{id:=Name, mode := Mode0}) ->
+ T1 = ?timestamp(),
+
+ %% check if the handler is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% event to react quickly to large bursts of events and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
+
+ if (Mode1 == drop) andalso (Mode0 =/= drop) ->
+ log_handler_info(Name, "Handler ~p switched to drop mode",
+ [Name], State);
+ (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) ->
+ log_handler_info(Name, "Handler ~p switched to ~w mode",
+ [Name,Mode1], State);
+ true ->
+ ok
+ end,
+
+ %% kill the handler if it can't keep up with the load
+ logger_h_common:kill_if_choked(Name, QLen, Mem, ?MODULE, State),
+
+ if Mode1 == flush ->
+ flush(Name, QLen, T1, State1);
+ true ->
+ write(Name, Mode1, T1, Bin, CallOrCast, State1)
+ end.
+
+%% this function is called by do_log/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) ->
+ %% flush messages in the mailbox (a limited number in
+ %% order to not cause long delays)
+ NewFlushed = logger_h_common:flush_log_events(?FLUSH_MAX_N),
+
+ %% write info in log about flushed messages
+ log_handler_info(Name, "Handler ~p flushed ~w log events",
+ [Name,NewFlushed], State),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,_QLen1} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,_QLen1}),
+
+ %% Add 1 for the current log event
+ ?observe(Name,{flushed,NewFlushed+1}),
+
+ State1 = ?update_max_time(?diff_time(T1,_T0),State),
+ {dropped,?update_other(flushed,FLUSHED,NewFlushed,
+ State1#{mode => ?set_mode(ModeTab,async),
+ last_qlen => 0,
+ last_log_ts => T1})}.
+
+%% this function is called to write to disk_log
+write(Name, Mode, T1, Bin, _CallOrCast,
+ State = #{mode_tab := ModeTab,
+ dl_sync := DLSync,
+ dl_sync_int := DLSyncInt,
+ last_qlen := LastQLen,
+ last_log_ts := T0}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log events
+ {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
+
+    %% only send a synchronous event to the disk_log process
+    %% every DLSyncInt writes, to give the handler time between
+ %% writes so it can keep up with incoming messages
+ {Status,LastQLen1,State1} =
+ if DoWrite, DLSync == 0 ->
+ ?observe(Name,{_CallOrCast,1}),
+ NewState = disk_log_write(Name, Bin, State),
+ {ok, element(2,process_info(self(),message_queue_len)),
+ NewState};
+ DoWrite ->
+ ?observe(Name,{_CallOrCast,1}),
+ NewState = disk_log_write(Name, Bin, State),
+ {ok, LastQLen, NewState};
+ not DoWrite ->
+ ?observe(Name,{flushed,1}),
+ {dropped, LastQLen, State}
+ end,
+
+ %% Check if the time since the previous log event is long enough -
+ %% and the queue length small enough - to assume the mailbox has
+    %% been emptied, and if so, do a filesync operation and reset mode to
+ %% async. Note that this is the best we can do to detect an idle
+ %% handler without setting a timer after each log call/cast. If the
+    %% time between two consecutive log events is short and no new
+ %% event comes in after the last one, idle state won't be detected!
+ Time = ?diff_time(T1,T0),
+ {Mode1,BurstMsgCount1,State2} =
+ if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
+ (Time > ?IDLE_DETECT_TIME_USEC) ->
+ {?change_mode(ModeTab,Mode,async), 0, disk_log_sync(Name,State1)};
+ true ->
+ {Mode, BurstMsgCount,State1}
+ end,
+
+ State3 =
+ ?update_calls_or_casts(_CallOrCast,1,State2),
+ State4 =
+ ?update_max_time(Time,
+ State3#{mode => Mode1,
+ last_qlen := LastQLen1,
+ last_log_ts => T1,
+ burst_win_ts => BurstWinT,
+ burst_msg_count => BurstMsgCount1,
+ dl_sync => ?update_dl_sync(DLSync,DLSyncInt)}),
+ {Status,State4}.
+
+
+log_handler_info(Name, Format, Args, State) ->
+ Config =
+ case logger:get_handler_config(Name) of
+ {ok,Conf} -> Conf;
+ _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}
+ end,
+ Meta = #{time=>erlang:system_time(microsecond)},
+ Bin = logger_h_common:log_to_binary(#{level => notice,
+ msg => {Format,Args},
+ meta => Meta}, Config),
+ _ = disk_log_write(Name, Bin, State),
+ ok.
+
+
+open_disk_log(Name, File, Type, MaxNoBytes, MaxNoFiles) ->
+ case filelib:ensure_dir(File) of
+ ok ->
+ Size =
+ if Type == halt -> MaxNoBytes;
+ Type == wrap -> {MaxNoBytes,MaxNoFiles}
+ end,
+ Opts = [{name, Name},
+ {file, File},
+ {size, Size},
+ {type, Type},
+ {linkto, self()},
+ {repair, false},
+ {format, external},
+ {notify, true},
+ {quiet, true},
+ {mode, read_write}],
+ case disk_log:open(Opts) of
+ {ok,Name} ->
+ ok;
+ Error = {error,_Reason} ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+close_disk_log(Name, _) ->
+ _ = ?disk_log_sync(Name),
+ _ = disk_log:lclose(Name),
+ ok.
+
+disk_log_write(Name, Bin, State) ->
+ case ?disk_log_blog(Name, Bin) of
+ ok ->
+ State#{prev_log_result => ok, last_op => write};
+ LogError ->
+ _ = case maps:get(prev_log_result, State) of
+ LogError ->
+ %% don't report same error twice
+ ok;
+ _ ->
+ LogOpts = maps:get(log_opts, State),
+ logger_h_common:error_notify({Name,log,
+ LogOpts,
+ LogError})
+ end,
+ State#{prev_log_result => LogError}
+ end.
+
+disk_log_sync(Name, State) ->
+ case ?disk_log_sync(Name) of
+ ok ->
+ State#{prev_sync_result => ok, last_op => sync};
+ SyncError ->
+ _ = case maps:get(prev_sync_result, State) of
+ SyncError ->
+ %% don't report same error twice
+ ok;
+ _ ->
+ LogOpts = maps:get(log_opts, State),
+ logger_h_common:error_notify({Name,filesync,
+ LogOpts,
+ SyncError})
+ end,
+ State#{prev_sync_result => SyncError}
+ end.
+
+error_notify_new(Info,Info, _Term) ->
+ ok;
+error_notify_new(_Info0,_Info1, Term) ->
+ logger_h_common:error_notify(Term).
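
Putting the handler together from the outside, here is a hedged sketch of adding a wrap log via the public API; the handler id, file path, size limits, and formatter options are examples, not defaults mandated by this change.

    add_example_handler() ->
        ok = logger:add_handler(my_disk_log, logger_disk_log_h,
               #{config => #{file => "/var/log/my_app/event",
                             type => wrap,
                             max_no_files => 5,
                             max_no_bytes => 1048576,   %% per wrap file
                             filesync_repeat_interval => 5000},
                 level => info,
                 formatter => {logger_formatter, #{single_line => true}}}),
        %% Force a filesync and fetch the handler process state
        ok = logger_disk_log_h:filesync(my_disk_log),
        _Info = logger_disk_log_h:info(my_disk_log),
        ok.
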
diff --git a/lib/kernel/src/logger_filters.erl b/lib/kernel/src/logger_filters.erl
new file mode 100644
index 0000000000..0664c598e1
--- /dev/null
+++ b/lib/kernel/src/logger_filters.erl
@@ -0,0 +1,127 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_filters).
+
+-export([domain/2,
+ level/2,
+ progress/2,
+ remote_gl/2]).
+
+-include("logger_internal.hrl").
+-define(IS_ACTION(A), (A==log orelse A==stop)).
+
+-spec domain(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
+ Extra :: {Action,Compare,MatchDomain},
+ Action :: log | stop,
+ Compare :: super | sub | equal | not_equal | undefined,
+ MatchDomain :: list(atom()).
+domain(#{meta:=Meta}=LogEvent,{Action,Compare,MatchDomain})
+ when ?IS_ACTION(Action) andalso
+ (Compare==super orelse
+ Compare==sub orelse
+ Compare==equal orelse
+ Compare==not_equal orelse
+ Compare==undefined) andalso
+ is_list(MatchDomain) ->
+ filter_domain(Compare,Meta,MatchDomain,on_match(Action,LogEvent));
+domain(LogEvent,Extra) ->
+ erlang:error(badarg,[LogEvent,Extra]).
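+%% Illustrative usage (example only, not part of this change): a domain
+%% filter like this is typically attached to a handler, for instance via
+%% logger:add_handler_filter/3; the handler id and filter id below are
+%% made-up names:
+%%
+%%   logger:add_handler_filter(default, stop_sasl_domain,
+%%                             {fun logger_filters:domain/2,
+%%                              {stop, sub, [otp,sasl]}}).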
+
+-spec level(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
+ Extra :: {Action,Operator,MatchLevel},
+ Action :: log | stop,
+ Operator :: neq | eq | lt | gt | lteq | gteq,
+ MatchLevel :: logger:level().
+level(#{level:=L1}=LogEvent,{Action,Op,L2})
+ when ?IS_ACTION(Action) andalso
+ (Op==neq orelse
+ Op==eq orelse
+ Op==lt orelse
+ Op==gt orelse
+ Op==lteq orelse
+ Op==gteq) andalso
+ ?IS_LEVEL(L2) ->
+ filter_level(Op,L1,L2,on_match(Action,LogEvent));
+level(LogEvent,Extra) ->
+ erlang:error(badarg,[LogEvent,Extra]).
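+%% Illustrative example (not part of this change): a level filter entry
+%% in the {Id,{Fun,Args}} form used in filters lists (compare
+%% ?DEFAULT_HANDLER_FILTERS); it stops all events less severe than notice:
+%%
+%%   {stop_low_level, {fun logger_filters:level/2, {stop, lt, notice}}}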
+
+-spec progress(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
+ Extra :: log | stop.
+progress(LogEvent,Action) when ?IS_ACTION(Action) ->
+ filter_progress(LogEvent,on_match(Action,LogEvent));
+progress(LogEvent,Action) ->
+ erlang:error(badarg,[LogEvent,Action]).
+
+-spec remote_gl(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
+ Extra :: log | stop.
+remote_gl(LogEvent,Action) when ?IS_ACTION(Action) ->
+ filter_remote_gl(LogEvent,on_match(Action,LogEvent));
+remote_gl(LogEvent,Action) ->
+ erlang:error(badarg,[LogEvent,Action]).
+
+%%%-----------------------------------------------------------------
+%%% Internal
+filter_domain(super,#{domain:=Domain},MatchDomain,OnMatch) ->
+ is_prefix(Domain,MatchDomain,OnMatch);
+filter_domain(sub,#{domain:=Domain},MatchDomain,OnMatch) ->
+ is_prefix(MatchDomain,Domain,OnMatch);
+filter_domain(equal,#{domain:=Domain},Domain,OnMatch) ->
+ OnMatch;
+filter_domain(not_equal,#{domain:=Domain},MatchDomain,OnMatch)
+ when Domain=/=MatchDomain ->
+ OnMatch;
+filter_domain(Compare,Meta,_,OnMatch) ->
+ case maps:is_key(domain,Meta) of
+ false when Compare==undefined; Compare==not_equal -> OnMatch;
+ _ -> ignore
+ end.
+
+is_prefix(D1,D2,OnMatch) when is_list(D1), is_list(D2) ->
+ case lists:prefix(D1,D2) of
+ true -> OnMatch;
+ false -> ignore
+ end;
+is_prefix(_,_,_) ->
+ ignore.
+
+filter_level(Op,L1,L2,OnMatch) ->
+ case logger:compare_levels(L1,L2) of
+ eq when Op==eq; Op==lteq; Op==gteq -> OnMatch;
+ lt when Op==lt; Op==lteq; Op==neq -> OnMatch;
+ gt when Op==gt; Op==gteq; Op==neq -> OnMatch;
+ _ -> ignore
+ end.
+
+filter_progress(#{msg:={report,#{label:={_,progress}}}},OnMatch) ->
+ OnMatch;
+filter_progress(_,_) ->
+ ignore.
+
+filter_remote_gl(#{meta:=#{gl:=GL}},OnMatch) when node(GL)=/=node() ->
+ OnMatch;
+filter_remote_gl(_,_) ->
+ ignore.
+
+on_match(log,LogEvent) -> LogEvent;
+on_match(stop,_) -> stop.
diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl
new file mode 100644
index 0000000000..ded89bac9f
--- /dev/null
+++ b/lib/kernel/src/logger_formatter.erl
@@ -0,0 +1,514 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_formatter).
+
+-export([format/2]).
+-export([check_config/1]).
+
+-include("logger_internal.hrl").
+
+%%%-----------------------------------------------------------------
+%%% Types
+-type config() :: #{chars_limit => pos_integer() | unlimited,
+ depth => pos_integer() | unlimited,
+ legacy_header => boolean(),
+ max_size => pos_integer() | unlimited,
+ report_cb => logger:report_cb(),
+ single_line => boolean(),
+ template => template(),
+ time_designator => byte(),
+ time_offset => integer() | [byte()]}.
+-type template() :: [metakey() | {metakey(),template(),template()} | string()].
+-type metakey() :: atom() | [atom()].
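+
+%% Illustrative template (example only, not one of the defaults): prints
+%% the pid only when it exists in the metadata, using the
+%% {Key,IfExist,Else} form defined above:
+%%
+%%   [time," ",level,": ",{pid,[pid," "],[]},msg,"\n"]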
+
+%%%-----------------------------------------------------------------
+%%% API
+-spec format(LogEvent,Config) -> unicode:chardata() when
+ LogEvent :: logger:log_event(),
+ Config :: config().
+format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
+ when is_map(Config0) ->
+ Config = add_default_config(Config0),
+ Meta1 = maybe_add_legacy_header(Level,Meta,Config),
+ Template = maps:get(template,Config),
+ {BT,AT0} = lists:splitwith(fun(msg) -> false; (_) -> true end, Template),
+ {DoMsg,AT} =
+ case AT0 of
+ [msg|Rest] -> {true,Rest};
+ _ ->{false,AT0}
+ end,
+ B = do_format(Level,Meta1,BT,Config),
+ A = do_format(Level,Meta1,AT,Config),
+ MsgStr =
+ if DoMsg ->
+ Config1 =
+ case maps:get(chars_limit,Config) of
+ unlimited ->
+ Config;
+ Size0 ->
+ Size =
+ case Size0 - string:length([B,A]) of
+ S when S>=0 -> S;
+ _ -> 0
+ end,
+ Config#{chars_limit=>Size}
+ end,
+ MsgStr0 = format_msg(Msg0,Meta1,Config1),
+ case maps:get(single_line,Config) of
+ true ->
+ %% Trim leading and trailing whitespaces, and replace
+ %% newlines with ", "
+ re:replace(string:trim(MsgStr0),",?\r?\n\s*",", ",
+ [{return,list},global,unicode]);
+ _false ->
+ MsgStr0
+ end;
+ true ->
+ ""
+ end,
+ truncate([B,MsgStr,A],maps:get(max_size,Config)).
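+
+%% Illustrative call (example only, not part of this change); the event
+%% and config below are made up but match the spec above:
+%%
+%%   logger_formatter:format(
+%%       #{level => info,
+%%         msg   => {"~p",[some_term]},
+%%         meta  => #{time => erlang:system_time(microsecond)}},
+%%       #{single_line => true}).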
+
+do_format(Level,Data,[level|Format],Config) ->
+ [to_string(level,Level,Config)|do_format(Level,Data,Format,Config)];
+do_format(Level,Data,[{Key,IfExist,Else}|Format],Config) ->
+ String =
+ case value(Key,Data) of
+ {ok,Value} -> do_format(Level,Data#{Key=>Value},IfExist,Config);
+ error -> do_format(Level,Data,Else,Config)
+ end,
+ [String|do_format(Level,Data,Format,Config)];
+do_format(Level,Data,[Key|Format],Config)
+ when is_atom(Key) orelse
+ (is_list(Key) andalso is_atom(hd(Key))) ->
+ String =
+ case value(Key,Data) of
+ {ok,Value} -> to_string(Key,Value,Config);
+ error -> ""
+ end,
+ [String|do_format(Level,Data,Format,Config)];
+do_format(Level,Data,[Str|Format],Config) ->
+ [Str|do_format(Level,Data,Format,Config)];
+do_format(_Level,_Data,[],_Config) ->
+ [].
+
+value(Key,Meta) when is_map_key(Key,Meta) ->
+ {ok,maps:get(Key,Meta)};
+value([Key|Keys],Meta) when is_map_key(Key,Meta) ->
+ value(Keys,maps:get(Key,Meta));
+value([],Value) ->
+ {ok,Value};
+value(_,_) ->
+ error.
+
+to_string(time,Time,Config) ->
+ format_time(Time,Config);
+to_string(mfa,MFA,Config) ->
+ format_mfa(MFA,Config);
+to_string(_,Value,Config) ->
+ to_string(Value,Config).
+
+to_string(X,_) when is_atom(X) ->
+ atom_to_list(X);
+to_string(X,_) when is_integer(X) ->
+ integer_to_list(X);
+to_string(X,_) when is_pid(X) ->
+ pid_to_list(X);
+to_string(X,_) when is_reference(X) ->
+ ref_to_list(X);
+to_string(X,Config) when is_list(X) ->
+ case printable_list(lists:flatten(X)) of
+ true -> X;
+ _ -> io_lib:format(p(Config),[X])
+ end;
+to_string(X,Config) ->
+ io_lib:format(p(Config),[X]).
+
+printable_list([]) ->
+ false;
+printable_list(X) ->
+ io_lib:printable_list(X).
+
+format_msg({string,Chardata},Meta,Config) ->
+ format_msg({"~ts",[Chardata]},Meta,Config);
+format_msg({report,_}=Msg,Meta,#{report_cb:=Fun}=Config)
+ when is_function(Fun,1); is_function(Fun,2) ->
+ format_msg(Msg,Meta#{report_cb=>Fun},maps:remove(report_cb,Config));
+format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,1) ->
+ try Fun(Report) of
+ {Format,Args} when is_list(Format), is_list(Args) ->
+ format_msg({Format,Args},maps:remove(report_cb,Meta),Config);
+ Other ->
+ P = p(Config),
+ format_msg({"REPORT_CB/1 ERROR: "++P++"; Returned: "++P,
+ [Report,Other]},Meta,Config)
+ catch C:R:S ->
+ P = p(Config),
+ format_msg({"REPORT_CB/1 CRASH: "++P++"; Reason: "++P,
+ [Report,{C,R,logger:filter_stacktrace(?MODULE,S)}]},
+ Meta,Config)
+ end;
+format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,2) ->
+ try Fun(Report,maps:with([depth,chars_limit,single_line],Config)) of
+ Chardata when ?IS_STRING(Chardata) ->
+ try chardata_to_list(Chardata) % already size limited by report_cb
+ catch _:_ ->
+ P = p(Config),
+ format_msg({"REPORT_CB/2 ERROR: "++P++"; Returned: "++P,
+ [Report,Chardata]},Meta,Config)
+ end;
+ Other ->
+ P = p(Config),
+ format_msg({"REPORT_CB/2 ERROR: "++P++"; Returned: "++P,
+ [Report,Other]},Meta,Config)
+ catch C:R:S ->
+ P = p(Config),
+ format_msg({"REPORT_CB/2 CRASH: "++P++"; Reason: "++P,
+ [Report,{C,R,logger:filter_stacktrace(?MODULE,S)}]},
+ Meta,Config)
+ end;
+format_msg({report,Report},Meta,Config) ->
+ format_msg({report,Report},
+ Meta#{report_cb=>fun logger:format_report/1},
+ Config);
+format_msg(Msg,_Meta,#{depth:=Depth,chars_limit:=CharsLimit,
+ single_line:=Single}) ->
+ Opts = chars_limit_to_opts(CharsLimit),
+ format_msg(Msg, Depth, Opts, Single).
+
+chars_limit_to_opts(unlimited) -> [];
+chars_limit_to_opts(CharsLimit) -> [{chars_limit,CharsLimit}].
+
+format_msg({Format0,Args},Depth,Opts,Single) ->
+ try
+ Format1 = io_lib:scan_format(Format0, Args),
+ Format = reformat(Format1, Depth, Single),
+ io_lib:build_text(Format,Opts)
+ catch C:R:S ->
+ P = p(Single),
+ FormatError = "FORMAT ERROR: "++P++" - "++P,
+ case Format0 of
+ FormatError ->
+ %% already been here - avoid failing cyclically
+ erlang:raise(C,R,S);
+ _ ->
+ format_msg({FormatError,[Format0,Args]},Depth,Opts,Single)
+ end
+ end.
+
+reformat(Format,unlimited,false) ->
+ Format;
+reformat([#{control_char:=C}=M|T], Depth, true) when C =:= $p ->
+ [limit_depth(M#{width => 0}, Depth)|reformat(T, Depth, true)];
+reformat([#{control_char:=C}=M|T], Depth, true) when C =:= $P ->
+ [M#{width => 0}|reformat(T, Depth, true)];
+reformat([#{control_char:=C}=M|T], Depth, Single) when C =:= $p; C =:= $w ->
+ [limit_depth(M, Depth)|reformat(T, Depth, Single)];
+reformat([H|T], Depth, Single) ->
+ [H|reformat(T, Depth, Single)];
+reformat([], _, _) ->
+ [].
+
+limit_depth(M0, unlimited) ->
+ M0;
+limit_depth(#{control_char:=C0, args:=Args}=M0, Depth) ->
+ C = C0 - ($a - $A), %To uppercase.
+ M0#{control_char:=C,args:=Args++[Depth]}.
+
+chardata_to_list(Chardata) ->
+ case unicode:characters_to_list(Chardata,unicode) of
+ List when is_list(List) ->
+ List;
+ Error ->
+ throw(Error)
+ end.
+
+truncate(String,unlimited) ->
+ String;
+truncate(String,Size) ->
+ Length = string:length(String),
+ if Length>Size ->
+ case lists:reverse(lists:flatten(String)) of
+ [$\n|_] ->
+ string:slice(String,0,Size-4)++"...\n";
+ _ ->
+ string:slice(String,0,Size-3)++"..."
+ end;
+ true ->
+ String
+ end.
+
+%% SysTime is the system time in microseconds
+format_time(SysTime,#{time_offset:=Offset,time_designator:=Des})
+ when is_integer(SysTime) ->
+ calendar:system_time_to_rfc3339(SysTime,[{unit,microsecond},
+ {offset,Offset},
+ {time_designator,Des}]).
+
+%% SysTime is the system time in microseconds
+timestamp_to_datetimemicro(SysTime,Config) when is_integer(SysTime) ->
+ Micro = SysTime rem 1000000,
+ Sec = SysTime div 1000000,
+ UniversalTime = erlang:posixtime_to_universaltime(Sec),
+ {{Date,Time},UtcStr} =
+ case offset_to_utc(maps:get(time_offset,Config)) of
+ true -> {UniversalTime,"UTC "};
+ _ -> {erlang:universaltime_to_localtime(UniversalTime),""}
+ end,
+ {Date,Time,Micro,UtcStr}.
+
+format_mfa({M,F,A},_) when is_atom(M), is_atom(F), is_integer(A) ->
+ atom_to_list(M)++":"++atom_to_list(F)++"/"++integer_to_list(A);
+format_mfa({M,F,A},Config) when is_atom(M), is_atom(F), is_list(A) ->
+ format_mfa({M,F,length(A)},Config);
+format_mfa(MFA,Config) ->
+ to_string(MFA,Config).
+
+maybe_add_legacy_header(Level,
+ #{time:=Timestamp}=Meta,
+ #{legacy_header:=true}=Config) ->
+ #{title:=Title}=MyMeta = add_legacy_title(Level,Meta,Config),
+ {{Y,Mo,D},{H,Mi,S},Micro,UtcStr} =
+ timestamp_to_datetimemicro(Timestamp,Config),
+ Header =
+ io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===",
+ [Title,D,month(Mo),Y,H,Mi,S,Micro,UtcStr]),
+ Meta#{?MODULE=>MyMeta#{header=>Header}};
+maybe_add_legacy_header(_,Meta,_) ->
+ Meta.
+
+add_legacy_title(_Level,#{?MODULE:=#{title:=_}=MyMeta},_) ->
+ MyMeta;
+add_legacy_title(Level,Meta,Config) ->
+ case maps:get(?MODULE,Meta,#{}) of
+ #{title:=_}=MyMeta ->
+ MyMeta;
+ MyMeta ->
+ TitleLevel =
+ case (Level=:=notice andalso maps:find(error_logger,Meta)) of
+ {ok,_} ->
+ maps:get(error_logger_notice_header,Config);
+ _ ->
+ Level
+ end,
+ Title = string:uppercase(atom_to_list(TitleLevel)) ++ " REPORT",
+ MyMeta#{title=>Title}
+ end.
+
+month(1) -> "Jan";
+month(2) -> "Feb";
+month(3) -> "Mar";
+month(4) -> "Apr";
+month(5) -> "May";
+month(6) -> "Jun";
+month(7) -> "Jul";
+month(8) -> "Aug";
+month(9) -> "Sep";
+month(10) -> "Oct";
+month(11) -> "Nov";
+month(12) -> "Dec".
+
+%% Ensure that all valid configuration parameters exist in the final
+%% configuration map
+add_default_config(Config0) ->
+ Default =
+ #{chars_limit=>unlimited,
+ error_logger_notice_header=>info,
+ legacy_header=>false,
+ single_line=>true,
+ time_designator=>$T},
+ MaxSize = get_max_size(maps:get(max_size,Config0,undefined)),
+ Depth = get_depth(maps:get(depth,Config0,undefined)),
+ Offset = get_offset(maps:get(time_offset,Config0,undefined)),
+ add_default_template(maps:merge(Default,Config0#{max_size=>MaxSize,
+ depth=>Depth,
+ time_offset=>Offset})).
+
+add_default_template(#{template:=_}=Config) ->
+ Config;
+add_default_template(Config) ->
+ Config#{template=>default_template(Config)}.
+
+default_template(#{legacy_header:=true}) ->
+ ?DEFAULT_FORMAT_TEMPLATE_HEADER;
+default_template(#{single_line:=true}) ->
+ ?DEFAULT_FORMAT_TEMPLATE_SINGLE;
+default_template(_) ->
+ ?DEFAULT_FORMAT_TEMPLATE.
+
+get_max_size(undefined) ->
+ unlimited;
+get_max_size(S) ->
+ max(10,S).
+
+get_depth(undefined) ->
+ error_logger:get_format_depth();
+get_depth(S) ->
+ max(5,S).
+
+get_offset(undefined) ->
+ utc_to_offset(get_utc_config());
+get_offset(Offset) ->
+ Offset.
+
+utc_to_offset(true) ->
+ "Z";
+utc_to_offset(false) ->
+ "".
+
+get_utc_config() ->
+ %% SASL utc_log overrides stdlib config - in order to have uniform
+ %% timestamps in log messages
+ case application:get_env(sasl, utc_log) of
+ {ok, Val} when is_boolean(Val) -> Val;
+ _ ->
+ case application:get_env(stdlib, utc_log) of
+ {ok, Val} when is_boolean(Val) -> Val;
+ _ -> false
+ end
+ end.
+
+offset_to_utc(Z) when Z=:=0; Z=:="z"; Z=:="Z" ->
+ true;
+offset_to_utc([$+|Tz]) ->
+ case io_lib:fread("~d:~d", Tz) of
+ {ok, [0, 0], []} ->
+ true;
+ _ ->
+ false
+ end;
+offset_to_utc(_) ->
+ false.
+
+-spec check_config(Config) -> ok | {error,term()} when
+ Config :: config().
+check_config(Config) when is_map(Config) ->
+ do_check_config(maps:to_list(Config));
+check_config(Config) ->
+ {error,{invalid_formatter_config,?MODULE,Config}}.
+
+do_check_config([{Type,L}|Config]) when Type == chars_limit;
+ Type == depth;
+ Type == max_size ->
+ case check_limit(L) of
+ ok -> do_check_config(Config);
+ error -> {error,{invalid_formatter_config,?MODULE,{Type,L}}}
+ end;
+do_check_config([{single_line,SL}|Config]) when is_boolean(SL) ->
+ do_check_config(Config);
+do_check_config([{legacy_header,LH}|Config]) when is_boolean(LH) ->
+ do_check_config(Config);
+do_check_config([{error_logger_notice_header,ELNH}|Config]) when ELNH == info;
+ ELNH == notice ->
+ do_check_config(Config);
+do_check_config([{report_cb,RCB}|Config]) when is_function(RCB,1);
+ is_function(RCB,2) ->
+ do_check_config(Config);
+do_check_config([{template,T}|Config]) ->
+ case check_template(T) of
+ ok -> do_check_config(Config);
+ error -> {error,{invalid_formatter_template,?MODULE,T}}
+ end;
+do_check_config([{time_offset,Offset}|Config]) ->
+ case check_offset(Offset) of
+ ok ->
+ do_check_config(Config);
+ error ->
+ {error,{invalid_formatter_config,?MODULE,{time_offset,Offset}}}
+ end;
+do_check_config([{time_designator,Char}|Config]) when Char>=0, Char=<255 ->
+ case io_lib:printable_latin1_list([Char]) of
+ true ->
+ do_check_config(Config);
+ false ->
+ {error,{invalid_formatter_config,?MODULE,{time_designator,Char}}}
+ end;
+do_check_config([C|_]) ->
+ {error,{invalid_formatter_config,?MODULE,C}};
+do_check_config([]) ->
+ ok.
+
+check_limit(L) when is_integer(L), L>0 ->
+ ok;
+check_limit(unlimited) ->
+ ok;
+check_limit(_) ->
+ error.
+
+check_template([Key|T]) when is_atom(Key) ->
+ check_template(T);
+check_template([Key|T]) when is_list(Key), is_atom(hd(Key)) ->
+ case lists:all(fun(X) when is_atom(X) -> true;
+ (_) -> false
+ end,
+ Key) of
+ true ->
+ check_template(T);
+ false ->
+ error
+ end;
+check_template([{Key,IfExist,Else}|T])
+ when is_atom(Key) orelse
+ (is_list(Key) andalso is_atom(hd(Key))) ->
+ case check_template(IfExist) of
+ ok ->
+ case check_template(Else) of
+ ok ->
+ check_template(T);
+ error ->
+ error
+ end;
+ error ->
+ error
+ end;
+check_template([Str|T]) when is_list(Str) ->
+ case io_lib:printable_unicode_list(Str) of
+ true -> check_template(T);
+ false -> error
+ end;
+check_template([]) ->
+ ok;
+check_template(_) ->
+ error.
+
+check_offset(I) when is_integer(I) ->
+ ok;
+check_offset(Tz) when Tz=:=""; Tz=:="Z"; Tz=:="z" ->
+ ok;
+check_offset([Sign|Tz]) when Sign=:=$+; Sign=:=$- ->
+ check_timezone(Tz);
+check_offset(_) ->
+ error.
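+
+%% Examples of values accepted by check_offset/1 (illustrative only):
+%%
+%%   check_offset(0)         %% -> ok (integer offset)
+%%   check_offset("Z")       %% -> ok (UTC)
+%%   check_offset("+02:00")  %% -> ok
+%%   check_offset("bad")     %% -> error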
+
+check_timezone(Tz) ->
+ try io_lib:fread("~d:~d", Tz) of
+ {ok, [_, _], []} ->
+ ok;
+ _ ->
+ error
+ catch _:_ ->
+ error
+ end.
+
+p(#{single_line:=Single}) ->
+ p(Single);
+p(true) ->
+ "~0tp";
+p(false) ->
+ "~tp".
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
new file mode 100644
index 0000000000..94c640cb92
--- /dev/null
+++ b/lib/kernel/src/logger_h_common.erl
@@ -0,0 +1,339 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_h_common).
+
+-include("logger_h_common.hrl").
+-include("logger_internal.hrl").
+
+-export([log_to_binary/2,
+ check_common_config/1,
+ call_cast_or_drop/4,
+ check_load/1,
+ limit_burst/1,
+ kill_if_choked/5,
+ flush_log_events/0,
+ flush_log_events/1,
+ handler_exit/2,
+ set_restart_flag/2,
+ unset_restart_flag/2,
+ cancel_timer/1,
+ stop_or_restart/3,
+ overload_levels_ok/1,
+ error_notify/1,
+ info_notify/1]).
+
+%%%-----------------------------------------------------------------
+%%% Convert log data on any form to binary
+-spec log_to_binary(LogEvent,Config) -> LogString when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config(),
+ LogString :: binary().
+log_to_binary(#{msg:={report,_},meta:=#{report_cb:=_}}=Log,Config) ->
+ do_log_to_binary(Log,Config);
+log_to_binary(#{msg:={report,_},meta:=Meta}=Log,Config) ->
+ DefaultReportCb = fun logger:format_otp_report/1,
+ do_log_to_binary(Log#{meta=>Meta#{report_cb=>DefaultReportCb}},Config);
+log_to_binary(Log,Config) ->
+ do_log_to_binary(Log,Config).
+
+do_log_to_binary(Log,Config) ->
+ {Formatter,FormatterConfig} =
+ maps:get(formatter,Config,{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ String = try_format(Log,Formatter,FormatterConfig),
+ try string_to_binary(String)
+ catch C2:R2:S2 ->
+ ?LOG_INTERNAL(debug,[{formatter_error,Formatter},
+ {config,FormatterConfig},
+ {log_event,Log},
+ {bad_return_value,String},
+ {catched,{C2,R2,S2}}]),
+ <<"FORMATTER ERROR: bad return value">>
+ end.
+
+try_format(Log,Formatter,FormatterConfig) ->
+ try Formatter:format(Log,FormatterConfig)
+ catch
+ C:R:S ->
+ ?LOG_INTERNAL(debug,[{formatter_crashed,Formatter},
+ {config,FormatterConfig},
+ {log_event,Log},
+ {reason,
+ {C,R,logger:filter_stacktrace(?MODULE,S)}}]),
+ case {?DEFAULT_FORMATTER,#{}} of
+ {Formatter,FormatterConfig} ->
+ "DEFAULT FORMATTER CRASHED";
+ {DefaultFormatter,DefaultConfig} ->
+ try_format(Log#{msg=>{"FORMATTER CRASH: ~tp",
+ [maps:get(msg,Log)]}},
+ DefaultFormatter,DefaultConfig)
+ end
+ end.
+
+string_to_binary(String) ->
+ case unicode:characters_to_binary(String) of
+ Binary when is_binary(Binary) ->
+ Binary;
+ Error ->
+ throw(Error)
+ end.
+
+
+%%%-----------------------------------------------------------------
+%%% Check that the configuration term is valid
+check_common_config({mode_tab,_Tid}) ->
+ valid;
+check_common_config({handler_pid,Pid}) when is_pid(Pid) ->
+ valid;
+
+check_common_config({sync_mode_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({drop_mode_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({flush_qlen,N}) when is_integer(N) ->
+ valid;
+
+check_common_config({burst_limit_enable,Bool}) when Bool == true;
+ Bool == false ->
+ valid;
+check_common_config({burst_limit_max_count,N}) when is_integer(N) ->
+ valid;
+check_common_config({burst_limit_window_time,N}) when is_integer(N) ->
+ valid;
+
+check_common_config({overload_kill_enable,Bool}) when Bool == true;
+ Bool == false ->
+ valid;
+check_common_config({overload_kill_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({overload_kill_mem_size,N}) when is_integer(N) ->
+ valid;
+check_common_config({overload_kill_restart_after,NorA}) when is_integer(NorA);
+ NorA == infinity ->
+ valid;
+
+check_common_config({filesync_repeat_interval,NorA}) when is_integer(NorA);
+ NorA == no_repeat ->
+ valid;
+check_common_config(_) ->
+ invalid.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload Protection
+call_cast_or_drop(_Name, HandlerPid, ModeTab, Bin) ->
+ %% If the handler process is getting overloaded, the log event
+ %% will be synchronous instead of asynchronous (slows down the
+ %% logging tempo of a process doing lots of logging). If the
+ %% handler is choked, drop mode is set and no event will be sent.
+ try ?get_mode(ModeTab) of
+ async ->
+ gen_server:cast(HandlerPid, {log,Bin});
+ sync ->
+ try gen_server:call(HandlerPid, {log,Bin}, ?DEFAULT_CALL_TIMEOUT) of
+ %% if return value from call == dropped, the
+ %% message has been flushed by handler and should
+ %% therefore not be counted as dropped in stats
+ ok -> ok;
+ dropped -> ok
+ catch
+ _:{timeout,_} ->
+ ?observe(_Name,{dropped,1})
+ end;
+ drop ->
+ ?observe(_Name,{dropped,1})
+ catch
+ %% if the ETS table doesn't exist (maybe because of a
+ %% handler restart), we can only drop the event
+ _:_ -> ?observe(_Name,{dropped,1})
+ end,
+ ok.
+
+handler_exit(_Name, Reason) ->
+ exit(Reason).
+
+set_restart_flag(Name, Module) ->
+ Flag = list_to_atom(lists:concat([Module,"_",Name,"_restarting"])),
+ spawn(fun() ->
+ register(Flag, self()),
+ timer:sleep(infinity)
+ end),
+ ok.
+
+unset_restart_flag(Name, Module) ->
+ Flag = list_to_atom(lists:concat([Module,"_",Name,"_restarting"])),
+ case whereis(Flag) of
+ undefined ->
+ false;
+ Pid ->
+ exit(Pid, kill),
+ true
+ end.
+
+check_load(State = #{id:=_Name, mode_tab := ModeTab, mode := Mode,
+ sync_mode_qlen := SyncModeQLen,
+ drop_mode_qlen := DropModeQLen,
+ flush_qlen := FlushQLen}) ->
+ {_,Mem} = process_info(self(), memory),
+ ?observe(_Name,{max_mem,Mem}),
+ {_,QLen} = process_info(self(), message_queue_len),
+ ?observe(_Name,{max_qlen,QLen}),
+ %% When the handler process gets scheduled in, it's impossible
+ %% to predict the QLen. We could jump "up" arbitrarily from say
+ %% async to sync, async to drop, sync to flush, etc. However, when
+ %% the handler process manages the log events (without flushing),
+ %% one after the other, we will move "down" from drop to sync and
+ %% from sync to async. This way we don't risk getting stuck in
+ %% drop or sync mode with an empty mailbox.
+ {Mode1,_NewDrops,_NewFlushes} =
+ if
+ QLen >= FlushQLen ->
+ {flush, 0,1};
+ QLen >= DropModeQLen ->
+ %% Note that drop mode will force log events to
+ %% be dropped on the client side (never get sent to
+ %% the handler).
+ IncDrops = if Mode == drop -> 0; true -> 1 end,
+ {?change_mode(ModeTab, Mode, drop), IncDrops,0};
+ QLen >= SyncModeQLen ->
+ {?change_mode(ModeTab, Mode, sync), 0,0};
+ true ->
+ {?change_mode(ModeTab, Mode, async), 0,0}
+ end,
+ State1 = ?update_other(drops,DROPS,_NewDrops,State),
+ {Mode1, QLen, Mem,
+ ?update_other(flushes,FLUSHES,_NewFlushes,
+ State1#{last_qlen => QLen})}.
+
+limit_burst(#{burst_limit_enable := false}) ->
+ {true,0,0};
+limit_burst(#{burst_win_ts := BurstWinT0,
+ burst_msg_count := BurstMsgCount,
+ burst_limit_window_time := BurstLimitWinTime,
+ burst_limit_max_count := BurstLimitMaxCnt}) ->
+ if (BurstMsgCount >= BurstLimitMaxCnt) ->
+ %% the limit for allowed messages has been reached
+ BurstWinT1 = ?timestamp(),
+ case ?diff_time(BurstWinT1,BurstWinT0) of
+ BurstCheckTime when BurstCheckTime < (BurstLimitWinTime*1000) ->
+ %% we're still within the burst time frame
+ {false,BurstWinT0,BurstMsgCount};
+ _BurstCheckTime ->
+ %% burst time frame passed, reset counters
+ {true,BurstWinT1,0}
+ end;
+ true ->
+ %% the limit for allowed messages not yet reached
+ {true,BurstWinT0,BurstMsgCount+1}
+ end.
+
+kill_if_choked(Name, QLen, Mem, HandlerMod,
+ State = #{overload_kill_enable := KillIfOL,
+ overload_kill_qlen := OLKillQLen,
+ overload_kill_mem_size := OLKillMem}) ->
+ if KillIfOL andalso
+ ((QLen > OLKillQLen) orelse (Mem > OLKillMem)) ->
+ HandlerMod:log_handler_info(Name,
+ "Handler ~p overloaded and stopping",
+ [Name], State),
+ set_restart_flag(Name, HandlerMod),
+ handler_exit(Name, {shutdown,{overloaded,Name,QLen,Mem}});
+ true ->
+ ok
+ end.
+
+flush_log_events() ->
+ flush_log_events(-1).
+
+flush_log_events(Limit) ->
+ process_flag(priority, high),
+ Flushed = flush_log_events(0, Limit),
+ process_flag(priority, normal),
+ Flushed.
+
+flush_log_events(Limit, Limit) ->
+ Limit;
+flush_log_events(N, Limit) ->
+ %% flush log events but leave other events, such as
+ %% filesync, info and change_config, so that these
+ %% have a chance to be processed even under heavy load
+ receive
+ {'$gen_cast',{log,_}} ->
+ flush_log_events(N+1, Limit);
+ {'$gen_call',{Pid,MRef},{log,_}} ->
+ Pid ! {MRef, dropped},
+ flush_log_events(N+1, Limit)
+ after
+ 0 -> N
+ end.
+
+cancel_timer(TRef) when is_atom(TRef) -> ok;
+cancel_timer(TRef) -> timer:cancel(TRef).
+
+
+stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}},
+ #{overload_kill_restart_after := RestartAfter}) ->
+ %% If we're terminating because of an overload situation (see
+ %% logger_h_common:kill_if_choked/5), we need to remove the handler
+ %% and set a restart timer. A separate process must perform this
+ %% in order to avoid deadlock.
+ HandlerPid = self(),
+ ConfigResult = logger:get_handler_config(Name),
+ RemoveAndRestart =
+ fun() ->
+ MRef = erlang:monitor(process, HandlerPid),
+ receive
+ {'DOWN',MRef,_,_,_} ->
+ ok
+ after 30000 ->
+ error_notify(Reason),
+ exit(HandlerPid, kill)
+ end,
+ case ConfigResult of
+ {ok,#{module:=HMod}=HConfig0} when is_integer(RestartAfter) ->
+ _ = logger:remove_handler(Name),
+ HConfig = try HMod:filter_config(HConfig0)
+ catch _:_ -> HConfig0
+ end,
+ _ = timer:apply_after(RestartAfter, logger, add_handler,
+ [Name,HMod,HConfig]);
+ {ok,_} ->
+ _ = logger:remove_handler(Name);
+ {error,CfgReason} when is_integer(RestartAfter) ->
+ error_notify({Name,restart_impossible,CfgReason});
+ {error,_} ->
+ ok
+ end
+ end,
+ spawn(RemoveAndRestart),
+ ok;
+stop_or_restart(_Name, _Reason, _State) ->
+ ok.
+
+overload_levels_ok(HandlerConfig) ->
+ SMQL = maps:get(sync_mode_qlen, HandlerConfig, ?SYNC_MODE_QLEN),
+ DMQL = maps:get(drop_mode_qlen, HandlerConfig, ?DROP_MODE_QLEN),
+ FQL = maps:get(flush_qlen, HandlerConfig, ?FLUSH_QLEN),
+ (DMQL > 1) andalso (SMQL =< DMQL) andalso (DMQL =< FQL).
+
+error_notify(Term) ->
+ ?internal_log(error, Term).
+
+info_notify(Term) ->
+ ?internal_log(info, Term).
diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl
new file mode 100644
index 0000000000..e0a7b6e3ca
--- /dev/null
+++ b/lib/kernel/src/logger_h_common.hrl
@@ -0,0 +1,263 @@
+
+%%%-----------------------------------------------------------------
+%%% Overload protection configuration
+
+%%! *** NOTE ***
+%%! It's important that:
+%%! SYNC_MODE_QLEN =< DROP_MODE_QLEN =< FLUSH_QLEN
+%%! and that DROP_MODE_QLEN >= 2.
+%%! Otherwise the handler could end up in drop mode with no new
+%%! log requests to process. This would cause all future requests
+%%! to be dropped (no switch to async mode would ever take place).
+
+%% This specifies the message_queue_len value where the log
+%% requests switch from asynchronous casts to synchronous calls.
+-define(SYNC_MODE_QLEN, 10).
+%% Above this message_queue_len, log requests will be dropped,
+%% i.e. no log requests get sent to the handler process.
+-define(DROP_MODE_QLEN, 200).
+%% Above this message_queue_len, the handler process will flush
+%% its mailbox and only leave this number of messages in it.
+-define(FLUSH_QLEN, 1000).
+
+%% Never flush more than this number of messages in one go,
+%% or the handler will be unresponsive for seconds (but keep this
+%% number as large as possible, or the mailbox could grow large).
+-define(FLUSH_MAX_N, 5000).
+
+%% BURST_LIMIT_MAX_COUNT is the max number of log requests allowed
+%% to be written within a BURST_LIMIT_WINDOW_TIME time frame.
+-define(BURST_LIMIT_ENABLE, true).
+-define(BURST_LIMIT_MAX_COUNT, 500).
+-define(BURST_LIMIT_WINDOW_TIME, 1000).
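+%% With the defaults above this means (for illustration): at most 500
+%% log requests are let through per 1000 ms window; requests above that
+%% count within the same window are dropped by logger_h_common:limit_burst/1.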
+
+%% This enables/disables the feature to automatically get the
+%% handler terminated if it gets too loaded (and can't keep up).
+-define(OVERLOAD_KILL_ENABLE, false).
+%% If the message_queue_len goes above this size even after
+%% flushing has been performed, the handler is terminated.
+-define(OVERLOAD_KILL_QLEN, 20000).
+%% If the memory usage exceeds this level
+-define(OVERLOAD_KILL_MEM_SIZE, 3000000).
+
+%% This is the default time that the handler will wait before
+%% restarting and accepting new requests. The value 'infinity'
+%% disables restarts.
+-define(OVERLOAD_KILL_RESTART_AFTER, 5000).
+%%-define(OVERLOAD_KILL_RESTART_AFTER, infinity).
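+
+%% Illustrative handler configuration overriding some of the defaults
+%% above (example only; handler id and module are example names, and the
+%% exact placement of these handler-specific options in the config map
+%% is assumed here - see the handler documentation):
+%%
+%%   logger:add_handler(my_disk_log_h, logger_disk_log_h,
+%%                      #{config => #{sync_mode_qlen => 20,
+%%                                    drop_mode_qlen => 500,
+%%                                    flush_qlen => 2000,
+%%                                    overload_kill_enable => true}}).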
+
+%% The handler sends asynchronous write requests to the process
+%% controlling the i/o device, but once per this interval the write
+%% request is made synchronous, so that the i/o device
+%% process doesn't get overloaded. This gives the handler time
+%% to keep up with its mailbox in overload situations, even if
+%% the i/o is slow.
+-define(CONTROLLER_SYNC_INTERVAL, 20).
+%% The handler will not perform a file sync operation if the
+%% mailbox size is greater than this number. This is to ensure
+%% the handler process doesn't get overloaded while waiting for
+%% an expensive file sync operation to finish.
+-define(FILESYNC_OK_QLEN, 2).
+%% Do a file/disk_log sync operation every integer() millisec
+%% (if necessary) or set to 'no_repeat' to only do file sync when
+%% the handler is idle. Note that file sync is not guaranteed to
+%% happen automatically if this operation is disabled.
+-define(FILESYNC_REPEAT_INTERVAL, 5000).
+%%-define(FILESYNC_REPEAT_INTERVAL, no_repeat).
+
+%% This is the time after the last received message at which we assume
+%% that the handler has an empty mailbox (no new log request has
+%% come in).
+-define(IDLE_DETECT_TIME_MSEC, 100).
+-define(IDLE_DETECT_TIME_USEC, 100000).
+
+%% Default disk log option values
+-define(DISK_LOG_TYPE, wrap).
+-define(DISK_LOG_MAX_NO_FILES, 10).
+-define(DISK_LOG_MAX_NO_BYTES, 1048576).
+
+%%%-----------------------------------------------------------------
+%%% Utility macros
+
+-define(name_to_reg_name(MODULE,Name),
+ list_to_atom(lists:concat([MODULE,"_",Name]))).
+
+%%%-----------------------------------------------------------------
+%%% Overload protection macros
+
+-define(timestamp(), erlang:monotonic_time(microsecond)).
+
+-define(get_mode(Tid),
+ case ets:lookup(Tid, mode) of
+ [{mode,M}] -> M;
+ _ -> async
+ end).
+
+-define(set_mode(Tid, M),
+ begin ets:insert(Tid, {mode,M}), M end).
+
+-define(change_mode(Tid, M0, M1),
+ if M0 == M1 ->
+ M0;
+ true ->
+ ets:insert(Tid, {mode,M1}),
+ M1
+ end).
+
+-define(min(X1, X2),
+ if X2 == undefined -> X1;
+ X2 < X1 -> X2;
+ true -> X1
+ end).
+
+-define(max(X1, X2),
+ if
+ X2 == undefined -> X1;
+ X2 > X1 -> X2;
+ true -> X1
+ end).
+
+-define(diff_time(OS_T1, OS_T0), OS_T1-OS_T0).
+
+%%%-----------------------------------------------------------------
+%%% The test hook macros make it possible to observe and manipulate
+%%% internal handler functionality. When enabled, these macros will
+%%% slow down execution and therefore should not be included in code
+%%% to be officially released.
+
+%%-define(TEST_HOOKS, true).
+-ifdef(TEST_HOOKS).
+ -define(TEST_HOOKS_TAB, logger_h_test_hooks).
+
+ -define(init_test_hooks(),
+ _ = case ets:whereis(?TEST_HOOKS_TAB) of
+ undefined -> ets:new(?TEST_HOOKS_TAB, [named_table,public]);
+ _ -> ok
+ end,
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
+ ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
+
+ -define(set_internal_log(MOD_FUNC),
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,MOD_FUNC})).
+
+ -define(set_result(OPERATION, RESULT),
+ ets:insert(?TEST_HOOKS_TAB, {OPERATION,RESULT})).
+
+ -define(set_defaults(),
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
+ ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
+
+ -define(internal_log(TYPE, TERM),
+ try ets:lookup(?TEST_HOOKS_TAB, internal_log) of
+ [{_,{LMOD,LFUNC}}] -> apply(LMOD, LFUNC, [TYPE,TERM]);
+ _ -> logger:internal_log(TYPE, TERM)
+ catch _:_ -> logger:internal_log(TYPE, TERM) end).
+
+ -define(file_write(DEVICE, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, file_write) of
+ [{_,ok}] -> file:write(DEVICE, DATA);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> file:write(DEVICE, DATA) end).
+
+ -define(file_datasync(DEVICE),
+ try ets:lookup(?TEST_HOOKS_TAB, file_datasync) of
+ [{_,ok}] -> file:datasync(DEVICE);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> file:datasync(DEVICE) end).
+
+ -define(disk_log_blog(LOG, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_blog) of
+ [{_,ok}] -> disk_log:blog(LOG, DATA);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> disk_log:blog(LOG, DATA) end).
+
+ -define(disk_log_sync(LOG),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_sync) of
+ [{_,ok}] -> disk_log:sync(LOG);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> disk_log:sync(LOG) end).
+
+ -define(DEFAULT_CALL_TIMEOUT, 5000).
+
+-else. % DEFAULTS!
+ -define(TEST_HOOKS_TAB, undefined).
+ -define(init_test_hooks(), ok).
+ -define(set_internal_log(_MOD_FUNC), ok).
+ -define(set_result(_OPERATION, _RESULT), ok).
+ -define(set_defaults(), ok).
+ -define(internal_log(TYPE, TERM), logger:internal_log(TYPE, TERM)).
+ -define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)).
+ -define(file_datasync(DEVICE), file:datasync(DEVICE)).
+ -define(disk_log_blog(LOG, DATA), disk_log:blog(LOG, DATA)).
+ -define(disk_log_sync(LOG), disk_log:sync(LOG)).
+ -define(DEFAULT_CALL_TIMEOUT, 10000).
+-endif.
+
+%%%-----------------------------------------------------------------
+%%% These macros enable statistics counters in the state of the
+%%% handler which is useful for analysing the overload protection
+%%% behaviour. These counters should not be included in code to be
+%%% officially released (as some counters will grow very large
+%%% over time).
+
+%%-define(SAVE_STATS, true).
+-ifdef(SAVE_STATS).
+ -define(merge_with_stats(STATE),
+ STATE#{flushes => 0, flushed => 0, drops => 0,
+ casts => 0, calls => 0,
+ max_qlen => 0, max_time => 0}).
+
+ -define(update_max_qlen(QLEN, STATE),
+ begin #{max_qlen := QLEN0} = STATE,
+ STATE#{max_qlen => ?max(QLEN0,QLEN)} end).
+
+ -define(update_calls_or_casts(CALL_OR_CAST, INC, STATE),
+ case CALL_OR_CAST of
+ cast ->
+ #{casts := CASTS0} = STATE,
+ STATE#{casts => CASTS0+INC};
+ call ->
+ #{calls := CALLS0} = STATE,
+ STATE#{calls => CALLS0+INC}
+ end).
+
+ -define(update_max_time(TIME, STATE),
+ begin #{max_time := TIME0} = STATE,
+ STATE#{max_time => ?max(TIME0,TIME)} end).
+
+ -define(update_other(OTHER, VAR, INCVAL, STATE),
+ begin #{OTHER := VAR} = STATE,
+ STATE#{OTHER => VAR+INCVAL} end).
+
+-else. % DEFAULT!
+ -define(merge_with_stats(STATE), STATE).
+ -define(update_max_qlen(_QLEN, STATE), STATE).
+ -define(update_calls_or_casts(_CALL_OR_CAST, _INC, STATE), STATE).
+ -define(update_max_time(_TIME, STATE), STATE).
+ -define(update_other(_OTHER, _VAR, _INCVAL, STATE), STATE).
+-endif.
+
+%%%-----------------------------------------------------------------
+%%% These macros enable callbacks that make it possible to analyse
+%%% the overload protection behaviour from outside the handler
+%%% process (including dropped requests on the client side).
+%%% An external callback module (?OBSERVER_MOD) is required which
+%%% is not part of the kernel application. For this reason, these
+%%% callbacks should not be included in code to be officially released.
+
+%%-define(OBSERVER_MOD, logger_test).
+-ifdef(OBSERVER_MOD).
+ -define(start_observation(NAME), ?OBSERVER:start_observation(NAME)).
+ -define(observe(NAME,EVENT), ?OBSERVER:observe(NAME,EVENT)).
+
+-else. % DEFAULT!
+ -define(start_observation(_NAME), ok).
+ -define(observe(_NAME,_EVENT), ok).
+-endif.
+%%! <---
diff --git a/lib/kernel/src/logger_handler_watcher.erl b/lib/kernel/src/logger_handler_watcher.erl
new file mode 100644
index 0000000000..b75c74c643
--- /dev/null
+++ b/lib/kernel/src/logger_handler_watcher.erl
@@ -0,0 +1,113 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_handler_watcher).
+
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0]).
+-export([register_handler/2]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
+
+-define(SERVER, ?MODULE).
+
+-record(state, {handlers}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+-spec start_link() -> {ok, Pid :: pid()} |
+ {error, Error :: {already_started, pid()}} |
+ {error, Error :: term()} |
+ ignore.
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+-spec register_handler(Id::logger:handler_id(),Pid::pid()) -> ok.
+register_handler(Id,Pid) ->
+ gen_server:call(?SERVER,{register,Id,Pid}).
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+-spec init(Args :: term()) -> {ok, State :: term()} |
+ {ok, State :: term(), Timeout :: timeout()} |
+ {ok, State :: term(), hibernate} |
+ {stop, Reason :: term()} |
+ ignore.
+init([]) ->
+ process_flag(trap_exit, true),
+ {ok, #state{handlers=[]}}.
+
+-spec handle_call(Request :: term(), From :: {pid(), term()}, State :: term()) ->
+ {reply, Reply :: term(), NewState :: term()} |
+ {reply, Reply :: term(), NewState :: term(), Timeout :: timeout()} |
+ {reply, Reply :: term(), NewState :: term(), hibernate} |
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), Timeout :: timeout()} |
+ {noreply, NewState :: term(), hibernate} |
+ {stop, Reason :: term(), Reply :: term(), NewState :: term()} |
+ {stop, Reason :: term(), NewState :: term()}.
+handle_call({register,Id,Pid}, _From, #state{handlers=Hs}=State) ->
+ Ref = erlang:monitor(process,Pid),
+ Hs1 = lists:keystore(Id,1,Hs,{Id,Ref}),
+ {reply, ok, State#state{handlers=Hs1}}.
+
+-spec handle_cast(Request :: term(), State :: term()) ->
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), Timeout :: timeout()} |
+ {noreply, NewState :: term(), hibernate} |
+ {stop, Reason :: term(), NewState :: term()}.
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+-spec handle_info(Info :: timeout() | term(), State :: term()) ->
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), Timeout :: timeout()} |
+ {noreply, NewState :: term(), hibernate} |
+ {stop, Reason :: normal | term(), NewState :: term()}.
+handle_info({'DOWN',Ref,process,_,shutdown}, #state{handlers=Hs}=State) ->
+ case lists:keytake(Ref,2,Hs) of
+ {value,{Id,Ref},Hs1} ->
+ %% Probably terminated by supervisor. Remove the handler to avoid
+ %% error printouts due to failing handler.
+ _ = case logger:get_handler_config(Id) of
+ {ok,_} ->
+ logger:remove_handler(Id);
+ _ ->
+ ok
+ end,
+ {noreply,State#state{handlers=Hs1}};
+ false ->
+ {noreply, State}
+ end;
+handle_info({'DOWN',Ref,process,_,_OtherReason}, #state{handlers=Hs}=State) ->
+ {noreply,State#state{handlers=lists:keydelete(Ref,2,Hs)}};
+handle_info(_Other,State) ->
+ {noreply,State}.
+
+-spec terminate(Reason :: normal | shutdown | {shutdown, term()} | term(),
+ State :: term()) -> any().
+terminate(_Reason, _State) ->
+ ok.
diff --git a/lib/kernel/src/logger_internal.hrl b/lib/kernel/src/logger_internal.hrl
new file mode 100644
index 0000000000..d96a4ac78b
--- /dev/null
+++ b/lib/kernel/src/logger_internal.hrl
@@ -0,0 +1,101 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-include_lib("kernel/include/logger.hrl").
+-define(LOGGER_TABLE,logger).
+-define(PRIMARY_KEY,'$primary_config$').
+-define(HANDLER_KEY,'$handler_config$').
+-define(LOGGER_META_KEY,'$logger_metadata$').
+-define(STANDARD_HANDLER, default).
+-define(DEFAULT_HANDLER_FILTERS,?DEFAULT_HANDLER_FILTERS([otp])).
+-define(DEFAULT_HANDLER_FILTERS(Domain),
+ [{remote_gl,{fun logger_filters:remote_gl/2,stop}},
+ {domain,{fun logger_filters:domain/2,{log,super,Domain}}},
+ {no_domain,{fun logger_filters:domain/2,{log,undefined,[]}}}]).
+-define(DEFAULT_FORMATTER,logger_formatter).
+-define(DEFAULT_FORMAT_CONFIG,#{legacy_header=>true,
+ single_line=>false}).
+-define(DEFAULT_FORMAT_TEMPLATE_HEADER,
+ [[logger_formatter,header],"\n",msg,"\n"]).
+-define(DEFAULT_FORMAT_TEMPLATE_SINGLE,
+ [time," ",level,": ",msg,"\n"]).
+-define(DEFAULT_FORMAT_TEMPLATE,
+ [time," ",level,":\n",msg,"\n"]).
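+
+%% For illustration only (assumed output, not normative): with
+%% ?DEFAULT_FORMAT_TEMPLATE_SINGLE a notice event renders roughly as
+%%
+%%   2018-05-16T09:32:01.123456+02:00 notice: some message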
+
+-define(DEFAULT_LOGGER_CALL_TIMEOUT, infinity).
+
+-define(LOG_INTERNAL(Level,Report),
+ case logger:allow(Level,?MODULE) of
+ true ->
+ %% Spawn this to avoid deadlocks
+ _ = spawn(logger,macro_log,[?LOCATION,Level,Report,
+ logger:add_default_metadata(#{})]),
+ ok;
+ false ->
+ ok
+ end).
+
+%%%-----------------------------------------------------------------
+%%% Levels
+%%% Using same as syslog
+-define(LEVELS,[none,
+ emergency,
+ alert,
+ critical,
+ error,
+ warning,
+ notice,
+ info,
+ debug,
+ all]).
+-define(LOG_NONE,-1).
+-define(EMERGENCY,0).
+-define(ALERT,1).
+-define(CRITICAL,2).
+-define(ERROR,3).
+-define(WARNING,4).
+-define(NOTICE,5).
+-define(INFO,6).
+-define(DEBUG,7).
+-define(LOG_ALL,10).
+
+-define(IS_LEVEL(L),
+ (L=:=emergency orelse
+ L=:=alert orelse
+ L=:=critical orelse
+ L=:=error orelse
+ L=:=warning orelse
+ L=:=notice orelse
+ L=:=info orelse
+ L=:=debug)).
+
+-define(IS_MSG(Msg),
+ ((is_tuple(Msg) andalso tuple_size(Msg)==2)
+ andalso
+ (is_list(element(1,Msg)) andalso is_list(element(2,Msg)))
+ orelse
+ (element(1,Msg)==report andalso ?IS_REPORT(element(2,Msg)))
+ orelse
+ (element(1,Msg)==string andalso ?IS_STRING(element(2,Msg))))).
+
+-define(IS_REPORT(Report),
+ (is_map(Report) orelse (is_list(Report) andalso is_tuple(hd(Report))))).
+
+-define(IS_STRING(String),
+ (is_list(String) orelse is_binary(String))).
diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl
new file mode 100644
index 0000000000..b7735dbcf7
--- /dev/null
+++ b/lib/kernel/src/logger_server.erl
@@ -0,0 +1,595 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_server).
+
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0,
+ add_handler/3, remove_handler/1,
+ add_filter/2, remove_filter/2,
+ set_module_level/2, unset_module_level/0,
+ unset_module_level/1, cache_module_level/1,
+ set_config/2, set_config/3,
+ update_config/2, update_config/3,
+ update_formatter_config/2]).
+
+%% Helper
+-export([diff_maps/2]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2]).
+
+-include("logger_internal.hrl").
+
+-define(SERVER, logger).
+-define(LOGGER_SERVER_TAG, '$logger_cb_process').
+
+-record(state, {tid, async_req, async_req_queue}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+add_handler(Id,Module,Config0) ->
+ try {check_id(Id),check_mod(Module)} of
+ {ok,ok} ->
+ case sanity_check(Id,Config0) of
+ ok ->
+ Default = default_config(Id,Module),
+ Config = maps:merge(Default,Config0),
+ call({add_handler,Id,Module,Config});
+ Error ->
+ Error
+ end
+ catch throw:Error ->
+ {error,Error}
+ end.
+
+remove_handler(HandlerId) ->
+ call({remove_handler,HandlerId}).
+
+add_filter(Owner,Filter) ->
+ case sanity_check(Owner,filters,[Filter]) of
+ ok -> call({add_filter,Owner,Filter});
+ Error -> Error
+ end.
+
+remove_filter(Owner,FilterId) ->
+ call({remove_filter,Owner,FilterId}).
+
+set_module_level(Modules,Level) when is_list(Modules) ->
+ case lists:all(fun(M) -> is_atom(M) end,Modules) of
+ true ->
+ case sanity_check(primary,level,Level) of
+ ok -> call({set_module_level,Modules,Level});
+ Error -> Error
+ end;
+ false ->
+ {error,{not_a_list_of_modules,Modules}}
+ end;
+set_module_level(Modules,_) ->
+ {error,{not_a_list_of_modules,Modules}}.
+
+unset_module_level() ->
+ call({unset_module_level,all}).
+
+unset_module_level(Modules) when is_list(Modules) ->
+ case lists:all(fun(M) -> is_atom(M) end,Modules) of
+ true ->
+ call({unset_module_level,Modules});
+ false ->
+ {error,{not_a_list_of_modules,Modules}}
+ end;
+unset_module_level(Modules) ->
+ {error,{not_a_list_of_modules,Modules}}.
+
+cache_module_level(Module) ->
+ gen_server:cast(?SERVER,{cache_module_level,Module}).
+
+set_config(Owner,Key,Value) ->
+ case sanity_check(Owner,Key,Value) of
+ ok ->
+ call({change_config,set,Owner,Key,Value});
+ Error ->
+ Error
+ end.
+
+set_config(Owner,Config) ->
+ case sanity_check(Owner,Config) of
+ ok ->
+ call({change_config,set,Owner,Config});
+ Error ->
+ Error
+ end.
+
+update_config(Owner,Key,Value) ->
+ case sanity_check(Owner,Key,Value) of
+ ok ->
+ call({change_config,update,Owner,Key,Value});
+ Error ->
+ Error
+ end.
+
+update_config(Owner, Config) ->
+ case sanity_check(Owner,Config) of
+ ok ->
+ call({change_config,update,Owner,Config});
+ Error ->
+ Error
+ end.
+
+update_formatter_config(HandlerId, FormatterConfig)
+ when is_map(FormatterConfig) ->
+ call({update_formatter_config,HandlerId,FormatterConfig});
+update_formatter_config(_HandlerId, FormatterConfig) ->
+ {error,{invalid_formatter_config,FormatterConfig}}.
+
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([]) ->
+ process_flag(trap_exit, true),
+ put(?LOGGER_SERVER_TAG,true),
+ Tid = logger_config:new(?LOGGER_TABLE),
+ PrimaryConfig = maps:merge(default_config(primary),
+ #{handlers=>[simple]}),
+ logger_config:create(Tid,primary,PrimaryConfig),
+ SimpleConfig0 = maps:merge(default_config(simple,logger_simple_h),
+ #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS}),
+ %% If this fails, then the node should crash
+ {ok,SimpleConfig} = logger_simple_h:adding_handler(SimpleConfig0),
+ logger_config:create(Tid,simple,SimpleConfig),
+ {ok, #state{tid=Tid, async_req_queue = queue:new()}}.
+
+handle_call({add_handler,Id,Module,HConfig}, From, #state{tid=Tid}=State) ->
+ case logger_config:exist(Tid,Id) of
+ true ->
+ {reply,{error,{already_exist,Id}},State};
+ false ->
+ call_h_async(
+ fun() ->
+ %% inform the handler
+ call_h(Module,adding_handler,[HConfig],{ok,HConfig})
+ end,
+ fun({ok,HConfig1}) ->
+ %% We know that the call_h would have loaded the module
+ %% if it existed, so it is safe here to call function_exported
+ %% to find out if this is a valid handler
+ case erlang:function_exported(Module, log, 2) of
+ true ->
+ logger_config:create(Tid,Id,HConfig1),
+ {ok,Config} = logger_config:get(Tid,primary),
+ Handlers = maps:get(handlers,Config,[]),
+ logger_config:set(Tid,primary,
+ Config#{handlers=>[Id|Handlers]});
+ false ->
+ {error,{invalid_handler,
+ {function_not_exported,
+ {Module,log,2}}}}
+ end;
+ ({error,HReason}) ->
+ {error,{handler_not_added,HReason}}
+ end,From,State)
+ end;
+handle_call({remove_handler,HandlerId}, From, #state{tid=Tid}=State) ->
+ case logger_config:get(Tid,HandlerId) of
+ {ok,#{module:=Module}=HConfig} ->
+ {ok,Config} = logger_config:get(Tid,primary),
+ Handlers0 = maps:get(handlers,Config,[]),
+ Handlers = lists:delete(HandlerId,Handlers0),
+ call_h_async(
+ fun() ->
+ %% inform the handler
+ call_h(Module,removing_handler,[HConfig],ok)
+ end,
+ fun(_Res) ->
+ logger_config:set(Tid,primary,Config#{handlers=>Handlers}),
+ logger_config:delete(Tid,HandlerId),
+ ok
+ end,From,State);
+ _ ->
+ {reply,{error,{not_found,HandlerId}},State}
+ end;
+handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) ->
+ Reply = do_add_filter(Tid,Id,Filter),
+ {reply,Reply,State};
+handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) ->
+ Reply = do_remove_filter(Tid,Id,FilterId),
+ {reply,Reply,State};
+handle_call({change_config,SetOrUpd,primary,Config0}, _From,
+ #state{tid=Tid}=State) ->
+ {ok,#{handlers:=Handlers}=OldConfig} = logger_config:get(Tid,primary),
+ Default =
+ case SetOrUpd of
+ set -> default_config(primary);
+ update -> OldConfig
+ end,
+ Config = maps:merge(Default,Config0),
+ Reply = logger_config:set(Tid,primary,Config#{handlers=>Handlers}),
+ {reply,Reply,State};
+handle_call({change_config,_SetOrUpd,primary,Key,Value}, _From,
+ #state{tid=Tid}=State) ->
+ {ok,OldConfig} = logger_config:get(Tid,primary),
+ Reply = logger_config:set(Tid,primary,OldConfig#{Key=>Value}),
+ {reply,Reply,State};
+handle_call({change_config,SetOrUpd,HandlerId,Config0}, From,
+ #state{tid=Tid}=State) ->
+ case logger_config:get(Tid,HandlerId) of
+ {ok,#{module:=Module}=OldConfig} ->
+ Default =
+ case SetOrUpd of
+ set -> default_config(HandlerId,Module);
+ update -> OldConfig
+ end,
+ Config = maps:merge(Default,Config0),
+ case check_config_change(OldConfig,Config) of
+ ok ->
+ call_h_async(
+ fun() ->
+ call_h(Module,changing_config,
+ [SetOrUpd,OldConfig,Config],
+ {ok,Config})
+ end,
+ fun({ok,Config1}) ->
+ logger_config:set(Tid,HandlerId,Config1);
+ (Error) ->
+ Error
+ end,From,State);
+ Error ->
+ {reply,Error,State}
+ end;
+ _ ->
+ {reply,{error,{not_found,HandlerId}},State}
+ end;
+handle_call({change_config,SetOrUpd,HandlerId,Key,Value}, From,
+ #state{tid=Tid}=State) ->
+ case logger_config:get(Tid,HandlerId) of
+ {ok,#{module:=Module}=OldConfig} ->
+ Config = OldConfig#{Key=>Value},
+ case check_config_change(OldConfig,Config) of
+ ok ->
+ call_h_async(
+ fun() ->
+ call_h(Module,changing_config,
+ [SetOrUpd,OldConfig,Config],
+ {ok,Config})
+ end,
+ fun({ok,Config1}) ->
+ logger_config:set(Tid,HandlerId,Config1);
+ (Error) ->
+ Error
+ end,From,State);
+ Error ->
+ {reply,Error,State}
+ end;
+ _ ->
+ {reply,{error,{not_found,HandlerId}},State}
+ end;
+handle_call({update_formatter_config,HandlerId,NewFConfig},_From,
+ #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:get(Tid,HandlerId) of
+ {ok,#{formatter:={FMod,OldFConfig}}=Config} ->
+ try
+ FConfig = maps:merge(OldFConfig,NewFConfig),
+ check_formatter({FMod,FConfig}),
+ logger_config:set(Tid,HandlerId,
+ Config#{formatter=>{FMod,FConfig}})
+ catch throw:Reason -> {error,Reason}
+ end;
+ _ ->
+ {error,{not_found,HandlerId}}
+ end,
+ {reply,Reply,State};
+handle_call({set_module_level,Modules,Level}, _From, #state{tid=Tid}=State) ->
+ Reply = logger_config:set_module_level(Tid,Modules,Level),
+ {reply,Reply,State};
+handle_call({unset_module_level,Modules}, _From, #state{tid=Tid}=State) ->
+ Reply = logger_config:unset_module_level(Tid,Modules),
+ {reply,Reply,State}.
+
+handle_cast({async_req_reply,_Ref,_Reply} = Reply,State) ->
+ call_h_reply(Reply,State);
+handle_cast({cache_module_level,Module}, #state{tid=Tid}=State) ->
+ logger_config:cache_module_level(Tid,Module),
+ {noreply, State}.
+
+%% Interface for those who can't call the API - e.g. the emulator, or
+%% places related to code loading.
+%%
+%% This can also be log events from remote nodes which are sent from
+%% logger.erl when the group leader of the client process is on a
+%% different node than the client process itself.
+handle_info({log,Level,Format,Args,Meta}, State) ->
+ logger:log(Level,Format,Args,Meta),
+ {noreply, State};
+handle_info({log,Level,Report,Meta}, State) ->
+ logger:log(Level,Report,Meta),
+ {noreply, State};
+handle_info({Ref,_Reply},State) when is_reference(Ref) ->
+ %% Assuming this is a timed-out gen_server reply - ignoring
+ {noreply, State};
+handle_info({'DOWN',_Ref,_Proc,_Pid,_Reason} = Down,State) ->
+ call_h_reply(Down,State);
+handle_info(Unexpected,State) when element(1,Unexpected) == 'EXIT' ->
+ %% The simple handler will send an 'EXIT' message when it is replaced.
+ %% We may as well ignore all 'EXIT' messages that we get.
+ ?LOG_INTERNAL(debug,
+ [{logger,got_unexpected_message},
+ {process,?SERVER},
+ {message,Unexpected}]),
+ {noreply,State};
+handle_info(Unexpected,State) ->
+ ?LOG_INTERNAL(info,
+ [{logger,got_unexpected_message},
+ {process,?SERVER},
+ {message,Unexpected}]),
+ {noreply,State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+call(Request) ->
+ Action = element(1,Request),
+ case get(?LOGGER_SERVER_TAG) of
+ true when
+ Action == add_handler; Action == remove_handler;
+ Action == add_filter; Action == remove_filter;
+ Action == change_config ->
+ {error,{attempting_syncronous_call_to_self,Request}};
+ _ ->
+ gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT)
+ end.
+
+do_add_filter(Tid,Id,{FId,_} = Filter) ->
+ case logger_config:get(Tid,Id) of
+ {ok,Config} ->
+ Filters = maps:get(filters,Config,[]),
+ case lists:keymember(FId,1,Filters) of
+ true ->
+ {error,{already_exist,FId}};
+ false ->
+ logger_config:set(Tid,Id,Config#{filters=>[Filter|Filters]})
+ end;
+ Error ->
+ Error
+ end.
+
+do_remove_filter(Tid,Id,FilterId) ->
+ case logger_config:get(Tid,Id) of
+ {ok,Config} ->
+ Filters0 = maps:get(filters,Config,[]),
+ case lists:keytake(FilterId,1,Filters0) of
+ {value,_,Filters} ->
+ logger_config:set(Tid,Id,Config#{filters=>Filters});
+ false ->
+ {error,{not_found,FilterId}}
+ end;
+ Error ->
+ Error
+ end.
+
+default_config(primary) ->
+ #{level=>notice,
+ filters=>[],
+ filter_default=>log};
+default_config(Id) ->
+ #{id=>Id,
+ level=>all,
+ filters=>[],
+ filter_default=>log,
+ formatter=>{?DEFAULT_FORMATTER,#{}}}.
+default_config(Id,Module) ->
+ (default_config(Id))#{module=>Module}.
+
+sanity_check(Owner,Key,Value) ->
+ sanity_check_1(Owner,[{Key,Value}]).
+
+sanity_check(HandlerId,Config) when is_map(Config) ->
+ sanity_check_1(HandlerId,maps:to_list(Config));
+sanity_check(_,Config) ->
+ {error,{invalid_config,Config}}.
+
+sanity_check_1(Owner,Config) when is_list(Config) ->
+ try
+ Type = get_type(Owner),
+ check_config(Type,Config)
+ catch throw:Error -> {error,Error}
+ end.
+
+get_type(primary) ->
+ primary;
+get_type(Id) ->
+ check_id(Id),
+ handler.
+
+check_config(Owner,[{level,Level}|Config]) ->
+ check_level(Level),
+ check_config(Owner,Config);
+check_config(Owner,[{filters,Filters}|Config]) ->
+ check_filters(Filters),
+ check_config(Owner,Config);
+check_config(Owner,[{filter_default,FD}|Config]) ->
+ check_filter_default(FD),
+ check_config(Owner,Config);
+check_config(handler,[{formatter,Formatter}|Config]) ->
+ check_formatter(Formatter),
+ check_config(handler,Config);
+check_config(primary,[C|_]) ->
+ throw({invalid_primary_config,C});
+check_config(handler,[{_,_}|Config]) ->
+ %% Arbitrary config elements are allowed for handlers
+ check_config(handler,Config);
+check_config(_,[]) ->
+ ok.
+
+check_id(Id) when is_atom(Id) ->
+ ok;
+check_id(Id) ->
+ throw({invalid_id,Id}).
+
+check_mod(Mod) when is_atom(Mod) ->
+ ok;
+check_mod(Mod) ->
+ throw({invalid_module,Mod}).
+
+check_level(Level) ->
+ case lists:member(Level,?LEVELS) of
+ true ->
+ ok;
+ false ->
+ throw({invalid_level,Level})
+ end.
+
+check_filters([{Id,{Fun,_Args}}|Filters]) when is_atom(Id), is_function(Fun,2) ->
+ check_filters(Filters);
+check_filters([Filter|_]) ->
+ throw({invalid_filter,Filter});
+check_filters([]) ->
+ ok;
+check_filters(Filters) ->
+ throw({invalid_filters,Filters}).
+
+check_filter_default(FD) when FD==stop; FD==log ->
+ ok;
+check_filter_default(FD) ->
+ throw({invalid_filter_default,FD}).
+
+check_formatter({Mod,Config}) ->
+ check_mod(Mod),
+ try Mod:check_config(Config) of
+ ok -> ok;
+ {error,Error} -> throw(Error)
+ catch
+ C:R:S ->
+ case {C,R,S} of
+ {error,undef,[{Mod,check_config,[Config],_}|_]} ->
+ ok;
+ _ ->
+ throw({callback_crashed,
+ {C,R,logger:filter_stacktrace(?MODULE,S)}})
+ end
+ end;
+check_formatter(Formatter) ->
+ throw({invalid_formatter,Formatter}).
+
+%% When changing the configuration of a handler, the id and module
+%% fields cannot be changed.
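+%% For illustration (hypothetical call, not part of this module): a call
+%% such as logger:set_handler_config(default, module, some_other_module)
+%% ends up here and is rejected with {error,{illegal_config_change,...}}.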
+check_config_change(#{id:=Id,module:=Module},#{id:=Id,module:=Module}) ->
+ ok;
+check_config_change(OldConfig,NewConfig) ->
+ {Old,New} = logger_server:diff_maps(maps:with([id,module],OldConfig),
+ maps:with([id,module],NewConfig)),
+ {error,{illegal_config_change,Old,New}}.
+
+call_h(Module, Function, Args, DefRet) ->
+ %% Not calling code:ensure_loaded + erlang:function_exported here,
+ %% since in some rare terminal cases, the code_server might not
+ %% exist and we'll get a deadlock in removing the handler.
+ try apply(Module, Function, Args)
+ catch
+ C:R:S ->
+ case {C,R,S} of
+ {error,undef,[{Module,Function=changing_config,Args,_}|_]}
+ when length(Args)=:=3 ->
+ %% Backwards compatible fallback to changing_config/2 if the
+ %% handler does not export changing_config/3.
+ call_h(Module, Function, tl(Args), DefRet);
+ {error,undef,[{Module,Function,Args,_}|_]} ->
+ DefRet;
+ _ ->
+ ST = logger:filter_stacktrace(?MODULE,S),
+ ?LOG_INTERNAL(error,
+ [{logger,callback_crashed},
+ {process,?SERVER},
+ {reason,{C,R,ST}}]),
+ {error,{callback_crashed,{C,R,ST}}}
+ end
+ end.
+
+%% There are all sorts of API functions that can cause deadlocks if called
+%% from the handler callbacks. So we spawn a process that does the request
+%% for the logger_server. There are still APIs that will cause problems,
+%% namely logger:add_handler.
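+%%
+%% Hypothetical example of the deadlock this avoids: a handler whose
+%% changing_config/3 callback itself calls logger:set_handler_config/3
+%% would otherwise deadlock with the server, since that call is served by
+%% this same gen_server while it is waiting for the callback to return.
+%% Running the callback in a spawned helper (tagged with
+%% ?LOGGER_SERVER_TAG, see call/1 above) keeps the server responsive and
+%% makes such nested configuration calls fail fast instead of hanging.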
+call_h_async(AsyncFun,PostFun,From,#state{ async_req = undefined } = State) ->
+ Parent = self(),
+ {Pid, Ref} = spawn_monitor(
+ fun() ->
+ put(?LOGGER_SERVER_TAG,true),
+ receive Ref -> Ref end,
+ gen_server:cast(Parent, {async_req_reply, Ref, AsyncFun()})
+ end),
+ Pid ! Ref,
+ {noreply,State#state{ async_req = {Ref,PostFun,From} }};
+call_h_async(AsyncFun,PostFun,From,#state{ async_req_queue = Q } = State) ->
+ {noreply,State#state{ async_req_queue = queue:in({AsyncFun,PostFun,From},Q) }}.
+
+call_h_reply({async_req_reply,Ref,Reply},
+ #state{ async_req = {Ref,PostFun,From}, async_req_queue = Q} = State) ->
+ erlang:demonitor(Ref,[flush]),
+ _ = gen_server:reply(From, PostFun(Reply)),
+ {Value,NewQ} = queue:out(Q),
+ NewState = State#state{ async_req = undefined,
+ async_req_queue = NewQ },
+ case Value of
+ {value,{AsyncFun,NPostFun,NFrom}} ->
+ call_h_async(AsyncFun,NPostFun,NFrom,NewState);
+ empty ->
+ {noreply,NewState}
+ end;
+call_h_reply({'DOWN',Ref,_Proc,Pid,Reason}, #state{ async_req = {Ref,_PostFun,_From}} = State) ->
+ %% This clause should only be triggered if someone explicitly sends an exit signal
+ %% to the spawned process. It is only here to make sure that the logger_server does
+ %% not deadlock if that happens.
+ ?LOG_INTERNAL(error,
+ [{logger,process_exited},
+ {process,Pid},
+ {reason,Reason}]),
+ call_h_reply(
+ {async_req_reply,Ref,{error,{logger_process_exited,Pid,Reason}}},
+ State);
+call_h_reply(Unexpected,State) ->
+ ?LOG_INTERNAL(info,
+ [{logger,got_unexpected_message},
+ {process,?SERVER},
+ {message,Unexpected}]),
+ {noreply,State}.
+
+%% Return two maps containing only the fields that differ.
+diff_maps(M1,M2) ->
+ diffs(lists:sort(maps:to_list(M1)),lists:sort(maps:to_list(M2)),#{},#{}).
+
+diffs([H|T1],[H|T2],D1,D2) ->
+ diffs(T1,T2,D1,D2);
+diffs([{K,V1}|T1],[{K,V2}|T2],D1,D2) ->
+ diffs(T1,T2,D1#{K=>V1},D2#{K=>V2});
+diffs([],[],D1,D2) ->
+ {D1,D2}.
diff --git a/lib/kernel/src/logger_simple_h.erl b/lib/kernel/src/logger_simple_h.erl
new file mode 100644
index 0000000000..8b51dd8569
--- /dev/null
+++ b/lib/kernel/src/logger_simple_h.erl
@@ -0,0 +1,212 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_simple_h).
+
+-export([adding_handler/1, removing_handler/1, log/2]).
+
+%% This module implements a simple handler for logger. It is the
+%% default used during system start.
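+%% As implemented below, it buffers up to 10 log events and, when the
+%% handler is removed, replays the buffered events through logger if a
+%% 'default' handler has been installed by then.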
+
+%%%-----------------------------------------------------------------
+%%% Logger callback
+
+adding_handler(#{id:=simple}=Config) ->
+ Me = self(),
+ case whereis(?MODULE) of
+ undefined ->
+ {Pid,Ref} = spawn_opt(fun() -> init(Me) end,
+ [link,monitor,{message_queue_data,off_heap}]),
+ receive
+ {'DOWN',Ref,process,Pid,Reason} ->
+ {error,Reason};
+ {Pid,started} ->
+ erlang:demonitor(Ref),
+ {ok,Config}
+ end;
+ _ ->
+ {error,{handler_process_name_already_exists,?MODULE}}
+ end.
+
+removing_handler(#{id:=simple}) ->
+ case whereis(?MODULE) of
+ undefined ->
+ ok;
+ Pid ->
+ Ref = erlang:monitor(process,Pid),
+ unlink(Pid),
+ Pid ! stop,
+ receive {'DOWN',Ref,process,Pid,_} ->
+ ok
+ end
+ end.
+
+log(#{meta:=#{error_logger:=#{tag:=info_report,type:=Type}}},_Config)
+ when Type=/=std_info ->
+ %% Skip info reports that are not 'std_info' (cf. the simple logger
+ %% in error_logger).
+ ok;
+log(#{msg:=_,meta:=#{time:=_}}=Log,_Config) ->
+ _ = case whereis(?MODULE) of
+ undefined ->
+ %% Is the node on the way down? Real emergency?
+ %% Log directly from client just to get it out
+ do_log(
+ #{level=>error,
+ msg=>{report,{error,simple_handler_process_dead}},
+ meta=>#{time=>erlang:system_time(microsecond)}}),
+ do_log(Log);
+ _ ->
+ ?MODULE ! {log,Log}
+ end,
+ ok;
+log(_,_) ->
+ %% Unexpected log.
+ %% We don't want to crash the simple logger, so ignore this.
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Process
+init(Starter) ->
+ register(?MODULE,self()),
+ Starter ! {self(),started},
+ loop(#{buffer_size=>10,dropped=>0,buffer=>[]}).
+
+loop(Buffer) ->
+ receive
+ stop ->
+ %% We replay the buffered log events if there is
+ %% a default handler when the simple handler
+ %% is removed.
+ case logger:get_handler_config(default) of
+ {ok, _} ->
+ replay_buffer(Buffer);
+ _ ->
+ ok
+ end;
+ {log,#{msg:=_,meta:=#{time:=_}}=Log} ->
+ do_log(Log),
+ loop(update_buffer(Buffer,Log));
+ _ ->
+ %% Unexpected message - flush it!
+ loop(Buffer)
+ end.
+
+update_buffer(#{buffer_size:=0,dropped:=D}=Buffer,_Log) ->
+ Buffer#{dropped=>D+1};
+update_buffer(#{buffer_size:=S,buffer:=B}=Buffer,Log) ->
+ Buffer#{buffer_size=>S-1,buffer=>[Log|B]}.
+
+replay_buffer(#{ dropped := D, buffer := Buffer }) ->
+ lists:foreach(
+ fun F(#{msg := {Tag, Msg}} = L) when Tag =:= string; Tag =:= report ->
+ F(L#{ msg := Msg });
+ F(#{ level := Level, msg := Msg, meta := MD}) ->
+ logger:log(Level, Msg, MD)
+ end, lists:reverse(Buffer, drop_msg(D))).
+
+drop_msg(0) ->
+ [];
+drop_msg(N) ->
+ [#{level=>info,
+ msg=>{"Simple handler buffer full, dropped ~w messages",[N]},
+ meta=>#{time=>erlang:system_time(microsecond)}}].
+
+%%%-----------------------------------------------------------------
+%%% Internal
+
+%% Can't do io_lib:format here - these events can arrive before the system
+%% is fully up, so the erlang:display* BIFs are used instead.
+
+do_log(#{msg:={report,Report},
+ meta:=#{time:=T,error_logger:=#{type:=Type}}}) ->
+ display_date(T),
+ display_report(Type,Report);
+do_log(#{msg:=Msg,meta:=#{time:=T}}) ->
+ display_date(T),
+ display(Msg).
+
+display_date(Timestamp) when is_integer(Timestamp) ->
+ Micro = Timestamp rem 1000000,
+ Sec = Timestamp div 1000000,
+ {{Y,Mo,D},{H,Mi,S}} = erlang:universaltime_to_localtime(
+ erlang:posixtime_to_universaltime(Sec)),
+ erlang:display_string(
+ integer_to_list(Y) ++ "-" ++
+ pad(Mo,2) ++ "-" ++
+ pad(D,2) ++ " " ++
+ pad(H,2) ++ ":" ++
+ pad(Mi,2) ++ ":" ++
+ pad(S,2) ++ "." ++
+ pad(Micro,6) ++ " ").
+
+pad(Int,Size) when is_integer(Int) ->
+ pad(integer_to_list(Int),Size);
+pad(Str,Size) when length(Str)==Size ->
+ Str;
+pad(Str,Size) ->
+ pad([$0|Str],Size).
+
+display({string,Chardata}) ->
+ try unicode:characters_to_list(Chardata) of
+ String -> erlang:display_string(String), erlang:display_string("\n")
+ catch _:_ -> erlang:display(Chardata)
+ end;
+display({report,Report}) when is_map(Report) ->
+ display_report(maps:to_list(Report));
+display({report,Report}) ->
+ display_report(Report);
+display({F, A}) when is_list(F), is_list(A) ->
+ erlang:display_string(F ++ "\n"),
+ [begin
+ erlang:display_string("\t"),
+ erlang:display(Arg)
+ end || Arg <- A],
+ ok.
+
+display_report(Atom, A) when is_atom(Atom) ->
+ %% The widest atom seems to be 'supervisor_report' at 17.
+ ColumnWidth = 20,
+ AtomString = atom_to_list(Atom),
+ AtomLength = length(AtomString),
+ Padding = lists:duplicate(ColumnWidth - AtomLength, $\s),
+ erlang:display_string(AtomString ++ Padding),
+ display_report(A);
+display_report(F, A) ->
+ erlang:display({F, A}).
+
+display_report([A, []]) ->
+ %% Special case for crash reports when process has no links
+ display_report(A);
+display_report(A = [_|_]) ->
+ case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of
+ true ->
+ erlang:display_string("\n"),
+ lists:foreach(
+ fun({Key, Value}) ->
+ erlang:display_string(
+ " " ++
+ atom_to_list(Key) ++
+ ": "),
+ erlang:display(Value)
+ end, A);
+ false ->
+ erlang:display(A)
+ end;
+display_report(A) ->
+ erlang:display(A).
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
new file mode 100644
index 0000000000..42e0f5caf4
--- /dev/null
+++ b/lib/kernel/src/logger_std_h.erl
@@ -0,0 +1,843 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_std_h).
+
+-behaviour(gen_server).
+
+-include("logger.hrl").
+-include("logger_internal.hrl").
+-include("logger_h_common.hrl").
+
+-include_lib("kernel/include/file.hrl").
+
+%% API
+-export([start_link/3, info/1, filesync/1, reset/1]).
+
+%% gen_server and proc_lib callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% logger callbacks
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
+
+%% handler internal
+-export([log_handler_info/4]).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Start a standard handler process and link to caller.
+%%% This function is called by the kernel supervisor when this
+%%% handler process gets added.
+-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
+ Name :: atom(),
+ Config :: logger:handler_config(),
+ HandlerState :: map(),
+ Pid :: pid(),
+ Reason :: term().
+
+start_link(Name, Config, HandlerState) ->
+ proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
+
+%%%-----------------------------------------------------------------
+%%%
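+%%% Illustrative call (the handler id is an example only):
+%%%   logger_std_h:filesync(default) requests that buffered log data for
+%%%   the 'default' handler is written and synced to disk.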
+-spec filesync(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+filesync(Name) when is_atom(Name) ->
+ try
+ gen_server:call(?name_to_reg_name(?MODULE,Name),
+ filesync, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+filesync(Name) ->
+ {error,{badarg,{filesync,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec info(Name) -> Info | {error,Reason} when
+ Name :: atom(),
+ Info :: term(),
+ Reason :: handler_busy | {badarg,term()}.
+
+info(Name) when is_atom(Name) ->
+ try
+ gen_server:call(?name_to_reg_name(?MODULE,Name),
+ info, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+info(Name) ->
+ {error,{badarg,{info,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec reset(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+reset(Name) when is_atom(Name) ->
+ try
+ gen_server:call(?name_to_reg_name(?MODULE,Name),
+ reset, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+reset(Name) ->
+ {error,{badarg,{reset,[Name]}}}.
+
+
+%%%===================================================================
+%%% logger callbacks
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(#{id:=Name}=Config) ->
+ case check_config(adding, Config) of
+ {ok, #{config:=HConfig}=Config1} ->
+ %% create initial handler state by merging defaults with config
+ HState = maps:merge(get_init_state(), HConfig),
+ case logger_h_common:overload_levels_ok(HState) of
+ true ->
+ start(Name, Config1, HState);
+ false ->
+ #{sync_mode_qlen := SMQL,
+ drop_mode_qlen := DMQL,
+ flush_qlen := FQL} = HState,
+ {error,{invalid_levels,{SMQL,DMQL,FQL}}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
+ WriteOnce = maps:with([type],OldHConfig),
+ ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig),
+ NewHConfig0 = maps:get(config, NewConfig, #{}),
+ Default =
+ case SetOrUpdate of
+ set ->
+ %% Do not reset write-once fields to defaults
+ maps:merge(get_default_config(),WriteOnce);
+ update ->
+ OldHConfig
+ end,
+
+ %% Allow (accidentally) included read-only fields - just overwrite them
+ NewHConfig = maps:merge(maps:merge(Default, NewHConfig0),ReadOnly),
+
+ %% But fail if write-once fields are changed
+ case maps:with([type],NewHConfig) of
+ WriteOnce ->
+ changing_config1(maps:get(handler_pid,OldHConfig),
+ OldConfig,
+ NewConfig#{config=>NewHConfig});
+ Other ->
+ {error,{illegal_config_change,#{config=>WriteOnce},#{config=>Other}}}
+ end.
+
+changing_config1(HPid, OldConfig, NewConfig) ->
+ case check_config(changing, NewConfig) of
+ Result = {ok,NewConfig1} ->
+ try gen_server:call(HPid, {change_config,OldConfig,NewConfig1},
+ ?DEFAULT_CALL_TIMEOUT) of
+ ok -> Result;
+ HError -> HError
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+ Error ->
+ Error
+ end.
+
+check_config(adding, Config) ->
+ %% Merge in defaults on handler level
+ HConfig0 = maps:get(config, Config, #{}),
+ HConfig = maps:merge(get_default_config(),HConfig0),
+ case check_h_config(maps:to_list(HConfig)) of
+ ok ->
+ {ok,Config#{config=>HConfig}};
+ Error ->
+ Error
+ end;
+check_config(changing, Config) ->
+ HConfig = maps:get(config, Config, #{}),
+ case check_h_config(maps:to_list(HConfig)) of
+ ok -> {ok,Config};
+ Error -> Error
+ end.
+
+check_h_config([{type,Type} | Config]) when Type == standard_io;
+ Type == standard_error ->
+ check_h_config(Config);
+check_h_config([{type,{file,File}} | Config]) when is_list(File) ->
+ check_h_config(Config);
+check_h_config([{type,{file,File,Modes}} | Config]) when is_list(File),
+ is_list(Modes) ->
+ check_h_config(Config);
+check_h_config([Other | Config]) ->
+ case logger_h_common:check_common_config(Other) of
+ valid ->
+ check_h_config(Config);
+ invalid ->
+ {error,{invalid_config,?MODULE,Other}}
+ end;
+check_h_config([]) ->
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(#{id:=Name}) ->
+ stop(Name).
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config = #{id := Name,
+ config := #{handler_pid := HPid,
+ mode_tab := ModeTab}}) ->
+ %% if the handler has crashed, we must drop this event
+ %% and hope the handler restarts so we can try again
+ true = is_process_alive(HPid),
+ Bin = logger_h_common:log_to_binary(LogEvent, Config),
+ logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(#{config:=HConfig}=Config) ->
+ Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}.
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([Name, Config = #{config := HConfig},
+ State0 = #{type := Type, file_ctrl_sync_int := FileCtrlSyncInt}]) ->
+ RegName = ?name_to_reg_name(?MODULE,Name),
+ register(RegName, self()),
+ process_flag(trap_exit, true),
+ process_flag(message_queue_data, off_heap),
+
+ ?init_test_hooks(),
+ ?start_observation(Name),
+
+ case do_init(Name, Type) of
+ {ok,InitState} ->
+ try ets:new(Name, [public]) of
+ ModeTab ->
+ ?set_mode(ModeTab, async),
+ State = maps:merge(State0, InitState),
+ T0 = ?timestamp(),
+ State1 =
+ ?merge_with_stats(State#{
+ mode_tab => ModeTab,
+ mode => async,
+ file_ctrl_sync => FileCtrlSyncInt,
+ last_qlen => 0,
+ last_log_ts => T0,
+ last_op => sync,
+ burst_win_ts => T0,
+ burst_msg_count => 0}),
+ Config1 =
+ Config#{config => HConfig#{handler_pid => self(),
+ mode_tab => ModeTab}},
+ proc_lib:init_ack({ok,self(),Config1}),
+ gen_server:cast(self(), repeated_filesync),
+ gen_server:enter_loop(?MODULE, [], State1)
+ catch
+ _:Error ->
+ unregister(RegName),
+ logger_h_common:error_notify({init_handler,Name,Error}),
+ proc_lib:init_ack(Error)
+ end;
+ Error ->
+ unregister(RegName),
+ logger_h_common:error_notify({init_handler,Name,Error}),
+ proc_lib:init_ack(Error)
+ end.
+
+do_init(Name, Type) ->
+ case open_log_file(Name, Type) of
+ {ok,FileCtrlPid} ->
+ case logger_h_common:unset_restart_flag(Name, ?MODULE) of
+ true ->
+ %% inform about restart
+ gen_server:cast(self(), {log_handler_info,
+ "Handler ~p restarted",
+ [Name]});
+ false ->
+ %% initial start
+ ok
+ end,
+ {ok,#{id=>Name,type=>Type,file_ctrl_pid=>FileCtrlPid}};
+ Error ->
+ Error
+ end.
+
+%% This is the synchronous log event.
+handle_call({log, Bin}, _From, State) ->
+ {Result,State1} = do_log(Bin, call, State),
+ %% Result == ok | dropped
+ {reply,Result, State1};
+
+handle_call(filesync, _From, State = #{type := Type,
+ file_ctrl_pid := FileCtrlPid}) ->
+ if is_atom(Type) ->
+ {reply, ok, State};
+ true ->
+ {reply, file_ctrl_filesync_sync(FileCtrlPid), State#{last_op=>sync}}
+ end;
+
+handle_call({change_config,_OldConfig,NewConfig}, _From,
+ State = #{filesync_repeat_interval := FSyncInt0}) ->
+ HConfig = maps:get(config, NewConfig, #{}),
+ State1 = maps:merge(State, HConfig),
+ case logger_h_common:overload_levels_ok(State1) of
+ true ->
+ _ =
+ case maps:get(filesync_repeat_interval, HConfig, undefined) of
+ undefined ->
+ ok;
+ no_repeat ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined));
+ FSyncInt0 ->
+ ok;
+ _FSyncInt1 ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined)),
+ gen_server:cast(self(), repeated_filesync)
+ end,
+ {reply, ok, State1};
+ false ->
+ #{sync_mode_qlen := SMQL,
+ drop_mode_qlen := DMQL,
+ flush_qlen := FQL} = State1,
+ {reply, {error,{invalid_levels,{SMQL,DMQL,FQL}}}, State}
+ end;
+
+handle_call(info, _From, State) ->
+ {reply, State, State};
+
+handle_call(reset, _From, State) ->
+ State1 = ?merge_with_stats(State),
+ {reply, ok, State1#{last_qlen => 0,
+ last_log_ts => ?timestamp()}};
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State}.
+
+%% This is the asynchronous log event.
+handle_cast({log, Bin}, State) ->
+ {_,State1} = do_log(Bin, cast, State),
+ {noreply, State1};
+
+handle_cast({log_handler_info, Format, Args}, State = #{id:=Name}) ->
+ log_handler_info(Name, Format, Args, State),
+ {noreply, State};
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisecond value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% event, the repeat operation must be active!
+handle_cast(repeated_filesync,
+ State = #{type := Type,
+ file_ctrl_pid := FileCtrlPid,
+ filesync_repeat_interval := FSyncInt,
+ last_op := LastOp}) ->
+ State1 =
+ if not is_atom(Type), is_integer(FSyncInt) ->
+ %% only do filesync if something has been
+ %% written since last time we checked
+ if LastOp == sync ->
+ ok;
+ true ->
+ file_ctrl_filesync_async(FileCtrlPid)
+ end,
+ {ok,TRef} =
+ timer:apply_after(FSyncInt, gen_server,cast,
+ [self(),repeated_filesync]),
+ State#{rep_sync_tref => TRef, last_op => sync};
+ true ->
+ State
+ end,
+ {noreply,State1}.
+
+handle_info({'EXIT',Pid,Why}, State = #{id := Name, type := FileInfo}) ->
+ case maps:get(file_ctrl_pid, State, undefined) of
+ Pid ->
+ %% file error, terminate handler
+ logger_h_common:handler_exit(Name,
+ {error,{write_failed,FileInfo,Why}});
+ _Other ->
+ %% ignore EXIT
+ ok
+ end,
+ {noreply, State};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid,
+ type:=_FileInfo}) ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
+ undefined)),
+ case is_process_alive(FWPid) of
+ true ->
+ unlink(FWPid),
+ _ = file_ctrl_stop(FWPid),
+ MRef = erlang:monitor(process, FWPid),
+ receive
+ {'DOWN',MRef,_,_,_} ->
+ ok
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ exit(FWPid, kill)
+ end;
+ false ->
+ ok
+ end,
+ ok = logger_h_common:stop_or_restart(Name, Reason, State),
+ unregister(?name_to_reg_name(?MODULE, Name)),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%%
+get_default_config() ->
+ #{type => standard_io,
+ sync_mode_qlen => ?SYNC_MODE_QLEN,
+ drop_mode_qlen => ?DROP_MODE_QLEN,
+ flush_qlen => ?FLUSH_QLEN,
+ burst_limit_enable => ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+
+get_init_state() ->
+ #{file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN}.
+
+%%%-----------------------------------------------------------------
+%%% Add a standard handler to the logger.
+%%% This starts a dedicated handler process which should always
+%%% exist if the handler is registered with logger (and should not
+%%% exist if the handler is not registered).
+%%%
+%%% Handler-specific config should be provided in a sub-map associated
+%%% with the key 'config', e.g.:
+%%%
+%%%   Config = #{config => #{sync_mode_qlen => 50}}
+%%%
+%%% The standard handler process is linked to logger_sup, which is
+%%% part of the kernel application's supervision tree.
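+%%%
+%%% Illustrative call from the logger API (handler id and file name are
+%%% examples only):
+%%%
+%%%   logger:add_handler(my_std_h, logger_std_h,
+%%%                      #{config => #{type => {file,"./info.log"},
+%%%                                    sync_mode_qlen => 50}}).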
+start(Name, Config, HandlerState) ->
+ LoggerStdH =
+ #{id => Name,
+ start => {?MODULE, start_link, [Name,Config,HandlerState]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, LoggerStdH) of
+ {ok,Pid,Config1} ->
+ ok = logger_handler_watcher:register_handler(Name,Pid),
+ {ok,Config1};
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Stop and remove the handler.
+stop(Name) ->
+ case whereis(?name_to_reg_name(?MODULE,Name)) of
+ undefined ->
+ ok;
+ Pid ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ %% And we don't need to do supervisor:delete_child, since
+ %% the restart type is temporary, which means that the
+ %% child specification is automatically removed from the
+ %% supervisor when the process dies.
+ _ = gen_server:call(Pid, stop),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Logging and overload control.
+-define(update_file_ctrl_sync(C, Interval),
+ if C == 0 -> Interval;
+ true -> C-1 end).
+
+%% check for overload between every event (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox
+%% before LogWindowSize events have been handled
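+%%
+%% The mode thresholds come from the handler configuration (see
+%% get_default_config/0): roughly, a queue length above sync_mode_qlen
+%% switches to synchronous mode, above drop_mode_qlen to drop mode, and
+%% above flush_qlen the mailbox is flushed. The decision itself is made
+%% by logger_h_common:check_load/1 below.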
+do_log(Bin, CallOrCast, State = #{id:=Name, mode:=Mode0}) ->
+ T1 = ?timestamp(),
+
+ %% check if the handler is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% event to react quickly to large bursts of events and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
+
+ if (Mode1 == drop) andalso (Mode0 =/= drop) ->
+ log_handler_info(Name, "Handler ~p switched to drop mode",
+ [Name], State);
+ (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) ->
+ log_handler_info(Name, "Handler ~p switched to ~w mode",
+ [Name,Mode1], State);
+ true ->
+ ok
+ end,
+
+ %% kill the handler if it can't keep up with the load
+ logger_h_common:kill_if_choked(Name, QLen, Mem, ?MODULE, State),
+
+ if Mode1 == flush ->
+ flush(Name, QLen, T1, State1);
+ true ->
+ write(Name, Mode1, T1, Bin, CallOrCast, State1)
+ end.
+
+%% this clause is called by do_log/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) ->
+ %% flush messages in the mailbox (a limited number in
+ %% order to not cause long delays)
+ NewFlushed = logger_h_common:flush_log_events(?FLUSH_MAX_N),
+
+ %% write info in log about flushed messages
+ log_handler_info(Name, "Handler ~p flushed ~w log events",
+ [Name,NewFlushed], State),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,_QLen1} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,_QLen1}),
+
+ %% Add 1 for the current log event
+ ?observe(Name,{flushed,NewFlushed+1}),
+
+ State1 = ?update_max_time(?diff_time(T1,_T0),State),
+ {dropped,?update_other(flushed,FLUSHED,NewFlushed,
+ State1#{mode => ?set_mode(ModeTab,async),
+ last_qlen => 0,
+ last_log_ts => T1})}.
+
+%% this clause is called to write to file
+write(_Name, Mode, T1, Bin, _CallOrCast,
+ State = #{mode_tab := ModeTab,
+ file_ctrl_pid := FileCtrlPid,
+ file_ctrl_sync := FileCtrlSync,
+ last_qlen := LastQLen,
+ last_log_ts := T0,
+ file_ctrl_sync_int := FileCtrlSyncInt}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log events
+ {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
+
+ %% only send a synchronous event to the file controller process
+ %% every FileCtrlSyncInt time, to give the handler time between
+ %% file writes so it can keep up with incoming messages
+ {Result,LastQLen1} =
+ if DoWrite, FileCtrlSync == 0 ->
+ ?observe(_Name,{_CallOrCast,1}),
+ file_write_sync(FileCtrlPid, Bin, false),
+ {ok,element(2, process_info(self(), message_queue_len))};
+ DoWrite ->
+ ?observe(_Name,{_CallOrCast,1}),
+ file_write_async(FileCtrlPid, Bin),
+ {ok,LastQLen};
+ not DoWrite ->
+ ?observe(_Name,{flushed,1}),
+ {dropped,LastQLen}
+ end,
+
+ %% Check if the time since the previous log event is long enough -
+ %% and the queue length small enough - to assume the mailbox has
+ %% been emptied, and if so, do filesync operation and reset mode to
+ %% async. Note that this is the best we can do to detect an idle
+ %% handler without setting a timer after each log call/cast. If the
+ %% time between two consecutive log events is short and no new
+ %% event comes in after the last one, idle state won't be detected!
+ Time = ?diff_time(T1,T0),
+ {Mode1,BurstMsgCount1} =
+ if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
+ (Time > ?IDLE_DETECT_TIME_USEC) ->
+ %% do filesync if necessary
+ case maps:get(type, State) of
+ Std when is_atom(Std) ->
+ ok;
+ _File ->
+ file_ctrl_filesync_async(FileCtrlPid)
+ end,
+ {?change_mode(ModeTab, Mode, async),0};
+ true ->
+ {Mode,BurstMsgCount}
+ end,
+ State1 =
+ ?update_calls_or_casts(_CallOrCast,1,State),
+ State2 =
+ ?update_max_time(Time,
+ State1#{mode => Mode1,
+ last_qlen := LastQLen1,
+ last_log_ts => T1,
+ last_op => write,
+ burst_win_ts => BurstWinT,
+ burst_msg_count => BurstMsgCount1,
+ file_ctrl_sync =>
+ ?update_file_ctrl_sync(FileCtrlSync,
+ FileCtrlSyncInt)}),
+ {Result,State2}.
+
+open_log_file(HandlerName, FileInfo) ->
+ case file_ctrl_start(HandlerName, FileInfo) of
+ OK = {ok,_FileCtrlPid} -> OK;
+ Error -> Error
+ end.
+
+do_open_log_file({file,File}) ->
+ do_open_log_file({file,File,[raw,append,delayed_write]});
+
+do_open_log_file({file,File,[]}) ->
+ do_open_log_file({file,File,[raw,append,delayed_write]});
+
+do_open_log_file({file,File,Modes}) ->
+ try
+ case filelib:ensure_dir(File) of
+ ok ->
+ file:open(File, Modes);
+ Error ->
+ Error
+ end
+ catch
+ _:Reason -> {error,Reason}
+ end.
+
+close_log_file(Std) when Std == standard_io; Std == standard_error ->
+ ok;
+close_log_file(Fd) ->
+ _ = file:datasync(Fd),
+ _ = file:close(Fd).
+
+
+log_handler_info(Name, Format, Args, #{file_ctrl_pid := FileCtrlPid}) ->
+ Config =
+ case logger:get_handler_config(Name) of
+ {ok,Conf} -> Conf;
+ _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}
+ end,
+ Meta = #{time=>erlang:system_time(microsecond)},
+ Bin = logger_h_common:log_to_binary(#{level => notice,
+ msg => {Format,Args},
+ meta => Meta}, Config),
+ _ = file_write_async(FileCtrlPid, Bin),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% File control process
+
+file_ctrl_start(HandlerName, FileInfo) ->
+ Starter = self(),
+ FileCtrlPid =
+ spawn_link(fun() ->
+ file_ctrl_init(HandlerName, FileInfo, Starter)
+ end),
+ receive
+ {FileCtrlPid,ok} ->
+ {ok,FileCtrlPid};
+ {FileCtrlPid,Error} ->
+ Error
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ {error,file_ctrl_process_not_started}
+ end.
+
+file_ctrl_stop(Pid) ->
+ Pid ! stop.
+
+file_write_async(Pid, Bin) ->
+ Pid ! {log,Bin},
+ ok.
+
+file_write_sync(Pid, Bin, FileSync) ->
+ case file_ctrl_call(Pid, {log,self(),Bin,FileSync}) of
+ {error,Reason} ->
+ {error,{write_failed,Bin,Reason}};
+ Result ->
+ Result
+ end.
+
+file_ctrl_filesync_async(Pid) ->
+ Pid ! filesync,
+ ok.
+
+file_ctrl_filesync_sync(Pid) ->
+ file_ctrl_call(Pid, {filesync,self()}).
+
+file_ctrl_call(Pid, Msg) ->
+ MRef = monitor(process, Pid),
+ Pid ! {Msg,MRef},
+ receive
+ {MRef,Result} ->
+ demonitor(MRef, [flush]),
+ Result;
+ {'DOWN',MRef,_Type,_Object,Reason} ->
+ {error,Reason}
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ {error,{no_response,Pid}}
+ end.
+
+file_ctrl_init(HandlerName, FileInfo, Starter) when is_tuple(FileInfo) ->
+ process_flag(message_queue_data, off_heap),
+ FileName = element(2, FileInfo),
+ case do_open_log_file(FileInfo) of
+ {ok,Fd} ->
+ Starter ! {self(),ok},
+ file_ctrl_loop(Fd, file, FileName, false, ok, ok, HandlerName);
+ {error,Reason} ->
+ Starter ! {self(),{error,{open_failed,FileName,Reason}}}
+ end;
+file_ctrl_init(HandlerName, StdDev, Starter) ->
+ Starter ! {self(),ok},
+ file_ctrl_loop(StdDev, standard_io, StdDev, false, ok, ok, HandlerName).
+
+file_ctrl_loop(Fd, Type, DevName, Synced,
+ PrevWriteResult, PrevSyncResult, HandlerName) ->
+ receive
+ %% asynchronous event
+ {log,Bin} ->
+ Result = if Type == file ->
+ write_to_dev(Fd, Bin, DevName,
+ PrevWriteResult, HandlerName);
+ true ->
+ io:put_chars(Fd, Bin)
+ end,
+ file_ctrl_loop(Fd, Type, DevName, false,
+ Result, PrevSyncResult, HandlerName);
+
+ %% synchronous event
+ {{log,From,Bin,FileSync},MRef} ->
+ if Type == file ->
+ %% check that file hasn't been deleted
+ CheckFile =
+ fun() -> {ok,_} = file:read_file_info(DevName) end,
+ spawn_link(CheckFile),
+ WResult = write_to_dev(Fd, Bin, DevName,
+ PrevWriteResult, HandlerName),
+ {Synced1,SResult} =
+ if not FileSync ->
+ {false,PrevSyncResult};
+ true ->
+ case sync_dev(Fd, DevName,
+ PrevSyncResult, HandlerName) of
+ ok -> {true,ok};
+ Error -> {false,Error}
+ end
+ end,
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, Synced1,
+ WResult, SResult, HandlerName);
+ true ->
+ _ = io:put_chars(Fd, Bin),
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, false,
+ ok, PrevSyncResult, HandlerName)
+ end;
+
+ filesync when not Synced ->
+ Result = sync_dev(Fd, DevName, PrevSyncResult, HandlerName),
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, Result, HandlerName);
+
+ filesync ->
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, PrevSyncResult, HandlerName);
+
+ {{filesync,From},MRef} ->
+ Result = if not Synced ->
+ sync_dev(Fd, DevName, PrevSyncResult, HandlerName);
+ true ->
+ ok
+ end,
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, Result, HandlerName);
+
+ stop ->
+ _ = close_log_file(Fd),
+ stopped
+ end.
+
+write_to_dev(Fd, Bin, FileName, PrevWriteResult, HandlerName) ->
+ case ?file_write(Fd, Bin) of
+ ok ->
+ ok;
+ PrevWriteResult ->
+ %% don't report same error twice
+ PrevWriteResult;
+ Error ->
+ logger_h_common:error_notify({HandlerName,write,FileName,Error}),
+ Error
+ end.
+
+sync_dev(Fd, DevName, PrevSyncResult, HandlerName) ->
+ case ?file_datasync(Fd) of
+ ok ->
+ ok;
+ PrevSyncResult ->
+ %% don't report same error twice
+ PrevSyncResult;
+ Error ->
+ logger_h_common:error_notify({HandlerName,filesync,DevName,Error}),
+ Error
+ end.
diff --git a/lib/kernel/src/logger_sup.erl b/lib/kernel/src/logger_sup.erl
new file mode 100644
index 0000000000..3d6f482e20
--- /dev/null
+++ b/lib/kernel/src/logger_sup.erl
@@ -0,0 +1,57 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0]).
+
+%% Supervisor callbacks
+-export([init/1]).
+
+-define(SERVER, ?MODULE).
+
+%%%===================================================================
+%%% API functions
+%%%===================================================================
+
+start_link() ->
+ supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+%%%===================================================================
+%%% Supervisor callbacks
+%%%===================================================================
+
+init([]) ->
+
+ SupFlags = #{strategy => one_for_one,
+ intensity => 1,
+ period => 5},
+
+ Watcher = #{id => logger_handler_watcher,
+ start => {logger_handler_watcher, start_link, []},
+ shutdown => brutal_kill},
+
+ {ok, {SupFlags, [Watcher]}}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
diff --git a/lib/kernel/src/net.erl b/lib/kernel/src/net.erl
index f058042bdd..2d0ae2ed0c 100644
--- a/lib/kernel/src/net.erl
+++ b/lib/kernel/src/net.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/net_adm.erl b/lib/kernel/src/net_adm.erl
index e6a81126c2..8ec275b88b 100644
--- a/lib/kernel/src/net_adm.erl
+++ b/lib/kernel/src/net_adm.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -96,7 +96,8 @@ names() ->
Reason :: address | file:posix().
names(Hostname) ->
- erl_epmd:names(Hostname).
+ ErlEpmd = net_kernel:epmd_module(),
+ ErlEpmd:names(Hostname).
-spec dns_hostname(Host) -> {ok, Name} | {error, Host} when
Host :: atom() | string(),
diff --git a/lib/kernel/src/net_kernel.erl b/lib/kernel/src/net_kernel.erl
index 35a54f591e..3cf11fd7b1 100644
--- a/lib/kernel/src/net_kernel.erl
+++ b/lib/kernel/src/net_kernel.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -26,15 +26,13 @@
%%-define(dist_debug, true).
-%-define(DBG,erlang:display([?MODULE,?LINE])).
-
-ifdef(dist_debug).
-define(debug(Term), erlang:display(Term)).
-else.
-define(debug(Term), ok).
-endif.
--ifdef(DEBUG).
+-ifdef(dist_debug).
-define(connect_failure(Node,Term),
io:format("Net Kernel 2: Failed connection to node ~p, reason ~p~n",
[Node,Term])).
@@ -53,18 +51,27 @@
-define(tckr_dbg(X), ok).
-endif.
-%% User Interface Exports
--export([start/1, start_link/1, stop/0,
- kernel_apply/3,
+%% Documented API functions.
+
+-export([allow/1, allowed/0,
+ connect_node/1,
monitor_nodes/1,
monitor_nodes/2,
+ setopts/2,
+ getopts/2,
+ start/1,
+ stop/0]).
+
+%% Exports for internal use.
+
+-export([start_link/2,
+ kernel_apply/3,
longnames/0,
- allow/1,
protocol_childspecs/0,
epmd_module/0]).
--export([connect/1, disconnect/1, hidden_connect/1, passive_cnct/1]).
--export([connect_node/1, hidden_connect_node/1]). %% explicit connect
+-export([disconnect/1, passive_cnct/1]).
+-export([hidden_connect_node/1]).
-export([set_net_ticktime/1, set_net_ticktime/2, get_net_ticktime/0]).
-export([node_info/1, node_info/2, nodes_info/0,
@@ -73,7 +80,8 @@
-export([publish_on_node/1, update_publish_nodes/1]).
-%% Internal Exports
+%% Internal exports for spawning processes.
+
-export([do_spawn/3,
spawn_func/6,
ticker/2,
@@ -103,7 +111,7 @@
}).
-record(listen, {
- listen, %% listen pid
+ listen, %% listen socket
accept, %% accepting pid
address, %% #net_address
module %% proto module
@@ -114,6 +122,7 @@
-record(connection, {
node, %% remote node name
+ conn_id, %% Connection identity
state, %% pending | up | up_pending
owner, %% owner pid
pending_owner, %% possible new owner
@@ -162,6 +171,8 @@ kernel_apply(M,F,A) -> request({apply,M,F,A}).
Nodes :: [node()].
allow(Nodes) -> request({allow, Nodes}).
+allowed() -> request(allowed).
+
longnames() -> request(longnames).
-spec stop() -> ok | {error, Reason} when
@@ -213,8 +224,7 @@ get_net_ticktime() ->
Error :: error | {error, term()}.
monitor_nodes(Flag) ->
case catch process_flag(monitor_nodes, Flag) of
- true -> ok;
- false -> ok;
+ N when is_integer(N) -> ok;
_ -> mk_monitor_nodes_error(Flag, [])
end.
@@ -227,8 +237,7 @@ monitor_nodes(Flag) ->
Error :: error | {error, term()}.
monitor_nodes(Flag, Opts) ->
case catch process_flag({monitor_nodes, Opts}, Flag) of
- true -> ok;
- false -> ok;
+ N when is_integer(N) -> ok;
_ -> mk_monitor_nodes_error(Flag, Opts)
end.
@@ -239,14 +248,15 @@ ticktime_res(A) when is_atom(A) -> A.
%% Called though BIF's
-connect(Node) -> do_connect(Node, normal, false).
%%% Long timeout if blocked (== barred), only affects nodes with
%%% {dist_auto_connect, once} set.
-passive_cnct(Node) -> do_connect(Node, normal, true).
-disconnect(Node) -> request({disconnect, Node}).
+passive_cnct(Node) ->
+ case request({passive_cnct, Node}) of
+ ignored -> false;
+ Other -> Other
+ end.
-%% connect but not seen
-hidden_connect(Node) -> do_connect(Node, hidden, false).
+disconnect(Node) -> request({disconnect, Node}).
%% Should this node publish itself on Node?
publish_on_node(Node) when is_atom(Node) ->
@@ -264,67 +274,24 @@ connect_node(Node) when is_atom(Node) ->
hidden_connect_node(Node) when is_atom(Node) ->
request({connect, hidden, Node}).
-do_connect(Node, Type, WaitForBarred) -> %% Type = normal | hidden
- case catch ets:lookup(sys_dist, Node) of
- {'EXIT', _} ->
- ?connect_failure(Node,{table_missing, sys_dist}),
- false;
- [#barred_connection{}] ->
- case WaitForBarred of
- false ->
- false;
- true ->
- Pid = spawn(?MODULE,passive_connect_monitor,[self(),Node]),
- receive
- {Pid, true} ->
- %%io:format("Net Kernel: barred connection (~p) "
- %% "connected from other end.~n",[Node]),
- true;
- {Pid, false} ->
- ?connect_failure(Node,{barred_connection,
- ets:lookup(sys_dist, Node)}),
- %%io:format("Net Kernel: barred connection (~p) "
- %% "- failure.~n",[Node]),
- false
- end
- end;
- Else ->
- case application:get_env(kernel, dist_auto_connect) of
- {ok, never} ->
- ?connect_failure(Node,{dist_auto_connect,never}),
- false;
- % This might happen due to connection close
- % not beeing propagated to user space yet.
- % Save the day by just not connecting...
- {ok, once} when Else =/= [],
- (hd(Else))#connection.state =:= up ->
- ?connect_failure(Node,{barred_connection,
- ets:lookup(sys_dist, Node)}),
- false;
- _ ->
- request({connect, Type, Node})
- end
- end.
-passive_connect_monitor(Parent, Node) ->
+passive_connect_monitor(From, Node) ->
ok = monitor_nodes(true,[{node_type,all}]),
- case lists:member(Node,nodes([connected])) of
- true ->
- ok = monitor_nodes(false,[{node_type,all}]),
- Parent ! {self(),true};
- _ ->
- Ref = make_ref(),
- Tref = erlang:send_after(connecttime(),self(),Ref),
- receive
- Ref ->
- ok = monitor_nodes(false,[{node_type,all}]),
- Parent ! {self(), false};
- {nodeup,Node,_} ->
- ok = monitor_nodes(false,[{node_type,all}]),
- _ = erlang:cancel_timer(Tref),
- Parent ! {self(),true}
- end
- end.
+ Reply = case lists:member(Node,nodes([connected])) of
+ true ->
+ true;
+ _ ->
+ receive
+ {nodeup,Node,_} ->
+ true
+ after connecttime() ->
+ false
+ end
+ end,
+ ok = monitor_nodes(false,[{node_type,all}]),
+ {Pid, Tag} = From,
+ erlang:send(Pid, {Tag, Reply}).
+
%% If the net_kernel isn't running we ignore all requests to the
%% kernel, thus basically accepting them :-)
@@ -341,18 +308,18 @@ request(Req) ->
start(Args) ->
erl_distribution:start(Args).
-%% This is the main startup routine for net_kernel
-%% The defaults are longnames and a ticktime of 15 secs to the tcp_drv.
-
-start_link([Name]) ->
- start_link([Name, longnames]);
+%% This is the main startup routine for net_kernel (only for internal
+%% use by the Kernel application).
-start_link([Name, LongOrShortNames]) ->
- start_link([Name, LongOrShortNames, 15000]);
+start_link([Name], CleanHalt) ->
+ start_link([Name, longnames], CleanHalt);
+start_link([Name, LongOrShortNames], CleanHalt) ->
+ start_link([Name, LongOrShortNames, 15000], CleanHalt);
-start_link([Name, LongOrShortNames, Ticktime]) ->
- case gen_server:start_link({local, net_kernel}, net_kernel,
- {Name, LongOrShortNames, Ticktime}, []) of
+start_link([Name, LongOrShortNames, Ticktime], CleanHalt) ->
+ Args = {Name, LongOrShortNames, Ticktime, CleanHalt},
+ case gen_server:start_link({local, net_kernel}, ?MODULE,
+ Args, []) of
{ok, Pid} ->
{ok, Pid};
{error, {already_started, Pid}} ->
@@ -361,12 +328,9 @@ start_link([Name, LongOrShortNames, Ticktime]) ->
exit(nodistribution)
end.
-%% auth:get_cookie should only be able to return an atom
-%% tuple cookies are unknowns
-
-init({Name, LongOrShortNames, TickT}) ->
+init({Name, LongOrShortNames, TickT, CleanHalt}) ->
process_flag(trap_exit,true),
- case init_node(Name, LongOrShortNames) of
+ case init_node(Name, LongOrShortNames, CleanHalt) of
{ok, Node, Listeners} ->
process_flag(priority, max),
Ticktime = to_integer(TickT),
@@ -379,7 +343,7 @@ init({Name, LongOrShortNames, TickT}) ->
connections =
ets:new(sys_dist,[named_table,
protected,
- {keypos, 2}]),
+ {keypos, #connection.node}]),
listen = Listeners,
allowed = [],
verbose = 0
@@ -388,41 +352,140 @@ init({Name, LongOrShortNames, TickT}) ->
{stop, Error}
end.
+do_auto_connect_1(Node, ConnId, From, State) ->
+ case ets:lookup(sys_dist, Node) of
+ [#barred_connection{}] ->
+ case ConnId of
+ passive_cnct ->
+ spawn(?MODULE,passive_connect_monitor,[From,Node]),
+ {noreply, State};
+ _ ->
+ erts_internal:abort_connection(Node, ConnId),
+ {reply, false, State}
+ end;
+
+ ConnLookup ->
+ do_auto_connect_2(Node, ConnId, From, State, ConnLookup)
+ end.
+
+do_auto_connect_2(Node, passive_cnct, From, State, ConnLookup) ->
+ try erts_internal:new_connection(Node) of
+ ConnId ->
+ do_auto_connect_2(Node, ConnId, From, State, ConnLookup)
+ catch
+ _:_ ->
+ error_logger:error_msg("~n** Cannot get connection id for node ~w~n",
+ [Node]),
+ {reply, false, State}
+ end;
+do_auto_connect_2(Node, ConnId, From, State, ConnLookup) ->
+ case ConnLookup of
+ [#connection{conn_id=ConnId, state = up}] ->
+ {reply, true, State};
+ [#connection{conn_id=ConnId, waiting=Waiting}=Conn] ->
+ case From of
+ noreply -> ok;
+ _ -> ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]})
+ end,
+ {noreply, State};
+
+ _ ->
+ case application:get_env(kernel, dist_auto_connect) of
+ {ok, never} ->
+ ?connect_failure(Node,{dist_auto_connect,never}),
+ erts_internal:abort_connection(Node, ConnId),
+ {reply, false, State};
+
+ %% This might happen due to connection close
+ %% not being propagated to user space yet.
+ %% Save the day by just not connecting...
+ {ok, once} when ConnLookup =/= [],
+ (hd(ConnLookup))#connection.state =:= up ->
+ ?connect_failure(Node,{barred_connection,
+ ets:lookup(sys_dist, Node)}),
+ erts_internal:abort_connection(Node, ConnId),
+ {reply, false, State};
+ _ ->
+ case setup(Node, ConnId, normal, From, State) of
+ {ok, SetupPid} ->
+ Owners = [{SetupPid, Node} | State#state.conn_owners],
+ {noreply,State#state{conn_owners=Owners}};
+ _Error ->
+ ?connect_failure(Node, {setup_call, failed, _Error}),
+ erts_internal:abort_connection(Node, ConnId),
+ {reply, false, State}
+ end
+ end
+ end.
+
+
+do_explicit_connect([#connection{conn_id = ConnId, state = up}], _, _, ConnId, _From, State) ->
+ {reply, true, State};
+do_explicit_connect([#connection{conn_id = ConnId}=Conn], _, _, ConnId, From, State)
+ when Conn#connection.state =:= pending;
+ Conn#connection.state =:= up_pending ->
+ Waiting = Conn#connection.waiting,
+ ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}),
+ {noreply, State};
+do_explicit_connect([#barred_connection{}], Type, Node, ConnId, From , State) ->
+ %% Barred connection only affects auto_connect, ignore it.
+ do_explicit_connect([], Type, Node, ConnId, From , State);
+do_explicit_connect(_ConnLookup, Type, Node, ConnId, From , State) ->
+ case setup(Node,ConnId,Type,From,State) of
+ {ok, SetupPid} ->
+ Owners = [{SetupPid, Node} | State#state.conn_owners],
+ {noreply,State#state{conn_owners=Owners}};
+ _Error ->
+ ?connect_failure(Node, {setup_call, failed, _Error}),
+ {reply, false, State}
+ end.
+
%% ------------------------------------------------------------
%% handle_call.
%% ------------------------------------------------------------
%%
-%% Set up a connection to Node.
-%% The response is delayed until the connection is up and
-%% running.
+%% Passive auto-connect to Node.
+%% The response is delayed until the connection is up and running.
+%%
+handle_call({passive_cnct, Node}, From, State) when Node =:= node() ->
+ async_reply({reply, true, State}, From);
+handle_call({passive_cnct, Node}, From, State) ->
+ verbose({passive_cnct, Node}, 1, State),
+ R = do_auto_connect_1(Node, passive_cnct, From, State),
+ return_call(R, From);
+
+%%
+%% Explicit connect
+%% The response is delayed until the connection is up and running.
%%
handle_call({connect, _, Node}, From, State) when Node =:= node() ->
async_reply({reply, true, State}, From);
handle_call({connect, Type, Node}, From, State) ->
verbose({connect, Type, Node}, 1, State),
- case ets:lookup(sys_dist, Node) of
- [Conn] when Conn#connection.state =:= up ->
- async_reply({reply, true, State}, From);
- [Conn] when Conn#connection.state =:= pending ->
- Waiting = Conn#connection.waiting,
- ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}),
- {noreply, State};
- [Conn] when Conn#connection.state =:= up_pending ->
- Waiting = Conn#connection.waiting,
- ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}),
- {noreply, State};
- _ ->
- case setup(Node,Type,From,State) of
- {ok, SetupPid} ->
- Owners = [{SetupPid, Node} | State#state.conn_owners],
- {noreply,State#state{conn_owners=Owners}};
- _ ->
- ?connect_failure(Node, {setup_call, failed}),
- async_reply({reply, false, State}, From)
- end
- end;
+ ConnLookup = ets:lookup(sys_dist, Node),
+ R = try erts_internal:new_connection(Node) of
+ ConnId ->
+ R1 = do_explicit_connect(ConnLookup, Type, Node, ConnId, From, State),
+ case R1 of
+ {reply, true, _S} -> %% already connected
+ ok;
+ {noreply, _S} -> %% connection pending
+ ok;
+ {reply, false, _S} -> %% connection refused
+ erts_internal:abort_connection(Node, ConnId)
+ end,
+ R1
+
+ catch
+ _:_ ->
+ error_logger:error_msg("~n** Cannot get connection id for node ~w~n",
+ [Node]),
+ {reply, false, State}
+ end,
+ return_call(R, From);
+
%%
%% Close the connection to Node.
@@ -465,6 +528,9 @@ handle_call({allow, Nodes}, From, State) ->
async_reply({reply,error,State}, From)
end;
+handle_call(allowed, From, #state{allowed = Allowed} = State) ->
+ async_reply({reply,{ok,Allowed},State}, From);
+
%%
%% authentication, used by auth. Simply works as this:
%% if the message comes through, the other node IS authorized.
@@ -549,6 +615,38 @@ handle_call({new_ticktime,_T,_TP},
#state{tick = #tick_change{time = T}} = State) ->
async_reply({reply, {ongoing_change_to, T}, State}, From);
+handle_call({setopts, new, Opts}, From, State) ->
+ Ret = setopts_new(Opts, State),
+ async_reply({reply, Ret, State}, From);
+
+handle_call({setopts, Node, Opts}, From, State) ->
+ Return =
+ case ets:lookup(sys_dist, Node) of
+ [Conn] when Conn#connection.state =:= up ->
+ case call_owner(Conn#connection.owner, {setopts, Opts}) of
+ {ok, Ret} -> Ret;
+ _ -> {error, noconnection}
+ end;
+
+ _ ->
+ {error, noconnection}
+ end,
+ async_reply({reply, Return, State}, From);
+
+handle_call({getopts, Node, Opts}, From, State) ->
+ Return =
+ case ets:lookup(sys_dist, Node) of
+ [Conn] when Conn#connection.state =:= up ->
+ case call_owner(Conn#connection.owner, {getopts, Opts}) of
+ {ok, Ret} -> Ret;
+ _ -> {error, noconnection}
+ end;
+
+ _ ->
+ {error, noconnection}
+ end,
+ async_reply({reply, Return, State}, From);
+
handle_call(_Msg, _From, State) ->
{noreply, State}.
@@ -597,6 +695,25 @@ terminate(_Reason, State) ->
%% ------------------------------------------------------------
%%
+%% Asynchronous auto connect request
+%%
+handle_info({auto_connect,Node, DHandle}, State) ->
+ verbose({auto_connect, Node, DHandle}, 1, State),
+ ConnId = DHandle,
+ NewState =
+ case do_auto_connect_1(Node, ConnId, noreply, State) of
+ {noreply, S} -> %% Pending connection
+ S;
+
+ {reply, true, S} -> %% Already connected
+ S;
+
+ {reply, false, S} -> %% Connection refused
+ S
+ end,
+ {noreply, NewState};
+
+%%
%% accept a new connection.
%%
handle_info({accept,AcceptPid,Socket,Family,Proto}, State) ->
@@ -676,14 +793,23 @@ handle_info({AcceptPid, {accept_pending,MyNode,Node,Address,Type}}, State) ->
AcceptPid ! {self(), {accept_pending, already_pending}},
{noreply, State};
_ ->
- ets:insert(sys_dist, #connection{node = Node,
- state = pending,
- owner = AcceptPid,
- address = Address,
- type = Type}),
- AcceptPid ! {self(),{accept_pending,ok}},
- Owners = [{AcceptPid,Node} | State#state.conn_owners],
- {noreply, State#state{conn_owners = Owners}}
+ try erts_internal:new_connection(Node) of
+ ConnId ->
+ ets:insert(sys_dist, #connection{node = Node,
+ conn_id = ConnId,
+ state = pending,
+ owner = AcceptPid,
+ address = Address,
+ type = Type}),
+ AcceptPid ! {self(),{accept_pending,ok}},
+ Owners = [{AcceptPid,Node} | State#state.conn_owners],
+ {noreply, State#state{conn_owners = Owners}}
+ catch
+ _:_ ->
+ error_logger:error_msg("~n** Cannot get connection id for node ~w~n",
+ [Node]),
+ AcceptPid ! {self(),{accept_pending,nok_pending}}
+ end
end;
handle_info({SetupPid, {is_pending, Node}}, State) ->
@@ -741,7 +867,7 @@ handle_info(transition_period_end,
{noreply,State#state{tick = #tick{ticker = Tckr, time = T}}};
handle_info(X, State) ->
- error_msg("Net kernel got ~w~n",[X]),
+ error_msg("Net kernel got ~tw~n",[X]),
{noreply,State}.
%% -----------------------------------------------------------
@@ -869,6 +995,7 @@ pending_nodedown(Conn, Node, Type, State) ->
% Don't bar connections that have never been alive
%mark_sys_dist_nodedown(Node),
% - instead just delete the node:
+ erts_internal:abort_connection(Node, Conn#connection.conn_id),
ets:delete(sys_dist, Node),
reply_waiting(Node,Conn#connection.waiting, false),
case Type of
@@ -883,7 +1010,9 @@ up_pending_nodedown(Conn, Node, _Reason, _Type, State) ->
AcceptPid = Conn#connection.pending_owner,
Owners = State#state.conn_owners,
Pend = lists:keydelete(AcceptPid, 1, State#state.pend_owners),
+ erts_internal:abort_connection(Node, Conn#connection.conn_id),
Conn1 = Conn#connection { owner = AcceptPid,
+ conn_id = erts_internal:new_connection(Node),
pending_owner = undefined,
state = pending },
ets:insert(sys_dist, Conn1),
@@ -891,15 +1020,16 @@ up_pending_nodedown(Conn, Node, _Reason, _Type, State) ->
State#state{conn_owners = [{AcceptPid,Node}|Owners], pend_owners = Pend}.
-up_nodedown(_Conn, Node, _Reason, Type, State) ->
- mark_sys_dist_nodedown(Node),
+up_nodedown(Conn, Node, _Reason, Type, State) ->
+ mark_sys_dist_nodedown(Conn, Node),
case Type of
normal -> ?nodedown(Node, State);
_ -> ok
end,
State.
-mark_sys_dist_nodedown(Node) ->
+mark_sys_dist_nodedown(Conn, Node) ->
+ erts_internal:abort_connection(Node, Conn#connection.conn_id),
case application:get_env(kernel, dist_auto_connect) of
{ok, once} ->
ets:insert(sys_dist, #barred_connection{node = Node});
@@ -1142,15 +1272,8 @@ spawn_func(_,{From,Tag},M,F,A,Gleader) ->
%% Set up connection to a new node.
%% -----------------------------------------------------------
-setup(Node,Type,From,State) ->
- Allowed = State#state.allowed,
- case lists:member(Node, Allowed) of
- false when Allowed =/= [] ->
- error_msg("** Connection attempt with "
- "disallowed node ~w ** ~n", [Node]),
- {error, bad_node};
- _ ->
- case select_mod(Node, State#state.listen) of
+setup(Node, ConnId, Type, From, State) ->
+ case setup_check(Node, State) of
{ok, L} ->
Mod = L#listen.module,
LAddr = L#listen.address,
@@ -1163,18 +1286,38 @@ setup(Node,Type,From,State) ->
Addr = LAddr#net_address {
address = undefined,
host = undefined },
+ Waiting = case From of
+ noreply -> [];
+ _ -> [From]
+ end,
ets:insert(sys_dist, #connection{node = Node,
+ conn_id = ConnId,
state = pending,
owner = Pid,
- waiting = [From],
+ waiting = Waiting,
address = Addr,
type = normal}),
{ok, Pid};
Error ->
Error
- end
end.
+setup_check(Node, State) ->
+ Allowed = State#state.allowed,
+ case lists:member(Node, Allowed) of
+ false when Allowed =/= [] ->
+ error_msg("** Connection attempt with "
+ "disallowed node ~w ** ~n", [Node]),
+ {error, bad_node};
+ _ ->
+ case select_mod(Node, State#state.listen) of
+ {ok, _L}=OK -> OK;
+ Error -> Error
+ end
+ end.
+
+
+
%%
%% Find a module that is willing to handle connection setup to Node
%%
@@ -1201,12 +1344,12 @@ get_proto_mod(_Family, _Protocol, []) ->
%% -------- Initialisation functions ------------------------
-init_node(Name, LongOrShortNames) ->
- {NameWithoutHost,_Host} = lists:splitwith(fun($@)->false;(_)->true end,
- atom_to_list(Name)),
+init_node(Name, LongOrShortNames, CleanHalt) ->
+ {NameWithoutHost0,_Host} = split_node(Name),
case create_name(Name, LongOrShortNames, 1) of
{ok,Node} ->
- case start_protos(list_to_atom(NameWithoutHost),Node) of
+ NameWithoutHost = list_to_atom(NameWithoutHost0),
+ case start_protos(NameWithoutHost, Node, CleanHalt) of
{ok, Ls} ->
{ok, Node, Ls};
Error ->
@@ -1225,11 +1368,22 @@ create_name(Name, LongOrShortNames, Try) ->
{Head,Host1} = create_hostpart(Name, LongOrShortNames),
case Host1 of
{ok,HostPart} ->
- {ok,list_to_atom(Head ++ HostPart)};
+ case valid_name_head(Head) of
+ true ->
+ {ok,list_to_atom(Head ++ HostPart)};
+ false ->
+ error_logger:info_msg("Invalid node name!\n"
+ "Please check your configuration\n"),
+ {error, badarg}
+ end;
{error,long} when Try =:= 1 ->
%% It could be we haven't read domain name from resolv file yet
inet_config:do_load_resolv(os:type(), longnames),
create_name(Name, LongOrShortNames, 0);
+ {error, hostname_not_allowed} ->
+ error_logger:info_msg("Invalid node name!\n"
+ "Please check your configuration\n"),
+ {error, badarg};
{error,Type} ->
error_logger:info_msg(
lists:concat(["Can\'t set ",
@@ -1240,15 +1394,15 @@ create_name(Name, LongOrShortNames, Try) ->
end.
create_hostpart(Name, LongOrShortNames) ->
- {Head,Host} = lists:splitwith(fun($@)->false;(_)->true end,
- atom_to_list(Name)),
+ {Head,Host} = split_node(Name),
Host1 = case {Host,LongOrShortNames} of
- {[$@,_|_],longnames} ->
- {ok,Host};
+ {[$@,_|_] = Host,longnames} ->
+ validate_hostname(Host);
{[$@,_|_],shortnames} ->
case lists:member($.,Host) of
true -> {error,short};
- _ -> {ok,Host}
+ _ ->
+ validate_hostname(Host)
end;
{_,shortnames} ->
case inet_db:gethostname() of
@@ -1268,6 +1422,27 @@ create_hostpart(Name, LongOrShortNames) ->
end,
{Head,Host1}.
+validate_hostname([$@|HostPart] = Host) ->
+ {ok, MP} = re:compile("^[!-ÿ]*$", [unicode]),
+ case re:run(HostPart, MP) of
+ {match, _} ->
+ {ok, Host};
+ nomatch ->
+ {error, hostname_not_allowed}
+ end.
+
+valid_name_head(Head) ->
+ {ok, MP} = re:compile("^[0-9A-Za-z_\\-]*$", [unicode]),
+ case re:run(Head, MP) of
+ {match, _} ->
+ true;
+ nomatch ->
+ false
+ end.
+
+split_node(Name) ->
+ lists:splitwith(fun(C) -> C =/= $@ end, atom_to_list(Name)).
+
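A short sketch, not part of the patch, of what the two patterns above accept when used as in valid_name_head/1 and validate_hostname/1: the name head may contain only alphanumerics, '_' and '-', while the host part may contain any character from '!' through 'ÿ', so no spaces or control characters.

    name_ok(NameStr) when is_list(NameStr) ->
        {Head, Host} = lists:splitwith(fun(C) -> C =/= $@ end, NameStr),
        {ok, HeadMP} = re:compile("^[0-9A-Za-z_\\-]*$", [unicode]),
        {ok, HostMP} = re:compile("^[!-ÿ]*$", [unicode]),
        HeadOk = re:run(Head, HeadMP) =/= nomatch,
        HostOk = case Host of
                     [$@ | HostPart] -> re:run(HostPart, HostMP) =/= nomatch;
                     _ -> false
                 end,
        HeadOk andalso HostOk.

    %% name_ok("my_node@host.example") -> true
    %% name_ok("bad name@host")        -> false  (space in the name head)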
%%
%%
%%
@@ -1307,21 +1482,26 @@ epmd_module() ->
%% Start all protocols
%%
-start_protos(Name,Node) ->
+start_protos(Name, Node, CleanHalt) ->
case init:get_argument(proto_dist) of
{ok, [Protos]} ->
- start_protos(Name,Protos, Node);
+ start_protos(Name, Protos, Node, CleanHalt);
_ ->
- start_protos(Name,["inet_tcp"], Node)
+ start_protos(Name, ["inet_tcp"], Node, CleanHalt)
end.
-start_protos(Name,Ps, Node) ->
- case start_protos(Name, Ps, Node, []) of
- [] -> {error, badarg};
- Ls -> {ok, Ls}
+start_protos(Name, Ps, Node, CleanHalt) ->
+ case start_protos(Name, Ps, Node, [], CleanHalt) of
+ [] ->
+ case CleanHalt of
+ true -> halt(1);
+ false -> {error, badarg}
+ end;
+ Ls ->
+ {ok, Ls}
end.
-start_protos(Name, [Proto | Ps], Node, Ls) ->
+start_protos(Name, [Proto | Ps], Node, Ls, CleanHalt) ->
Mod = list_to_atom(Proto ++ "_dist"),
case catch Mod:listen(Name) of
{ok, {Socket, Address, Creation}} ->
@@ -1334,33 +1514,48 @@ start_protos(Name, [Proto | Ps], Node, Ls) ->
address = Address,
accept = AcceptPid,
module = Mod },
- start_protos(Name,Ps, Node, [L|Ls]);
+ start_protos(Name,Ps, Node, [L|Ls], CleanHalt);
_ ->
Mod:close(Socket),
- error_logger:info_msg("Invalid node name: ~p~n", [Node]),
- start_protos(Name, Ps, Node, Ls)
+ S = "invalid node name: " ++ atom_to_list(Node),
+ proto_error(CleanHalt, Proto, S),
+ start_protos(Name, Ps, Node, Ls, CleanHalt)
end;
{'EXIT', {undef,_}} ->
- error_logger:info_msg("Protocol: ~tp: not supported~n", [Proto]),
- start_protos(Name,Ps, Node, Ls);
+ proto_error(CleanHalt, Proto, "not supported"),
+ start_protos(Name, Ps, Node, Ls, CleanHalt);
{'EXIT', Reason} ->
- error_logger:info_msg("Protocol: ~tp: register error: ~tp~n",
- [Proto, Reason]),
- start_protos(Name,Ps, Node, Ls);
+ register_error(CleanHalt, Proto, Reason),
+ start_protos(Name, Ps, Node, Ls, CleanHalt);
{error, duplicate_name} ->
- error_logger:info_msg("Protocol: ~tp: the name " ++
- atom_to_list(Node) ++
- " seems to be in use by another Erlang node",
- [Proto]),
- start_protos(Name,Ps, Node, Ls);
+ S = "the name " ++ atom_to_list(Node) ++
+ " seems to be in use by another Erlang node",
+ proto_error(CleanHalt, Proto, S),
+ start_protos(Name, Ps, Node, Ls, CleanHalt);
{error, Reason} ->
- error_logger:info_msg("Protocol: ~tp: register/listen error: ~tp~n",
- [Proto, Reason]),
- start_protos(Name,Ps, Node, Ls)
+ register_error(CleanHalt, Proto, Reason),
+ start_protos(Name, Ps, Node, Ls, CleanHalt)
end;
-start_protos(_,[], _Node, Ls) ->
+start_protos(_, [], _Node, Ls, _CleanHalt) ->
Ls.
+register_error(false, Proto, Reason) ->
+ S = io_lib:format("register/listen error: ~p", [Reason]),
+ proto_error(false, Proto, lists:flatten(S));
+register_error(true, Proto, Reason) ->
+ S = "Protocol '" ++ Proto ++ "': register/listen error: ",
+ erlang:display_string(S),
+ erlang:display(Reason).
+
+proto_error(CleanHalt, Proto, String) ->
+ S = "Protocol '" ++ Proto ++ "': " ++ String ++ "\n",
+ case CleanHalt of
+ false ->
+ error_logger:info_msg(S);
+ true ->
+ erlang:display_string(S)
+ end.
+
set_node(Node, Creation) when node() =:= nonode@nohost ->
case catch erlang:setnode(Node, Creation) of
true ->
@@ -1563,6 +1758,11 @@ verbose(_, _, _) ->
getnode(P) when is_pid(P) -> node(P);
getnode(P) -> P.
+return_call({noreply, _State}=R, _From) ->
+ R;
+return_call(R, From) ->
+ async_reply(R, From).
+
async_reply({reply, Msg, State}, From) ->
async_gen_server_reply(From, Msg),
{noreply, State}.
@@ -1570,14 +1770,104 @@ async_reply({reply, Msg, State}, From) ->
async_gen_server_reply(From, Msg) ->
{Pid, Tag} = From,
M = {Tag, Msg},
- case catch erlang:send(Pid, M, [nosuspend, noconnect]) of
+ try erlang:send(Pid, M, [nosuspend, noconnect]) of
ok ->
ok;
nosuspend ->
_ = spawn(fun() -> catch erlang:send(Pid, M, [noconnect]) end),
ok;
noconnect ->
- ok; % The gen module takes care of this case.
- {'EXIT', _} ->
- ok
+ ok % The gen module takes care of this case.
+ catch
+ _:_ -> ok
+ end.
+
+call_owner(Owner, Msg) ->
+ Mref = monitor(process, Owner),
+ Owner ! {self(), Mref, Msg},
+ receive
+ {Mref, Reply} ->
+ erlang:demonitor(Mref, [flush]),
+ {ok, Reply};
+ {'DOWN', Mref, _, _, _} ->
+ error
+ end.
+
+
+-spec setopts(Node, Options) -> ok | {error, Reason} | ignored when
+ Node :: node() | new,
+ Options :: [inet:socket_setopt()],
+ Reason :: inet:posix() | noconnection.
+
+setopts(Node, Opts) when is_atom(Node), is_list(Opts) ->
+ request({setopts, Node, Opts}).
+
+setopts_new(Opts, State) ->
+ %% First try setopts on listening socket(s)
+ %% Bail out on failure.
+ %% If successful, we are pretty sure Opts are ok
+ %% and we continue with config params and pending connections.
+ case setopts_on_listen(Opts, State#state.listen) of
+ ok ->
+ setopts_new_1(Opts);
+ Fail -> Fail
+ end.
+
+setopts_on_listen(_, []) -> ok;
+setopts_on_listen(Opts, [#listen {listen = LSocket, module = Mod} | T]) ->
+ try Mod:setopts(LSocket, Opts) of
+ ok ->
+ setopts_on_listen(Opts, T);
+ Fail -> Fail
+ catch
+ error:undef -> {error, enotsup}
end.
+
+setopts_new_1(Opts) ->
+ ConnectOpts = case application:get_env(kernel, inet_dist_connect_options) of
+ {ok, CO} -> CO;
+ _ -> []
+ end,
+ application:set_env(kernel, inet_dist_connect_options,
+ merge_opts(Opts,ConnectOpts)),
+ ListenOpts = case application:get_env(kernel, inet_dist_listen_options) of
+ {ok, LO} -> LO;
+ _ -> []
+ end,
+ application:set_env(kernel, inet_dist_listen_options,
+ merge_opts(Opts, ListenOpts)),
+ case lists:keyfind(nodelay, 1, Opts) of
+ {nodelay, ND} when is_boolean(ND) ->
+ application:set_env(kernel, dist_nodelay, ND);
+ _ -> ignore
+ end,
+
+ %% Update any pending connections
+ PendingConns = ets:select(sys_dist, [{'_',
+ [{'=/=',{element,#connection.state,'$_'},up}],
+ ['$_']}]),
+ lists:foreach(fun(#connection{state = pending, owner = Owner}) ->
+ call_owner(Owner, {setopts, Opts});
+ (#connection{state = up_pending, pending_owner = Owner}) ->
+ call_owner(Owner, {setopts, Opts});
+ (_) -> ignore
+ end, PendingConns),
+ ok.
+
+merge_opts([], B) ->
+ B;
+merge_opts([H|T], B0) ->
+ {Key, _} = H,
+ B1 = lists:filter(fun({K,_}) -> K =/= Key end, B0),
+ merge_opts(T, [H | B1]).
+
+-spec getopts(Node, Options) ->
+ {'ok', OptionValues} | {'error', Reason} | ignored when
+ Node :: node(),
+ Options :: [inet:socket_getopt()],
+ OptionValues :: [inet:socket_setopt()],
+ Reason :: inet:posix() | noconnection.
+
+getopts(Node, Opts) when is_atom(Node), is_list(Opts) ->
+ request({getopts, Node, Opts}).
+
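A minimal usage sketch of the setopts/2 and getopts/2 API defined above; the peer node name is hypothetical, and the calling node is assumed to be alive (otherwise both requests return ignored).

    tune_dist(Peer) when is_atom(Peer) ->
        %% Make all future connections (listen and connect) use TCP_NODELAY.
        New = net_kernel:setopts(new, [{nodelay, true}]),
        %% Inspect one established connection, if there is one.
        Cur = case net_kernel:getopts(Peer, [nodelay, sndbuf]) of
                  {ok, Values}          -> Values;
                  {error, noconnection} -> not_connected;
                  ignored               -> not_alive
              end,
        {New, Cur}.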
diff --git a/lib/kernel/src/os.erl b/lib/kernel/src/os.erl
index 3330b38d84..29a26674ba 100644
--- a/lib/kernel/src/os.erl
+++ b/lib/kernel/src/os.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,38 +21,40 @@
%% Provides a common operating system interface.
--export([type/0, version/0, cmd/1, find_executable/1, find_executable/2]).
+-export([type/0, version/0, cmd/1, cmd/2, find_executable/1, find_executable/2]).
-include("file.hrl").
+-export_type([env_var_name/0, env_var_value/0, env_var_name_value/0]).
+
+-export([getenv/0, getenv/1, getenv/2, putenv/2, unsetenv/1]).
+
%%% BIFs
--export([getenv/0, getenv/1, getenv/2, getpid/0, putenv/2, system_time/0, system_time/1,
- timestamp/0, unsetenv/1]).
+-export([get_env_var/1, getpid/0, list_env_vars/0, perf_counter/0,
+ perf_counter/1, set_env_var/2, set_signal/2, system_time/0,
+ system_time/1, timestamp/0, unset_env_var/1]).
--spec getenv() -> [string()].
+-type os_command() :: atom() | io_lib:chars().
+-type os_command_opts() :: #{ max_size => non_neg_integer() | infinity }.
-getenv() -> erlang:nif_error(undef).
+-export_type([os_command/0, os_command_opts/0]).
--spec getenv(VarName) -> Value | false when
- VarName :: string(),
- Value :: string().
+-type env_var_name() :: nonempty_string().
-getenv(_) ->
- erlang:nif_error(undef).
+-type env_var_value() :: string().
--spec getenv(VarName, DefaultValue) -> Value when
- VarName :: string(),
- DefaultValue :: string(),
- Value :: string().
+-type env_var_name_value() :: nonempty_string().
-getenv(VarName, DefaultValue) ->
- case os:getenv(VarName) of
- false ->
- DefaultValue;
- Value ->
- Value
- end.
+-spec list_env_vars() -> [{env_var_name(), env_var_value()}].
+list_env_vars() ->
+ erlang:nif_error(undef).
+
+-spec get_env_var(VarName) -> Value | false when
+ VarName :: env_var_name(),
+ Value :: env_var_value().
+get_env_var(_VarName) ->
+ erlang:nif_error(undef).
-spec getpid() -> Value when
Value :: string().
@@ -60,11 +62,22 @@ getenv(VarName, DefaultValue) ->
getpid() ->
erlang:nif_error(undef).
--spec putenv(VarName, Value) -> true when
- VarName :: string(),
- Value :: string().
+-spec perf_counter() -> Counter when
+ Counter :: integer().
-putenv(_, _) ->
+perf_counter() ->
+ erlang:nif_error(undef).
+
+-spec perf_counter(Unit) -> integer() when
+ Unit :: erlang:time_unit().
+
+perf_counter(Unit) ->
+ erlang:convert_time_unit(os:perf_counter(), perf_counter, Unit).
+
+-spec set_env_var(VarName, Value) -> true when
+ VarName :: env_var_name(),
+ Value :: env_var_value().
+set_env_var(_, _) ->
erlang:nif_error(undef).
-spec system_time() -> integer().
@@ -84,16 +97,57 @@ system_time(_Unit) ->
timestamp() ->
erlang:nif_error(undef).
--spec unsetenv(VarName) -> true when
- VarName :: string().
+-spec unset_env_var(VarName) -> true when
+ VarName :: env_var_name().
+unset_env_var(_) ->
+ erlang:nif_error(undef).
-unsetenv(_) ->
+-spec set_signal(Signal, Option) -> 'ok' when
+ Signal :: 'sighup' | 'sigquit' | 'sigabrt' | 'sigalrm' |
+ 'sigterm' | 'sigusr1' | 'sigusr2' | 'sigchld' |
+ 'sigstop' | 'sigtstp',
+ Option :: 'default' | 'handle' | 'ignore'.
+
+set_signal(_Signal, _Option) ->
erlang:nif_error(undef).
%%% End of BIFs
+-spec getenv() -> [env_var_name_value()].
+getenv() ->
+ [lists:flatten([Key, $=, Value]) || {Key, Value} <- os:list_env_vars() ].
+
+-spec getenv(VarName) -> Value | false when
+ VarName :: env_var_name(),
+ Value :: env_var_value().
+getenv(VarName) ->
+ os:get_env_var(VarName).
+
+-spec getenv(VarName, DefaultValue) -> Value when
+ VarName :: env_var_name(),
+ DefaultValue :: env_var_value(),
+ Value :: env_var_value().
+getenv(VarName, DefaultValue) ->
+ case os:getenv(VarName) of
+ false ->
+ DefaultValue;
+ Value ->
+ Value
+ end.
+
+-spec putenv(VarName, Value) -> true when
+ VarName :: env_var_name(),
+ Value :: env_var_value().
+putenv(VarName, Value) ->
+ os:set_env_var(VarName, Value).
+
+-spec unsetenv(VarName) -> true when
+ VarName :: env_var_name().
+unsetenv(VarName) ->
+ os:unset_env_var(VarName).
+
-spec type() -> {Osfamily, Osname} when
- Osfamily :: unix | win32 | ose,
+ Osfamily :: unix | win32,
Osname :: atom().
type() ->
@@ -155,7 +209,7 @@ verify_executable(Name0, [Ext|Rest], OrigExtensions) ->
end;
verify_executable(Name, [], OrigExtensions) when OrigExtensions =/= [""] -> %% Windows
%% Will only happen on windows, hence case insensitivity
- case can_be_full_name(string:to_lower(Name),OrigExtensions) of
+ case can_be_full_name(string:lowercase(Name),OrigExtensions) of
true ->
verify_executable(Name,[""],[""]);
_ ->
@@ -209,206 +263,132 @@ extensions() ->
%% Executes the given command in the default shell for the operating system.
-spec cmd(Command) -> string() when
- Command :: atom() | io_lib:chars().
+ Command :: os_command().
cmd(Cmd) ->
- validate(Cmd),
- Bytes = case type() of
- {unix, _} ->
- unix_cmd(Cmd);
- {win32, Wtype} ->
- Command0 = case {os:getenv("COMSPEC"),Wtype} of
- {false,windows} -> lists:concat(["command.com /c", Cmd]);
- {false,_} -> lists:concat(["cmd /c", Cmd]);
- {Cspec,_} -> lists:concat([Cspec," /c",Cmd])
- end,
- %% open_port/2 awaits string() in Command, but io_lib:chars() can be
- %% deep lists according to io_lib module description.
- Command = lists:flatten(Command0),
- Port = open_port({spawn, Command}, [stream, in, eof, hide]),
- get_data(Port, [])
- end,
- String = unicode:characters_to_list(list_to_binary(Bytes)),
+ cmd(Cmd, #{ }).
+
+-spec cmd(Command, Options) -> string() when
+ Command :: os_command(),
+ Options :: os_command_opts().
+cmd(Cmd, Opts) ->
+ {SpawnCmd, SpawnOpts, SpawnInput, Eot} = mk_cmd(os:type(), validate(Cmd)),
+ Port = open_port({spawn, SpawnCmd}, [binary, stderr_to_stdout,
+ stream, in, hide | SpawnOpts]),
+ MonRef = erlang:monitor(port, Port),
+ true = port_command(Port, SpawnInput),
+ Bytes = get_data(Port, MonRef, Eot, [], 0, maps:get(max_size, Opts, infinity)),
+ demonitor(MonRef, [flush]),
+ String = unicode:characters_to_list(Bytes),
if %% Convert to unicode list if possible otherwise return bytes
is_list(String) -> String;
- true -> Bytes
+ true -> binary_to_list(Bytes)
end.
-unix_cmd(Cmd) ->
- Tag = make_ref(),
- {Pid,Mref} = erlang:spawn_monitor(
- fun() ->
- process_flag(trap_exit, true),
- Port = start_port(),
- erlang:port_command(Port, mk_cmd(Cmd)),
- exit({Tag,unix_get_data(Port)})
- end),
- receive
- {'DOWN',Mref,_,Pid,{Tag,Result}} ->
- Result;
- {'DOWN',Mref,_,Pid,Reason} ->
- exit(Reason)
+mk_cmd({win32,Wtype}, Cmd) ->
+ Command = case {os:getenv("COMSPEC"),Wtype} of
+ {false,windows} -> lists:concat(["command.com /c", Cmd]);
+ {false,_} -> lists:concat(["cmd /c", Cmd]);
+ {Cspec,_} -> lists:concat([Cspec," /c",Cmd])
+ end,
+ {Command, [], [], <<>>};
+mk_cmd(_,Cmd) ->
+ %% Have to send command in like this in order to make sh commands like
+ %% cd and ulimit available
+ {"/bin/sh -s unix:cmd", [out],
+ %% We insert a new line after the command, in case the command
+ %% contains a comment character.
+ %%
+ %% The </dev/null closes stdin, which means that programs
+ %% that use a closed stdin as a termination indicator work.
+ %% An example of such a program is 'more'.
+ %%
+ %% The "echo ^D" is used to indicate that the program has executed
+ %% and we should return any output we have gotten. We cannot use
+ %% termination of the child or closing of stdin/stdout as then
+ %% starting background jobs from os:cmd will block os:cmd.
+ %%
+ %% I tried changing this to be "better", but got bombarded with
+ %% backwards incompatibility bug reports, so leave this as it is.
+ ["(", unicode:characters_to_binary(Cmd), "\n) </dev/null; echo \"\^D\"\n"],
+ <<$\^D>>}.
+
+validate(Atom) when is_atom(Atom) ->
+ validate(atom_to_list(Atom));
+validate(List) when is_list(List) ->
+ case validate1(List) of
+ false ->
+ List;
+ true ->
+ %% Had zeros at end; remove them...
+ string:trim(List, trailing, [0])
end.
-%% The -s flag implies that only the positional parameters are set,
-%% and the commands are read from standard input. We set the
-%% $1 parameter for easy identification of the resident shell.
-%%
--define(ROOT, "/").
--define(ROOT_ANDROID, "/system").
--define(SHELL, "bin/sh -s unix:cmd 2>&1").
--define(PORT_CREATOR_NAME, os_cmd_port_creator).
+validate1([0|Rest]) ->
+ validate2(Rest);
+validate1([C|Rest]) when is_integer(C), C > 0 ->
+ validate1(Rest);
+validate1([List|Rest]) when is_list(List) ->
+ validate1(List) or validate1(Rest);
+validate1([]) ->
+ false.
-%%
-%% Serializing open_port through a process to avoid smp lock contention
-%% when many concurrent os:cmd() want to do vfork (OTP-7890).
-%%
--spec start_port() -> port().
-start_port() ->
- Ref = make_ref(),
- Request = {Ref,self()},
- {Pid, Mon} = case whereis(?PORT_CREATOR_NAME) of
- undefined ->
- spawn_monitor(fun() ->
- start_port_srv(Request)
- end);
- P ->
- P ! Request,
- M = erlang:monitor(process, P),
- {P, M}
- end,
+%% Ensure that the rest is zero only...
+validate2([]) ->
+ true;
+validate2([0|Rest]) ->
+ validate2(Rest);
+validate2([List|Rest]) when is_list(List) ->
+ validate2(List),
+ validate2(Rest).
+
+get_data(Port, MonRef, Eot, Sofar, Size, Max) ->
receive
- {Ref, Port} when is_port(Port) ->
- erlang:demonitor(Mon, [flush]),
- Port;
- {Ref, Error} ->
- erlang:demonitor(Mon, [flush]),
- exit(Error);
- {'DOWN', Mon, process, Pid, _Reason} ->
- start_port()
+ {Port, {data, Bytes}} ->
+ case eot(Bytes, Eot, Size, Max) of
+ more ->
+ get_data(Port, MonRef, Eot, [Sofar, Bytes],
+ Size + byte_size(Bytes), Max);
+ Last ->
+ catch port_close(Port),
+ flush_until_down(Port, MonRef),
+ iolist_to_binary([Sofar, Last])
+ end;
+ {'DOWN', MonRef, _, _, _} ->
+ flush_exit(Port),
+ iolist_to_binary(Sofar)
end.
-start_port_srv(Request) ->
- %% We don't want a group leader of some random application. Use
- %% kernel_sup's group leader.
- {group_leader, GL} = process_info(whereis(kernel_sup),
- group_leader),
- true = group_leader(GL, self()),
- process_flag(trap_exit, true),
- StayAlive = try register(?PORT_CREATOR_NAME, self())
- catch
- error:_ -> false
- end,
- start_port_srv_handle(Request),
- case StayAlive of
- true -> start_port_srv_loop();
- false -> exiting
+eot(Bs, <<>>, Size, Max) when Size + byte_size(Bs) < Max ->
+ more;
+eot(Bs, <<>>, Size, Max) ->
+ binary:part(Bs, {0, Max - Size});
+eot(Bs, Eot, Size, Max) ->
+ case binary:match(Bs, Eot) of
+ {Pos, _} when Size + Pos < Max ->
+ binary:part(Bs,{0, Pos});
+ _ ->
+ eot(Bs, <<>>, Size, Max)
end.
-start_port_srv_handle({Ref,Client}) ->
- Path = case lists:reverse(erlang:system_info(system_architecture)) of
- % androideabi
- "ibaediordna" ++ _ -> filename:join([?ROOT_ANDROID, ?SHELL]);
- _ -> filename:join([?ROOT, ?SHELL])
- end,
- Reply = try open_port({spawn, Path},[stream]) of
- Port when is_port(Port) ->
- (catch port_connect(Port, Client)),
- unlink(Port),
- Port
- catch
- error:Reason ->
- {Reason,erlang:get_stacktrace()}
- end,
- Client ! {Ref,Reply},
- ok.
-
-start_port_srv_loop() ->
- receive
- {Ref, Client} = Request when is_reference(Ref),
- is_pid(Client) ->
- start_port_srv_handle(Request);
- _Junk ->
- ok
- end,
- start_port_srv_loop().
-
-%%
-%% unix_get_data(Port) -> Result
-%%
-unix_get_data(Port) ->
- unix_get_data(Port, []).
-
-unix_get_data(Port, Sofar) ->
+%% When port_close returns we know that all the
+%% messages sent have been sent and that the
+%% DOWN message is after them all.
+flush_until_down(Port, MonRef) ->
receive
- {Port,{data, Bytes}} ->
- case eot(Bytes) of
- {done, Last} ->
- lists:flatten([Sofar|Last]);
- more ->
- unix_get_data(Port, [Sofar|Bytes])
- end;
- {'EXIT', Port, _} ->
- lists:flatten(Sofar)
+ {Port, {data, _Bytes}} ->
+ flush_until_down(Port, MonRef);
+ {'DOWN', MonRef, _, _, _} ->
+ flush_exit(Port)
end.
-%%
-%% eot(String) -> more | {done, Result}
-%%
-eot(Bs) ->
- eot(Bs, []).
-
-eot([4| _Bs], As) ->
- {done, lists:reverse(As)};
-eot([B| Bs], As) ->
- eot(Bs, [B| As]);
-eot([], _As) ->
- more.
-
-%%
-%% mk_cmd(Cmd) -> {ok, ShellCommandString} | {error, ErrorString}
-%%
-%% We do not allow any input to Cmd (hence commands that want
-%% to read from standard input will return immediately).
-%% Standard error is redirected to standard output.
-%%
-%% We use ^D (= EOT = 4) to mark the end of the stream.
-%%
-mk_cmd(Cmd) when is_atom(Cmd) -> % backward comp.
- mk_cmd(atom_to_list(Cmd));
-mk_cmd(Cmd) ->
- %% We insert a new line after the command, in case the command
- %% contains a comment character.
- [$(, unicode:characters_to_binary(Cmd), "\n) </dev/null; echo \"\^D\"\n"].
-
-
-validate(Atom) when is_atom(Atom) ->
- ok;
-validate(List) when is_list(List) ->
- validate1(List).
-
-validate1([C|Rest]) when is_integer(C) ->
- validate1(Rest);
-validate1([List|Rest]) when is_list(List) ->
- validate1(List),
- validate1(Rest);
-validate1([]) ->
- ok.
-
-get_data(Port, Sofar) ->
+%% The exit signal is always delivered before
+%% the down signal, so we can be sure that if there
+%% was an exit message sent, it will be in the
+%% mailbox now.
+flush_exit(Port) ->
receive
- {Port, {data, Bytes}} ->
- get_data(Port, [Sofar|Bytes]);
- {Port, eof} ->
- Port ! {self(), close},
- receive
- {Port, closed} ->
- true
- end,
- receive
- {'EXIT', Port, _} ->
- ok
- after 1 -> % force context switch
- ok
- end,
- lists:flatten(Sofar)
+ {'EXIT', Port, _} ->
+ ok
+ after 0 ->
+ ok
end.
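A small usage sketch of the cmd/2 option map handled above, assuming a Unix shell; the command itself is only an example. max_size caps the captured output, and eot/4 trims the final chunk so at most max_size bytes are kept.

    capped_cmd() ->
        Out = os:cmd("yes | head -c 100000", #{ max_size => 1024 }),
        1024 = length(Out),   %% anything past max_size was discarded
        Out.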
diff --git a/lib/kernel/src/pg2.erl b/lib/kernel/src/pg2.erl
index ab98181b2a..c4732f37ee 100644
--- a/lib/kernel/src/pg2.erl
+++ b/lib/kernel/src/pg2.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -199,7 +199,7 @@ handle_call({delete, Name}, _From, S) ->
{reply, ok, S};
handle_call(Request, From, S) ->
error_logger:warning_msg("The pg2 server received an unexpected message:\n"
- "handle_call(~p, ~p, _)\n",
+ "handle_call(~tp, ~tp, _)\n",
[Request, From]),
{noreply, S}.
diff --git a/lib/kernel/src/ram_file.erl b/lib/kernel/src/ram_file.erl
index df335f7a8e..e427d130b7 100644
--- a/lib/kernel/src/ram_file.erl
+++ b/lib/kernel/src/ram_file.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/raw_file_io.erl b/lib/kernel/src/raw_file_io.erl
new file mode 100644
index 0000000000..e3c07c8f78
--- /dev/null
+++ b/lib/kernel/src/raw_file_io.erl
@@ -0,0 +1,75 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io).
+
+-export([open/2]).
+
+open(Filename, Modes) ->
+ %% Layers are applied in this order, and the listed modules will call this
+ %% function again as necessary, e.g. a raw compressed delayed file in list
+ %% mode will walk through [_list -> _compressed -> _delayed -> _raw].
+ ModuleOrder = [{raw_file_io_list, fun match_list/1},
+ {raw_file_io_compressed, fun match_compressed/1},
+ {raw_file_io_delayed, fun match_delayed/1},
+ {raw_file_io_raw, fun match_raw/1}],
+ open_1(ModuleOrder, Filename, add_implicit_modes(Modes)).
+open_1([], _Filename, _Modes) ->
+ error(badarg);
+open_1([{Module, Match} | Rest], Filename, Modes) ->
+ case lists:any(Match, Modes) of
+ true ->
+ {Options, ChildModes} =
+ lists:partition(fun(Mode) -> Match(Mode) end, Modes),
+ Module:open_layer(Filename, ChildModes, Options);
+ false ->
+ open_1(Rest, Filename, Modes)
+ end.
+
+%% 'read' and 'list' modes are enabled unless disabled by another option, so
+%% we'll explicitly add them to avoid duplicating this logic in child layers.
+add_implicit_modes(Modes0) ->
+ Modes1 = add_unless_matched(Modes0, fun match_writable/1, read),
+ add_unless_matched(Modes1, fun match_binary/1, list).
+add_unless_matched(Modes, Match, Default) ->
+ case lists:any(Match, Modes) of
+ false -> [Default | Modes];
+ true -> Modes
+ end.
+
+match_list(list) -> true;
+match_list(_Other) -> false.
+
+match_compressed(compressed) -> true;
+match_compressed(_Other) -> false.
+
+match_delayed({delayed_write, _Size, _Timeout}) -> true;
+match_delayed(delayed_write) -> true;
+match_delayed(_Other) -> false.
+
+match_raw(raw) -> true;
+match_raw(_Other) -> false.
+
+match_writable(write) -> true;
+match_writable(append) -> true;
+match_writable(exclusive) -> true;
+match_writable(_Other) -> false.
+
+match_binary(binary) -> true;
+match_binary(_Other) -> false.
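For reference, a sketch (not part of the module) that mirrors ModuleOrder in open/2 above and shows which layer modules a given mode list selects, outermost first.

    layer_stack(Modes) ->
        Order = [{raw_file_io_list,       fun(M) -> M =:= list end},
                 {raw_file_io_compressed, fun(M) -> M =:= compressed end},
                 {raw_file_io_delayed,    fun(delayed_write)         -> true;
                                             ({delayed_write, _, _}) -> true;
                                             (_)                     -> false
                                          end},
                 {raw_file_io_raw,        fun(M) -> M =:= raw end}],
        [Mod || {Mod, Match} <- Order, lists:any(Match, Modes)].

    %% layer_stack([raw, compressed, read]) ->
    %%     [raw_file_io_compressed, raw_file_io_raw]
    %% layer_stack([raw, list, delayed_write, write]) ->
    %%     [raw_file_io_list, raw_file_io_delayed, raw_file_io_raw]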
diff --git a/lib/kernel/src/raw_file_io_compressed.erl b/lib/kernel/src/raw_file_io_compressed.erl
new file mode 100644
index 0000000000..d5ab042d25
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_compressed.erl
@@ -0,0 +1,134 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_compressed).
+
+-export([close/1, sync/1, datasync/1, truncate/1, advise/4, allocate/3,
+ position/2, write/2, pwrite/2, pwrite/3,
+ read_line/1, read/2, pread/2, pread/3]).
+
+%% OTP internal.
+-export([ipread_s32bu_p32bu/3, sendfile/8]).
+
+-export([open_layer/3]).
+
+-include("file_int.hrl").
+
+open_layer(Filename, Modes, Options) ->
+ IsAppend = lists:member(append, Modes),
+ IsDeflate = lists:member(write, Modes),
+ IsInflate = lists:member(read, Modes),
+ if
+ IsDeflate, IsInflate; IsAppend ->
+ {error, einval};
+ IsDeflate, not IsInflate ->
+ start_server_module(raw_file_io_deflate, Filename, Modes, Options);
+ IsInflate ->
+ start_server_module(raw_file_io_inflate, Filename, Modes, Options)
+ end.
+
+start_server_module(Module, Filename, Modes, Options) ->
+ Secret = make_ref(),
+ case gen_statem:start(Module, {self(), Secret, Options}, []) of
+ {ok, Pid} -> open_next_layer(Pid, Secret, Filename, Modes);
+ Other -> Other
+ end.
+
+open_next_layer(Pid, Secret, Filename, Modes) ->
+ case gen_statem:call(Pid, {'$open', Secret, Filename, Modes}, infinity) of
+ ok ->
+ PublicFd = #file_descriptor{
+ module = raw_file_io_compressed, data = {self(), Pid} },
+ {ok, PublicFd};
+ Other -> Other
+ end.
+
+close(Fd) ->
+ wrap_call(Fd, [close]).
+
+sync(Fd) ->
+ wrap_call(Fd, [sync]).
+datasync(Fd) ->
+ wrap_call(Fd, [datasync]).
+
+truncate(Fd) ->
+ wrap_call(Fd, [truncate]).
+
+advise(Fd, Offset, Length, Advise) ->
+ wrap_call(Fd, [advise, Offset, Length, Advise]).
+allocate(Fd, Offset, Length) ->
+ wrap_call(Fd, [allocate, Offset, Length]).
+
+position(Fd, Mark) ->
+ wrap_call(Fd, [position, Mark]).
+
+write(Fd, IOData) ->
+ try
+ CompactedData = erlang:iolist_to_iovec(IOData),
+ wrap_call(Fd, [write, CompactedData])
+ catch
+ error:badarg -> {error, badarg}
+ end.
+
+pwrite(Fd, Offset, IOData) ->
+ try
+ CompactedData = erlang:iolist_to_iovec(IOData),
+ wrap_call(Fd, [pwrite, Offset, CompactedData])
+ catch
+ error:badarg -> {error, badarg}
+ end.
+pwrite(Fd, LocBytes) ->
+ try
+ CompactedLocBytes =
+ [ {Offset, erlang:iolist_to_iovec(IOData)} ||
+ {Offset, IOData} <- LocBytes ],
+ wrap_call(Fd, [pwrite, CompactedLocBytes])
+ catch
+ error:badarg -> {error, badarg}
+ end.
+
+read_line(Fd) ->
+ wrap_call(Fd, [read_line]).
+read(Fd, Size) ->
+ wrap_call(Fd, [read, Size]).
+pread(Fd, Offset, Size) ->
+ wrap_call(Fd, [pread, Offset, Size]).
+pread(Fd, LocNums) ->
+ wrap_call(Fd, [pread, LocNums]).
+
+ipread_s32bu_p32bu(Fd, Offset, MaxSize) ->
+ wrap_call(Fd, [ipread_s32bu_p32bu, Offset, MaxSize]).
+
+sendfile(_,_,_,_,_,_,_,_) ->
+ {error, enotsup}.
+
+wrap_call(Fd, Command) ->
+ {_Owner, Pid} = get_fd_data(Fd),
+ try gen_statem:call(Pid, Command, infinity) of
+ Result -> Result
+ catch
+ exit:{noproc, _StackTrace} -> {error, einval}
+ end.
+
+get_fd_data(#file_descriptor{ data = Data }) ->
+ {Owner, _ServerPid} = Data,
+ case self() of
+ Owner -> Data;
+ _ -> error(not_on_controlling_process)
+ end.
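An illustrative use through the public file API, assuming an OTP build that includes these layers and a writable path of your choosing: write selects the deflate server and read selects the inflate server, as decided by open_layer/3 above.

    roundtrip(Path, Bytes) when is_binary(Bytes) ->
        {ok, WFd} = file:open(Path, [raw, binary, write, compressed]),
        ok = file:write(WFd, Bytes),
        ok = file:close(WFd),          %% flushes and finishes the gzip stream
        {ok, RFd} = file:open(Path, [raw, binary, read, compressed]),
        {ok, Bytes} = file:read(RFd, byte_size(Bytes)),
        ok = file:close(RFd).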
diff --git a/lib/kernel/src/raw_file_io_deflate.erl b/lib/kernel/src/raw_file_io_deflate.erl
new file mode 100644
index 0000000000..acfc546743
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_deflate.erl
@@ -0,0 +1,159 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_deflate).
+
+-behavior(gen_statem).
+
+-export([init/1, callback_mode/0, terminate/3]).
+-export([opening/3, opened/3]).
+
+-include("file_int.hrl").
+
+-define(GZIP_WBITS, 16 + 15).
+
+callback_mode() -> state_functions.
+
+init({Owner, Secret, [compressed]}) ->
+ Monitor = monitor(process, Owner),
+ Z = zlib:open(),
+ ok = zlib:deflateInit(Z, default, deflated, ?GZIP_WBITS, 8, default),
+ Data =
+ #{ owner => Owner,
+ monitor => Monitor,
+ secret => Secret,
+ position => 0,
+ zlib => Z },
+ {ok, opening, Data}.
+
+opening({call, From}, {'$open', Secret, Filename, Modes}, #{ secret := Secret } = Data) ->
+ case raw_file_io:open(Filename, Modes) of
+ {ok, PrivateFd} ->
+ NewData = Data#{ handle => PrivateFd },
+ {next_state, opened, NewData, [{reply, From, ok}]};
+ Other ->
+ {stop_and_reply, normal, [{reply, From, Other}]}
+ end;
+opening(_Event, _Contents, _Data) ->
+ {keep_state_and_data, [postpone]}.
+
+%%
+
+opened(info, {'DOWN', Monitor, process, _Owner, Reason}, #{ monitor := Monitor } = Data) ->
+ if
+ Reason =/= kill -> flush_deflate_state(Data);
+ Reason =:= kill -> ignored
+ end,
+ {stop, shutdown};
+
+opened(info, _Message, _Data) ->
+ keep_state_and_data;
+
+opened({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) ->
+ #{ handle := PrivateFd } = Data,
+ Response =
+ case flush_deflate_state(Data) of
+ ok -> ?CALL_FD(PrivateFd, close, []);
+ Other -> Other
+ end,
+ {stop_and_reply, normal, [{reply, From, Response}]};
+
+opened({call, {Owner, _Tag} = From}, [position, Mark], #{ owner := Owner } = Data) ->
+ case position(Data, Mark) of
+ {ok, NewData, Result} ->
+ Response = {ok, Result},
+ {keep_state, NewData, [{reply, From, Response}]};
+ Other ->
+ {keep_state_and_data, [{reply, From, Other}]}
+ end;
+
+opened({call, {Owner, _Tag} = From}, [write, IOVec], #{ owner := Owner } = Data) ->
+ case write(Data, IOVec) of
+ {ok, NewData} -> {keep_state, NewData, [{reply, From, ok}]};
+ Other -> {keep_state_and_data, [{reply, From, Other}]}
+ end;
+
+opened({call, {Owner, _Tag} = From}, [read, _Size], #{ owner := Owner }) ->
+ Response = {error, ebadf},
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened({call, {Owner, _Tag} = From}, [read_line], #{ owner := Owner }) ->
+ Response = {error, ebadf},
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened({call, {Owner, _Tag} = From}, _Command, #{ owner := Owner }) ->
+ Response = {error, enotsup},
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened({call, _From}, _Command, _Data) ->
+ %% The client functions filter this out, so we'll crash if the user does
+ %% anything stupid on purpose.
+ {shutdown, protocol_violation};
+
+opened(_Event, _Request, _Data) ->
+ keep_state_and_data.
+
+write(Data, IOVec) ->
+ #{ handle := PrivateFd, position := Position, zlib := Z } = Data,
+ UncompressedSize = iolist_size(IOVec),
+ case ?CALL_FD(PrivateFd, write, [zlib:deflate(Z, IOVec)]) of
+ ok -> {ok, Data#{ position := (Position + UncompressedSize) }};
+ Other -> Other
+ end.
+
+%%
+%% We support "seeking" forward as long as it isn't relative to EOF.
+%%
+%% Seeking is a bit of a misnomer as it's really just compressing zeroes until
+%% we reach the desired point, but it has always behaved like this.
+%%
+
+position(Data, Mark) when is_atom(Mark) ->
+ position(Data, {Mark, 0});
+position(Data, Offset) when is_integer(Offset) ->
+ position(Data, {bof, Offset});
+position(Data, {bof, Offset}) when is_integer(Offset) ->
+ position_1(Data, Offset);
+position(Data, {cur, Offset}) when is_integer(Offset) ->
+ #{ position := Position } = Data,
+ position_1(Data, Position + Offset);
+position(_Data, {eof, Offset}) when is_integer(Offset) ->
+ {error, einval};
+position(_Data, _Any) ->
+ {error, badarg}.
+
+position_1(#{ position := Desired } = Data, Desired) ->
+ {ok, Data, Desired};
+position_1(#{ position := Current } = Data, Desired) when Current < Desired ->
+ BytesToWrite = min(Desired - Current, 4 bsl 20),
+ case write(Data, <<0:(BytesToWrite)/unit:8>>) of
+ {ok, NewData} -> position_1(NewData, Desired);
+ Other -> Other
+ end;
+position_1(#{ position := Current }, Desired) when Current > Desired ->
+ {error, einval}.
+
+flush_deflate_state(#{ handle := PrivateFd, zlib := Z }) ->
+ case ?CALL_FD(PrivateFd, write, [zlib:deflate(Z, [], finish)]) of
+ ok -> ok;
+ Other -> Other
+ end.
+
+terminate(_Reason, _State, _Data) ->
+ ok.
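A sketch of the forward-seek behaviour described in the position/2 comment above: seeking past the current position on a compressed write stream deflates zero bytes to fill the gap. The path is hypothetical.

    sparse_gzip(Path) ->
        {ok, Fd} = file:open(Path, [raw, binary, write, compressed]),
        ok = file:write(Fd, <<"head">>),
        {ok, 10} = file:position(Fd, {bof, 10}),   %% positions 4..9 become zero bytes
        ok = file:write(Fd, <<"tail">>),
        ok = file:close(Fd).

    %% Decompressing Path yields <<"head",0,0,0,0,0,0,"tail">>.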
diff --git a/lib/kernel/src/raw_file_io_delayed.erl b/lib/kernel/src/raw_file_io_delayed.erl
new file mode 100644
index 0000000000..d2ad7550a1
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_delayed.erl
@@ -0,0 +1,320 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_delayed).
+
+-behavior(gen_statem).
+
+-export([close/1, sync/1, datasync/1, truncate/1, advise/4, allocate/3,
+ position/2, write/2, pwrite/2, pwrite/3,
+ read_line/1, read/2, pread/2, pread/3]).
+
+%% OTP internal.
+-export([ipread_s32bu_p32bu/3, sendfile/8]).
+
+-export([open_layer/3]).
+
+-export([init/1, callback_mode/0, terminate/3]).
+-export([opening/3, opened/3]).
+
+-include("file_int.hrl").
+
+open_layer(Filename, Modes, Options) ->
+ Secret = make_ref(),
+ case gen_statem:start(?MODULE, {self(), Secret, Options}, []) of
+ {ok, Pid} ->
+ gen_statem:call(Pid, {'$open', Secret, Filename, Modes}, infinity);
+ Other ->
+ Other
+ end.
+
+callback_mode() -> state_functions.
+
+init({Owner, Secret, Options}) ->
+ Monitor = monitor(process, Owner),
+ Defaults =
+ #{ owner => Owner,
+ monitor => Monitor,
+ secret => Secret,
+ timer => none,
+ pid => self(),
+ buffer => prim_buffer:new(),
+ delay_size => 64 bsl 10,
+ delay_time => 2000 },
+ Data = fill_delay_values(Defaults, Options),
+ {ok, opening, Data}.
+
+fill_delay_values(Data, []) ->
+ Data;
+fill_delay_values(Data, [{delayed_write, Size, Time} | Options]) ->
+ fill_delay_values(Data#{ delay_size => Size, delay_time => Time }, Options);
+fill_delay_values(Data, [_ | Options]) ->
+ fill_delay_values(Data, Options).
+
+opening({call, From}, {'$open', Secret, Filename, Modes}, #{ secret := Secret } = Data) ->
+ case raw_file_io:open(Filename, Modes) of
+ {ok, PrivateFd} ->
+ PublicData = maps:with([owner, buffer, delay_size, pid], Data),
+ PublicFd = #file_descriptor{ module = ?MODULE, data = PublicData },
+
+ NewData = Data#{ handle => PrivateFd },
+ Response = {ok, PublicFd},
+ {next_state, opened, NewData, [{reply, From, Response}]};
+ Other ->
+ {stop_and_reply, normal, [{reply, From, Other}]}
+ end;
+opening(_Event, _Contents, _Data) ->
+ {keep_state_and_data, [postpone]}.
+
+%%
+
+opened(info, {'$timed_out', Secret}, #{ secret := Secret } = Data) ->
+ %% If the user writes something at this exact moment, the flush will fail
+ %% and the timer won't reset on the next write since the buffer won't be
+ %% empty (Unless we collided on a flush). We therefore reset the timeout to
+ %% ensure that data won't sit idle for extended periods of time.
+ case try_flush_write_buffer(Data) of
+ busy -> gen_statem:cast(self(), '$reset_timeout');
+ ok -> ok
+ end,
+ {keep_state, Data#{ timer => none }, []};
+
+opened(info, {'DOWN', Monitor, process, _Owner, Reason}, #{ monitor := Monitor } = Data) ->
+ if
+ Reason =/= kill -> try_flush_write_buffer(Data);
+ Reason =:= kill -> ignored
+ end,
+ {stop, shutdown};
+
+opened(info, _Message, _Data) ->
+ keep_state_and_data;
+
+opened({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) ->
+ case flush_write_buffer(Data) of
+ ok ->
+ #{ handle := PrivateFd } = Data,
+ Response = ?CALL_FD(PrivateFd, close, []),
+ {stop_and_reply, normal, [{reply, From, Response}]};
+ Other ->
+ {stop_and_reply, normal, [{reply, From, Other}]}
+ end;
+
+opened({call, {Owner, _Tag} = From}, '$wait', #{ owner := Owner }) ->
+ %% Used in write/2 to synchronize writes on lock conflicts.
+ {keep_state_and_data, [{reply, From, ok}]};
+
+opened({call, {Owner, _Tag} = From}, '$synchronous_flush', #{ owner := Owner } = Data) ->
+ cancel_flush_timeout(Data),
+ Response = flush_write_buffer(Data),
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened({call, {Owner, _Tag} = From}, Command, #{ owner := Owner } = Data) ->
+ Response =
+ case flush_write_buffer(Data) of
+ ok -> dispatch_command(Data, Command);
+ Other -> Other
+ end,
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened({call, _From}, _Command, _Data) ->
+ %% The client functions filter this out, so we'll crash if the user does
+ %% anything stupid on purpose.
+ {shutdown, protocol_violation};
+
+opened(cast, '$reset_timeout', #{ delay_time := Timeout, secret := Secret } = Data) ->
+ cancel_flush_timeout(Data),
+ Timer = erlang:send_after(Timeout, self(), {'$timed_out', Secret}),
+ {keep_state, Data#{ timer => Timer }, []};
+
+opened(cast, _Message, _Data) ->
+ {keep_state_and_data, []}.
+
+dispatch_command(Data, [Function | Args]) ->
+ #{ handle := Handle } = Data,
+ Module = Handle#file_descriptor.module,
+ apply(Module, Function, [Handle | Args]).
+
+cancel_flush_timeout(#{ timer := none }) ->
+ ok;
+cancel_flush_timeout(#{ timer := Timer }) ->
+ _ = erlang:cancel_timer(Timer, [{async, true}]),
+ ok.
+
+try_flush_write_buffer(#{ buffer := Buffer, handle := PrivateFd }) ->
+ case prim_buffer:try_lock(Buffer) of
+ acquired ->
+ flush_write_buffer_1(Buffer, PrivateFd),
+ prim_buffer:unlock(Buffer),
+ ok;
+ busy ->
+ busy
+ end.
+
+%% This is only safe to use when there is no chance of conflict with the owner
+%% process, or in other words, "during synchronous calls outside of the locked
+%% section of write/2"
+flush_write_buffer(#{ buffer := Buffer, handle := PrivateFd }) ->
+ acquired = prim_buffer:try_lock(Buffer),
+ Result = flush_write_buffer_1(Buffer, PrivateFd),
+ prim_buffer:unlock(Buffer),
+ Result.
+
+flush_write_buffer_1(Buffer, PrivateFd) ->
+ case prim_buffer:size(Buffer) of
+ Size when Size > 0 ->
+ ?CALL_FD(PrivateFd, write, [prim_buffer:read_iovec(Buffer, Size)]);
+ 0 ->
+ ok
+ end.
+
+terminate(_Reason, _State, _Data) ->
+ ok.
+
+%% Client functions
+
+write(Fd, IOData) ->
+ try
+ enqueue_write(Fd, erlang:iolist_to_iovec(IOData))
+ catch
+ error:badarg -> {error, badarg}
+ end.
+enqueue_write(_Fd, []) ->
+ ok;
+enqueue_write(Fd, IOVec) ->
+ %% get_fd_data will reject everyone except the process that opened the Fd,
+ %% so we can't race with anyone except the wrapper process.
+ #{ delay_size := DelaySize,
+ buffer := Buffer,
+ pid := Pid } = get_fd_data(Fd),
+ case prim_buffer:try_lock(Buffer) of
+ acquired ->
+ %% (The wrapper process will exit without flushing if we're killed
+ %% while holding the lock).
+ enqueue_write_locked(Pid, Buffer, DelaySize, IOVec);
+ busy ->
+ %% This can only happen while we're processing a timeout in the
+ %% wrapper process, so we perform a bogus call to get a completion
+ %% notification before trying again.
+ gen_statem:call(Pid, '$wait'),
+ enqueue_write(Fd, IOVec)
+ end.
+enqueue_write_locked(Pid, Buffer, DelaySize, IOVec) ->
+ %% The synchronous operations (write, forced flush) are safe since we're
+ %% running on the only process that can fill the buffer; a timeout being
+ %% processed just before $synchronous_flush will cause the flush to nop,
+ %% and a timeout sneaking in just before a synchronous write won't do
+ %% anything since the buffer is guaranteed to be empty at that point.
+ BufSize = prim_buffer:size(Buffer),
+ case is_iovec_smaller_than(IOVec, DelaySize - BufSize) of
+ true when BufSize > 0 ->
+ prim_buffer:write(Buffer, IOVec),
+ prim_buffer:unlock(Buffer);
+ true ->
+ prim_buffer:write(Buffer, IOVec),
+ prim_buffer:unlock(Buffer),
+ gen_statem:cast(Pid, '$reset_timeout');
+ false when BufSize > 0 ->
+ prim_buffer:write(Buffer, IOVec),
+ prim_buffer:unlock(Buffer),
+ gen_statem:call(Pid, '$synchronous_flush');
+ false ->
+ prim_buffer:unlock(Buffer),
+ gen_statem:call(Pid, [write, IOVec])
+ end.
+
+%% iolist_size/1 will always look through the entire list to get a precise
+%% amount, which is pretty inefficient since we only need to know whether we've
+%% hit the buffer threshold or not.
+%%
+%% We only handle the binary case since write/2 forcibly translates input to
+%% erlang:iovec().
+is_iovec_smaller_than(IOVec, Max) ->
+ is_iovec_smaller_than_1(IOVec, Max, 0).
+is_iovec_smaller_than_1(_IOVec, Max, Acc) when Acc >= Max ->
+ false;
+is_iovec_smaller_than_1([], _Max, _Acc) ->
+ true;
+is_iovec_smaller_than_1([Binary | Rest], Max, Acc) when is_binary(Binary) ->
+ is_iovec_smaller_than_1(Rest, Max, Acc + byte_size(Binary)).
+
+close(Fd) ->
+ wrap_call(Fd, [close]).
+
+sync(Fd) ->
+ wrap_call(Fd, [sync]).
+datasync(Fd) ->
+ wrap_call(Fd, [datasync]).
+
+truncate(Fd) ->
+ wrap_call(Fd, [truncate]).
+
+advise(Fd, Offset, Length, Advise) ->
+ wrap_call(Fd, [advise, Offset, Length, Advise]).
+allocate(Fd, Offset, Length) ->
+ wrap_call(Fd, [allocate, Offset, Length]).
+
+position(Fd, Mark) ->
+ wrap_call(Fd, [position, Mark]).
+
+pwrite(Fd, Offset, IOData) ->
+ try
+ CompactedData = erlang:iolist_to_iovec(IOData),
+ wrap_call(Fd, [pwrite, Offset, CompactedData])
+ catch
+ error:badarg -> {error, badarg}
+ end.
+pwrite(Fd, LocBytes) ->
+ try
+ CompactedLocBytes =
+ [ {Offset, erlang:iolist_to_iovec(IOData)} ||
+ {Offset, IOData} <- LocBytes ],
+ wrap_call(Fd, [pwrite, CompactedLocBytes])
+ catch
+ error:badarg -> {error, badarg}
+ end.
+
+read_line(Fd) ->
+ wrap_call(Fd, [read_line]).
+read(Fd, Size) ->
+ wrap_call(Fd, [read, Size]).
+pread(Fd, Offset, Size) ->
+ wrap_call(Fd, [pread, Offset, Size]).
+pread(Fd, LocNums) ->
+ wrap_call(Fd, [pread, LocNums]).
+
+ipread_s32bu_p32bu(Fd, Offset, MaxSize) ->
+ wrap_call(Fd, [ipread_s32bu_p32bu, Offset, MaxSize]).
+
+sendfile(_,_,_,_,_,_,_,_) ->
+ {error, enotsup}.
+
+wrap_call(Fd, Command) ->
+ #{ pid := Pid } = get_fd_data(Fd),
+ try gen_statem:call(Pid, Command, infinity) of
+ Result -> Result
+ catch
+ exit:{noproc, _StackTrace} -> {error, einval}
+ end.
+
+get_fd_data(#file_descriptor{ data = Data }) ->
+ #{ owner := Owner } = Data,
+ case self() of
+ Owner -> Data;
+ _ -> error(not_on_controlling_process)
+ end.
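A usage sketch of the delayed layer through file:open/2; the buffer size and the path are illustrative. By default writes are buffered up to 64 kB or 2 seconds (delay_size/delay_time above); a {delayed_write, Size, Delay} tuple overrides both.

    log_lines(Path, Lines) ->
        {ok, Fd} = file:open(Path, [raw, binary, append,
                                    {delayed_write, 32768, 1000}]),
        lists:foreach(fun(Line) -> ok = file:write(Fd, [Line, $\n]) end, Lines),
        ok = file:close(Fd).   %% close flushes the buffer via flush_write_buffer/1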
diff --git a/lib/kernel/src/raw_file_io_inflate.erl b/lib/kernel/src/raw_file_io_inflate.erl
new file mode 100644
index 0000000000..7e9780310c
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_inflate.erl
@@ -0,0 +1,261 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_inflate).
+
+-behavior(gen_statem).
+
+-export([init/1, callback_mode/0, terminate/3]).
+-export([opening/3, opened_gzip/3, opened_passthrough/3]).
+
+-include("file_int.hrl").
+
+-define(INFLATE_CHUNK_SIZE, (1 bsl 10)).
+-define(GZIP_WBITS, (16 + 15)).
+
+callback_mode() -> state_functions.
+
+init({Owner, Secret, [compressed]}) ->
+ Monitor = monitor(process, Owner),
+ %% We're using the undocumented inflateInit/3 to open the stream in
+ %% 'reset mode', which resets the inflate state at the end of every stream,
+ %% allowing us to read concatenated gzip files.
+ Z = zlib:open(),
+ ok = zlib:inflateInit(Z, ?GZIP_WBITS, reset),
+ Data =
+ #{ owner => Owner,
+ monitor => Monitor,
+ secret => Secret,
+ position => 0,
+ buffer => prim_buffer:new(),
+ zlib => Z },
+ {ok, opening, Data}.
+
+%% The old driver fell back to plain reads if the file didn't start with the
+%% magic gzip bytes.
+choose_decompression_state(PrivateFd) ->
+ State =
+ case ?CALL_FD(PrivateFd, read, [2]) of
+ {ok, <<16#1F, 16#8B>>} -> opened_gzip;
+ _Other -> opened_passthrough
+ end,
+ {ok, 0} = ?CALL_FD(PrivateFd, position, [0]),
+ State.
+
+opening({call, From}, {'$open', Secret, Filename, Modes}, #{ secret := Secret } = Data) ->
+ case raw_file_io:open(Filename, Modes) of
+ {ok, PrivateFd} ->
+ NextState = choose_decompression_state(PrivateFd),
+ NewData = Data#{ handle => PrivateFd },
+ {next_state, NextState, NewData, [{reply, From, ok}]};
+ Other ->
+ {stop_and_reply, normal, [{reply, From, Other}]}
+ end;
+opening(_Event, _Contents, _Data) ->
+ {keep_state_and_data, [postpone]}.
+
+internal_close(From, Data) ->
+ #{ handle := PrivateFd } = Data,
+ Response = ?CALL_FD(PrivateFd, close, []),
+ {stop_and_reply, normal, [{reply, From, Response}]}.
+
+opened_passthrough(info, {'DOWN', Monitor, process, _Owner, _Reason}, #{ monitor := Monitor }) ->
+ {stop, shutdown};
+
+opened_passthrough(info, _Message, _Data) ->
+ keep_state_and_data;
+
+opened_passthrough({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) ->
+ internal_close(From, Data);
+
+opened_passthrough({call, {Owner, _Tag} = From}, [Method | Args], #{ owner := Owner } = Data) ->
+ #{ handle := PrivateFd } = Data,
+ Response = ?CALL_FD(PrivateFd, Method, Args),
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened_passthrough({call, _From}, _Command, _Data) ->
+ %% The client functions filter this out, so we'll crash if the user does
+ %% anything stupid on purpose.
+ {shutdown, protocol_violation};
+
+opened_passthrough(_Event, _Request, _Data) ->
+ keep_state_and_data.
+
+%%
+
+opened_gzip(info, {'DOWN', Monitor, process, _Owner, _Reason}, #{ monitor := Monitor }) ->
+ {stop, shutdown};
+
+opened_gzip(info, _Message, _Data) ->
+ keep_state_and_data;
+
+opened_gzip({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) ->
+ internal_close(From, Data);
+
+opened_gzip({call, {Owner, _Tag} = From}, [position, Mark], #{ owner := Owner } = Data) ->
+ case position(Data, Mark) of
+ {ok, NewData, Result} ->
+ Response = {ok, Result},
+ {keep_state, NewData, [{reply, From, Response}]};
+ Other ->
+ {keep_state_and_data, [{reply, From, Other}]}
+ end;
+
+opened_gzip({call, {Owner, _Tag} = From}, [read, Size], #{ owner := Owner } = Data) ->
+ case read(Data, Size) of
+ {ok, NewData, Result} ->
+ Response = {ok, Result},
+ {keep_state, NewData, [{reply, From, Response}]};
+ Other ->
+ {keep_state_and_data, [{reply, From, Other}]}
+ end;
+
+opened_gzip({call, {Owner, _Tag} = From}, [read_line], #{ owner := Owner } = Data) ->
+ case read_line(Data) of
+ {ok, NewData, Result} ->
+ Response = {ok, Result},
+ {keep_state, NewData, [{reply, From, Response}]};
+ Other ->
+ {keep_state_and_data, [{reply, From, Other}]}
+ end;
+
+opened_gzip({call, {Owner, _Tag} = From}, [write, _IOData], #{ owner := Owner }) ->
+ Response = {error, ebadf},
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened_gzip({call, {Owner, _Tag} = From}, _Request, #{ owner := Owner }) ->
+ Response = {error, enotsup},
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened_gzip({call, _From}, _Request, _Data) ->
+ %% The client functions filter this out, so we'll crash if the user does
+ %% anything stupid on purpose.
+ {shutdown, protocol_violation};
+
+opened_gzip(_Event, _Request, _Data) ->
+ keep_state_and_data.
+
+%%
+
+read(#{ buffer := Buffer } = Data, Size) ->
+ try read_1(Data, Buffer, prim_buffer:size(Buffer), Size) of
+ Result -> Result
+ catch
+ error:badarg -> {error, badarg};
+ error:_ -> {error, eio}
+ end.
+read_1(Data, Buffer, BufferSize, ReadSize) when BufferSize >= ReadSize ->
+ #{ position := Position } = Data,
+ Decompressed = prim_buffer:read(Buffer, ReadSize),
+ {ok, Data#{ position => (Position + ReadSize) }, Decompressed};
+read_1(Data, Buffer, BufferSize, ReadSize) when BufferSize < ReadSize ->
+ #{ handle := PrivateFd } = Data,
+ case ?CALL_FD(PrivateFd, read, [?INFLATE_CHUNK_SIZE]) of
+ {ok, Compressed} ->
+ #{ zlib := Z } = Data,
+ Uncompressed = erlang:iolist_to_iovec(zlib:inflate(Z, Compressed)),
+ prim_buffer:write(Buffer, Uncompressed),
+ read_1(Data, Buffer, prim_buffer:size(Buffer), ReadSize);
+ eof when BufferSize > 0 ->
+ read_1(Data, Buffer, BufferSize, BufferSize);
+ Other ->
+ Other
+ end.
+
+read_line(#{ buffer := Buffer } = Data) ->
+ try read_line_1(Data, Buffer, prim_buffer:find_byte_index(Buffer, $\n)) of
+ {ok, NewData, Decompressed} -> {ok, NewData, Decompressed};
+ Other -> Other
+ catch
+ error:badarg -> {error, badarg};
+ error:_ -> {error, eio}
+ end.
+
+read_line_1(Data, Buffer, not_found) ->
+ #{ handle := PrivateFd, zlib := Z } = Data,
+ case ?CALL_FD(PrivateFd, read, [?INFLATE_CHUNK_SIZE]) of
+ {ok, Compressed} ->
+ Uncompressed = erlang:iolist_to_iovec(zlib:inflate(Z, Compressed)),
+ prim_buffer:write(Buffer, Uncompressed),
+ read_line_1(Data, Buffer, prim_buffer:find_byte_index(Buffer, $\n));
+ eof ->
+ case prim_buffer:size(Buffer) of
+ Size when Size > 0 -> {ok, prim_buffer:read(Buffer, Size)};
+ Size when Size =:= 0 -> eof
+ end;
+ Error ->
+ Error
+ end;
+read_line_1(Data, Buffer, {ok, LFIndex}) ->
+ %% Translate CRLF into just LF, completely ignoring which encoding is used,
+ %% but treat the file position as including CR.
+ #{ position := Position } = Data,
+ NewData = Data#{ position => (Position + LFIndex + 1) },
+ CRIndex = (LFIndex - 1),
+ TranslatedLine =
+ case prim_buffer:read(Buffer, LFIndex + 1) of
+ <<Line:CRIndex/binary, "\r\n">> -> <<Line/binary, "\n">>;
+ Line -> Line
+ end,
+ {ok, NewData, TranslatedLine}.
+
+%%
+%% We support seeking in both directions as long as it isn't relative to EOF.
+%%
+%% Seeking backwards is extremely inefficient since we have to seek to the very
+%% beginning and then decompress up to the desired point.
+%%
+
+position(Data, Mark) when is_atom(Mark) ->
+ position(Data, {Mark, 0});
+position(Data, Offset) when is_integer(Offset) ->
+ position(Data, {bof, Offset});
+position(Data, {bof, Offset}) when is_integer(Offset) ->
+ position_1(Data, Offset);
+position(Data, {cur, Offset}) when is_integer(Offset) ->
+ #{ position := Position } = Data,
+ position_1(Data, Position + Offset);
+position(_Data, {eof, Offset}) when is_integer(Offset) ->
+ {error, einval};
+position(_Data, _Other) ->
+ {error, badarg}.
+
+position_1(_Data, Desired) when Desired < 0 ->
+ {error, einval};
+position_1(#{ position := Desired } = Data, Desired) ->
+ {ok, Data, Desired};
+position_1(#{ position := Current } = Data, Desired) when Current < Desired ->
+ case read(Data, min(Desired - Current, ?INFLATE_CHUNK_SIZE)) of
+ {ok, NewData, _Data} -> position_1(NewData, Desired);
+ eof -> {ok, Data, Current};
+ Other -> Other
+ end;
+position_1(#{ position := Current } = Data, Desired) when Current > Desired ->
+ #{ handle := PrivateFd, buffer := Buffer, zlib := Z } = Data,
+ case ?CALL_FD(PrivateFd, position, [bof]) of
+ {ok, 0} ->
+ ok = zlib:inflateReset(Z),
+ prim_buffer:wipe(Buffer),
+ position_1(Data#{ position => 0 }, Desired);
+ Other ->
+ Other
+ end.
+
+terminate(_Reason, _State, _Data) ->
+ ok.
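The seeking behaviour described above is exercised through the ordinary file API. A minimal sketch, assuming a gzip-compressed file named "log.gz" (the file name is hypothetical):

    {ok, Fd} = file:open("log.gz", [read, binary, compressed]),
    {ok, Line} = file:read_line(Fd),                %% CRLF is translated to a single LF
    {ok, _Pos} = file:position(Fd, {cur, 100}),     %% forward: decompress 100 more bytes
    {ok, 0} = file:position(Fd, bof),               %% backward: rewind and reset the zlib stream
    {error, einval} = file:position(Fd, {eof, 0}),  %% eof-relative marks are rejected
    ok = file:close(Fd).

Seeking backwards to a non-zero offset first rewinds the underlying file and then decompresses up to the requested position, so its cost grows with the target offset.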
diff --git a/lib/kernel/src/raw_file_io_list.erl b/lib/kernel/src/raw_file_io_list.erl
new file mode 100644
index 0000000000..2e16e63f0e
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_list.erl
@@ -0,0 +1,128 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_list).
+
+-export([close/1, sync/1, datasync/1, truncate/1, advise/4, allocate/3,
+ position/2, write/2, pwrite/2, pwrite/3,
+ read_line/1, read/2, pread/2, pread/3]).
+
+%% OTP internal.
+-export([ipread_s32bu_p32bu/3, sendfile/8]).
+
+-export([open_layer/3]).
+
+-include("file_int.hrl").
+
+open_layer(Filename, Modes, [list]) ->
+ case raw_file_io:open(Filename, [binary | Modes]) of
+ {ok, PrivateFd} -> {ok, make_public_fd(PrivateFd, Modes)};
+ Other -> Other
+ end.
+
+%% We can skip wrapping the file if it's write-only since only read operations
+%% are affected by list mode. Since raw_file_io fills in all implicit options
+%% for us, all we need to do is check whether 'read' is among them.
+make_public_fd(PrivateFd, Modes) ->
+ case lists:member(read, Modes) of
+ true -> #file_descriptor{ module = ?MODULE, data = PrivateFd };
+ false -> PrivateFd
+ end.
+
+close(Fd) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, close, []).
+
+sync(Fd) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, sync, []).
+datasync(Fd) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, datasync, []).
+
+truncate(Fd) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, truncate, []).
+
+advise(Fd, Offset, Length, Advise) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, advise, [Offset, Length, Advise]).
+allocate(Fd, Offset, Length) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, allocate, [Offset, Length]).
+
+position(Fd, Mark) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, position, [Mark]).
+
+write(Fd, IOData) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, write, [IOData]).
+
+pwrite(Fd, Offset, IOData) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, pwrite, [Offset, IOData]).
+pwrite(Fd, LocBytes) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, pwrite, [LocBytes]).
+
+read_line(Fd) ->
+ PrivateFd = Fd#file_descriptor.data,
+ case ?CALL_FD(PrivateFd, read_line, []) of
+ {ok, Binary} -> {ok, binary_to_list(Binary)};
+ Other -> Other
+ end.
+read(Fd, Size) ->
+ PrivateFd = Fd#file_descriptor.data,
+ case ?CALL_FD(PrivateFd, read, [Size]) of
+ {ok, Binary} -> {ok, binary_to_list(Binary)};
+ Other -> Other
+ end.
+pread(Fd, Offset, Size) ->
+ PrivateFd = Fd#file_descriptor.data,
+ case ?CALL_FD(PrivateFd, pread, [Offset, Size]) of
+ {ok, Binary} -> {ok, binary_to_list(Binary)};
+ Other -> Other
+ end.
+pread(Fd, LocNums) ->
+ PrivateFd = Fd#file_descriptor.data,
+ case ?CALL_FD(PrivateFd, pread, [LocNums]) of
+ {ok, LocResults} ->
+ TranslatedResults =
+ [ case Result of
+ Result when is_binary(Result) -> binary_to_list(Result);
+ eof -> eof
+ end || Result <- LocResults ],
+ {ok, TranslatedResults};
+ Other -> Other
+ end.
+
+ipread_s32bu_p32bu(Fd, Offset, MaxSize) ->
+ PrivateFd = Fd#file_descriptor.data,
+ case ?CALL_FD(PrivateFd, ipread_s32bu_p32bu, [Offset, MaxSize]) of
+ {ok, {Size, Pointer, Binary}} when is_binary(Binary) ->
+ {ok, {Size, Pointer, binary_to_list(Binary)}};
+ Other ->
+ Other
+ end.
+
+sendfile(Fd, Dest, Offset, Bytes, ChunkSize, Headers, Trailers, Flags) ->
+ Args = [Dest, Offset, Bytes, ChunkSize, Headers, Trailers, Flags],
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, sendfile, Args).
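A small sketch of what this wrapper means for a caller (file names hypothetical): omitting the binary option puts the handle in list mode, so read results come back as strings, while a write-only handle is returned unwrapped as noted above:

    {ok, RFd} = file:open("data.txt", [raw, read]),    %% no 'binary' => list mode
    {ok, Chars} = file:read(RFd, 16),                  %% Chars is a string, not a binary
    ok = file:close(RFd),

    {ok, WFd} = file:open("out.txt", [raw, write]),    %% write-only: the wrapper is skipped
    ok = file:write(WFd, ["plain ", <<"iodata">>]),
    ok = file:close(WFd).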
diff --git a/lib/kernel/src/raw_file_io_raw.erl b/lib/kernel/src/raw_file_io_raw.erl
new file mode 100644
index 0000000000..9a9fe78eb1
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_raw.erl
@@ -0,0 +1,25 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_raw).
+
+-export([open_layer/3]).
+
+open_layer(Filename, Modes, [raw]) ->
+ prim_file:open(Filename, [raw | Modes]).
diff --git a/lib/kernel/src/rpc.erl b/lib/kernel/src/rpc.erl
index d3db8eb80a..d197de942f 100644
--- a/lib/kernel/src/rpc.erl
+++ b/lib/kernel/src/rpc.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -52,10 +52,6 @@
parallel_eval/1,
pmap/3, pinfo/1, pinfo/2]).
-%% Deprecated calls.
--deprecated([{safe_multi_server_call,2},{safe_multi_server_call,3}]).
--export([safe_multi_server_call/2,safe_multi_server_call/3]).
-
%% gen_server exports
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
@@ -67,21 +63,31 @@
%%------------------------------------------------------------------------
--type state() :: gb_trees:tree(pid(), {pid(), reference()}).
+-type state() :: map().
%%------------------------------------------------------------------------
+
+%% The rex server may receive a huge number of
+%% messages. Make sure that they are stored off heap to
+%% avoid excessive GCs.
+
+-define(SPAWN_OPTS, [{spawn_opt,[{message_queue_data,off_heap}]}]).
+
%% Remote execution and broadcasting facility
-spec start() -> {'ok', pid()} | 'ignore' | {'error', term()}.
start() ->
- gen_server:start({local,?NAME}, ?MODULE, [], []).
+ gen_server:start({local,?NAME}, ?MODULE, [], ?SPAWN_OPTS).
-spec start_link() -> {'ok', pid()} | 'ignore' | {'error', term()}.
start_link() ->
- gen_server:start_link({local,?NAME}, ?MODULE, [], []).
+ %% The rex server process may receive a huge number of
+ %% messages. Make sure that they are stored off heap to
+ %% avoid excessive GCs.
+ gen_server:start_link({local,?NAME}, ?MODULE, [], ?SPAWN_OPTS).
-spec stop() -> term().
@@ -95,7 +101,7 @@ stop(Rpc) ->
init([]) ->
process_flag(trap_exit, true),
- {ok, gb_trees:empty()}.
+ {ok, maps:new()}.
-spec handle_call(term(), term(), state()) ->
{'noreply', state()} |
@@ -134,29 +140,15 @@ handle_cast(_, S) ->
-spec handle_info(term(), state()) -> {'noreply', state()}.
+handle_info({'DOWN', _, process, Caller, normal}, S) ->
+ {noreply, maps:remove(Caller, S)};
handle_info({'DOWN', _, process, Caller, Reason}, S) ->
- case gb_trees:lookup(Caller, S) of
- {value, To} ->
- receive
- {Caller, {reply, Reply}} ->
- gen_server:reply(To, Reply)
- after 0 ->
- gen_server:reply(To, {badrpc, {'EXIT', Reason}})
- end,
- {noreply, gb_trees:delete(Caller, S)};
- none ->
- {noreply, S}
- end;
-handle_info({Caller, {reply, Reply}}, S) ->
- case gb_trees:lookup(Caller, S) of
- {value, To} ->
- receive
- {'DOWN', _, process, Caller, _} ->
- gen_server:reply(To, Reply),
- {noreply, gb_trees:delete(Caller, S)}
- end;
- none ->
- {noreply, S}
+ case maps:get(Caller, S, undefined) of
+ undefined ->
+ {noreply, S};
+ {_, _} = To ->
+ gen_server:reply(To, {badrpc, {'EXIT', Reason}}),
+ {noreply, maps:remove(Caller, S)}
end;
handle_info({From, {sbcast, Name, Msg}}, S) ->
_ = case catch Name ! Msg of %% use catch to get the printout
@@ -194,7 +186,6 @@ code_change(_, S, _) ->
%% Auxiliary function to avoid a false dialyzer warning -- do not inline
%%
handle_call_call(Mod, Fun, Args, Gleader, To, S) ->
- RpcServer = self(),
%% Spawn not to block the rpc server.
{Caller,_} =
erlang:spawn_monitor(
@@ -209,9 +200,9 @@ handle_call_call(Mod, Fun, Args, Gleader, To, S) ->
Result ->
Result
end,
- RpcServer ! {self(), {reply, Reply}}
+ gen_server:reply(To, Reply)
end),
- {noreply, gb_trees:insert(Caller, To, S)}.
+ {noreply, maps:put(Caller, To, S)}.
%% RPC aid functions ....
@@ -357,8 +348,12 @@ do_call(Node, Request, Timeout) ->
rpc_check_t({'EXIT', {timeout,_}}) -> {badrpc, timeout};
rpc_check_t(X) -> rpc_check(X).
-rpc_check({'EXIT', {{nodedown,_},_}}) -> {badrpc, nodedown};
-rpc_check({'EXIT', X}) -> exit(X);
+rpc_check({'EXIT', {{nodedown,_},_}}) ->
+ {badrpc, nodedown};
+rpc_check({'EXIT', _}=Exit) ->
+ %% Should only happen if the rex process on the other node
+ %% died.
+ {badrpc, Exit};
rpc_check(X) -> X.
@@ -423,10 +418,7 @@ abcast(Name, Mess) ->
abcast([Node|Tail], Name, Mess) ->
Dest = {Name,Node},
- case catch erlang:send(Dest, Mess, [noconnect]) of
- noconnect -> spawn(erlang, send, [Dest,Mess]), ok;
- _ -> ok
- end,
+ try erlang:send(Dest, Mess) catch error:_ -> ok end,
abcast(Tail, Name, Mess);
abcast([], _,_) -> abcast.
@@ -503,7 +495,7 @@ start_monitor(Node, Name) ->
Module :: module(),
Function :: atom(),
Args :: [term()],
- ResL :: [term()],
+ ResL :: [Res :: term() | {'badrpc', Reason :: term()}],
BadNodes :: [node()].
multicall(M, F, A) ->
@@ -514,14 +506,14 @@ multicall(M, F, A) ->
Module :: module(),
Function :: atom(),
Args :: [term()],
- ResL :: [term()],
+ ResL :: [Res :: term() | {'badrpc', Reason :: term()}],
BadNodes :: [node()];
(Module, Function, Args, Timeout) -> {ResL, BadNodes} when
Module :: module(),
Function :: atom(),
Args :: [term()],
Timeout :: timeout(),
- ResL :: [term()],
+ ResL :: [Res :: term() | {'badrpc', Reason :: term()}],
BadNodes :: [node()].
multicall(Nodes, M, F, A) when is_list(Nodes) ->
@@ -536,7 +528,7 @@ multicall(M, F, A, Timeout) ->
Function :: atom(),
Args :: [term()],
Timeout :: timeout(),
- ResL :: [term()],
+ ResL :: [Res :: term() | {'badrpc', Reason :: term()}],
BadNodes :: [node()].
multicall(Nodes, M, F, A, infinity)
@@ -587,27 +579,6 @@ multi_server_call(Nodes, Name, Msg)
Monitors = send_nodes(Nodes, Name, Msg, []),
rec_nodes(Name, Monitors).
-%% Deprecated functions. Were only needed when communicating with R6 nodes.
-
--spec safe_multi_server_call(Name, Msg) -> {Replies, BadNodes} when
- Name :: atom(),
- Msg :: term(),
- Replies :: [Reply :: term()],
- BadNodes :: [node()].
-
-safe_multi_server_call(Name, Msg) ->
- multi_server_call(Name, Msg).
-
--spec safe_multi_server_call(Nodes, Name, Msg) -> {Replies, BadNodes} when
- Nodes :: [node()],
- Name :: atom(),
- Msg :: term(),
- Replies :: [Reply :: term()],
- BadNodes :: [node()].
-
-safe_multi_server_call(Nodes, Name, Msg) ->
- multi_server_call(Nodes, Name, Msg).
-
rec_nodes(Name, Nodes) ->
rec_nodes(Name, Nodes, [], []).
@@ -748,6 +719,11 @@ pinfo(Pid) ->
-spec pinfo(Pid, Item) -> {Item, Info} | undefined | [] when
Pid :: pid(),
Item :: atom(),
+ Info :: term();
+ (Pid, ItemList) -> [{Item, Info}] | undefined | [] when
+ Pid :: pid(),
+ Item :: atom(),
+ ItemList :: [Item],
Info :: term().
pinfo(Pid, Item) when node(Pid) =:= node() ->
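With the updated specs above, each element of ResL may itself be a {badrpc, Reason} tuple, so callers should filter or match on it. A small sketch (function names are hypothetical):

    good_results(Nodes, M, F, A) ->
        {ResL, BadNodes} = rpc:multicall(Nodes, M, F, A),
        Good = [R || R <- ResL, not is_badrpc(R)],
        {Good, BadNodes}.

    is_badrpc({badrpc, _}) -> true;
    is_badrpc(_) -> false.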
diff --git a/lib/kernel/src/seq_trace.erl b/lib/kernel/src/seq_trace.erl
index a7a782c29c..14fe21e9de 100644
--- a/lib/kernel/src/seq_trace.erl
+++ b/lib/kernel/src/seq_trace.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -41,7 +41,7 @@
-type flag() :: 'send' | 'receive' | 'print' | 'timestamp' | 'monotonic_timestamp' | 'strict_monotonic_timestamp'.
-type component() :: 'label' | 'serial' | flag().
--type value() :: (Integer :: non_neg_integer())
+-type value() :: (Label :: term())
| {Previous :: non_neg_integer(),
Current :: non_neg_integer()}
| (Bool :: boolean()).
@@ -59,10 +59,6 @@ set_token({Flags,Label,Serial,_From,Lastcnt}) ->
F = decode_flags(Flags),
set_token2([{label,Label},{serial,{Lastcnt, Serial}} | F]).
-%% We limit the label type to always be a small integer because erl_interface
-%% expects that, the BIF can however "unofficially" handle atoms as well, and
-%% atoms can be used if only Erlang nodes are involved
-
-spec set_token(Component, Val) -> {Component, OldVal} when
Component :: component(),
Val :: value(),
@@ -106,14 +102,24 @@ reset_trace() ->
%% reset_trace(Pid) -> % this might be a useful function too
--type tracer() :: (Pid :: pid()) | port() | 'false'.
+-type tracer() :: (Pid :: pid()) | port() |
+ (TracerModule :: {module(), term()}) |
+ 'false'.
-spec set_system_tracer(Tracer) -> OldTracer when
Tracer :: tracer(),
OldTracer :: tracer().
-set_system_tracer(Pid) ->
- erlang:system_flag(sequential_tracer, Pid).
+set_system_tracer({Module, State} = Tracer) ->
+ case erlang:module_loaded(Module) of
+ false ->
+ Module:enabled(trace_status, erlang:self(), State);
+ true ->
+ ok
+ end,
+ erlang:system_flag(sequential_tracer, Tracer);
+set_system_tracer(Tracer) ->
+ erlang:system_flag(sequential_tracer, Tracer).
-spec get_system_tracer() -> Tracer when
Tracer :: tracer().
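The widened tracer() type above means a tracer module can now be installed as the system sequential tracer. A minimal sketch, where my_seq_tracer is a hypothetical module implementing the erl_tracer behaviour:

    Old = seq_trace:set_system_tracer({my_seq_tracer, #{}}),
    %% ... run code under sequential tracing ...
    seq_trace:set_system_tracer(Old).

If the module is not yet loaded, set_system_tracer/1 calls it once via Module:enabled(trace_status, self(), State) so that it is loaded before being handed to the emulator.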
diff --git a/lib/kernel/src/standard_error.erl b/lib/kernel/src/standard_error.erl
index 74dd004fa6..5d649e5f94 100644
--- a/lib/kernel/src/standard_error.erl
+++ b/lib/kernel/src/standard_error.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2009-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2009-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/user.erl b/lib/kernel/src/user.erl
index 77781e0251..872e63ab53 100644
--- a/lib/kernel/src/user.erl
+++ b/lib/kernel/src/user.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -398,7 +398,7 @@ get_line(Prompt, Port, Q, Acc, Enc) ->
get_line_bytes(Prompt, Port, Q, Acc, Bytes, Enc);
{Port, eof} ->
put(eof, true),
- {ok, eof, []};
+ {ok, eof, queue:new()};
{io_request,From,ReplyAs,{get_geometry,_}=Req} when is_pid(From) ->
do_io_request(Req, From, ReplyAs, Port,
queue:new()),
@@ -615,7 +615,7 @@ get_chars(Prompt, M, F, Xa, Port, Q, State, Enc) ->
get_chars_bytes(State, M, F, Xa, Port, Q, Bytes, Enc);
{Port, eof} ->
put(eof, true),
- {ok, eof, []};
+ {ok, eof, queue:new()};
%%{io_request,From,ReplyAs,Request} when is_pid(From) ->
%% get_chars_req(Prompt, M, F, Xa, Port, queue:new(), State,
%% Request, From, ReplyAs);
diff --git a/lib/kernel/src/user_drv.erl b/lib/kernel/src/user_drv.erl
index b794d4f45e..9f914aa222 100644
--- a/lib/kernel/src/user_drv.erl
+++ b/lib/kernel/src/user_drv.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -175,6 +175,18 @@ server_loop(Iport, Oport, Curr, User, Gr, {Resp, IOQ} = IOQueue) ->
{Iport,eof} ->
Curr ! {self(),eof},
server_loop(Iport, Oport, Curr, User, Gr, IOQueue);
+
+ %% We always handle geometry and unicode requests
+ {Requester,tty_geometry} ->
+ Requester ! {self(),tty_geometry,get_tty_geometry(Iport)},
+ server_loop(Iport, Oport, Curr, User, Gr, IOQueue);
+ {Requester,get_unicode_state} ->
+ Requester ! {self(),get_unicode_state,get_unicode_state(Iport)},
+ server_loop(Iport, Oport, Curr, User, Gr, IOQueue);
+ {Requester,set_unicode_state, Bool} ->
+ Requester ! {self(),set_unicode_state,set_unicode_state(Iport,Bool)},
+ server_loop(Iport, Oport, Curr, User, Gr, IOQueue);
+
Req when element(1,Req) =:= User orelse element(1,Req) =:= Curr,
tuple_size(Req) =:= 2 orelse tuple_size(Req) =:= 3 ->
%% We match {User|Curr,_}|{User|Curr,_,_}
@@ -224,21 +236,16 @@ server_loop(Iport, Oport, Curr, User, Gr, {Resp, IOQ} = IOQueue) ->
_ -> % not current, just remove it
server_loop(Iport, Oport, Curr, User, gr_del_pid(Gr, Pid), IOQueue)
end;
+ {Requester, {put_chars_sync, _, _, Reply}} ->
+ %% We need to ack the Req, otherwise the originating process will hang forever.
+ %% We do discard the output to non-visible shells (as was done previously).
+ Requester ! {reply, Reply},
+ server_loop(Iport, Oport, Curr, User, Gr, IOQueue);
_X ->
- %% Ignore unknown messages.
- server_loop(Iport, Oport, Curr, User, Gr, IOQueue)
+ %% Ignore unknown messages.
+ server_loop(Iport, Oport, Curr, User, Gr, IOQueue)
end.
-%% We always handle geometry and unicode requests
-handle_req({Curr,tty_geometry},Iport,_Oport,IOQueue) ->
- Curr ! {self(),tty_geometry,get_tty_geometry(Iport)},
- IOQueue;
-handle_req({Curr,get_unicode_state},Iport,_Oport,IOQueue) ->
- Curr ! {self(),get_unicode_state,get_unicode_state(Iport)},
- IOQueue;
-handle_req({Curr,set_unicode_state, Bool},Iport,_Oport,IOQueue) ->
- Curr ! {self(),set_unicode_state,set_unicode_state(Iport,Bool)},
- IOQueue;
handle_req(next,Iport,Oport,{false,IOQ}=IOQueue) ->
case queue:out(IOQ) of
{empty,_} ->
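As a rough sketch of the request/reply shapes now handled directly in server_loop above (UserDrv stands for the user_drv process; obtaining its pid is not shown):

    UserDrv ! {self(), tty_geometry},
    receive
        {UserDrv, tty_geometry, Geometry} -> Geometry
    after 1000 ->
        {error, timeout}
    end.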
diff --git a/lib/kernel/src/user_sup.erl b/lib/kernel/src/user_sup.erl
index 72c3fad3a9..c1fb1b1a48 100644
--- a/lib/kernel/src/user_sup.erl
+++ b/lib/kernel/src/user_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/src/wrap_log_reader.erl b/lib/kernel/src/wrap_log_reader.erl
index 6622405d85..3a984e56c7 100644
--- a/lib/kernel/src/wrap_log_reader.erl
+++ b/lib/kernel/src/wrap_log_reader.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile
index 9e972b4f95..4a86265a4a 100644
--- a/lib/kernel/test/Makefile
+++ b/lib/kernel/test/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2013. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -70,6 +70,15 @@ MODULES= \
interactive_shell_SUITE \
init_SUITE \
kernel_config_SUITE \
+ logger_SUITE \
+ logger_disk_log_h_SUITE \
+ logger_env_var_SUITE \
+ logger_filters_SUITE \
+ logger_formatter_SUITE \
+ logger_legacy_SUITE \
+ logger_simple_h_SUITE \
+ logger_std_h_SUITE \
+ logger_test_lib \
os_SUITE \
pg2_SUITE \
seq_trace_SUITE \
@@ -79,7 +88,9 @@ MODULES= \
zlib_SUITE \
loose_node \
sendfile_SUITE \
- standard_error_SUITE
+ standard_error_SUITE \
+ multi_load_SUITE \
+ zzz_SUITE
APP_FILES = \
appinc.app \
@@ -100,7 +111,7 @@ TARGET_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR))
INSTALL_PROGS= $(TARGET_FILES)
EMAKEFILE=Emakefile
-COVERFILE=kernel.cover
+COVERFILE=kernel.cover logger.cover
# ----------------------------------------------------
# Release directory specification
@@ -112,7 +123,7 @@ RELSYSDIR = $(RELEASE_PATH)/kernel_test
# ----------------------------------------------------
ERL_MAKE_FLAGS +=
-ERL_COMPILE_FLAGS += -I$(ERL_TOP)/lib/test_server/include
+ERL_COMPILE_FLAGS +=
EBIN = .
@@ -147,8 +158,9 @@ release_tests_spec: make_emakefile
$(INSTALL_DIR) "$(RELSYSDIR)"
$(INSTALL_DATA) $(ERL_FILES) "$(RELSYSDIR)"
$(INSTALL_DATA) $(APP_FILES) "$(RELSYSDIR)"
- $(INSTALL_DATA) kernel.spec kernel_smoke.spec $(EMAKEFILE)\
- $(COVERFILE) "$(RELSYSDIR)"
+ $(INSTALL_DATA) \
+ kernel.spec kernel_smoke.spec kernel_bench.spec logger.spec \
+ $(EMAKEFILE) $(COVERFILE) "$(RELSYSDIR)"
chmod -R u+w "$(RELSYSDIR)"
@tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -)
diff --git a/lib/kernel/test/appinc1.erl b/lib/kernel/test/appinc1.erl
index fc9180b35a..b571208834 100644
--- a/lib/kernel/test/appinc1.erl
+++ b/lib/kernel/test/appinc1.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/appinc1x.erl b/lib/kernel/test/appinc1x.erl
index b05cd55b43..3aa6f3dcb9 100644
--- a/lib/kernel/test/appinc1x.erl
+++ b/lib/kernel/test/appinc1x.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/appinc2.erl b/lib/kernel/test/appinc2.erl
index 7e92e7e092..a665e628a2 100644
--- a/lib/kernel/test/appinc2.erl
+++ b/lib/kernel/test/appinc2.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/appinc2A.erl b/lib/kernel/test/appinc2A.erl
index f0e2cd4277..378eb179f2 100644
--- a/lib/kernel/test/appinc2A.erl
+++ b/lib/kernel/test/appinc2A.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/appinc2B.erl b/lib/kernel/test/appinc2B.erl
index 17623621bb..35b7016906 100644
--- a/lib/kernel/test/appinc2B.erl
+++ b/lib/kernel/test/appinc2B.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/appinc2top.erl b/lib/kernel/test/appinc2top.erl
index f3d9715a2a..3b6dc4ea31 100644
--- a/lib/kernel/test/appinc2top.erl
+++ b/lib/kernel/test/appinc2top.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl
index 0c198b90ae..5c35b82207 100644
--- a/lib/kernel/test/application_SUITE.erl
+++ b/lib/kernel/test/application_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%%
-module(application_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2
@@ -37,17 +37,18 @@
-export([config_change/1, persistent_env/1,
distr_changed_tc1/1, distr_changed_tc2/1,
ensure_started/1, ensure_all_started/1,
- shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1]).
+ shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1,
+ config_relative_paths/1]).
-define(TESTCASE, testcase_name).
--define(testcase, ?config(?TESTCASE, Config)).
+-define(testcase, proplists:get_value(?TESTCASE, Config)).
-export([init_per_testcase/2, end_per_testcase/2, start_type/0,
start_phase/0, conf_change/0]).
-% Default timetrap timeout (set in init_per_testcase).
--define(default_timeout, ?t:minutes(2)).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,2}}].
all() ->
[failover, failover_comp, permissions, load,
@@ -55,7 +56,7 @@ all() ->
script_start, nodedown_start, permit_false_start_local,
permit_false_start_dist, get_key, get_env, ensure_all_started,
{group, distr_changed}, config_change, shutdown_func, shutdown_timeout,
- shutdown_deadlock,
+ shutdown_deadlock, config_relative_paths,
persistent_env].
groups() ->
@@ -81,21 +82,15 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(otp_2973=Case, Config) ->
- code:add_path(?config(data_dir,Config)),
- Dog = test_server:timetrap(?default_timeout),
- [{?TESTCASE, Case}, {watchdog, Dog}|Config];
+ code:add_path(proplists:get_value(data_dir,Config)),
+ [{?TESTCASE, Case}|Config];
init_per_testcase(Case, Config) ->
- Dog = test_server:timetrap(?default_timeout),
- [{?TESTCASE, Case}, {watchdog, Dog}|Config].
+ [{?TESTCASE, Case}|Config].
end_per_testcase(otp_2973, Config) ->
- code:del_path(?config(data_dir,Config)),
- Dog=?config(watchdog, Config),
- test_server:timetrap_cancel(Dog),
+ code:del_path(proplists:get_value(data_dir,Config)),
ok;
-end_per_testcase(_Case, Config) ->
- Dog=?config(watchdog, Config),
- test_server:timetrap_cancel(Dog),
+end_per_testcase(_Case, _Config) ->
ok.
-define(UNTIL(Seq), loop_until_true(fun() -> Seq end)).
@@ -120,10 +115,8 @@ loop_until_true(Fun) ->
%% Should be started in a CC view with:
%% erl -sname XXX -rsh ctrsh where XX not in [cp1, cp2, cp3]
%%-----------------------------------------------------------------
-failover(suite) -> [];
-failover(doc) ->
- ["Tests failover and takeover for distributed applications. Tests",
- "start, load etc implicitly."];
+%% Tests failover and takeover for distributed applications. Tests
+%% start, load etc implicitly.
failover(Conf) when is_list(Conf) ->
%% start a help process to check the start type
StPid = spawn_link(?MODULE, start_type, []),
@@ -133,14 +126,14 @@ failover(Conf) when is_list(Conf) ->
NoSyncTime = config_fun_fast(config_fo(NodeNames)),
WithSyncTime = config_fun(config_fo(NodeNames)),
- % Test [cp1, cp2, cp3]
+ %% Test [cp1, cp2, cp3]
{ok, Cp1} = start_node_config(Ncp1, NoSyncTime, Conf),
{ok, Cp2} = start_node_config(Ncp2, NoSyncTime, Conf),
{ok, Cp3} = start_node_config(Ncp3, WithSyncTime, Conf),
Cps = [Cp1, Cp2, Cp3],
wait_for_ready_net(),
- % Start app1 and make sure cp1 starts it
+ %% Start app1 and make sure cp1 starts it
{[ok,ok,ok],[]} =
rpc:multicall(Cps, application, load, [app1()]),
?UNTIL(is_loaded(app1, Cps)),
@@ -150,12 +143,12 @@ failover(Conf) when is_list(Conf) ->
false = is_started(app1, Cp2),
ok = get_start_type(#st{normal = 3}),
- % Stop cp1 and make sure cp2 starts app1
+ %% Stop cp1 and make sure cp2 starts app1
stop_node_nice(Cp1),
?UNTIL(is_started(app1, Cp2)),
ok = get_start_type(#st{normal = 3}),
- % Restart cp1 and make sure it restarts app1
+ %% Restart cp1 and make sure it restarts app1
{ok, Cp1_2} = start_node_config(Ncp1, NoSyncTime, Conf),
global:sync(),
ok = rpc:call(Cp1_2, application, load, [app1()]),
@@ -164,8 +157,8 @@ failover(Conf) when is_list(Conf) ->
?UNTIL(not is_started(app1, Cp2)),
ok = get_start_type(#st{takeover = 3}),
- % Test [{cp1, cp2}, cp3]
- % Start app_sp and make sure cp2 starts it (cp1 has more apps started)
+ %% Test [{cp1, cp2}, cp3]
+ %% Start app_sp and make sure cp2 starts it (cp1 has more apps started)
{[ok,ok,ok],[]} =
rpc:multicall([Cp1_2, Cp2, Cp3], application, load, [app_sp()]),
{[ok,ok,ok],[]} =
@@ -175,17 +168,17 @@ failover(Conf) when is_list(Conf) ->
false = is_started(app_sp, Cp3),
ok = get_start_type(#st{normal = 3}),
- % Stop cp2 and make sure cp1 starts app_sp
+ %% Stop cp2 and make sure cp1 starts app_sp
stop_node_nice(Cp2),
?UNTIL(is_started(app_sp, Cp1_2)),
ok = get_start_type(#st{failover = 3}),
- % Stop cp1 and make sure cp3 starts app_sp
+ %% Stop cp1 and make sure cp3 starts app_sp
stop_node_nice(Cp1_2),
?UNTIL(is_started(app_sp, Cp3)),
ok = get_start_type(#st{normal = 3, failover = 3}),
- % Restart cp2 and make sure it restarts app_sp
+ %% Restart cp2 and make sure it restarts app_sp
{ok, Cp2_2} = start_node_config(Ncp2, NoSyncTime, Conf),
global:sync(),
ok = rpc:call(Cp2_2, application, load, [app_sp()]),
@@ -194,16 +187,16 @@ failover(Conf) when is_list(Conf) ->
?UNTIL(not is_started(app_sp, Cp3)),
ok = get_start_type(#st{takeover = 3}),
- % Restart cp1 and make sure it doesn't restart app_sp
+ %% Restart cp1 and make sure it doesn't restart app_sp
{ok, Cp1_3} = start_node_config(Ncp1, NoSyncTime, Conf),
global:sync(),
ok = rpc:call(Cp1_3, application, load, [app_sp()]),
ok = rpc:call(Cp1_3, application, start, [app_sp, permanent]),
- test_server:sleep(500),
+ ct:sleep(500),
false = is_started(app_sp, Cp1_3),
true = is_started(app_sp, Cp2_2),
- % Force takeover to cp1
+ %% Force takeover to cp1
ok = rpc:call(Cp1_3, application, takeover, [app_sp, permanent]),
?UNTIL(is_started(app_sp, Cp1_3)),
?UNTIL(not is_started(app_sp, Cp2_2)),
@@ -225,11 +218,9 @@ failover(Conf) when is_list(Conf) ->
%% Should be started in a CC view with:
%% erl -sname XXX -rsh ctrsh where XX not in [cp1, cp2, cp3]
%%-----------------------------------------------------------------
-failover_comp(suite) -> [];
-failover_comp(doc) ->
- ["Tests failover and takeover for distributed applications. Tests",
- "start, load etc implicitly. The applications do not use start_phases,"
- "i.e the failover should be trasfered to normal start type."];
+%% Tests failover and takeover for distributed applications. Tests
+%% start, load etc implicitly. The applications do not use start_phases
+%% i.e. the failover should be transferred to normal start type.
failover_comp(Conf) when is_list(Conf) ->
%% start a help process to check the start type
StPid = spawn_link(?MODULE, start_type, []),
@@ -239,14 +230,14 @@ failover_comp(Conf) when is_list(Conf) ->
NoSyncTime = config_fun_fast(config(NodeNames)),
WithSyncTime = config_fun(config(NodeNames)),
- % Test [cp1, cp2, cp3]
+ %% Test [cp1, cp2, cp3]
{ok, Cp1} = start_node_config(Ncp1, NoSyncTime, Conf),
{ok, Cp2} = start_node_config(Ncp2, NoSyncTime, Conf),
{ok, Cp3} = start_node_config(Ncp3, WithSyncTime, Conf),
Cps = [Cp1, Cp2, Cp3],
wait_for_ready_net(),
- % Start app1 and make sure cp1 starts it
+ %% Start app1 and make sure cp1 starts it
{[ok,ok,ok],[]} =
rpc:multicall(Cps, application, load, [app1()]),
?UNTIL(is_loaded(app1, Cps)),
@@ -256,12 +247,12 @@ failover_comp(Conf) when is_list(Conf) ->
false = is_started(app1, Cp2),
ok = get_start_type(#st{normal = 3}),
- % Stop cp1 and make sure cp2 starts app1
+ %% Stop cp1 and make sure cp2 starts app1
stop_node_nice(Cp1),
?UNTIL(is_started(app1, Cp2)),
ok = get_start_type(#st{normal = 3}),
- % Restart cp1 and make sure it restarts app1
+ %% Restart cp1 and make sure it restarts app1
{ok, Cp1_2} = start_node_config(Ncp1, NoSyncTime, Conf),
global:sync(),
ok = rpc:call(Cp1_2, application, load, [app1()]),
@@ -271,8 +262,8 @@ failover_comp(Conf) when is_list(Conf) ->
?UNTIL(not is_started(app1, Cp2)),
ok = get_start_type(#st{takeover = 3}),
- % Test [{cp1, cp2}, cp3]
- % Start app3 and make sure cp2 starts it (cp1 has more apps started)
+ %% Test [{cp1, cp2}, cp3]
+ %% Start app3 and make sure cp2 starts it (cp1 has more apps started)
{[ok,ok,ok],[]} =
rpc:multicall([Cp1_2, Cp2, Cp3], application, load, [app3()]),
?UNTIL(is_loaded(app3, [Cp1_2, Cp2, Cp3])),
@@ -283,17 +274,17 @@ failover_comp(Conf) when is_list(Conf) ->
false = is_started(app3, Cp3),
ok = get_start_type(#st{normal = 3}),
- % Stop cp2 and make sure cp1 starts app3
+ %% Stop cp2 and make sure cp1 starts app3
stop_node_nice(Cp2),
?UNTIL(is_started(app3, Cp1_2)),
ok = get_start_type(#st{normal = 3}),
- % Stop cp1 and make sure cp3 starts app3
+ %% Stop cp1 and make sure cp3 starts app3
stop_node_nice(Cp1_2),
?UNTIL(is_started(app3, Cp3)),
ok = get_start_type(#st{normal = 6}),
- % Restart cp2 and make sure it restarts app3
+ %% Restart cp2 and make sure it restarts app3
{ok, Cp2_2} = start_node_config(Ncp2, NoSyncTime, Conf),
global:sync(),
ok = rpc:call(Cp2_2, application, load, [app3()]),
@@ -303,17 +294,17 @@ failover_comp(Conf) when is_list(Conf) ->
?UNTIL(not is_started(app3, Cp3)),
ok = get_start_type(#st{takeover = 3}),
- % Restart cp1 and make sure it doesn't restart app3
+ %% Restart cp1 and make sure it doesn't restart app3
{ok, Cp1_3} = start_node_config(Ncp1, NoSyncTime, Conf),
global:sync(),
ok = rpc:call(Cp1_3, application, load, [app3()]),
true = is_loaded(app3, Cp1_3),
ok = rpc:call(Cp1_3, application, start, [app3, permanent]),
- test_server:sleep(5000),
+ ct:sleep(5000),
false = is_started(app3, Cp1_3),
true = is_started(app3, Cp2_2),
- % Force takeover to cp1
+ %% Force takeover to cp1
ok = rpc:call(Cp1_3, application, takeover, [app3, permanent]),
?UNTIL(is_started(app3, Cp1_3)),
?UNTIL(not is_started(app3, Cp2_2)),
@@ -335,23 +326,21 @@ failover_comp(Conf) when is_list(Conf) ->
%% Should be started in a CC view with:
%% erl -sname XXX -rsh ctrsh where XX not in [cp1, cp2, cp3]
%%-----------------------------------------------------------------
-permissions(suite) -> [];
-permissions(doc) ->
- ["Tests permissions for distributed applications."];
+%% Tests permissions for distributed applications.
permissions(Conf) when is_list(Conf) ->
NodeNames = [Ncp1, Ncp2, Ncp3] = node_names([cp1, cp2, cp3], Conf),
NoSyncTime = config_fun_fast(config2(NodeNames)),
WithSyncTime = config_fun(config2(NodeNames)),
- % Test [cp1, cp2, cp3]
+ %% Test [cp1, cp2, cp3]
{ok, Cp1} = start_node_config(Ncp1, NoSyncTime, Conf),
{ok, Cp2} = start_node_config(Ncp2, NoSyncTime, Conf),
{ok, Cp3} = start_node_config(Ncp3, WithSyncTime, Conf),
Cps = [Cp1, Cp2, Cp3],
wait_for_ready_net(),
- % Start app1 and make sure cp1 starts it
+ %% Start app1 and make sure cp1 starts it
{[ok,ok,ok],[]} =
rpc:multicall(Cps, application, load, [app1()]),
?UNTIL(is_loaded(app1, Cps)),
@@ -360,50 +349,50 @@ permissions(Conf) when is_list(Conf) ->
?UNTIL(is_started(app1, Cp1)),
false = is_started(app1, Cp2),
- % Unpermit app1 on cp1, make sure cp2 starts it
+ %% Unpermit app1 on cp1, make sure cp2 starts it
ok = rpc:call(Cp1, application, permit, [app1, false]),
false = is_started(app1, Cp1),
true = is_started(app1, Cp2),
- % Unpermit app1 on cp2, make sure cp3 starts it
+ %% Unpermit app1 on cp2, make sure cp3 starts it
ok = rpc:call(Cp2, application, permit, [app1, false]),
false = is_started(app1, Cp1),
false = is_started(app1, Cp2),
true = is_started(app1, Cp3),
- % Permit cp2 again
+ %% Permit cp2 again
ok = rpc:call(Cp2, application, permit, [app1, true]),
false = is_started(app1, Cp1),
false = is_started(app1, Cp3),
true = is_started(app1, Cp2),
- % Start app3, make sure noone starts it
+ %% Start app3, make sure no one starts it
{[ok,ok,ok],[]} =
rpc:multicall(Cps, application, load, [app3()]),
?UNTIL(is_loaded(app3, Cps)),
{[ok,ok,ok],[]} =
rpc:multicall(Cps, application, start, [app3, permanent]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app3, Cp1),
false = is_started(app3, Cp2),
false = is_started(app3, Cp3),
- % Permit app3 on Cp3
+ %% Permit app3 on Cp3
ok = rpc:call(Cp3, application, permit, [app3, true]),
true = is_started(app3, Cp3),
- % Permit app3 on Cp2, make sure it starts it
+ %% Permit app3 on Cp2, make sure it starts it
ok = rpc:call(Cp2, application, permit, [app3, true]),
true = is_started(app3, Cp2),
false = is_started(app3, Cp3),
- % Permit app3 on Cp1, make sure it doesn't start it
+ %% Permit app3 on Cp1, make sure it doesn't start it
ok = rpc:call(Cp1, application, permit, [app3, true]),
false = is_started(app3, Cp1),
true = is_started(app3, Cp2),
false = is_started(app3, Cp3),
- % Stop Cp2, make sure Cp1 starts app3
+ %% Stop Cp2, make sure Cp1 starts app3
stop_node_nice(Cp2),
?UNTIL(is_started(app3, Cp1)),
@@ -415,15 +404,13 @@ permissions(Conf) when is_list(Conf) ->
%% Should be started in a CC view with:
%% erl -sname XXX -rsh ctrsh where XX not in [cp1, cp2, cp3]
%%-----------------------------------------------------------------
-load(suite) -> [];
-load(doc) ->
- ["Tests loading of distributed applications."];
+%% Tests loading of distributed applications.
load(Conf) when is_list(Conf) ->
NodeNames = [Ncp1, Ncp2, Ncp3] = node_names([cp1, cp2, cp3], Conf),
NoSyncTime = config_fun_fast(config3(NodeNames)),
WithSyncTime = config_fun(config3(NodeNames)),
- % Test [cp1, cp2, cp3]
+ %% Test [cp1, cp2, cp3]
{ok, Cp1} = start_node_config(Ncp1, NoSyncTime, Conf),
{ok, Cp2} = start_node_config(Ncp2, NoSyncTime, Conf),
{ok, Cp3} = start_node_config(Ncp3, WithSyncTime, Conf),
@@ -439,7 +426,7 @@ load(Conf) when is_list(Conf) ->
false = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- % Load app1 with different specs and make sure we get an error
+ %% Load app1 with different specs and make sure we get an error
{[{error,_},{error,_}],[]} =
rpc:multicall([Cp1, Cp2], application, load, [app1(), d1(NodeNames)]),
{error, _} = rpc:call(Cp3, application, load, [app1(), d2(NodeNames)]),
@@ -452,15 +439,13 @@ load(Conf) when is_list(Conf) ->
%%-----------------------------------------------------------------
%% Same test as load/1, only with code path cache enabled.
%%-----------------------------------------------------------------
-load_use_cache(suite) -> [];
-load_use_cache(doc) ->
- ["Tests loading of distributed applications. Code path cache enabled."];
+%% Tests loading of distributed applications. Code path cache enabled.
load_use_cache(Conf) when is_list(Conf) ->
NodeNames = [Ncp1, Ncp2, Ncp3] = node_names([cp1, cp2, cp3], Conf),
NoSyncTime = config_fun_fast(config3(NodeNames)),
WithSyncTime = config_fun(config3(NodeNames)),
- % Test [cp1, cp2, cp3]
+ %% Test [cp1, cp2, cp3]
{ok, Cp1} = start_node_with_cache(Ncp1, NoSyncTime, Conf),
{ok, Cp2} = start_node_with_cache(Ncp2, NoSyncTime, Conf),
{ok, Cp3} = start_node_with_cache(Ncp3, WithSyncTime, Conf),
@@ -475,7 +460,7 @@ load_use_cache(Conf) when is_list(Conf) ->
?UNTIL(is_started(app1, Cp1)),
false = is_started(app1, Cp2),
- % Load app1 with different specs and make sure we get an error
+ %% Load app1 with different specs and make sure we get an error
{[{error,_},{error,_}],[]} =
rpc:multicall([Cp1, Cp2], application, load, [app1(), d1(NodeNames)]),
{error, _} = rpc:call(Cp3, application, load, [app1(), d2(NodeNames)]),
@@ -489,9 +474,7 @@ load_use_cache(Conf) when is_list(Conf) ->
%% Should be started in a CC view with:
%% erl -sname XXX -rsh ctrsh where XX not in [cp1, cp2, cp3]
%%-----------------------------------------------------------------
-start_phases(suite) -> [];
-start_phases(doc) ->
- ["Tests new start phases and failover."];
+%% Tests new start phases and failover.
start_phases(Conf) when is_list(Conf) ->
%% start a help process to check the start type
SpPid = spawn_link(?MODULE, start_phase, []),
@@ -553,17 +536,15 @@ start_phases(Conf) when is_list(Conf) ->
ok.
-script_start(doc) ->
- ["Start distributed applications from within a boot script. Test ",
- "same as failover."];
-script_start(suite) -> [];
+%% Start distributed applications from within a boot script. Test
+%% same as failover.
script_start(Conf) when is_list(Conf) ->
%% start a help process to check the start type
StPid = spawn_link(?MODULE, start_type, []),
yes = global:register_name(st_type, StPid),
- % Create the .app files and the boot script
+ %% Create the .app files and the boot script
ok = create_app(),
{{KernelVer,StdlibVer}, _} = create_script("latest"),
case is_real_system(KernelVer, StdlibVer) of
@@ -578,7 +559,7 @@ script_start(Conf) when is_list(Conf) ->
NoSyncTime = config_fun_fast(config_fo(NodeNames)),
WithSyncTime = config_fun(config_fo(NodeNames)),
- % Test [cp1, cp2, cp3]
+ %% Test [cp1, cp2, cp3]
{ok, Cp1} = start_node_boot_config(Ncp1, NoSyncTime, Conf, latest),
{ok, Cp2} = start_node_boot_config(Ncp2, NoSyncTime, Conf, latest),
{ok, Cp3} = start_node_boot_config(Ncp3, WithSyncTime, Conf, latest),
@@ -590,16 +571,16 @@ script_start(Conf) when is_list(Conf) ->
false = is_started(app1, Cp2),
ok = get_start_type(#st{normal = 9}),
- % Stop cp1 and make sure cp2 starts app1, app2 normally (no
- % start_phases defined) and app_sp as failover (start_phases
- % defined)
+ %% Stop cp1 and make sure cp2 starts app1, app2 normally (no
+ %% start_phases defined) and app_sp as failover (start_phases
+ %% defined)
stop_node_nice(Cp1),
?UNTIL(is_started(app1, Cp2)),
?UNTIL(is_started(app2, Cp2)),
?UNTIL(is_started(app_sp, Cp2)),
ok = get_start_type(#st{normal = 6, failover = 3}),
- % Restart cp1, Cp1 takesover app1 and app2
+ %% Restart cp1, Cp1 takes over app1 and app2
{ok, Cp1_2} = start_node_boot_config(Ncp1, NoSyncTime, Conf, latest),
global:sync(),
?UNTIL(is_started(app1, Cp1_2)),
@@ -610,20 +591,20 @@ script_start(Conf) when is_list(Conf) ->
?UNTIL(not is_started(app2, Cp2)),
ok = get_start_type(#st{takeover = 6}),
- % Stop cp2 and make sure cp1 starts app_sp.
+ %% Stop cp2 and make sure cp1 starts app_sp.
false = is_started(app_sp, Cp1_2),
stop_node_nice(Cp2),
?UNTIL(is_started(app_sp, Cp1_2)),
ok = get_start_type(#st{failover = 3}),
- % Stop cp1 and make sure cp3 starts app1, app2 and app_sp
+ %% Stop cp1 and make sure cp3 starts app1, app2 and app_sp
stop_node_nice(Cp1_2),
?UNTIL(is_started(app_sp, Cp3)),
?UNTIL(is_started(app1, Cp3)),
?UNTIL(is_started(app2, Cp3)),
ok = get_start_type(#st{normal = 6, failover = 3}),
+ %% Restart cp2 and make sure it takes over app1, app2 and app_sp
+ %% Restart cp2 and make sure it takesover app1, app2 and app_sp
{ok, Cp2_2} = start_node_boot_config(Ncp2, NoSyncTime, Conf, latest),
global:sync(),
?UNTIL(is_started(app_sp, Cp2_2)),
@@ -634,7 +615,7 @@ script_start(Conf) when is_list(Conf) ->
?UNTIL(not is_started(app2, Cp3)),
ok = get_start_type(#st{takeover = 9}),
+ %% Restart cp1 and make sure it takes over app1, app2
+ %% Restart cp1 and make sure it takesover app1, app2
{ok, Cp1_3} = start_node_boot_config(Ncp1, NoSyncTime, Conf, latest),
global:sync(),
?UNTIL(is_started(app1, Cp1_3)),
@@ -645,7 +626,7 @@ script_start(Conf) when is_list(Conf) ->
?UNTIL(not is_started(app2, Cp2_2)),
ok = get_start_type(#st{takeover = 6}),
- % Force takeover to cp1
+ %% Force takeover to cp1
ok = rpc:call(Cp1_3, application, takeover, [app_sp, permanent]),
?UNTIL(is_started(app_sp, Cp1_3)),
?UNTIL(not is_started(app_sp, Cp2_2)),
@@ -668,15 +649,13 @@ script_start(Conf) when is_list(Conf) ->
ok.
-permit_false_start_local(doc) ->
- ["Start local applications with permission false. Set",
- "permit true on different nodes."];
-permit_false_start_local(suite) -> [];
+%% Start local applications with permission false. Set
+%% permit true on different nodes.
permit_false_start_local(Conf) when is_list(Conf) ->
%% This configuration does not start dist_ac.
Config = write_config_file(fun config_perm/1, Conf),
- % Test [cp1, cp2, cp3]
+ %% Test [cp1, cp2, cp3]
[Ncp1, Ncp2, Ncp3] = node_names([cp1, cp2, cp3], Conf),
{ok, Cp1} = start_node(Ncp1, Config),
{ok, Cp2} = start_node(Ncp2, Config),
@@ -694,99 +673,99 @@ permit_false_start_local(Conf) when is_list(Conf) ->
{[ok,ok,ok],[]} =
rpc:multicall([Cp1, Cp2, Cp3], application, load, [app3()]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app1, Cp1),
false = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- %Permit a not started application
+ %% Permit a not started application
ok = rpc:call(Cp1, application, permit, [app3, true]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app3, Cp1),
false = is_started(app3, Cp2),
false = is_started(app3, Cp3),
- %Permit a not loaded application
+ %% Permit a not loaded application
{error,{not_loaded,app_notloaded}} =
rpc:call(Cp1, application, permit, [app_notloaded, true]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app_notloaded, Cp1),
false = is_started(app_notloaded, Cp2),
false = is_started(app_notloaded, Cp3),
- %Unpermit a not started application
+ %% Unpermit a not started application
ok = rpc:call(Cp1, application, permit, [app3, false]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app3, Cp1),
false = is_started(app3, Cp2),
false = is_started(app3, Cp3),
- %Unpermit a not loaded application
+ %% Unpermit a not loaded application
{error,{not_loaded,app_notloaded}} =
rpc:call(Cp1, application, permit, [app_notloaded, false]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app_notloaded, Cp1),
false = is_started(app_notloaded, Cp2),
false = is_started(app_notloaded, Cp3),
- % Permit app1 on CP1 and make sure it is started
+ %% Permit app1 on CP1 and make sure it is started
ok = rpc:call(Cp1, application, permit, [app1, true]),
?UNTIL(is_started(app1, Cp1)),
false = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- % Permit it again
+ %% Permit it again
ok = rpc:call(Cp1, application, permit, [app1, true]),
- test_server:sleep(1000),
+ ct:sleep(1000),
true = is_started(app1, Cp1),
false = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- % Permit app2 on CP1 and make sure it is started
+ %% Permit app2 on CP1 and make sure it is started
ok = rpc:call(Cp1, application, permit, [app2, true]),
?UNTIL(is_started(app2, Cp1)),
false = is_started(app2, Cp2),
false = is_started(app2, Cp3),
- % Permit app1 on CP2 and make sure it is started
+ %% Permit app1 on CP2 and make sure it is started
ok = rpc:call(Cp2, application, permit, [app1, true]),
?UNTIL(is_started(app1, Cp2)),
true = is_started(app1, Cp1),
false = is_started(app1, Cp3),
- % Unpermit app1 on CP1 and make sure it is stopped
+ %% Unpermit app1 on CP1 and make sure it is stopped
ok = rpc:call(Cp1, application, permit, [app1, false]),
?UNTIL(false =:= is_started(app1, Cp1)),
true = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- % Unpermit it agin
+ %% Unpermit it again
ok = rpc:call(Cp1, application, permit, [app1, false]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app1, Cp1),
true = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- % Permit app1 on CP1 and make sure it is started
+ %% Permit app1 on CP1 and make sure it is started
ok = rpc:call(Cp1, application, permit, [app1, true]),
?UNTIL(is_started(app1, Cp1)),
true = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- % Unpermit app1 on CP1 and make sure it is stopped
+ %% Unpermit app1 on CP1 and make sure it is stopped
ok = rpc:call(Cp1, application, permit, [app1, false]),
?UNTIL(false =:= is_started(app1, Cp1)),
true = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- % Unpermit app1 on CP2 and make sure it is stopped
+ %% Unpermit app1 on CP2 and make sure it is stopped
ok = rpc:call(Cp2, application, permit, [app1, false]),
- test_server:sleep(1000),
+ ct:sleep(1000),
?UNTIL(false =:= is_started(app1, Cp2)),
false = is_started(app1, Cp1),
false = is_started(app1, Cp3),
- % Unpermit app2 on CP1 and make sure it is stopped
+ %% Unpermit app2 on CP1 and make sure it is stopped
ok = rpc:call(Cp1, application, permit, [app2, false]),
?UNTIL(false =:= is_started(app2, Cp2)),
false = is_started(app2, Cp1),
@@ -798,16 +777,14 @@ permit_false_start_local(Conf) when is_list(Conf) ->
ok.
-permit_false_start_dist(doc) ->
- ["Start distributed applications with permission false. Set",
- "permit true on different nodes."];
-permit_false_start_dist(suite) -> [];
+%% Start distributed applications with permission false. Set
+%% permit true on different nodes.
permit_false_start_dist(Conf) when is_list(Conf) ->
NodeNames = [Ncp1, Ncp2, Ncp3] = node_names([cp1, cp2, cp3], Conf),
NoSyncTime = config_fun_fast(config_perm2(NodeNames)),
WithSyncTime = config_fun(config_perm2(NodeNames)),
- % Test [cp1, cp2, cp3]
+ %% Test [cp1, cp2, cp3]
{ok, Cp1} = start_node_config(Ncp1, NoSyncTime, Conf),
{ok, Cp2} = start_node_config(Ncp2, NoSyncTime, Conf),
{ok, Cp3} = start_node_config(Ncp3, WithSyncTime, Conf),
@@ -822,36 +799,36 @@ permit_false_start_dist(Conf) when is_list(Conf) ->
{[ok,ok,ok],[]} =
rpc:multicall(Cps, application, load, [app2()]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app1, Cp1),
false = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- %Permit a not started application
+ %% Permit a not started application
ok = rpc:call(Cp1, application, permit, [app2, true]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app2, Cp1),
false = is_started(app2, Cp2),
false = is_started(app2, Cp3),
- %Permit a not loaded application
+ %% Permit a not loaded application
{error,{not_loaded,app3}} =
rpc:call(Cp1, application, permit, [app3, true]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app3, Cp1),
false = is_started(app3, Cp2),
false = is_started(app3, Cp3),
- %Unpermit a not started application
+ %% Unpermit a not started application
ok = rpc:call(Cp1, application, permit, [app2, false]),
{[ok,ok,ok],[]} =
rpc:multicall([Cp1, Cp2, Cp3], application, start, [app2, permanent]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app2, Cp1),
false = is_started(app2, Cp2),
false = is_started(app2, Cp3),
- %Unpermit a not loaded application
+ %% Unpermit a not loaded application
{error,{not_loaded,app3}} =
rpc:call(Cp1, application, permit, [app3, false]),
{[ok,ok,ok],[]} =
@@ -859,42 +836,42 @@ permit_false_start_dist(Conf) when is_list(Conf) ->
?UNTIL(is_loaded(app3, Cps)),
{[ok,ok,ok],[]} =
rpc:multicall(Cps, application, start, [app3, permanent]),
- test_server:sleep(1000),
+ ct:sleep(1000),
false = is_started(app3, Cp1),
false = is_started(app3, Cp2),
false = is_started(app3, Cp3),
- % Permit app1 on CP1 and make sure it is started
+ %% Permit app1 on CP1 and make sure it is started
ok = rpc:call(Cp1, application, permit, [app1, true]),
?UNTIL(is_started(app1, Cp1)),
false = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- % Permit it again
+ %% Permit it again
ok = rpc:call(Cp1, application, permit, [app1, true]),
?UNTIL(is_started(app1, Cp1)),
false = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- % Permit app2 on CP1 and make sure it is started
+ %% Permit app2 on CP1 and make sure it is started
ok = rpc:call(Cp1, application, permit, [app2, true]),
?UNTIL(is_started(app2, Cp1)),
false = is_started(app2, Cp2),
false = is_started(app2, Cp3),
- % Permit app1 on CP2 and make sure it is not started
+ %% Permit app1 on CP2 and make sure it is not started
ok = rpc:call(Cp2, application, permit, [app1, true]),
- test_server:sleep(1000),
+ ct:sleep(1000),
true = is_started(app1, Cp1),
false = is_started(app1, Cp2),
false = is_started(app1, Cp3),
- % Crash CP1 and make sure app1, but not app2, is started on CP2
+ %% Crash CP1 and make sure app1, but not app2, is started on CP2
stop_node_nice(Cp1),
?UNTIL(is_started(app1, Cp2)),
false = is_started(app2, Cp2),
- % Restart CP1 again, check nothing is running on it
+ %% Restart CP1 again, check nothing is running on it
{ok, Cp1_2} = start_node_config(Ncp1, NoSyncTime, Conf),
global:sync(),
ok = rpc:call(Cp1_2, application, load, [app1()]),
@@ -909,19 +886,19 @@ permit_false_start_dist(Conf) when is_list(Conf) ->
false = is_started(app1, Cp1_2),
false = is_started(app2, Cp1_2),
- % Permit app3 on CP3 and make sure it is started
+ %% Permit app3 on CP3 and make sure it is started
ok = rpc:call(Cp3, application, permit, [app3, true]),
?UNTIL(is_started(app3, Cp3)),
false = is_started(app3, Cp1_2),
false = is_started(app3, Cp2),
- % Permit app3 on CP1 and make sure it is moved there from CP3
+ %% Permit app3 on CP1 and make sure it is moved there from CP3
ok = rpc:call(Cp1_2, application, permit, [app3, true]),
?UNTIL(is_started(app3, Cp1_2)),
false = is_started(app3, Cp2),
false = is_started(app3, Cp3),
- % Unpermit app3 on CP3 and CP1 and make sure it is stopped
+ %% Unpermit app3 on CP3 and CP1 and make sure it is stopped
ok = rpc:call(Cp3, application, permit, [app3, false]),
ok = rpc:call(Cp1_2, application, permit, [app3, false]),
?UNTIL(false =:= is_started(app3, Cp1_2)),
@@ -933,27 +910,25 @@ permit_false_start_dist(Conf) when is_list(Conf) ->
stop_node_nice(Cp3),
ok.
-nodedown_start(doc) ->
- ["app1 distributed as [cp1, cp2]. Call application:start(app1) on",
- "cp2, but not on cp1. Kill cp1. Make sure app1 is started on cp2."];
-nodedown_start(suite) -> [];
+%% app1 distributed as [cp1, cp2]. Call application:start(app1) on
+%% cp2, but not on cp1. Kill cp1. Make sure app1 is started on cp2.
nodedown_start(Conf) when is_list(Conf) ->
NodeNames = [Ncp1, Ncp2] = node_names([cp1, cp2], Conf),
NoSyncTime = config_fun_fast(config4(NodeNames)),
WithSyncTime = config_fun(config4(NodeNames)),
- % Test [cp1, cp2]
+ %% Test [cp1, cp2]
{ok, Cp1} = start_node_config(Ncp1, NoSyncTime, Conf),
{ok, Cp2} = start_node_config(Ncp2, WithSyncTime, Conf),
wait_for_ready_net(),
- % Start app1 and make sure cp1 starts it
+ %% Start app1 and make sure cp1 starts it
{[ok,ok],[]} =
rpc:multicall([Cp1, Cp2], application, load, [app1()]),
_ = rpc:cast(Cp2, application, start, [app1, permanent]),
- test_server:sleep(1000),
+ ct:sleep(1000),
- % Crash CP1 and make sure app1 is started on CP2
+ %% Crash CP1 and make sure app1 is started on CP2
stop_node_nice(Cp1),
?UNTIL(is_started(app1, Cp2)),
@@ -961,8 +936,7 @@ nodedown_start(Conf) when is_list(Conf) ->
ok.
-ensure_started(suite) -> [];
-ensure_started(doc) -> ["Test application:ensure_started/1."];
+%% Test application:ensure_started/1.
ensure_started(_Conf) ->
{ok, Fd} = file:open("app1.app", [write]),
@@ -981,8 +955,7 @@ ensure_started(_Conf) ->
ok = application:unload(app1),
ok.
-ensure_all_started(suite) -> [];
-ensure_all_started(doc) -> ["Test application:ensure_all_started/1-2."];
+%% Test application:ensure_all_started/1-2.
ensure_all_started(_Conf) ->
{ok, Fd1} = file:open("app1.app", [write]),
@@ -1069,11 +1042,9 @@ ensure_all_started(_Conf) ->
%% Ticket: OTP-1586
%% Slogan: recursive load of applications fails
%%-----------------------------------------------------------------
-otp_1586(suite) -> [];
-otp_1586(doc) ->
- ["Test recursive load of applications."];
+%% Test recursive load of applications.
otp_1586(Conf) when is_list(Conf) ->
- Dir = ?config(priv_dir,Conf),
+ Dir = proplists:get_value(priv_dir,Conf),
{ok, Fd} = file:open(filename:join(Dir, "app5.app"), [write]),
w_app5(Fd),
file:close(Fd),
@@ -1090,21 +1061,19 @@ otp_1586(Conf) when is_list(Conf) ->
%% Slogan: start of distrib apps fails when the nodes start
%% simultaneously
%%-----------------------------------------------------------------
-otp_2078(suite) -> [];
-otp_2078(doc) ->
- ["Test start of distrib apps fails when the nodes start simultaneously."];
+%% Test start of distrib apps fails when the nodes start simultaneously.
otp_2078(Conf) when is_list(Conf) ->
NodeNames = [Ncp1, Ncp2] = node_names([cp1, cp2], Conf),
NoSyncTime = config_fun_fast(config4(NodeNames)),
WithSyncTime = config_fun(config4(NodeNames)),
- % Test [cp1, cp2]
+ %% Test [cp1, cp2]
{ok, Cp1} = start_node_config(Ncp1, NoSyncTime, Conf),
{ok, Cp2} = start_node_config(Ncp2, WithSyncTime, Conf),
Cps = [Cp1, Cp2],
wait_for_ready_net(),
- % Start app1 and make sure cp1 starts it
+ %% Start app1 and make sure cp1 starts it
{[ok,ok],[]} =
rpc:multicall(Cps, application, load, [app1()]),
?UNTIL(is_loaded(app1, Cps)),
@@ -1112,8 +1081,8 @@ otp_2078(Conf) when is_list(Conf) ->
?UNTIL(is_started(app1, Cp1)),
false = is_started(app1, Cp2),
- % Start app1 on cp2; make sure it works (the bug was that this start
- % returned error)
+ %% Start app1 on cp2; make sure it works (the bug was that this start
+ %% returned error)
ok = rpc:call(Cp2, application, start, [app1, permanent]),
true = is_started(app1, Cp1),
false = is_started(app1, Cp2),
@@ -1122,15 +1091,13 @@ otp_2078(Conf) when is_list(Conf) ->
stop_node_nice(Cp2),
ok.
-otp_2012(suite) -> [];
-otp_2012(doc) ->
- ["Test change of configuration parameters without changing code."];
+%% Test change of configuration parameters without changing code.
otp_2012(Conf) when is_list(Conf) ->
%% start a help process to check the config change
CcPid = spawn_link(?MODULE, conf_change, []),
yes = global:register_name(conf_change, CcPid),
- % Write a .app file
+ %% Write a .app file
{ok, Fd} = file:open("app1.app", [write]),
w_app1(Fd),
file:close(Fd),
@@ -1138,7 +1105,7 @@ otp_2012(Conf) when is_list(Conf) ->
w_app1(Fd2),
file:close(Fd2),
- % Start app1
+ %% Start app1
ok = application:load(app1()),
ok = application:start(app1, permanent),
@@ -1149,7 +1116,7 @@ otp_2012(Conf) when is_list(Conf) ->
ok = application_controller:config_change(EnvBefore),
ok = get_conf_change([{[], [{new1, hi}, {new2, moi}], []}]),
- % Start app2
+ %% Start app2
ok = application:load(app2()),
ok = application:start(app2, permanent),
@@ -1173,11 +1140,9 @@ otp_2012(Conf) when is_list(Conf) ->
%% Ticket: OTP-2718
%% Slogan: transient app which fails during start is ignored
%%-----------------------------------------------------------------
-otp_2718(suite) -> [];
-otp_2718(doc) ->
- ["Test fail of transient app at start."];
+%% Test failure of a transient app at start.
otp_2718(Conf) when is_list(Conf) ->
- {ok, Cp1} = start_node_args(cp1, "-pa " ++ ?config(data_dir,Conf)),
+ {ok, Cp1} = start_node_args(cp1, "-pa " ++ proplists:get_value(data_dir,Conf)),
wait_for_ready_net(),
%% normal exit from the application
@@ -1185,7 +1150,7 @@ otp_2718(Conf) when is_list(Conf) ->
?UNTIL(is_loaded(trans_normal, Cp1)),
{error, {{'EXIT',normal},_}} =
rpc:call(Cp1, application, start, [trans_normal, transient]),
- test_server:sleep(2000),
+ ct:sleep(2000),
false = is_started(trans_normal, Cp1),
%% abnormal exit from the application
@@ -1193,7 +1158,7 @@ otp_2718(Conf) when is_list(Conf) ->
{error, {bad_return,{{trans_abnormal_sup,start,[normal,[]]},
{'EXIT',abnormal}}}} =
rpc:call(Cp1, application, start, [trans_abnormal, transient]),
- test_server:sleep(3000),
+ ct:sleep(3000),
{badrpc,nodedown} = which_applications(Cp1),
ok.
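The ?config(Key, Config) macro being dropped in this commit is, in common_test's ct.hrl, essentially a wrapper around proplists:get_value/2, so spelling the call out (as in the data_dir lookup above) changes nothing at runtime. A minimal sketch of the equivalence, with module header and include shown only for completeness:

    -module(config_lookup_example).
    -include_lib("common_test/include/ct.hrl").
    -export([data_dir/1]).

    %% Both expressions fetch the same value from the Config proplist
    %% (and both return 'undefined' if the key is missing).
    data_dir(Config) ->
        Dir = proplists:get_value(data_dir, Config),
        Dir = ?config(data_dir, Config),   % same value, via the macro
        Dir.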
@@ -1201,11 +1166,9 @@ otp_2718(Conf) when is_list(Conf) ->
%% Ticket: OTP-2973
%% Slogan: application:start does not test if an appl is already starting...
%%-----------------------------------------------------------------
-otp_2973(suite) -> [];
-otp_2973(doc) ->
- ["Test of two processes simultanously starting the same application."];
+%% Test of two processes simultaneously starting the same application.
otp_2973(Conf) when is_list(Conf) ->
- % Write a .app file
+ %% Write a .app file
{ok, Fd} = file:open("app0.app", [write]),
w_app(Fd, app0()),
file:close(Fd),
@@ -1222,14 +1185,14 @@ otp_2973(Conf) when is_list(Conf) ->
{Pid2, res, Res2x} ->
{Res1x, Res2x}
after 2000 ->
- test_server:fail(timeout_pid2)
+ ct:fail(timeout_pid2)
end;
{Pid2, res, Res2x} ->
receive
{Pid1, res, Res1x} ->
{Res1x, Res2x}
after 2000 ->
- test_server:fail(timeout_pid1)
+ ct:fail(timeout_pid1)
end
end,
@@ -1243,11 +1206,11 @@ otp_2973(Conf) when is_list(Conf) ->
_ ->
Txt = io_lib:format("Illegal results from start: ~p ~p ",
[Res1, Res2]),
- test_server:fail(lists:flatten(Txt))
+ ct:fail(lists:flatten(Txt))
end,
- % Write a .app file
+ %% Write a .app file
{ok, Fda} = file:open("app_start_error.app", [write]),
w_app_start_error(Fda),
file:close(Fda),
@@ -1261,14 +1224,14 @@ otp_2973(Conf) when is_list(Conf) ->
{Pid2, res, Res2y} ->
{Res1y, Res2y}
after 2000 ->
- test_server:fail(timeout_pid2)
+ ct:fail(timeout_pid2)
end;
{Pid2, res, Res2y} ->
receive
{Pid1, res, Res1y} ->
{Res1y, Res2y}
after 2000 ->
- test_server:fail(timeout_pid1)
+ ct:fail(timeout_pid1)
end
end,
@@ -1278,7 +1241,7 @@ otp_2973(Conf) when is_list(Conf) ->
ok;
_ ->
Txta = io_lib:format("Illegal results from start ~p ~p ",[Res1a, Res2a]),
- test_server:fail(lists:flatten(Txta))
+ ct:fail(lists:flatten(Txta))
end,
ok.
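The receive blocks above wait for a {Pid, res, Result} message from each of the two competing starters and fail the test case on timeout. The same pattern can be factored into a small helper; a sketch assuming the workers keep that message format:

    %% Hypothetical helper: collect one {Pid, res, Result} message per
    %% worker pid, failing the test case if any of them stays silent.
    collect_results(Pids, Timeout) ->
        [receive
             {Pid, res, Res} -> Res
         after Timeout ->
                 ct:fail({timeout_waiting_for, Pid})
         end || Pid <- Pids].

    %% Usage: [Res1, Res2] = collect_results([Pid1, Pid2], 2000).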
@@ -1289,36 +1252,34 @@ otp_2973(Conf) when is_list(Conf) ->
%% Ticket: OTP-3184
%% Slogan: crash the node if permanent appl has illegal env parameter values
%%-----------------------------------------------------------------
-otp_3184(suite) -> [];
-otp_3184(doc) ->
- ["When a distributed application is started the permit flag is checked "
- "that the permit flag is not changed during the start. "
- "Te check must only be made if the application is started on the own node"];
+%% When a distributed application is started, it is checked that the
+%% permit flag is not changed during the start. The check must only be
+%% made if the application is started on the local node.
otp_3184(Conf) when is_list(Conf) ->
NodeNames = [Ncp1, Ncp2] = node_names([cp1, cp2], Conf),
NoSyncTime = config_fun_fast(config3184(NodeNames)),
WithSyncTime = config_fun(config3184(NodeNames)),
- % Test [cp1, cp2]
+ %% Test [cp1, cp2]
{ok, Cp1} = start_node_config(Ncp1, NoSyncTime, Conf),
{ok, Cp2} = start_node_config(Ncp2, WithSyncTime, Conf),
wait_for_ready_net(),
- % Start app1 and make sure it is not started
+ %% Start app1 and make sure it is not started
{[ok,ok],[]} =
rpc:multicall([Cp1, Cp2], application, load, [app1()]),
- test_server:sleep(3000),
+ ct:sleep(3000),
false = is_started(app1, Cp1),
false = is_started(app1, Cp2),
- % Start app1 on cp1
+ %% Start app1 on cp1
ok = rpc:call(Cp1, application, permit, [app1, true]),
ok = rpc:call(Cp1, application, start, [app1, permanent]),
ok = rpc:call(Cp2, application, start, [app1, permanent]),
?UNTIL(is_started(app1, Cp1)),
false = is_started(app1, Cp2),
- % Check that the application is marked as running in application_controller
+ %% Check that the application is marked as running in application_controller
X = rpc:call(Cp1, application_controller, info, []),
{value, {running, Xrunning}} = lists:keysearch(running, 1, X),
{value, Xapp1} = lists:keysearch(app1, 1, Xrunning),
@@ -1337,15 +1298,13 @@ otp_3184(Conf) when is_list(Conf) ->
%% Ticket: OTP-3002
%% Slogan: crash the node if permanent appl has illegal env parameter values
%%-----------------------------------------------------------------
-otp_3002(suite) -> [];
-otp_3002(doc) ->
- ["crash the node if permanent appl has illegal env parameter values."];
+%% crash the node if permanent appl has illegal env parameter values.
otp_3002(Conf) when is_list(Conf) ->
- % Create the boot script
+ %% Create the boot script
{{KernelVer,StdlibVer}, {LatestDir, LatestName}} =
create_script_3002("script_3002"),
- ?t:format(0, "LatestDir = ~p~n", [LatestDir]),
- ?t:format(0, "LatestName = ~p~n", [LatestName]),
+ ct:pal(?HI_VERBOSITY, "LatestDir = ~p~n", [LatestDir]),
+ ct:pal(?HI_VERBOSITY, "LatestName = ~p~n", [LatestName]),
case is_real_system(KernelVer, StdlibVer) of
true ->
@@ -1371,10 +1330,9 @@ otp_3002(Conf) when is_list(Conf) ->
%% when it received dist_ac_app_stopped).
%%-----------------------------------------------------------------
-otp_4066(suite) -> [];
-otp_4066(doc) -> ["Check that application stop don't cause dist_ac crash"];
+%% Check that application stop doesn't cause a dist_ac crash.
otp_4066(Conf) when is_list(Conf) ->
- % Write config files
+ %% Write config files
[Ncp1, Ncp2] = node_names([cp1, cp2], Conf),
Host = from($@, atom_to_list(node())),
Cp1 = list_to_atom(Ncp1 ++ "@" ++ Host),
@@ -1382,12 +1340,12 @@ otp_4066(Conf) when is_list(Conf) ->
AllNodes = [Cp1, Cp2],
App1Nodes = {app1, AllNodes},
- Dir = ?config(priv_dir,Conf),
+ Dir = proplists:get_value(priv_dir,Conf),
{ok, FdC} = file:open(filename:join(Dir, "otp_4066.config"), [write]),
write_config(FdC, config_4066(AllNodes, 5000, [App1Nodes])),
file:close(FdC),
- % Write the app1.app file
+ %% Write the app1.app file
{ok, FdA12} = file:open(filename:join(Dir, "app1.app"), [write]),
w_app1(FdA12),
file:close(FdA12),
@@ -1402,29 +1360,29 @@ otp_4066(Conf) when is_list(Conf) ->
ok = rpc:call(Cp1, application, start, [app1]),
wait_until_started(app1, [Cp1]),
- test_server:format("--- App1 started at Cp1 ---~n", []),
+ io:format("--- App1 started at Cp1 ---~n", []),
print_dac_state(AllNodes),
- % Cp2 previously crashed on this stop
+ %% Cp2 previously crashed on this stop
ok = rpc:call(Cp1, application, stop, [app1]),
wait_until_stopped(app1, [Cp1]),
- test_server:format("--- App1 stopped at Cp1 ---~n", []),
+ io:format("--- App1 stopped at Cp1 ---~n", []),
print_dac_state(AllNodes),
ok = rpc:call(Cp1, application, start, [app1]),
wait_until_started(app1, [Cp1]),
- test_server:format("--- App1 started at Cp1 ---~n", []),
+ io:format("--- App1 started at Cp1 ---~n", []),
print_dac_state(AllNodes),
ok = rpc:call(Cp2, application, load, [app1, App1Nodes]),
ok = rpc:call(Cp2, application, start, [app1]),
- test_server:format("--- App1 started at Cp2 ---~n", []),
+ io:format("--- App1 started at Cp2 ---~n", []),
print_dac_state(AllNodes),
stop_node_nice(Cp1),
wait_until_started(app1, [Cp2]),
- test_server:format("--- Cp1 crashed; failover to Cp2 ---~n", []),
+ io:format("--- Cp1 crashed; failover to Cp2 ---~n", []),
print_dac_state(Cp2),
stop_node_nice(Cp2),
@@ -1440,7 +1398,7 @@ write_config(Fd, Config) ->
print_dac_state(Node) when is_atom(Node) ->
State = gen_server:call({dist_ac, Node}, info),
- test_server:format(" * dist_ac state on node ~p:~n ~p~n",
+ io:format(" * dist_ac state on node ~p:~n ~p~n",
[Node, State]);
print_dac_state(Nodes) when is_list(Nodes) ->
lists:foreach(fun (N) -> print_dac_state(N) end, Nodes).
@@ -1450,9 +1408,7 @@ print_dac_state(Nodes) when is_list(Nodes) ->
%% Ticket: OTP-4227
%% Slogan: Bad return value from application.
%%-----------------------------------------------------------------
-otp_4227(suite) -> [];
-otp_4227(doc) ->
- ["Test start of depending app when required app crashed."];
+%% Test start of a dependent app when a required app has crashed.
otp_4227(Conf) when is_list(Conf) ->
NodeNames = [Ncp1, Ncp2] = node_names([cp1, cp2], Conf),
NoSyncTime = config_fun_fast(config_4227(NodeNames)),
@@ -1475,11 +1431,11 @@ otp_4227(Conf) when is_list(Conf) ->
%% Start app9 and brutally kill it, then try to start app10
ok = rpc:call(Cp1, application, start, [app9]),
- test_server:sleep(1000),
+ ct:sleep(1000),
Pid9 = rpc:call(Cp1, erlang, whereis, [ch_sup19]),
true = erlang:is_pid(Pid9),
true = erlang:exit(Pid9, kill),
- test_server:sleep(1000),
+ ct:sleep(1000),
%% This gave {error, no_report} before the patch
{error, {not_running, app9}} =
@@ -1518,7 +1474,7 @@ otp_5363(Conf) when is_list(Conf) ->
%% the code, but only that the correct processes ARE killed.
OldPath = code:get_path(),
- code:add_patha(?config(data_dir,Conf)),
+ code:add_patha(proplists:get_value(data_dir,Conf)),
try
ok = application:load(app_group_leader()),
ok = application:start(group_leader),
@@ -1532,7 +1488,7 @@ otp_5363(Conf) when is_list(Conf) ->
undefined = whereis(nisse);
Bad ->
io:format("~p\n", [Bad]),
- ?t:fail()
+ ct:fail(failed)
end
after
code:set_path(OldPath)
@@ -1543,14 +1499,12 @@ otp_5363(Conf) when is_list(Conf) ->
%% Ticket: OTP-5606
%% Slogan: Problems with starting a distributed application
%%-----------------------------------------------------------------
-otp_5606(suite) -> [];
-otp_5606(doc) ->
- ["Test of several processes simultanously starting the same "
- "distributed application."];
+%% Test of several processes simultaneously starting the same
+%% distributed application.
otp_5606(Conf) when is_list(Conf) ->
%% Write a config file
- Dir = ?config(priv_dir, Conf),
+ Dir = proplists:get_value(priv_dir, Conf),
{ok, Fd} = file:open(filename:join(Dir, "sys.config"), [write]),
NodeNames = [Ncp1, Ncp2] = node_names([cp1, cp2], Conf),
(config4(NodeNames))(Fd, 10000),
@@ -1586,7 +1540,7 @@ otp_5606(Conf) when is_list(Conf) ->
[Res1, Res2, Res3, Res4] ->
Txt = io_lib:format("Illegal results from start ~p ~p ~p ~p",
[Res1, Res2, Res3, Res4]),
- test_server:fail(lists:flatten(Txt))
+ ct:fail(lists:flatten(Txt))
end,
{error, {already_started, app1}} =
@@ -1601,7 +1555,7 @@ otp_5606_loop(ResL) when length(ResL)<4 ->
{_Pid, Res} ->
otp_5606_loop([Res|ResL])
after 5000 ->
- test_server:fail(timeout_waiting_for_res)
+ ct:fail(timeout_waiting_for_res)
end;
otp_5606_loop(ResL) ->
ResL.
@@ -1613,11 +1567,10 @@ loop5606(Pid) ->
Pid ! {self(), Res}
end.
-get_env(suite) -> [];
-get_env(doc) ->
- ["Tests get_env/* functions"];
+%% Tests get_env/* functions.
get_env(Conf) when is_list(Conf) ->
- {ok, _} = application:get_env(kernel, error_logger),
+ ok = application:set_env(kernel, new_var, new_val),
+ {ok, new_val} = application:get_env(kernel, new_var),
undefined = application:get_env(undefined_app, a),
undefined = application:get_env(kernel, error_logger_xyz),
default = application:get_env(kernel, error_logger_xyz, default),
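The rewritten get_env test no longer depends on the kernel error_logger parameter being set; it exercises only the documented application env API. A self-contained sketch of the calls involved:

    %% Set, read (with and without a default), and remove an env key.
    env_roundtrip() ->
        ok = application:set_env(kernel, new_var, new_val),
        {ok, new_val} = application:get_env(kernel, new_var),
        undefined = application:get_env(kernel, no_such_key),
        fallback = application:get_env(kernel, no_such_key, fallback),
        ok = application:unset_env(kernel, new_var).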
@@ -1627,14 +1580,12 @@ get_env(Conf) when is_list(Conf) ->
%% Should be started in a CC view with:
%% erl -sname XXX -rsh ctrsh where XX not in [cp1, cp2, cp3]
%%-----------------------------------------------------------------
-get_key(suite) -> [];
-get_key(doc) ->
- ["Tests read the .app keys."];
+%% Tests reading the .app keys.
get_key(Conf) when is_list(Conf) ->
NodeNames = [Ncp1, _Ncp2, _Ncp3] = node_names([cp1, cp2, cp3], Conf),
WithSyncTime = config_fun(config_inc(NodeNames)),
- % Test [cp1, cp2, cp3]
+ %% Test [cp1, cp2, cp3]
{ok, Cp1} = start_node_config(Ncp1, WithSyncTime, Conf),
ok = rpc:call(Cp1, application, load, [appinc(), d3(NodeNames)]),
@@ -1653,8 +1604,7 @@ get_key(Conf) when is_list(Conf) ->
{ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} =
rpc:call(Cp1, application, get_key, [appinc, start_phases]),
{ok, Env} = rpc:call(Cp1, application, get_key, [appinc ,env]),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, []} = rpc:call(Cp1, application, get_key, [appinc, modules]),
{ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} =
rpc:call(Cp1, application, get_key, [appinc, mod]),
@@ -1675,8 +1625,7 @@ get_key(Conf) when is_list(Conf) ->
{mod, {application_starter, [ch_sup, {appinc, 41, 43}] }},
{start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} =
rpc:call(Cp1, application, get_all_key, [appinc]),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, "Test of new app file, including appnew"} =
gen_server:call({global, {ch,41}}, {get_pid_key, description}),
@@ -1693,8 +1642,7 @@ get_key(Conf) when is_list(Conf) ->
{ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} =
gen_server:call({global, {ch,41}}, {get_pid_key, start_phases}),
{ok, Env} = gen_server:call({global, {ch,41}}, {get_pid_key, env}),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, []} =
gen_server:call({global, {ch,41}}, {get_pid_key, modules}),
{ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} =
@@ -1721,8 +1669,7 @@ get_key(Conf) when is_list(Conf) ->
{mod, {application_starter, [ch_sup, {appinc, 41, 43}] }},
{start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} =
gen_server:call({global, {ch,41}}, get_pid_all_key),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
stop_node_nice(Cp1),
ok.
@@ -1731,8 +1678,7 @@ get_key(Conf) when is_list(Conf) ->
%%% Testing of change of distributed parameter.
%%%-----------------------------------------------------------------
-distr_changed_tc1(suite) -> [];
-distr_changed_tc1(doc) -> ["Test change of distributed parameter."];
+%% Test change of distributed parameter.
distr_changed_tc1(Conf) when is_list(Conf) ->
{OldKernel, OldEnv, {Cp1, Cp2, Cp3}, {_Ncp1, _Ncp2, _Ncp3}, _Config2} =
@@ -1757,7 +1703,7 @@ distr_changed_tc1(Conf) when is_list(Conf) ->
rpc:multicall([Cp1, Cp2, Cp3],
application_controller, config_change, [OldEnv]),
- test_server:sleep(7000),
+ ct:sleep(7000),
DcInfo1 = rpc:call(Cp1, dist_ac, info, []),
DcInfo2 = rpc:call(Cp2, dist_ac, info, []),
@@ -1778,7 +1724,7 @@ distr_changed_tc1(Conf) when is_list(Conf) ->
ok;
EWa1 ->
X1 = io_lib:format("distribution error: Cp1 ~p ",[EWa1]),
- test_server:fail(lists:flatten(X1))
+ ct:fail(lists:flatten(X1))
end,
case lists:sort(Wa2) of
@@ -1786,7 +1732,7 @@ distr_changed_tc1(Conf) when is_list(Conf) ->
ok;
EWa2 ->
X2 = io_lib:format("distribution error: Cp2 ~p ",[EWa2]),
- test_server:fail(lists:flatten(X2))
+ ct:fail(lists:flatten(X2))
end,
case lists:sort(Wa3) of
@@ -1794,7 +1740,7 @@ distr_changed_tc1(Conf) when is_list(Conf) ->
ok;
EWa3 ->
X3 = io_lib:format("distribution error: Cp3 ~p ",[EWa3]),
- test_server:fail(lists:flatten(X3))
+ ct:fail(lists:flatten(X3))
end,
DcInfo1n = rpc:call(Cp1, dist_ac, info, []),
@@ -1816,9 +1762,7 @@ distr_changed_tc1(Conf) when is_list(Conf) ->
ok.
-distr_changed_tc2(suite) -> [];
-distr_changed_tc2(doc) -> ["Test change of distributed parameter, "
- "move appls by crashing a node."];
+%% Test change of distributed parameter, move appls by crashing a node.
distr_changed_tc2(Conf) when is_list(Conf) ->
{OldKernel, OldEnv, {Cp1, Cp2, Cp3}, {Ncp1, _Ncp2, _Ncp3}, Config2} =
@@ -1843,21 +1787,16 @@ distr_changed_tc2(Conf) when is_list(Conf) ->
rpc:multicall([Cp1, Cp2, Cp3],
application_controller, config_change, [OldEnv]),
- test_server:sleep(4000),
+ ct:sleep(4000),
stop_node_nice(Cp1),
- test_server:sleep(10000),
+ ct:sleep(10000),
-% _DcInfo1 = rpc:call(Cp1, dist_ac, info, []),
_DcInfo2 = rpc:call(Cp2, dist_ac, info, []),
_DcInfo3 = rpc:call(Cp3, dist_ac, info, []),
-% ?t:format(0,"#### DcInfo1 ~n~p~n",[_DcInfo1]),
-% DcWa1 = which_applications(Cp1),
DcWa2 = which_applications(Cp2),
DcWa3 = which_applications(Cp3),
-% Wa1 = lists:foldl(fun({A1, _N1, _V1}, AccIn) -> [A1 | AccIn] end,
-% [], DcWa1),
Wa2 = lists:foldl(fun({A2, _N2, _V2}, AccIn) -> [A2 | AccIn] end,
[], DcWa2),
Wa3 = lists:foldl(fun({A3, _N3, _V3}, AccIn) -> [A3 | AccIn] end,
@@ -1869,7 +1808,7 @@ distr_changed_tc2(Conf) when is_list(Conf) ->
ok;
EWa2 ->
X2 = io_lib:format("distribution error: Cp2 ~p ",[EWa2]),
- test_server:fail(lists:flatten(X2))
+ ct:fail(lists:flatten(X2))
end,
case lists:sort(Wa3) of
@@ -1877,12 +1816,12 @@ distr_changed_tc2(Conf) when is_list(Conf) ->
ok;
EWa3 ->
X3 = io_lib:format("distribution error: Cp3 ~p ",[EWa3]),
- test_server:fail(lists:flatten(X3))
+ ct:fail(lists:flatten(X3))
end,
{ok, Cp1} = start_node_boot(Ncp1, Config2, dc),
- test_server:sleep(10000),
+ ct:sleep(10000),
_DcInfo1rs = rpc:call(Cp1, dist_ac, info, []),
_DcInfo2rs = rpc:call(Cp2, dist_ac, info, []),
@@ -1904,7 +1843,7 @@ distr_changed_tc2(Conf) when is_list(Conf) ->
ok;
EWa1rs ->
X1rs = io_lib:format("distribution error: Cp1 ~p ",[EWa1rs]),
- test_server:fail(lists:flatten(X1rs))
+ ct:fail(lists:flatten(X1rs))
end,
case lists:sort(Wa2rs) of
@@ -1912,7 +1851,7 @@ distr_changed_tc2(Conf) when is_list(Conf) ->
ok;
EWa2rs ->
X2rs = io_lib:format("distribution error: Cp2 ~p ",[EWa2rs]),
- test_server:fail(lists:flatten(X2rs))
+ ct:fail(lists:flatten(X2rs))
end,
case lists:sort(Wa3rs) of
@@ -1920,7 +1859,7 @@ distr_changed_tc2(Conf) when is_list(Conf) ->
ok;
EWa3rs ->
X3rs = io_lib:format("distribution error: Cp3 ~p ",[EWa3rs]),
- test_server:fail(lists:flatten(X3rs))
+ ct:fail(lists:flatten(X3rs))
end,
@@ -1939,15 +1878,12 @@ distr_changed_tc2(Conf) when is_list(Conf) ->
%%%-----------------------------------------------------------------
%%% Testing of application configuration change
%%%-----------------------------------------------------------------
-config_change(suite) ->
- [];
-config_change(doc) ->
- ["Test change of application configuration"];
+%% Test change of application configuration.
config_change(Conf) when is_list(Conf) ->
%% Change to data_dir
{ok, CWD} = file:get_cwd(),
- DataDir = ?config(data_dir, Conf),
+ DataDir = proplists:get_value(data_dir, Conf),
ok = file:set_cwd(DataDir),
%% Find out application data from boot script
@@ -2009,10 +1945,7 @@ get_appls([], Res) ->
Res.
-persistent_env(suite) ->
- [];
-persistent_env(doc) ->
- ["Test set_env/4 and unset_env/3 with persistent true"];
+%% Test set_env/4 and unset_env/3 with persistent true.
persistent_env(Conf) when is_list(Conf) ->
ok = application:set_env(appinc, own2, persist, [{persistent, true}]),
ok = application:set_env(appinc, key1, persist, [{persistent, true}]),
@@ -2056,10 +1989,7 @@ persistent_env(Conf) when is_list(Conf) ->
%%%-----------------------------------------------------------------
%%% Tests the 'shutdown_func' kernel config parameter
%%%-----------------------------------------------------------------
-shutdown_func(suite) ->
- [];
-shutdown_func(doc) ->
- ["Tests the 'shutdown_func' kernel config parameter"];
+%% Tests the 'shutdown_func' kernel config parameter.
shutdown_func(Config) when is_list(Config) ->
{ok,Cp1} = start_node(?MODULE_STRING++"_shutdown_func"),
wait_for_ready_net(),
@@ -2077,10 +2007,10 @@ shutdown_func(Config) when is_list(Config) ->
{'DOWN', Mref, _, Pid, noconnection} ->
ok
after 10000 ->
- test_server:fail(timeout)
+ ct:fail(timeout)
end
after 10000 ->
- test_server:fail(timeout)
+ ct:fail(timeout)
end.
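The nested receive above waits for a 'DOWN' message with reason noconnection, that is, for the monitored process to vanish because its node went down. A hypothetical stand-alone helper for that wait:

    %% Hypothetical helper: monitor Pid and wait until the connection to
    %% its node is lost, failing the test case after Timeout ms.
    wait_for_noconnection(Pid, Timeout) ->
        Mref = erlang:monitor(process, Pid),
        receive
            {'DOWN', Mref, process, Pid, noconnection} -> ok
        after Timeout ->
                ct:fail(timeout)
        end.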
@@ -2098,7 +2028,7 @@ do_shutdown(Reason) ->
%%% Tests the 'shutdown_timeout' kernel config parameter
%%%-----------------------------------------------------------------
shutdown_timeout(Config) when is_list(Config) ->
- DataDir = ?config(data_dir,Config),
+ DataDir = proplists:get_value(data_dir,Config),
{ok,Cp1} = start_node(?MODULE_STRING++"_shutdown_timeout"),
wait_for_ready_net(),
ok = rpc:call(Cp1, application, set_env, [kernel, shutdown_timeout, 1000]),
@@ -2121,7 +2051,7 @@ shutdown_timeout(Config) when is_list(Config) ->
%%% Provokes a (previous) application shutdown deadlock
%%%-----------------------------------------------------------------
shutdown_deadlock(Config) when is_list(Config) ->
- DataDir = ?config(data_dir,Config),
+ DataDir = proplists:get_value(data_dir,Config),
code:add_path(filename:join([DataDir,deadlock])),
%% ok = rpc:call(Cp1, application, start, [sasl]),
ok = application:start(deadlock),
@@ -2146,6 +2076,42 @@ shutdown_deadlock(Config) when is_list(Config) ->
%%-----------------------------------------------------------------
+%% Relative paths in sys.config
+%%-----------------------------------------------------------------
+config_relative_paths(Config) ->
+ Dir = ?config(priv_dir,Config),
+ SubDir = filename:join(Dir,"subdir"),
+ Sys = filename:join(SubDir,"sys.config"),
+ ok = filelib:ensure_dir(Sys),
+ ok = file:write_file(Sys,"[\"../up.config\",\"current\"].\n"),
+
+ Up = filename:join(Dir,"up.config"),
+ ok = file:write_file(Up,"[{app1,[{key1,value}]}].\n"),
+
+ {ok,Cwd} = file:get_cwd(),
+ Current1 = filename:join(Cwd,"current.config"),
+ ok = file:write_file(Current1,"[{app1,[{key2,value1}]}].\n"),
+
+ N1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ {ok,Node1} = start_node(N1,filename:rootname(Sys)),
+ ok = rpc:call(Node1, application, load, [app1()]),
+ {ok, value} = rpc:call(Node1, application, get_env,[app1,key1]),
+ {ok, value1} = rpc:call(Node1, application, get_env,[app1,key2]),
+
+ Current2 = filename:join(SubDir,"current.config"),
+ ok = file:write_file(Current2,"[{app1,[{key2,value2}]}].\n"),
+
+ N2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_2"])),
+ {ok, Node2} = start_node(N2,filename:rootname(Sys)),
+ ok = rpc:call(Node2, application, load, [app1()]),
+ {ok, value} = rpc:call(Node2, application, get_env,[app1,key1]),
+ {ok, value2} = rpc:call(Node2, application, get_env,[app1,key2]),
+
+ stop_node_nice([Node1,Node2]),
+
+ ok.
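For reference, the file layout exercised by config_relative_paths looks roughly like this: a sys.config may list further config files, relative names are resolved against the directory of the including sys.config first (falling back to the current working directory, which is what the two nodes above demonstrate), and a bare name without extension refers to <name>.config. Hypothetical contents mirroring the test:

    %% <priv_dir>/subdir/sys.config: includes one file via a relative
    %% path and one via a bare name (extension .config is implied).
    ["../up.config", "current"].

    %% <priv_dir>/up.config: found relative to subdir/.
    [{app1, [{key1, value}]}].

    %% current.config: taken from subdir/ if present there, otherwise
    %% from the current working directory.
    [{app1, [{key2, value2}]}].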
+
+%%-----------------------------------------------------------------
%% Utility functions
%%-----------------------------------------------------------------
app0() ->
@@ -2596,7 +2562,7 @@ is_started(Name, Node) ->
false -> false
end.
-% Waits until application Name is started on at least one node.
+%% Waits until application Name is started on at least one node.
wait_until_started(Name, Nodes) ->
case lists:member(true,
lists:map(fun (N) ->
@@ -2606,11 +2572,11 @@ wait_until_started(Name, Nodes) ->
true ->
true;
false ->
- test_server:sleep(500),
+ ct:sleep(500),
wait_until_started(Name, Nodes)
end.
-% Waits until application Name is stopped on all nodes.
+%% Waits until application Name is stopped on all nodes.
wait_until_stopped(Name, Nodes) ->
case lists:member(true,
lists:map(fun (N) ->
@@ -2620,7 +2586,7 @@ wait_until_stopped(Name, Nodes) ->
false ->
true;
true ->
- test_server:sleep(500),
+ ct:sleep(500),
wait_until_stopped(Name, Nodes)
end.
@@ -2662,13 +2628,13 @@ start_node_args(Name, Args) ->
start_node_boot_3002(Name, Boot) ->
Pa = filename:dirname(code:which(?MODULE)),
- ?t:format(0, "start_node_boot ~p~n",
- [" -pa " ++ Pa ++ " -env ERL_CRASH_DUMP erl_crash_dump." ++
- atom_to_list(Name) ++ " -boot " ++ Boot ++
- " -sasl dummy \"missing "]),
+ ct:pal(?HI_VERBOSITY, "start_node_boot ~p~n",
+ [" -pa " ++ Pa ++ " -env ERL_CRASH_DUMP erl_crash_dump." ++
+ atom_to_list(Name) ++ " -boot " ++ Boot ++
+ " -sasl dummy \"missing "]),
test_server:start_node(Name, slave,
[{args, " -pa " ++ Pa ++
- " -env ERL_CRASH_DUMP erl_crash_dump." ++
+ " -env ERL_CRASH_DUMP erl_crash_dump." ++
atom_to_list(Name) ++ " -boot " ++ Boot ++
" -sasl dummy \"missing "}]).
@@ -2678,18 +2644,20 @@ start_node_boot_config(Name, SysConfigFun, Conf, Boot) ->
start_node_boot(Name, Config, Boot) ->
Pa = filename:dirname(code:which(?MODULE)),
- ?t:format(0, "start_node_boot ~p~n",[" -pa " ++ Pa ++ " -config " ++ Config ++
- " -boot " ++ atom_to_list(Boot)]),
- test_server:start_node(Name, slave, [{args, " -pa " ++ Pa ++ " -config " ++ Config ++
- " -boot " ++ atom_to_list(Boot)}]).
+ ct:pal(?HI_VERBOSITY,
+ "start_node_boot ~p~n",[" -pa " ++ Pa ++ " -config " ++ Config ++
+ " -boot " ++ atom_to_list(Boot)]),
+ test_server:start_node(Name, slave,
+ [{args, " -pa " ++ Pa ++ " -config " ++ Config ++
+ " -boot " ++ atom_to_list(Boot)}]).
start_node_config_sf(Name, SysConfigFun, Conf) ->
ConfigFile = write_config_file(SysConfigFun, Conf),
- DataDir = ?config(data_dir, Conf), % is it used?
+ DataDir = proplists:get_value(data_dir, Conf), % is it used?
start_node(Name, ConfigFile, " -pa " ++ DataDir).
write_config_file(SysConfigFun, Conf) ->
- Dir = ?config(priv_dir, Conf),
+ Dir = proplists:get_value(priv_dir, Conf),
{ok, Fd} = file:open(filename:join(Dir, "sys.config"), [write]),
SysConfigFun(Fd),
file:close(Fd),
@@ -2713,8 +2681,8 @@ get_start_type(Expected) ->
get_start_type(Expected, 30*5, #st{}).
get_start_type(_Expected, 0, Ack) ->
- test_server:format("====== ~p ======~n", [Ack]),
- test_server:fail(not_valid_start_type);
+ io:format("====== ~p ======~n", [Ack]),
+ ct:fail(not_valid_start_type);
get_start_type(Expected, Times, Ack0) ->
#st{normal = N0, local = L0, takeover = T0, failover = F0} = Ack0,
global:send(st_type, {st, read, self()}),
@@ -2760,13 +2728,13 @@ get_start_phase(Expected) ->
Expected ->
ok;
{sp, T1, I1, So1, Sp1, G1} ->
- test_server:format("=============== {sp,T,I,So,Sp,G} ~p ~n",[" "]),
- test_server:format("=========== got ~p ~n",
+ io:format("=============== {sp,T,I,So,Sp,G} ~p ~n",[" "]),
+ io:format("=========== got ~p ~n",
[{sp, T1, I1, So1, Sp1, G1}]),
- test_server:format("====== expected ~p ~n", [Expected]),
- test_server:fail(not_valid_start_phase)
+ io:format("====== expected ~p ~n", [Expected]),
+ ct:fail(not_valid_start_phase)
after 5000 ->
- test_server:fail(not_valid_start_phase)
+ ct:fail(not_valid_start_phase)
end.
start_phase() ->
@@ -2797,10 +2765,10 @@ get_conf_change(Expected) ->
{cc, Expected} ->
ok;
{cc, List} ->
- test_server:format("====== ~p ======~n",[{cc, List}]),
- test_server:fail(not_valid_conf_change)
+ io:format("====== ~p ======~n",[{cc, List}]),
+ ct:fail(not_valid_conf_change)
after 5000 ->
- test_server:fail(not_valid_conf_change_to)
+ ct:fail(not_valid_conf_change_to)
end.
conf_change() ->
@@ -2896,7 +2864,7 @@ create_script_3002(ScriptName) ->
distr_changed_prep(Conf) when is_list(Conf) ->
- % Write .app files
+ %% Write .app files
{ok, Fd1} = file:open("app1.app", [write]),
w_app1(Fd1),
file:close(Fd1),
@@ -2917,7 +2885,7 @@ distr_changed_prep(Conf) when is_list(Conf) ->
file:close(Fd6),
- % Create the .app files and the boot script
+ %% Create the .app files and the boot script
{{KernelVer,StdlibVer}, _} = create_script_dc("dc"),
case is_real_system(KernelVer, StdlibVer) of
@@ -2933,13 +2901,13 @@ distr_changed_prep(Conf) when is_list(Conf) ->
NoSyncTime = config_fun_fast(config_dc(NodeNames)),
WithSyncTime = config_fun(config_dc(NodeNames)),
- Dir = ?config(priv_dir,Conf),
+ Dir = proplists:get_value(priv_dir,Conf),
{ok, Fd_dc2} = file:open(filename:join(Dir, "sys2.config"), [write]),
(config_dc2(NodeNames))(Fd_dc2),
file:close(Fd_dc2),
Config2 = filename:join(Dir, "sys2"),
- % Test [cp1, cp2, cp3]
+ %% Test [cp1, cp2, cp3]
{ok, Cp1} = start_node_boot_config(Ncp1, NoSyncTime, Conf, dc),
{ok, Cp2} = start_node_boot_config(Ncp2, NoSyncTime, Conf, dc),
{ok, Cp3} = start_node_boot_config(Ncp3, WithSyncTime, Conf, dc),
diff --git a/lib/kernel/test/application_SUITE_data/app_start_error.erl b/lib/kernel/test/application_SUITE_data/app_start_error.erl
index 531a10d442..aedcb537d8 100644
--- a/lib/kernel/test/application_SUITE_data/app_start_error.erl
+++ b/lib/kernel/test/application_SUITE_data/app_start_error.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/application_SUITE_data/group_leader.erl b/lib/kernel/test/application_SUITE_data/group_leader.erl
index e791437272..f18cef179b 100644
--- a/lib/kernel/test/application_SUITE_data/group_leader.erl
+++ b/lib/kernel/test/application_SUITE_data/group_leader.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/application_SUITE_data/group_leader_sup.erl b/lib/kernel/test/application_SUITE_data/group_leader_sup.erl
index ade8de8470..6e0df632c3 100644
--- a/lib/kernel/test/application_SUITE_data/group_leader_sup.erl
+++ b/lib/kernel/test/application_SUITE_data/group_leader_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/application_SUITE_data/trans_abnormal_sup.erl b/lib/kernel/test/application_SUITE_data/trans_abnormal_sup.erl
index 98d7e90235..bc2bf84089 100644
--- a/lib/kernel/test/application_SUITE_data/trans_abnormal_sup.erl
+++ b/lib/kernel/test/application_SUITE_data/trans_abnormal_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/application_SUITE_data/trans_normal_sup.erl b/lib/kernel/test/application_SUITE_data/trans_normal_sup.erl
index 233a79d8c3..c839888e39 100644
--- a/lib/kernel/test/application_SUITE_data/trans_normal_sup.erl
+++ b/lib/kernel/test/application_SUITE_data/trans_normal_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/application_SUITE_data/transient.erl b/lib/kernel/test/application_SUITE_data/transient.erl
index a58bc4600e..83cdacf673 100644
--- a/lib/kernel/test/application_SUITE_data/transient.erl
+++ b/lib/kernel/test/application_SUITE_data/transient.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/bif_SUITE.erl b/lib/kernel/test/bif_SUITE.erl
index dd3010567a..2369dd8b71 100644
--- a/lib/kernel/test/bif_SUITE.erl
+++ b/lib/kernel/test/bif_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -22,36 +22,33 @@
init_per_group/2,end_per_group/2]).
-export([
- spawn1/1, spawn2/1, spawn3/1, spawn4/1,
+ spawn1/1, spawn2/1, spawn3/1, spawn4/1,
-
- spawn_link1/1, spawn_link2/1, spawn_link3/1, spawn_link4/1,
-
- spawn_opt2/1, spawn_opt3/1, spawn_opt4/1, spawn_opt5/1,
+ spawn_link1/1, spawn_link2/1, spawn_link3/1, spawn_link4/1,
- spawn_failures/1,
- run_fun/1,
- decode_packet_delim/1,
- wilderness/1]).
+ spawn_opt2/1, spawn_opt3/1, spawn_opt4/1, spawn_opt5/1,
--export([init_per_testcase/2, end_per_testcase/2]).
+ spawn_failures/1,
+
+ run_fun/1,
+ decode_packet_delim/1,
+ wilderness/1]).
--include_lib("test_server/include/test_server.hrl").
+-export([init_per_testcase/2, end_per_testcase/2]).
-% Default timetrap timeout (set in init_per_testcase).
--define(default_timeout, ?t:minutes(1)).
+-include_lib("common_test/include/ct.hrl").
init_per_testcase(_Case, Config) ->
- ?line Dog = ?t:timetrap(?default_timeout),
- [{watchdog, Dog} | Config].
-end_per_testcase(_Case, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog),
+ Config.
+
+end_per_testcase(_Case, _Config) ->
ok.
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
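With common_test the one-minute limit is declared once in suite/0 instead of arming and cancelling a watchdog in every init_per_testcase/end_per_testcase pair. Individual cases can still tighten or relax it through their own info function; a brief sketch (the 30-second override is an illustration only):

    suite() ->
        [{ct_hooks,[ts_install_cth]},
         {timetrap,{minutes,1}}].

    %% Optional per-testcase override via the test case info function:
    spawn1() ->
        [{timetrap,{seconds,30}}].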
all() ->
[{group, spawn_tests}, {group, spawn_link_tests},
@@ -77,360 +74,333 @@ end_per_group(_GroupName, Config) ->
Config.
-spawn1(doc) -> ["Test spawn/1"];
-spawn1(suite) ->
- [];
+%% Test spawn/1.
spawn1(Config) when is_list(Config) ->
- ?line Node = node(),
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
-
- % spawn
- ?line P = spawn(fun() -> Parent ! {self(), fetch_proc_vals(self())} end),
- ?line receive
- {P, PV} ->
- ?line Node = node(P),
- ?line check_proc_vals(false, normal, FA, 0, PV)
- end,
+ Node = node(),
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
+
+ %% spawn
+ P = spawn(fun() -> Parent ! {self(), fetch_proc_vals(self())} end),
+ receive
+ {P, PV} ->
+ Node = node(P),
+ check_proc_vals(false, normal, FA, 0, PV)
+ end,
ok.
-spawn2(doc) -> ["Test spawn/2"];
-spawn2(suite) ->
- [];
+%% Test spawn/2.
spawn2(Config) when is_list(Config) ->
- ?line {ok, Node} = start_node(spawn2),
+ {ok, Node} = start_node(spawn2),
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
- % spawn_link
- ?line P = spawn(Node,
- fun() -> Parent ! {self(), fetch_proc_vals(self())} end),
- ?line receive
- {P, PV} ->
- ?line Node = node(P),
- ?line check_proc_vals(false, normal, FA, 0, PV)
- end,
+ %% spawn_link
+ P = spawn(Node,
+ fun() -> Parent ! {self(), fetch_proc_vals(self())} end),
+ receive
+ {P, PV} ->
+ Node = node(P),
+ check_proc_vals(false, normal, FA, 0, PV)
+ end,
- ?line true = stop_node(Node),
+ true = stop_node(Node),
ok.
-spawn3(doc) -> ["Test spawn/3"];
-spawn3(suite) ->
- [];
+%% Test spawn/3.
spawn3(Config) when is_list(Config) ->
- ?line Node = node(),
-
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
-
- % spawn_link
- ?line P = spawn(?MODULE,
- run_fun,
- [fun() ->
- Parent ! {self(), fetch_proc_vals(self())}
- end]),
- ?line receive
- {P, PV} ->
- ?line Node = node(P),
- ?line check_proc_vals(false, normal, FA, 0, PV)
- end,
+ Node = node(),
+
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
+
+ %% spawn_link
+ P = spawn(?MODULE,
+ run_fun,
+ [fun() ->
+ Parent ! {self(), fetch_proc_vals(self())}
+ end]),
+ receive
+ {P, PV} ->
+ Node = node(P),
+ check_proc_vals(false, normal, FA, 0, PV)
+ end,
ok.
-spawn4(doc) -> ["Test spawn/4"];
-spawn4(suite) ->
- [];
+%% Test spawn/4.
spawn4(Config) when is_list(Config) ->
- ?line {ok, Node} = start_node(spawn4),
-
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
-
- % spawn_link
- ?line P = spawn(Node,
- ?MODULE,
- run_fun,
- [fun() ->
- Parent ! {self(), fetch_proc_vals(self())}
- end]),
- ?line receive
- {P, PV} ->
- ?line Node = node(P),
- ?line check_proc_vals(false, normal, FA, 0, PV)
- end,
-
- ?line true = stop_node(Node),
+ {ok, Node} = start_node(spawn4),
+
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
+
+ %% spawn_link
+ P = spawn(Node,
+ ?MODULE,
+ run_fun,
+ [fun() ->
+ Parent ! {self(), fetch_proc_vals(self())}
+ end]),
+ receive
+ {P, PV} ->
+ Node = node(P),
+ check_proc_vals(false, normal, FA, 0, PV)
+ end,
+
+ true = stop_node(Node),
ok.
-spawn_link1(doc) -> ["Test spawn_link/1"];
-spawn_link1(suite) ->
- [];
+%% Test spawn_link/1.
spawn_link1(Config) when is_list(Config) ->
- ?line Node = node(),
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
-
- % spawn_link
- ?line P = spawn_link(fun() -> Parent ! {self(), fetch_proc_vals(self())} end),
- ?line receive
- {P, PV} ->
- ?line Node = node(P),
- ?line check_proc_vals(true, normal, FA, 0, PV)
- end,
+ Node = node(),
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
+
+ %% spawn_link
+ P = spawn_link(fun() -> Parent ! {self(), fetch_proc_vals(self())} end),
+ receive
+ {P, PV} ->
+ Node = node(P),
+ check_proc_vals(true, normal, FA, 0, PV)
+ end,
ok.
-spawn_link2(doc) -> ["Test spawn_link/2"];
-spawn_link2(suite) ->
- [];
+%% Test spawn_link/2.
spawn_link2(Config) when is_list(Config) ->
- ?line {ok, Node} = start_node(spawn_link2),
+ {ok, Node} = start_node(spawn_link2),
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
- % spawn_link
- ?line P = spawn_link(Node,
- fun() -> Parent ! {self(), fetch_proc_vals(self())} end),
- ?line receive
- {P, PV} ->
- ?line Node = node(P),
- ?line check_proc_vals(true, normal, FA, 0, PV)
- end,
+ %% spawn_link
+ P = spawn_link(Node,
+ fun() -> Parent ! {self(), fetch_proc_vals(self())} end),
+ receive
+ {P, PV} ->
+ Node = node(P),
+ check_proc_vals(true, normal, FA, 0, PV)
+ end,
- ?line true = stop_node(Node),
+ true = stop_node(Node),
ok.
-spawn_link3(doc) -> ["Test spawn_link/3"];
-spawn_link3(suite) ->
- [];
+%% Test spawn_link/3.
spawn_link3(Config) when is_list(Config) ->
- ?line Node = node(),
-
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
-
- % spawn_link
- ?line P = spawn_link(?MODULE,
- run_fun,
- [fun() ->
- Parent ! {self(), fetch_proc_vals(self())}
- end]),
- ?line receive
- {P, PV} ->
- ?line Node = node(P),
- ?line check_proc_vals(true, normal, FA, 0, PV)
- end,
+ Node = node(),
+
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
+
+ %% spawn_link
+ P = spawn_link(?MODULE,
+ run_fun,
+ [fun() ->
+ Parent ! {self(), fetch_proc_vals(self())}
+ end]),
+ receive
+ {P, PV} ->
+ Node = node(P),
+ check_proc_vals(true, normal, FA, 0, PV)
+ end,
ok.
-spawn_link4(doc) -> ["Test spawn_link/4"];
-spawn_link4(suite) ->
- [];
+%% Test spawn_link/4.
spawn_link4(Config) when is_list(Config) ->
- ?line {ok, Node} = start_node(spawn_link4),
-
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
-
- % spawn_link
- ?line P = spawn_link(Node,
- ?MODULE,
- run_fun,
- [fun() ->
- Parent ! {self(), fetch_proc_vals(self())}
- end]),
- ?line receive
- {P, PV} ->
- ?line Node = node(P),
- ?line check_proc_vals(true, normal, FA, 0, PV)
- end,
-
- ?line true = stop_node(Node),
+ {ok, Node} = start_node(spawn_link4),
+
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
+
+ %% spawn_link
+ P = spawn_link(Node,
+ ?MODULE,
+ run_fun,
+ [fun() ->
+ Parent ! {self(), fetch_proc_vals(self())}
+ end]),
+ receive
+ {P, PV} ->
+ Node = node(P),
+ check_proc_vals(true, normal, FA, 0, PV)
+ end,
+
+ true = stop_node(Node),
ok.
-spawn_opt2(doc) -> ["Test spawn_opt/2"];
-spawn_opt2(suite) ->
- [];
+%% Test spawn_opt/2.
spawn_opt2(Config) when is_list(Config) ->
- ?line Node = node(),
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
-
- ?line P1 = spawn_opt(fun() ->
- Parent ! {self(), fetch_proc_vals(self())}
- end,
- [{fullsweep_after, 0},{min_heap_size, 1000},
- link, {priority, max}]),
- ?line receive
- {P1, PV1} ->
- ?line Node = node(P1),
- ?line check_proc_vals(true, max, 0, 1000, PV1)
- end,
- ?line P2 = spawn_opt(fun() -> Parent ! {self(), fetch_proc_vals(self())} end,
- [{min_heap_size, 10}]),
- ?line receive
- {P2, PV2} ->
- ?line Node = node(P2),
- ?line check_proc_vals(false, normal, FA, 10, PV2)
- end,
+ Node = node(),
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
+
+ P1 = spawn_opt(fun() ->
+ Parent ! {self(), fetch_proc_vals(self())}
+ end,
+ [{fullsweep_after, 0},{min_heap_size, 1000},
+ link, {priority, max}]),
+ receive
+ {P1, PV1} ->
+ Node = node(P1),
+ check_proc_vals(true, max, 0, 1000, PV1)
+ end,
+ P2 = spawn_opt(fun() -> Parent ! {self(), fetch_proc_vals(self())} end,
+ [{min_heap_size, 10}]),
+ receive
+ {P2, PV2} ->
+ Node = node(P2),
+ check_proc_vals(false, normal, FA, 10, PV2)
+ end,
ok.
-spawn_opt3(doc) -> ["Test spawn_opt/3"];
-spawn_opt3(suite) ->
- [];
+%% Test spawn_opt/3.
spawn_opt3(Config) when is_list(Config) ->
- ?line {ok, Node} = start_node(spawn_opt3),
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
- ?line P1 = spawn_opt(Node,
- fun() ->
- Parent ! {self(), fetch_proc_vals(self())}
- end,
- [{fullsweep_after,0}, {min_heap_size,1000},
- link, {priority, max}]),
- ?line receive
- {P1, PV1} ->
- ?line Node = node(P1),
- ?line check_proc_vals(true, max, 0, 1000, PV1)
- end,
- ?line P2 = spawn_opt(Node,
- fun() -> Parent ! {self(), fetch_proc_vals(self())} end,
- [{min_heap_size, 10}]),
- ?line receive
- {P2, PV2} ->
- ?line Node = node(P2),
- ?line check_proc_vals(false, normal, FA, 10, PV2)
- end,
- ?line true = stop_node(Node),
+ {ok, Node} = start_node(spawn_opt3),
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
+ P1 = spawn_opt(Node,
+ fun() ->
+ Parent ! {self(), fetch_proc_vals(self())}
+ end,
+ [{fullsweep_after,0}, {min_heap_size,1000},
+ link, {priority, max}]),
+ receive
+ {P1, PV1} ->
+ Node = node(P1),
+ check_proc_vals(true, max, 0, 1000, PV1)
+ end,
+ P2 = spawn_opt(Node,
+ fun() -> Parent ! {self(), fetch_proc_vals(self())} end,
+ [{min_heap_size, 10}]),
+ receive
+ {P2, PV2} ->
+ Node = node(P2),
+ check_proc_vals(false, normal, FA, 10, PV2)
+ end,
+ true = stop_node(Node),
ok.
-spawn_opt4(doc) -> ["Test spawn_opt/4"];
-spawn_opt4(suite) ->
- [];
+%% Test spawn_opt/4.
spawn_opt4(Config) when is_list(Config) ->
- ?line Node = node(),
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
- ?line P1 = spawn_opt(?MODULE,
- run_fun,
- [fun() ->
- Parent ! {self(), fetch_proc_vals(self())}
- end],
- [{fullsweep_after,0}, {min_heap_size,1000},
- link, {priority, max}]),
- ?line receive
- {P1, PV1} ->
- ?line Node = node(P1),
- ?line check_proc_vals(true, max, 0, 1000, PV1)
- end,
- ?line P2 = spawn_opt(?MODULE,
- run_fun,
- [fun() ->
- Parent ! {self(), fetch_proc_vals(self())}
- end],
- [{min_heap_size, 10}]),
- ?line receive
- {P2, PV2} ->
- ?line Node = node(P2),
- ?line check_proc_vals(false, normal, FA, 10, PV2)
- end,
+ Node = node(),
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
+ P1 = spawn_opt(?MODULE,
+ run_fun,
+ [fun() ->
+ Parent ! {self(), fetch_proc_vals(self())}
+ end],
+ [{fullsweep_after,0}, {min_heap_size,1000},
+ link, {priority, max}]),
+ receive
+ {P1, PV1} ->
+ Node = node(P1),
+ check_proc_vals(true, max, 0, 1000, PV1)
+ end,
+ P2 = spawn_opt(?MODULE,
+ run_fun,
+ [fun() ->
+ Parent ! {self(), fetch_proc_vals(self())}
+ end],
+ [{min_heap_size, 10}]),
+ receive
+ {P2, PV2} ->
+ Node = node(P2),
+ check_proc_vals(false, normal, FA, 10, PV2)
+ end,
ok.
-spawn_opt5(doc) -> ["Test spawn_opt/5"];
-spawn_opt5(suite) ->
- [];
+%% Test spawn_opt/5.
spawn_opt5(Config) when is_list(Config) ->
- ?line {ok, Node} = start_node(spawn_opt5),
- ?line Parent = self(),
- ?line {_, _, FA, _} = fetch_proc_vals(self()),
- ?line P1 = spawn_opt(Node,
- ?MODULE,
- run_fun,
- [fun() ->
- Parent ! {self(), fetch_proc_vals(self())}
- end],
- [{fullsweep_after,0}, {min_heap_size,1000},
- link, {priority, max}]),
- ?line receive
- {P1, PV1} ->
- ?line Node = node(P1),
- ?line check_proc_vals(true, max, 0, 1000, PV1)
- end,
- ?line P2 = spawn_opt(Node,
- ?MODULE,
- run_fun,
- [fun() ->
- Parent ! {self(), fetch_proc_vals(self())}
- end],
- [{min_heap_size, 10}]),
- ?line receive
- {P2, PV2} ->
- ?line Node = node(P2),
- ?line check_proc_vals(false, normal, FA, 10, PV2)
- end,
- ?line true = stop_node(Node),
+ {ok, Node} = start_node(spawn_opt5),
+ Parent = self(),
+ {_, _, FA, _} = fetch_proc_vals(self()),
+ P1 = spawn_opt(Node,
+ ?MODULE,
+ run_fun,
+ [fun() ->
+ Parent ! {self(), fetch_proc_vals(self())}
+ end],
+ [{fullsweep_after,0}, {min_heap_size,1000},
+ link, {priority, max}]),
+ receive
+ {P1, PV1} ->
+ Node = node(P1),
+ check_proc_vals(true, max, 0, 1000, PV1)
+ end,
+ P2 = spawn_opt(Node,
+ ?MODULE,
+ run_fun,
+ [fun() ->
+ Parent ! {self(), fetch_proc_vals(self())}
+ end],
+ [{min_heap_size, 10}]),
+ receive
+ {P2, PV2} ->
+ Node = node(P2),
+ check_proc_vals(false, normal, FA, 10, PV2)
+ end,
+ true = stop_node(Node),
ok.
-spawn_failures(doc) ->
- ["Test failure behavior of spawn bifs"];
-spawn_failures(suite) ->
- [];
+%% Test failure behavior of the spawn BIFs.
spawn_failures(Config) when is_list(Config) ->
- ?line ThisNode = node(),
- ?line {ok, Node} = start_node(spawn_remote_failure),
-
- % unknown nodes
- test_server:format("Testing unknown nodes~n", []),
- ?line CrashPid1 = (catch spawn_opt('unknown@node',
- erlang,
- nodes,
- [],
- [])),
- ?line true = is_pid(CrashPid1),
- ?line ThisNode = node(CrashPid1),
- ?line CrashPid2 = (catch spawn_opt('unknown@node',
- fun () -> erlang:nodes() end,
- [])),
- ?line true = is_pid(CrashPid2),
- ?line ThisNode = node(CrashPid2),
-
- ?line CrashPid3 = (catch spawn('unknown@node',
- erlang,
- nodes,
- [])),
- ?line true = is_pid(CrashPid3),
- ?line ThisNode = node(CrashPid3),
- ?line CrashPid4 = (catch spawn('unknown@node',
- fun () -> erlang:nodes() end)),
- ?line true = is_pid(CrashPid4),
- ?line ThisNode = node(CrashPid4),
-
- ?line OTE = process_flag(trap_exit,true),
- ?line CrashPid5 = (catch spawn_link('unknown@node',
- erlang,
- nodes,
- [])),
+ ThisNode = node(),
+ {ok, Node} = start_node(spawn_remote_failure),
+
+ %% unknown nodes
+ io:format("Testing unknown nodes~n", []),
+ CrashPid1 = (catch spawn_opt('unknown@node',
+ erlang,
+ nodes,
+ [],
+ [])),
+ true = is_pid(CrashPid1),
+ ThisNode = node(CrashPid1),
+ CrashPid2 = (catch spawn_opt('unknown@node',
+ fun () -> erlang:nodes() end,
+ [])),
+ true = is_pid(CrashPid2),
+ ThisNode = node(CrashPid2),
+
+ CrashPid3 = (catch spawn('unknown@node',
+ erlang,
+ nodes,
+ [])),
+ true = is_pid(CrashPid3),
+ ThisNode = node(CrashPid3),
+ CrashPid4 = (catch spawn('unknown@node',
+ fun () -> erlang:nodes() end)),
+ true = is_pid(CrashPid4),
+ ThisNode = node(CrashPid4),
+
+ OTE = process_flag(trap_exit,true),
+ CrashPid5 = (catch spawn_link('unknown@node',
+ erlang,
+ nodes,
+ [])),
receive
{'EXIT', CrashPid5, noconnection} ->
- ?line true = is_pid(CrashPid5),
- ?line ThisNode = node(CrashPid5)
+ true = is_pid(CrashPid5),
+ ThisNode = node(CrashPid5)
end,
- ?line CrashPid6 = (catch spawn_link('unknown@node',
- fun () -> erlang:nodes() end)),
+ CrashPid6 = (catch spawn_link('unknown@node',
+ fun () -> erlang:nodes() end)),
receive
{'EXIT', CrashPid6, noconnection} ->
- ?line true = is_pid(CrashPid6),
- ?line ThisNode = node(CrashPid6)
+ true = is_pid(CrashPid6),
+ ThisNode = node(CrashPid6)
end,
process_flag(trap_exit,OTE),
case OTE of
false ->
receive
{'EXIT', P, R} ->
- ?line test_server:fail({'EXIT', P, R})
+ ct:fail({'EXIT', P, R})
after 0 ->
ok
end;
@@ -438,132 +408,125 @@ spawn_failures(Config) when is_list(Config) ->
ok
end,
- % bad node
- test_server:format("Testing bad nodes~n", []),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt("Node",erlang,nodes,[],[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt("Node",
- fun () ->
- erlang:nodes()
- end,
- [])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_link("Node",
- fun () ->
- erlang:nodes()
- end)),
- ?line {'EXIT', {badarg, _}} = (catch spawn("Node",erlang,nodes,[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn("Node",
- fun () ->
- erlang:nodes()
- end)),
-
- % bad module
- test_server:format("Testing bad modules~n", []),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt(Node,"erlang",nodes,[],[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt("erlang",nodes,[],[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_link(Node,"erlang",nodes,[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_link("erlang",nodes,[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn(Node,"erlang",nodes,[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn("erlang",nodes,[])),
-
- % bad function
- test_server:format("Testing bad functions~n", []),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt(Node,erlang,"nodes",[],[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt(Node,not_a_fun,[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt(erlang,"nodes",[],[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt(not_a_fun,[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_link(Node,erlang,"nodes",[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_link(Node,not_a_fun)),
- ?line {'EXIT', {badarg, _}} = (catch spawn_link(erlang,"nodes",[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_link(not_a_fun)),
- ?line {'EXIT', {badarg, _}} = (catch spawn(Node,erlang,"nodes",[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn(Node,not_a_fun)),
- ?line {'EXIT', {badarg, _}} = (catch spawn(erlang,"nodes",[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn(not_a_fun)),
-
-
- % bad argument
- test_server:format("Testing bad arguments~n", []),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt(Node,erlang,nodes,[a|b],[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt(erlang,nodes,[a|b],[])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_link(Node,erlang,nodes,[a|b])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_link(erlang,nodes,[a|b])),
- ?line {'EXIT', {badarg, _}} = (catch spawn(Node,erlang,nodes,[a|b])),
- ?line {'EXIT', {badarg, _}} = (catch spawn(erlang,nodes,[a|b])),
-
- % bad option
- test_server:format("Testing bad options~n", []),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt(Node,erlang,nodes,[],[a|b])),
- ?line {'EXIT', {badarg, _}} = (catch spawn_opt(erlang,nodes,[],[a|b])),
-
-
- ?line true = stop_node(Node),
+ %% bad node
+ io:format("Testing bad nodes~n", []),
+ {'EXIT', {badarg, _}} = (catch spawn_opt("Node",erlang,nodes,[],[])),
+ {'EXIT', {badarg, _}} = (catch spawn_opt("Node",
+ fun () ->
+ erlang:nodes()
+ end,
+ [])),
+ {'EXIT', {badarg, _}} = (catch spawn_link("Node",
+ fun () ->
+ erlang:nodes()
+ end)),
+ {'EXIT', {badarg, _}} = (catch spawn("Node",erlang,nodes,[])),
+ {'EXIT', {badarg, _}} = (catch spawn("Node",
+ fun () ->
+ erlang:nodes()
+ end)),
+
+ %% bad module
+ io:format("Testing bad modules~n", []),
+ {'EXIT', {badarg, _}} = (catch spawn_opt(Node,"erlang",nodes,[],[])),
+ {'EXIT', {badarg, _}} = (catch spawn_opt("erlang",nodes,[],[])),
+ {'EXIT', {badarg, _}} = (catch spawn_link(Node,"erlang",nodes,[])),
+ {'EXIT', {badarg, _}} = (catch spawn_link("erlang",nodes,[])),
+ {'EXIT', {badarg, _}} = (catch spawn(Node,"erlang",nodes,[])),
+ {'EXIT', {badarg, _}} = (catch spawn("erlang",nodes,[])),
+
+ %% bad function
+ io:format("Testing bad functions~n", []),
+ {'EXIT', {badarg, _}} = (catch spawn_opt(Node,erlang,"nodes",[],[])),
+ {'EXIT', {badarg, _}} = (catch spawn_opt(Node,not_a_fun,[])),
+ {'EXIT', {badarg, _}} = (catch spawn_opt(erlang,"nodes",[],[])),
+ {'EXIT', {badarg, _}} = (catch spawn_opt(not_a_fun,[])),
+ {'EXIT', {badarg, _}} = (catch spawn_link(Node,erlang,"nodes",[])),
+ {'EXIT', {badarg, _}} = (catch spawn_link(Node,not_a_fun)),
+ {'EXIT', {badarg, _}} = (catch spawn_link(erlang,"nodes",[])),
+ {'EXIT', {badarg, _}} = (catch spawn_link(not_a_fun)),
+ {'EXIT', {badarg, _}} = (catch spawn(Node,erlang,"nodes",[])),
+ {'EXIT', {badarg, _}} = (catch spawn(Node,not_a_fun)),
+ {'EXIT', {badarg, _}} = (catch spawn(erlang,"nodes",[])),
+ {'EXIT', {badarg, _}} = (catch spawn(not_a_fun)),
+
+
+ %% bad argument
+ io:format("Testing bad arguments~n", []),
+ {'EXIT', {badarg, _}} = (catch spawn_opt(Node,erlang,nodes,[a|b],[])),
+ {'EXIT', {badarg, _}} = (catch spawn_opt(erlang,nodes,[a|b],[])),
+ {'EXIT', {badarg, _}} = (catch spawn_link(Node,erlang,nodes,[a|b])),
+ {'EXIT', {badarg, _}} = (catch spawn_link(erlang,nodes,[a|b])),
+ {'EXIT', {badarg, _}} = (catch spawn(Node,erlang,nodes,[a|b])),
+ {'EXIT', {badarg, _}} = (catch spawn(erlang,nodes,[a|b])),
+
+ %% bad option
+ io:format("Testing bad options~n", []),
+ {'EXIT', {badarg, _}} = (catch spawn_opt(Node,erlang,nodes,[],[a|b])),
+ {'EXIT', {badarg, _}} = (catch spawn_opt(erlang,nodes,[],[a|b])),
+
+
+ true = stop_node(Node),
ok.
check_proc_vals(Link, Priority, FullsweepAfter, MinHeapSize, {Ls, P, FA, HS}) ->
- ?line Link = lists:member(self(), Ls),
- ?line Priority = P,
+ Link = lists:member(self(), Ls),
+ Priority = P,
FullsweepAfter = FA,
true = (HS >= MinHeapSize),
- ?line ok.
+ ok.
fetch_proc_vals(Pid) ->
- ?line PI = process_info(Pid),
- ?line {value,{links, Ls}} = lists:keysearch(links, 1, PI),
- ?line {value,{priority,P}} = lists:keysearch(priority, 1, PI),
+ PI = process_info(Pid),
+ {value,{links, Ls}} = lists:keysearch(links, 1, PI),
+ {value,{priority,P}} = lists:keysearch(priority, 1, PI),
{value,{garbage_collection,Gs}} =
lists:keysearch(garbage_collection, 1, PI),
{value,{fullsweep_after,FA}} =
lists:keysearch(fullsweep_after, 1, Gs),
{value,{heap_size,HS}} = lists:keysearch(heap_size, 1, PI),
- ?line {Ls, P, FA, HS}.
-
-decode_packet_delim(doc) ->
- ["Test erlang:packet_delim/3 with {line_delimiter,0} option"];
-decode_packet_delim(suite) ->
- [];
+ {Ls, P, FA, HS}.
+
+%% Test erlang:packet_delim/3 with {line_delimiter,0} option.
decode_packet_delim(Config) when is_list(Config) ->
{ok,<<"abc",0>>,<<"efg",0>>} =
erlang:decode_packet(line, <<"abc",0,"efg",0>>, [{line_delimiter, 0}]),
{more, undefined} = erlang:decode_packet(line, <<"abc",0,"efg",0>>, []).
-% This testcase should probably be moved somewhere else
-wilderness(doc) ->
- ["Test that memory allocation command line options affecting the"
- "wilderness of the heap are interpreted correct by the emulator "];
-wilderness(suite) ->
- [];
+%% This test case should probably be moved somewhere else.
+
+%% Test that memory allocation command line options affecting the
+%% wilderness of the heap are interpreted correctly by the emulator.
wilderness(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?default_timeout),
- ?line OKParams = {512, 8},
- ?line Alloc = erlang:system_info(allocator),
- ?line test_server:format("Test server allocator info:~n~p", [Alloc]),
+ OKParams = {512, 8},
+ Alloc = erlang:system_info(allocator),
+ io:format("Test server allocator info:~n~p", [Alloc]),
Result = case Alloc of
{Allocator, _, _, _} when Allocator == glibc;
Allocator == dlmalloc ->
- ?line run_wilderness_test(OKParams, OKParams),
- ?line {comment,
- "Allocator used: " ++ atom_to_list(Allocator)};
+ run_wilderness_test(OKParams, OKParams),
+ {comment,
+ "Allocator used: " ++ atom_to_list(Allocator)};
{OtherAllocator, _, _, _} ->
- ?line {skipped,
- "Only run when glibc is used. "
- "Allocator used: "
- ++ atom_to_list(OtherAllocator)}
+ {skipped,
+ "Only run when glibc is used. "
+ "Allocator used: "
+ ++ atom_to_list(OtherAllocator)}
end,
- ?line test_server:timetrap_cancel(Dog),
Result.
-
+
run_wilderness_test({Set_tt, Set_tp}, {Exp_tt, Exp_tp}) ->
Self = self(),
Ref = make_ref(),
SuiteDir = filename:dirname(code:which(?MODULE)),
- ?line {ok, Node} = test_server:start_node(allocator_test,
- slave,
- [{args,
- " -pa "
- ++ SuiteDir
- ++" +MYtt "++to_string(Set_tt)
- ++" +MYtp "++to_string(Set_tp)},
- {linked, false}]),
+ {ok, Node} = test_server:start_node(allocator_test,
+ slave,
+ [{args,
+ " -pa "
+ ++ SuiteDir
+ ++" +MYtt "++to_string(Set_tt)
+ ++" +MYtp "++to_string(Set_tp)},
+ {linked, false}]),
spawn(Node, fun () ->
Self ! {Ref, erlang:system_info(allocator)}
end),
@@ -571,15 +534,15 @@ run_wilderness_test({Set_tt, Set_tp}, {Exp_tt, Exp_tp}) ->
{Ref, {A, V, F, S}} ->
Ett = Exp_tt*1024,
Etp = Exp_tp*1024,
- ?line test_server:format("Test allocator info:~n~p",
- [{A, V, F, S}]),
- ?line {value, {sys_alloc, SA_Opts}}
+ io:format("Test allocator info:~n~p",
+ [{A, V, F, S}]),
+ {value, {sys_alloc, SA_Opts}}
= lists:keysearch(sys_alloc, 1, S),
- ?line {value, {tt, Ett}} = lists:keysearch(tt, 1, SA_Opts),
- ?line {value, {tp, Etp}} = lists:keysearch(tp, 1, SA_Opts)
+ {value, {tt, Ett}} = lists:keysearch(tt, 1, SA_Opts),
+ {value, {tp, Etp}} = lists:keysearch(tp, 1, SA_Opts)
end,
stop_node(Node).
-
+
to_string(X) when is_integer(X) ->
integer_to_list(X);
to_string(X) when is_atom(X) ->
@@ -605,12 +568,12 @@ get_nodenames(N, T, Acc) ->
++ integer_to_list(C)) | Acc]).
start_node(TestCase) ->
- ?line [Name] = get_nodenames(1, TestCase),
- ?line Pa = filename:dirname(code:which(?MODULE)),
- ?line test_server:start_node(Name, slave, [{args, "-pa " ++ Pa}]).
+ [Name] = get_nodenames(1, TestCase),
+ Pa = filename:dirname(code:which(?MODULE)),
+ test_server:start_node(Name, slave, [{args, "-pa " ++ Pa}]).
stop_node(Node) ->
- ?line true = test_server:stop_node(Node).
+ true = test_server:stop_node(Node).
run_fun(Fun) ->
Fun().
diff --git a/lib/kernel/test/ch.erl b/lib/kernel/test/ch.erl
index ba8aa9f05a..c9f378e387 100644
--- a/lib/kernel/test/ch.erl
+++ b/lib/kernel/test/ch.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/ch_sup.erl b/lib/kernel/test/ch_sup.erl
index a297b60200..1d7f17fc95 100644
--- a/lib/kernel/test/ch_sup.erl
+++ b/lib/kernel/test/ch_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/cleanup.erl b/lib/kernel/test/cleanup.erl
index 7eb0a9e140..db738c167e 100644
--- a/lib/kernel/test/cleanup.erl
+++ b/lib/kernel/test/cleanup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@
-export([all/0,groups/0,init_per_group/2,end_per_group/2, cleanup/1]).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
all() ->
[cleanup].
@@ -36,15 +36,14 @@ end_per_group(_GroupName, Config) ->
Config.
-cleanup(suite) -> [];
cleanup(_) ->
- ?line Localhost = list_to_atom(net_adm:localhost()),
- ?line net_adm:world_list([Localhost]),
- ?line case nodes() of
- [] ->
- ok;
- Nodes when is_list(Nodes) ->
- Kill = fun(Node) -> spawn(Node, erlang, halt, []) end,
- ?line lists:foreach(Kill, Nodes),
- ?line test_server:fail({nodes_left, Nodes})
- end.
+ Localhost = list_to_atom(net_adm:localhost()),
+ net_adm:world_list([Localhost]),
+ case nodes() of
+ [] ->
+ ok;
+ Nodes when is_list(Nodes) ->
+ Kill = fun(Node) -> spawn(Node, erlang, halt, []) end,
+ lists:foreach(Kill, Nodes),
+ ct:fail({nodes_left, Nodes})
+ end.
diff --git a/lib/kernel/test/code_SUITE.erl b/lib/kernel/test/code_SUITE.erl
index 73ade14fa1..1314316c13 100644
--- a/lib/kernel/test/code_SUITE.erl
+++ b/lib/kernel/test/code_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2015. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,8 @@
%%
-module(code_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("syntax_tools/include/merl.hrl").
-export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2]).
-export([set_path/1, get_path/1, add_path/1, add_paths/1, del_path/1,
@@ -30,12 +31,17 @@
upgrade/1,
sticky_dir/1, pa_pz_option/1, add_del_path/1,
dir_disappeared/1, ext_mod_dep/1, clash/1,
- load_cached/1, start_node_with_cache/1, add_and_rehash/1,
- where_is_file_cached/1, where_is_file_no_cache/1,
+ where_is_file/1,
purge_stacktrace/1, mult_lib_roots/1, bad_erl_libs/1,
code_archive/1, code_archive2/1, on_load/1, on_load_binary/1,
- on_load_embedded/1, on_load_errors/1, big_boot_embedded/1,
- native_early_modules/1, get_mode/1]).
+ on_load_embedded/1, on_load_errors/1, on_load_update/1,
+ on_load_trace_on_load/1,
+ on_load_purge/1, on_load_self_call/1, on_load_pending/1,
+ on_load_deleted/1,
+ big_boot_embedded/1,
+ module_status/1,
+ native_early_modules/1, get_mode/1,
+ normalized_paths/1]).
-export([init_per_testcase/2, end_per_testcase/2,
init_per_suite/1, end_per_suite/1]).
@@ -47,7 +53,9 @@
-export([compile_load/4]).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,5}}].
all() ->
[set_path, get_path, add_path, add_paths, del_path,
@@ -56,15 +64,19 @@ all() ->
load_binary, dir_req, object_code, set_path_file,
upgrade,
sticky_dir, pa_pz_option, add_del_path, dir_disappeared,
- ext_mod_dep, clash, load_cached, start_node_with_cache,
- add_and_rehash, where_is_file_no_cache,
- where_is_file_cached, purge_stacktrace, mult_lib_roots,
+ ext_mod_dep, clash, where_is_file,
+ purge_stacktrace, mult_lib_roots,
bad_erl_libs, code_archive, code_archive2, on_load,
on_load_binary, on_load_embedded, on_load_errors,
- big_boot_embedded, native_early_modules, get_mode].
+ {group, sequence},
+ on_load_purge, on_load_self_call, on_load_pending,
+ on_load_deleted,
+ module_status,
+ big_boot_embedded, native_early_modules, get_mode, normalized_paths].
-groups() ->
- [].
+%% These need to run in order
+groups() -> [{sequence, [sequence], [on_load_update,
+ on_load_trace_on_load]}].
init_per_group(_GroupName, Config) ->
Config.
@@ -86,6 +98,11 @@ init_per_suite(Config) ->
end_per_suite(Config) ->
Config.
+-define(TESTMOD, test_dummy).
+-define(TESTMODSTR, "test_dummy").
+-define(TESTMODSRC, ?TESTMODSTR ".erl").
+-define(TESTMODOBJ, ?TESTMODSTR ".beam").
+
init_per_testcase(big_boot_embedded, Config) ->
case catch crypto:start() of
ok ->
@@ -93,35 +110,49 @@ init_per_testcase(big_boot_embedded, Config) ->
_Else ->
{skip, "Needs crypto!"}
end;
+init_per_testcase(on_load_embedded, Config0) ->
+ LibRoot = code:lib_dir(),
+ LinkName = filename:join(LibRoot, "on_load_app-1.0"),
+ Config = [{link_name,LinkName}|Config0],
+ init_per_testcase(Config);
init_per_testcase(_Func, Config) ->
- Dog=?t:timetrap(?t:minutes(5)),
- P=code:get_path(),
- P=code:get_path(),
- [{watchdog, Dog}, {code_path, P}|Config].
+ init_per_testcase(Config).
+init_per_testcase(Config) ->
+ P = code:get_path(),
+ [{code_path, P}|Config].
+
+
+end_per_testcase(module_status, Config) ->
+ code:purge(?TESTMOD),
+ code:delete(?TESTMOD),
+ code:purge(?TESTMOD),
+ file:delete(?TESTMODOBJ),
+ file:delete(?TESTMODSRC),
+ end_per_testcase(Config);
end_per_testcase(TC, Config) when TC == mult_lib_roots;
TC == big_boot_embedded ->
{ok, HostName} = inet:gethostname(),
NodeName = list_to_atom(atom_to_list(TC)++"@"++HostName),
- ?t:stop_node(NodeName),
+ test_server:stop_node(NodeName),
+ end_per_testcase(Config);
+end_per_testcase(on_load_embedded, Config) ->
+ LinkName = proplists:get_value(link_name, Config),
+ _ = del_link(LinkName),
end_per_testcase(Config);
end_per_testcase(_Func, Config) ->
end_per_testcase(Config).
end_per_testcase(Config) ->
code:purge(code_b_test),
- Dog=?config(watchdog, Config),
- ?t:timetrap_cancel(Dog),
- P=?config(code_path, Config),
+ P=proplists:get_value(code_path, Config),
true=code:set_path(P),
P=code:get_path(),
ok.
-set_path(suite) -> [];
-set_path(doc) -> [];
set_path(Config) when is_list(Config) ->
P = code:get_path(),
- NonExDir = filename:join(?config(priv_dir, Config), ?t:temp_name("hej")),
+ NonExDir = filename:join(proplists:get_value(priv_dir, Config), test_server:temp_name("hej")),
{'EXIT',_} = (catch code:set_path({a})),
{error, bad_directory} = (catch code:set_path([{a}])),
{error, bad_directory} = code:set_path(NonExDir),
@@ -135,19 +166,15 @@ set_path(Config) when is_list(Config) ->
[LibDir] = code:get_path(),
ok.
-get_path(suite) -> [];
-get_path(doc) -> [];
get_path(Config) when is_list(Config) ->
P = code:get_path(),
- % test that all directories are strings (lists).
+ %% test that all directories are strings (lists).
[] = lists:filter(fun
(Dir) when is_list(Dir) -> false;
(_) -> true
end, P),
ok.
-add_path(suite) -> [];
-add_path(doc) -> [];
add_path(Config) when is_list(Config) ->
P = code:get_path(),
{'EXIT',_} = (catch code:add_path({})),
@@ -168,8 +195,6 @@ add_path(Config) when is_list(Config) ->
code:set_path(P),
ok.
-add_paths(suite) -> [];
-add_paths(doc) -> [];
add_paths(Config) when is_list(Config) ->
P = code:get_path(),
ok = code:add_paths([{}]),
@@ -215,8 +240,6 @@ add_paths(Config) when is_list(Config) ->
code:set_path(P),
ok.
-del_path(suite) -> [];
-del_path(doc) -> [];
del_path(Config) when is_list(Config) ->
P = code:get_path(),
try
@@ -226,18 +249,18 @@ del_path(Config) when is_list(Config) ->
end.
del_path_1(P) ->
- test_server:format("Initial code:get_path()=~p~n",[P]),
+ io:format("Initial code:get_path()=~p~n",[P]),
{'EXIT',_} = (catch code:del_path(3)),
false = code:del_path(my_dummy_name),
false = code:del_path("/kdlk/my_dummy_dir"),
Dir = filename:join([code:lib_dir(kernel),"ebin"]),
- test_server:format("kernel dir: ~p~n",[Dir]),
+ io:format("kernel dir: ~p~n",[Dir]),
true = code:del_path(kernel),
NewP = code:get_path(),
- test_server:format("Path after removing 'kernel':~p~n",[NewP]),
+ io:format("Path after removing 'kernel':~p~n",[NewP]),
ReferenceP = lists:delete(Dir,P),
- test_server:format("Reference path:~p~n",[ReferenceP]),
+ io:format("Reference path:~p~n",[ReferenceP]),
NewP = ReferenceP, % check that dir is deleted
code:set_path(P),
@@ -251,10 +274,8 @@ del_path_1(P) ->
NewP1 = lists:delete(Dir,P), % check that dir is deleted
ok.
-replace_path(suite) -> [];
-replace_path(doc) -> [];
replace_path(Config) when is_list(Config) ->
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
P = code:get_path(),
{'EXIT',_} = (catch code:replace_path(3,"")),
{error, bad_name} = code:replace_path(dummy_name,""),
@@ -289,10 +310,9 @@ replace_path(Config) when is_list(Config) ->
ok.
-dir_disappeared(suite) -> [];
-dir_disappeared(doc) -> ["OTP-3977"];
+%% OTP-3977.
dir_disappeared(Config) when is_list(Config) ->
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
Dir = filename:join(PrivDir, "temp"),
ok = file:make_dir(Dir),
true = code:add_path(Dir),
@@ -300,8 +320,6 @@ dir_disappeared(Config) when is_list(Config) ->
non_existing = code:which(bubbelskrammel),
ok.
-load_file(suite) -> [];
-load_file(doc) -> [];
load_file(Config) when is_list(Config) ->
{error, nofile} = code:load_file(duuuumy_mod),
{error, badfile} = code:load_file(code_a_test),
@@ -316,22 +334,18 @@ load_file(Config) when is_list(Config) ->
test_dir() ->
filename:dirname(code:which(?MODULE)).
-load_abs(suite) -> [];
-load_abs(doc) -> [];
load_abs(Config) when is_list(Config) ->
TestDir = test_dir(),
{error, nofile} = code:load_abs(TestDir ++ "/duuuumy_mod"),
{error, badfile} = code:load_abs(TestDir ++ "/code_a_test"),
{'EXIT', _} = (catch code:load_abs({})),
- {'EXIT', _} = (catch code:load_abs("Non-latin-имя-файла")),
+ {error, nofile} = code:load_abs("Non-latin-имя-файла"),
{module, code_b_test} = code:load_abs(TestDir ++ "/code_b_test"),
code:stick_dir(TestDir),
{error, sticky_directory} = code:load_abs(TestDir ++ "/code_b_test"),
code:unstick_dir(TestDir),
ok.
-ensure_loaded(suite) -> [];
-ensure_loaded(doc) -> [];
ensure_loaded(Config) when is_list(Config) ->
{module, lists} = code:ensure_loaded(lists),
case init:get_argument(mode) of
@@ -348,8 +362,6 @@ ensure_loaded(Config) when is_list(Config) ->
ok
end.
-delete(suite) -> [];
-delete(doc) -> [];
delete(Config) when is_list(Config) ->
OldFlag = process_flag(trap_exit, true),
code:purge(code_b_test),
@@ -366,8 +378,6 @@ delete(Config) when is_list(Config) ->
process_flag(trap_exit, OldFlag),
ok.
-purge(suite) -> [];
-purge(doc) -> [];
purge(Config) when is_list(Config) ->
OldFlag = process_flag(trap_exit, true),
code:purge(code_b_test),
@@ -383,8 +393,23 @@ purge(Config) when is_list(Config) ->
purge_many_exits(Config) when is_list(Config) ->
OldFlag = process_flag(trap_exit, true),
+
code:purge(code_b_test),
{'EXIT',_} = (catch code:purge({})),
+
+ CodePurgeF = fun(M, Exp) -> Exp = code:purge(M) end,
+ purge_many_exits_do(CodePurgeF),
+
+    %% Let's repeat the test for erlang:purge_module, as it does the same
+    %% thing now in erts-8.0 (except for the return value).
+ ErlangPurgeF = fun(M, _Exp) -> erlang:purge_module(M) end,
+ purge_many_exits_do(ErlangPurgeF),
+
+ process_flag(trap_exit, OldFlag),
+ ok.
+
+
+purge_many_exits_do(PurgeF) ->
false = code:purge(code_b_test),
TPids = lists:map(fun (_) ->
{code_b_test:do_spawn(),
@@ -395,7 +420,7 @@ purge_many_exits(Config) when is_list(Config) ->
end)}
end,
lists:seq(1, 1000)),
- % Give them time to start...
+ %% Give them time to start...
receive after 1000 -> ok end,
true = code:delete(code_b_test),
lists:foreach(fun ({Pid1, Pid2}) ->
@@ -403,7 +428,7 @@ purge_many_exits(Config) when is_list(Config) ->
false = code_b_test:check_exit(Pid1),
true = erlang:is_process_alive(Pid2)
end, TPids),
- true = code:purge(code_b_test),
+ PurgeF(code_b_test, true),
lists:foreach(fun ({Pid1, Pid2}) ->
false = erlang:is_process_alive(Pid1),
true = code_b_test:check_exit(Pid1),
@@ -412,13 +437,9 @@ purge_many_exits(Config) when is_list(Config) ->
end, TPids),
lists:foreach(fun ({_Pid1, Pid2}) ->
receive {'EXIT', Pid2, _} -> ok end
- end, TPids),
- process_flag(trap_exit, OldFlag),
- ok.
+ end, TPids).
-soft_purge(suite) -> [];
-soft_purge(doc) -> [];
soft_purge(Config) when is_list(Config) ->
OldFlag = process_flag(trap_exit, true),
code:purge(code_b_test),
@@ -435,8 +456,6 @@ soft_purge(Config) when is_list(Config) ->
process_flag(trap_exit, OldFlag),
ok.
-is_loaded(suite) -> [];
-is_loaded(doc) -> [];
is_loaded(Config) when is_list(Config) ->
code:purge(code_b_test),
code:delete(code_b_test),
@@ -450,10 +469,8 @@ is_loaded(Config) when is_list(Config) ->
code:delete(code_b_test),
ok.
-all_loaded(suite) -> [];
-all_loaded(doc) -> [];
all_loaded(Config) when is_list(Config) ->
- case ?t:is_cover() of
+ case test_server:is_cover() of
true -> {skip,"Cover is running"};
false -> all_loaded_1()
end.
@@ -481,8 +498,6 @@ all_unique([]) -> ok;
all_unique([_]) -> ok;
all_unique([{X,_}|[{Y,_}|_]=T]) when X < Y -> all_unique(T).
-load_binary(suite) -> [];
-load_binary(doc) -> [];
load_binary(Config) when is_list(Config) ->
TestDir = test_dir(),
File = TestDir ++ "/code_b_test" ++ code:objfile_extension(),
@@ -498,23 +513,35 @@ load_binary(Config) when is_list(Config) ->
code:delete(code_b_test),
ok.
-upgrade(Config) ->
- DataDir = ?config(data_dir, Config),
- %%T = [beam, hipe],
- T = [beam],
-
- [upgrade_do(DataDir, Client, U1, U2, O1, O2)
- || Client<-T, U1<-T, U2<-T, O1<-T, O2<-T],
+upgrade(Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ case erlang:system_info(hipe_architecture) of
+ undefined ->
+ upgrade_do(DataDir, beam, [beam]);
+
+ _ ->
+ T = [beam, hipe],
+ [upgrade_do(DataDir, Client, T) || Client <- T],
+
+ case hipe:llvm_support_available() of
+ false -> ok;
+ true ->
+ T2 = [beam, hipe_llvm],
+ [upgrade_do(DataDir, Client, T2) || Client <- T2]
+ end
+ end,
ok.
-upgrade_do(DataDir, Client, U1, U2, O1, O2) ->
+upgrade_do(DataDir, Client, T) ->
compile_load(upgrade_client, DataDir, undefined, Client),
- upgrade_client:run(DataDir, U1, U2, O1, O2),
+ [upgrade_client:run(DataDir, U1, U2, O1, O2)
+ || U1<-T, U2<-T, O1<-T, O2<-T],
ok.
compile_load(Mod, Dir, Ver, CodeType) ->
+ %%erlang:display({"{{{{{{{{{{{{{{{{Loading",Mod,Ver,CodeType}),
Version = case Ver of
undefined ->
io:format("Compiling '~p' as ~p\n", [Mod, CodeType]),
@@ -526,20 +553,23 @@ compile_load(Mod, Dir, Ver, CodeType) ->
end,
Target = case CodeType of
beam -> [];
- hipe -> [native]
+ hipe -> [native];
+ hipe_llvm -> [native,{hipe,[to_llvm]}]
end,
CompOpts = [binary, report] ++ Target ++ Version,
Src = filename:join(Dir, atom_to_list(Mod) ++ ".erl"),
- %io:format("compile:file(~p,~p)\n", [Src, CompOpts]),
+ T1 = erlang:now(),
{ok,Mod,Code} = compile:file(Src, CompOpts),
+ T2 = erlang:now(),
ObjFile = filename:basename(Src,".erl") ++ ".beam",
{module,Mod} = code:load_binary(Mod, ObjFile, Code),
- %IsNative = code:is_module_native(Mod),
+ T3 = erlang:now(),
+ io:format("Compile time ~p ms, Load time ~p ms\n",
+ [timer:now_diff(T2,T1) div 1000, timer:now_diff(T3,T2) div 1000]),
+ %%erlang:display({"}}}}}}}}}}}}}}}Loaded",Mod,Ver,CodeType}),
ok.
-dir_req(suite) -> [];
-dir_req(doc) -> [];
dir_req(Config) when is_list(Config) ->
{ok,[[Root0]]} = init:get_argument(root),
Root = filename:join([Root0]), % Normalised form.
@@ -554,8 +584,6 @@ dir_req(Config) when is_list(Config) ->
{error, bad_name} = code:priv_dir(duuumy),
ok.
-object_code(suite) -> [];
-object_code(doc) -> [];
object_code(Config) when is_list(Config) ->
TestDir = test_dir(),
P = code:get_path(),
@@ -576,22 +604,20 @@ object_code(Config) when is_list(Config) ->
P=code:get_path(),
ok.
-set_path_file(suite) -> [];
-set_path_file(doc) -> ["Test that set_path does not accept ",
- "files as pathnames (known previous bug)"];
+%% Test that set_path does not accept
+%% files as pathnames (known previous bug).
set_path_file(Config) when is_list(Config) ->
- File=filename:join(?config(priv_dir, Config), "testfil"),
+ File=filename:join(proplists:get_value(priv_dir, Config), "testfil"),
ok=file:write_file(File, list_to_binary("lite data")),
{error, bad_directory}=code:set_path([File]).
-sticky_dir(suite) -> [];
-sticky_dir(doc) -> ["Test that a module with the same name as a module in ",
- "a sticky directory cannot be loaded."];
+%% Test that a module with the same name as a module in
+%% a sticky directory cannot be loaded.
sticky_dir(Config) when is_list(Config) ->
Pa = filename:dirname(code:which(?MODULE)),
- {ok,Node} = ?t:start_node(sticky_dir, slave, [{args,"-pa "++Pa}]),
+ {ok,Node} = test_server:start_node(sticky_dir, slave, [{args,"-pa "++Pa}]),
Mods = [code,lists,erlang,init],
- OutDir = filename:join(?config(priv_dir, Config), sticky_dir),
+ OutDir = filename:join(proplists:get_value(priv_dir, Config), sticky_dir),
_ = file:make_dir(OutDir),
Ret = rpc:call(Node, erlang, apply,
[fun sticky_compiler/2,[Mods,OutDir]]),
@@ -600,9 +626,9 @@ sticky_dir(Config) when is_list(Config) ->
ok;
Other ->
io:format("~p\n", [Other]),
- ?t:fail()
+ ct:fail(failed)
end,
- ?t:stop_node(Node),
+ test_server:stop_node(Node),
ok.
sticky_compiler(Files, PrivDir) ->
@@ -611,50 +637,55 @@ sticky_compiler(Files, PrivDir) ->
[R || R <- Rets, R =/= ok].
do_sticky_compile(Mod, Dir) ->
- %% Make sure that the module is loaded. A module being sticky
- %% only prevents it from begin reloaded, not from being loaded
- %% from the wrong place to begin with.
- Mod = Mod:module_info(module),
- File = filename:append(Dir, atom_to_list(Mod)),
- Src = io_lib:format("-module(~s).\n"
- "-export([test/1]).\n"
- "test(me) -> fail.\n", [Mod]),
- ok = file:write_file(File++".erl", Src),
- case c:c(File, [{outdir,Dir}]) of
- {ok,Module} ->
- Module:test(me);
- {error,sticky_directory} ->
- ok
+ case code:is_sticky(Mod) of
+ true ->
+ %% Make sure that the module is loaded. A module being sticky
+ %% only prevents it from begin reloaded, not from being loaded
+ %% from the wrong place to begin with.
+ Mod = Mod:module_info(module),
+ File = filename:append(Dir, atom_to_list(Mod)),
+ Src = io_lib:format("-module(~s).\n"
+ "-export([test/1]).\n"
+ "test(me) -> fail.\n", [Mod]),
+ ok = file:write_file(File++".erl", Src),
+ case c:c(File, [{outdir,Dir}]) of
+ {ok,Module} ->
+ Module:test(me);
+ {error,sticky_directory} ->
+ ok
+ end;
+ false ->
+	    %% For some reason the module is not sticky;
+	    %% it could be that the .erlang file has
+	    %% unstuck it.
+ {Mod, is_not_sticky}
end.
-pa_pz_option(suite) -> [];
-pa_pz_option(doc) -> ["Test that the -pa and -pz options work as expected"];
+%% Test that the -pa and -pz options work as expected.
pa_pz_option(Config) when is_list(Config) ->
- DDir = ?config(data_dir,Config),
+ DDir = proplists:get_value(data_dir,Config),
PaDir = filename:join(DDir,"pa"),
PzDir = filename:join(DDir,"pz"),
- {ok, Node}=?t:start_node(pa_pz1, slave,
+ {ok, Node}=test_server:start_node(pa_pz1, slave,
[{args,
"-pa " ++ PaDir
++ " -pz " ++ PzDir}]),
Ret=rpc:call(Node, code, get_path, []),
[PaDir|Paths] = Ret,
[PzDir|_] = lists:reverse(Paths),
- ?t:stop_node(Node),
- {ok, Node2}=?t:start_node(pa_pz2, slave,
+ test_server:stop_node(Node),
+ {ok, Node2}=test_server:start_node(pa_pz2, slave,
[{args,
"-mode embedded " ++ "-pa "
++ PaDir ++ " -pz " ++ PzDir}]),
Ret2=rpc:call(Node2, code, get_path, []),
[PaDir|Paths2] = Ret2,
[PzDir|_] = lists:reverse(Paths2),
- ?t:stop_node(Node2).
+ test_server:stop_node(Node2).
-add_del_path(suite) ->
- [];
-add_del_path(doc) -> ["add_path, del_path should not cause priv_dir(App) to fail"];
+%% add_path, del_path should not cause priv_dir(App) to fail.
add_del_path(Config) when is_list(Config) ->
- DDir = ?config(data_dir,Config),
+ DDir = proplists:get_value(data_dir,Config),
Dir1 = filename:join(DDir,"dummy_app-1.0/ebin"),
Dir2 = filename:join(DDir,"dummy_app-2.0/ebin"),
code:add_patha(Dir1),
@@ -668,43 +699,35 @@ add_del_path(Config) when is_list(Config) ->
clash(Config) when is_list(Config) ->
- DDir = ?config(data_dir,Config)++"clash/",
+ DDir = proplists:get_value(data_dir,Config)++"clash/",
P = code:get_path(),
- [TestServerPath|_] = [Path || Path <- code:get_path(),
- re:run(Path,"test_server/?$",[unicode]) /= nomatch],
%% test non-clashing entries
- %% remove TestServerPath to prevent clash with test-server path
- true = code:del_path(TestServerPath),
true = code:add_path(DDir++"foobar-0.1/ebin"),
true = code:add_path(DDir++"zork-0.8/ebin"),
- test_server:capture_start(),
+ ct:capture_start(),
ok = code:clash(),
- test_server:capture_stop(),
- [OKMsg|_] = test_server:capture_get(),
+ ct:capture_stop(),
+ [OKMsg|_] = ct:capture_get(),
true = lists:prefix("** Found 0 name clashes", OKMsg),
true = code:set_path(P),
%% test clashing entries
- %% remove TestServerPath to prevent clash with test-server path
- true = code:del_path(TestServerPath),
true = code:add_path(DDir++"foobar-0.1/ebin"),
true = code:add_path(DDir++"foobar-0.1.ez/foobar-0.1/ebin"),
- test_server:capture_start(),
+ ct:capture_start(),
ok = code:clash(),
- test_server:capture_stop(),
- [ClashMsg|_] = test_server:capture_get(),
+ ct:capture_stop(),
+ [ClashMsg|_] = ct:capture_get(),
{match, [" hides "]} = re:run(ClashMsg, "\\*\\* .*( hides ).*",
[{capture,all_but_first,list}]),
true = code:set_path(P),
%% test "Bad path can't read"
- %% remove TestServerPath to prevent clash with test-server path
- Priv = ?config(priv_dir, Config),
- true = code:del_path(TestServerPath),
+ Priv = proplists:get_value(priv_dir, Config),
TmpEzFile = Priv++"foobar-0.tmp.ez",
{ok, _} = file:copy(DDir++"foobar-0.1.ez", TmpEzFile),
true = code:add_path(TmpEzFile++"/foobar-0.1/ebin"),
@@ -716,20 +739,17 @@ clash(Config) when is_list(Config) ->
_ ->
ok = file:delete(TmpEzFile)
end,
- test_server:capture_start(),
+ ct:capture_start(),
ok = code:clash(),
- test_server:capture_stop(),
- [BadPathMsg|_] = test_server:capture_get(),
+ ct:capture_stop(),
+ [BadPathMsg|_] = ct:capture_get(),
true = lists:prefix("** Bad path can't read", BadPathMsg),
true = code:set_path(P),
file:delete(TmpEzFile++".moved"), %% Only effect on windows
ok.
-ext_mod_dep(suite) ->
- [];
-ext_mod_dep(doc) ->
- ["Every module that the code_server uses should be preloaded, "
- "this test case verifies that"];
+%% Every module that the code_server uses should be preloaded;
+%% this test case verifies that.
ext_mod_dep(Config) when is_list(Config) ->
xref:start(s),
xref:set_default(s, [{verbose,false},{warnings,false},
@@ -745,7 +765,7 @@ ext_mod_dep(Config) when is_list(Config) ->
xref:stop(s),
case Else of
ok -> ok;
- _ -> test_server:fail(Else)
+ _ -> ct:fail(Else)
end
end.
@@ -769,6 +789,7 @@ analyse([], [This={M,F,A}|Path], Visited, ErrCnt0) ->
OK = [erlang, os, prim_file, erl_prim_loader, init, ets,
code_server, lists, lists_sort, unicode, binary, filename,
gb_sets, gb_trees, hipe_unified_loader, hipe_bifs,
+ erts_code_purger,
prim_zip, zlib],
ErrCnt1 =
case lists:member(M, OK) or erlang:is_builtin(M,F,A) of
@@ -794,7 +815,7 @@ analyse2(MFA={_,_,_}, Path, Visited0) ->
analyse(FL, [MFA|Path], my_usort([MFA|Visited0]), 0).
%%%% We need to check these manually...
-% fun's are ok as long as they are defined locally.
+%% fun's are ok as long as they are defined locally.
check_funs({'$M_EXPR','$F_EXPR',_},
[{unicode,characters_to_binary_int,3},
{unicode,characters_to_binary,3},
@@ -839,26 +860,13 @@ check_funs({'$M_EXPR','$F_EXPR',1},
{code_server,start_link,1}]) -> 0;
check_funs({'$M_EXPR','$F_EXPR',1},
[{lists,filter,2},
- {code_server,try_archive_subdirs,3},
- {code_server,all_archive_subdirs,1},
- {code_server,archive_subdirs,1},
- {code_server,insert_name,3},
- {code_server,replace_name,2},
- {code_server,update,2},
- {code_server,maybe_update,2},
- {code_server,do_add,4},
- {code_server,add_path,4},
- {code_server,handle_call,3},
- {code_server,loop,1},
- {code_server,system_continue,3}]) -> 0;
+ {code_server,try_archive_subdirs,3}|_]) -> 0;
check_funs({'$M_EXPR','$F_EXPR',_},
[{erlang,apply,2},
{erlang,spawn_link,1},
{code_server,start_link,1}]) -> 0;
check_funs({'$M_EXPR','$F_EXPR',_},
[{erlang,spawn_link,1},{code_server,start_link,1}]) -> 0;
-check_funs({'$M_EXPR',module_info,1},
- [{hipe_unified_loader,patch_to_emu_step1,1} | _]) -> 0;
check_funs({'$M_EXPR','$F_EXPR',2},
[{hipe_unified_loader,write_words,3} | _]) -> 0;
check_funs({'$M_EXPR','$F_EXPR',2},
@@ -870,26 +878,30 @@ check_funs({'$M_EXPR','$F_EXPR',2},
{hipe_unified_loader,sort_and_write,5} | _]) -> 0;
check_funs({'$M_EXPR','$F_EXPR',1},
[{lists,foreach,2},
- {hipe_unified_loader,patch_consts,3} | _]) -> 0;
-check_funs({'$M_EXPR','$F_EXPR',1},
- [{lists,foreach,2},
- {hipe_unified_loader,mark_referred_from,1},
- {hipe_unified_loader,get_refs_from,2}| _]) -> 0;
+ {hipe_unified_loader,patch_consts,4} | _]) -> 0;
check_funs({'$M_EXPR',warning_msg,2},
[{code_server,finish_on_load_report,2} | _]) -> 0;
+check_funs({'$M_EXPR','$F_EXPR',1},
+ [{code_server,run,2}|_]) -> 0;
+check_funs({'$M_EXPR','$F_EXPR',2},
+ [{code_server,handle_on_load,5}|_]) -> 0;
+check_funs({'$M_EXPR','$F_EXPR',2},
+ [{code_server,handle_pending_on_load,4}|_]) -> 0;
+check_funs({'$M_EXPR','$F_EXPR',2},
+ [{code_server,finish_on_load_2,3}|_]) -> 0;
%% This is cheating! /raimo
%%
%% check_funs(This = {M,_,_}, Path) ->
%% case catch atom_to_list(M) of
%% [$h,$i,$p,$e | _] ->
-%% test_server:format("hipe_module_ignored(~p, ~p)~n", [This, Path]),
+%% io:format("hipe_module_ignored(~p, ~p)~n", [This, Path]),
%% 0;
%% _ ->
-%% test_server:format("not_verified(~p, ~p)~n", [This, Path]),
+%% io:format("not_verified(~p, ~p)~n", [This, Path]),
%% 1
%% end;
check_funs(This, Path) ->
- test_server:format("not_verified(~p, ~p)~n", [This, Path]),
+ io:format("not_verified(~p, ~p)~n", [This, Path]),
1.
my_usort(List) ->
@@ -905,140 +917,7 @@ uniq([H|T],A) ->
uniq(T,[H|A]).
-load_cached(suite) ->
- [];
-load_cached(doc) ->
- [];
-load_cached(Config) when is_list(Config) ->
- Priv = ?config(priv_dir, Config),
- WD = filename:dirname(code:which(?MODULE)),
- {ok,Node} =
- ?t:start_node(code_cache_node, peer, [{args,
- "-pa \"" ++ WD ++ "\""},
- {erl, [this]}]),
- CCTabCreated = fun(Tab) ->
- case ets:info(Tab, name) of
- code_cache -> true;
- _ -> false
- end
- end,
- Tabs = rpc:call(Node, ets, all, []),
- case rpc:call(Node, lists, any, [CCTabCreated,Tabs]) of
- true ->
- ?t:stop_node(Node),
- ?t:fail("Code cache should not be active!");
- false ->
- ok
- end,
- rpc:call(Node, code, del_path, [Priv]),
- rpc:call(Node, code, add_pathz, [Priv]),
-
- FullModName = Priv ++ "/code_cache_test",
- {ok,Dev} = file:open(FullModName ++ ".erl", [write]),
- io:format(Dev, "-module(code_cache_test). -export([a/0]). a() -> ok.~n", []),
- ok = file:close(Dev),
- {ok,code_cache_test} = compile:file(FullModName, [{outdir,Priv}]),
-
- F = fun load_loop/2,
- N = 1000,
- {T0,T1} = rpc:call(Node, erlang, apply, [F, [N,code_cache_test]]),
- TNoCache = now_diff(T1, T0),
- rpc:call(Node, code, rehash, []),
- {T2,T3} = rpc:call(Node, erlang, apply, [F, [N,code_cache_test]]),
- TCache = now_diff(T3, T2),
- AvgNoCache = TNoCache/N,
- AvgCache = TCache/N,
- io:format("Avg. load time (no_cache/cache): ~w/~w~n", [AvgNoCache,AvgCache]),
- ?t:stop_node(Node),
- if AvgNoCache =< AvgCache ->
- ?t:fail("Cache not working properly.");
- true ->
- ok
- end.
-
-load_loop(N, M) ->
- load_loop(N, M, now()).
-load_loop(0, _M, T0) ->
- {T0,now()};
-load_loop(N, M, T0) ->
- code:load_file(M),
- code:delete(M),
- code:purge(M),
- load_loop(N-1, M, T0).
-
-now_diff({A2, B2, C2}, {A1, B1, C1}) ->
- ((A2-A1)*1000000 + B2-B1)*1000000 + C2-C1.
-
-start_node_with_cache(suite) ->
- [];
-start_node_with_cache(doc) ->
- [];
-start_node_with_cache(Config) when is_list(Config) ->
- {ok,Node} =
- ?t:start_node(code_cache_node, peer, [{args,
- "-code_path_cache"},
- {erl, [this]}]),
- Tabs = rpc:call(Node, ets, all, []),
- io:format("Tabs: ~w~n", [Tabs]),
- CCTabCreated = fun(Tab) ->
- case rpc:call(Node, ets, info, [Tab,name]) of
- code_cache -> true;
- _ -> false
- end
- end,
- true = lists:any(CCTabCreated, Tabs),
- ?t:stop_node(Node),
- ok.
-
-add_and_rehash(suite) ->
- [];
-add_and_rehash(doc) ->
- [];
-add_and_rehash(Config) when is_list(Config) ->
- Priv = ?config(priv_dir, Config),
- WD = filename:dirname(code:which(?MODULE)),
- {ok,Node} =
- ?t:start_node(code_cache_node, peer, [{args,
- "-pa \"" ++ WD ++ "\""},
- {erl, [this]}]),
- CCTabCreated = fun(Tab) ->
- case ets:info(Tab, name) of
- code_cache -> true;
- _ -> false
- end
- end,
- Tabs0 = rpc:call(Node, ets, all, []),
- case rpc:call(Node, lists, any, [CCTabCreated,Tabs0]) of
- true ->
- ?t:stop_node(Node),
- ?t:fail("Code cache should not be active!");
- false ->
- ok
- end,
- ok = rpc:call(Node, code, rehash, []), % create cache
- Tabs1 = rpc:call(Node, ets, all, []),
- true = rpc:call(Node, lists, any, [CCTabCreated,Tabs1]), % cache table created
- ok = rpc:call(Node, code, rehash, []),
- OkDir = filename:join(Priv, ""),
- BadDir = filename:join(Priv, "guggemuffsussiputt"),
- CP = [OkDir | rpc:call(Node, code, get_path, [])],
- true = rpc:call(Node, code, set_path, [CP]),
- CP1 = [BadDir | CP],
- {error,_} = rpc:call(Node, code, set_path, [CP1]),
- true = rpc:call(Node, code, del_path, [OkDir]),
- true = rpc:call(Node, code, add_path, [OkDir]),
- true = rpc:call(Node, code, add_path, [OkDir]),
- {error,_} = rpc:call(Node, code, add_path, [BadDir]),
- ok = rpc:call(Node, code, rehash, []),
-
- ?t:stop_node(Node),
- ok.
-
-where_is_file_no_cache(suite) ->
- [];
-where_is_file_no_cache(doc) ->
- [];
-where_is_file_no_cache(Config) when is_list(Config) ->
+where_is_file(Config) when is_list(Config) ->
{T,KernelBeamFile} = timer:tc(code, where_is_file, ["kernel.beam"]),
io:format("Load time: ~w ms~n", [T]),
KernelEbinDir = filename:dirname(KernelBeamFile),
@@ -1047,80 +926,45 @@ where_is_file_no_cache(Config) when is_list(Config) ->
non_existing = code:where_is_file("kernel"), % no such file
ok.
-where_is_file_cached(suite) ->
- [];
-where_is_file_cached(doc) ->
- [];
-where_is_file_cached(Config) when is_list(Config) ->
- {ok,Node} =
- ?t:start_node(code_cache_node, peer, [{args,
- "-code_path_cache"},
- {erl, [this]}]),
- Tabs = rpc:call(Node, ets, all, []),
- io:format("Tabs: ~w~n", [Tabs]),
- CCTabCreated = fun(Tab) ->
- case rpc:call(Node, ets, info, [Tab,name]) of
- code_cache -> true;
- _ -> false
- end
- end,
- true = lists:any(CCTabCreated, Tabs),
- KernelBeamFile = rpc:call(Node, code, where_is_file, ["kernel.beam"]),
- {T,KernelBeamFile} = rpc:call(Node, timer, tc, [code,where_is_file,["kernel.beam"]]),
- io:format("Load time: ~w ms~n", [T]),
- KernelEbinDir = rpc:call(Node, filename, dirname, [KernelBeamFile]),
- AppFile = rpc:call(Node, filename, join, [KernelEbinDir,"kernel.app"]),
- AppFile = rpc:call(Node, code, where_is_file, ["kernel.app"]),
- non_existing = rpc:call(Node, code, where_is_file, ["kernel"]), % no such file
- ?t:stop_node(Node),
- ok.
-
-
-purge_stacktrace(suite) ->
- [];
-purge_stacktrace(doc) ->
- ["Test that stacktrace is deleted when purging a referred module"];
+%% Test that stacktrace is deleted when purging a referred module.
purge_stacktrace(Config) when is_list(Config) ->
code:purge(code_b_test),
try code_b_test:call(fun(b) -> ok end, a)
catch
- error:function_clause ->
+ error:function_clause:Stacktrace ->
code:load_file(code_b_test),
- case erlang:get_stacktrace() of
+ case Stacktrace of
[{?MODULE,_,[a],_},
{code_b_test,call,2,_},
{?MODULE,purge_stacktrace,1,_}|_] ->
- false = code:purge(code_b_test),
- [] = erlang:get_stacktrace()
+ false = code:purge(code_b_test)
end
end,
try code_b_test:call(nofun, 2)
catch
- error:function_clause ->
+ error:function_clause:Stacktrace2 ->
code:load_file(code_b_test),
- case erlang:get_stacktrace() of
+ case Stacktrace2 of
[{code_b_test,call,[nofun,2],_},
{?MODULE,purge_stacktrace,1,_}|_] ->
- false = code:purge(code_b_test),
- [] = erlang:get_stacktrace()
+ false = code:purge(code_b_test)
end
end,
Args = [erlang,error,[badarg]],
try code_b_test:call(erlang, error, [badarg,Args])
catch
- error:badarg ->
+ error:badarg:Stacktrace3 ->
code:load_file(code_b_test),
- case erlang:get_stacktrace() of
+ case Stacktrace3 of
[{code_b_test,call,Args,_},
{?MODULE,purge_stacktrace,1,_}|_] ->
- false = code:purge(code_b_test),
- [] = erlang:get_stacktrace()
+ false = code:purge(code_b_test)
end
end,
ok.
mult_lib_roots(Config) when is_list(Config) ->
- DataDir = filename:join(?config(data_dir, Config), "mult_lib_roots"),
+ DataDir = filename:join(proplists:get_value(data_dir, Config), "mult_lib_roots"),
mult_lib_compile(DataDir, "my_dummy_app-b/ebin/lists"),
mult_lib_compile(DataDir,
"my_dummy_app-c/ebin/code_SUITE_mult_root_module"),
@@ -1130,7 +974,7 @@ mult_lib_roots(Config) when is_list(Config) ->
filename:join(DataDir, "second_root"),
{ok,Node} =
- ?t:start_node(mult_lib_roots, slave,
+ test_server:start_node(mult_lib_roots, slave,
[{args,"-env ERL_LIBS "++ErlLibs}]),
Path0 = rpc:call(Node, code, get_path, []),
@@ -1178,15 +1022,15 @@ mult_lib_remove_prefix([$/|T], []) -> T.
bad_erl_libs(Config) when is_list(Config) ->
{ok,Node} =
- ?t:start_node(bad_erl_libs, slave, []),
+ test_server:start_node(bad_erl_libs, slave, []),
Code = rpc:call(Node,code,get_path,[]),
- ?t:stop_node(Node),
+ test_server:stop_node(Node),
{ok,Node2} =
- ?t:start_node(bad_erl_libs, slave,
- [{args,"-env ERL_LIBS /no/such/dir"}]),
+ test_server:start_node(bad_erl_libs, slave,
+ [{args,"-env ERL_LIBS /no/such/dir"}]),
Code2 = rpc:call(Node,code,get_path,[]),
- ?t:stop_node(Node2),
+ test_server:stop_node(Node2),
%% Test that code path is not affected by the faulty ERL_LIBS
Code = Code2,
@@ -1205,8 +1049,8 @@ code_archive2(Config) when is_list(Config) ->
do_code_archive(Config, Root, StripVsn) when is_list(Config) ->
%% Copy the orig files to priv_dir
- DataDir = ?config(data_dir, Config),
- PrivDir = ?config(priv_dir, Config),
+ DataDir = proplists:get_value(data_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
App = code_archive_dict,
VsnBase = atom_to_list(App) ++ "-1.0",
Base =
@@ -1239,9 +1083,15 @@ do_code_archive(Config, Root, StripVsn) when is_list(Config) ->
{ok, _} = zip:create(Archive, [Base],
[{compress, []}, {cwd, PrivDir}]),
+ %% Create a directory and a file outside of the archive.
+ OtherFile = filename:join([RootDir,VsnBase,"other","other.txt"]),
+ OtherContents = ?MODULE:module_info(md5),
+ filelib:ensure_dir(OtherFile),
+ ok = file:write_file(OtherFile, OtherContents),
+
%% Set up ERL_LIBS and start a slave node.
{ok, Node} =
- ?t:start_node(code_archive, slave,
+ test_server:start_node(code_archive, slave,
[{args,"-env ERL_LIBS " ++ RootDir}]),
CodePath = rpc:call(Node, code, get_path, []),
AppEbin = filename:join([Archive, Base, "ebin"]),
@@ -1253,13 +1103,25 @@ do_code_archive(Config, Root, StripVsn) when is_list(Config) ->
%% Start the app
ok = rpc:call(Node, application, start, [App]),
+ %% Get the lib dir for the app.
+ AppLibDir = rpc:call(Node, code, lib_dir, [App]),
+ io:format("AppLibDir: ~p\n", [AppLibDir]),
+ AppLibDir = filename:join(RootDir, VsnBase),
+
%% Access the app priv dir
AppPrivDir = rpc:call(Node, code, priv_dir, [App]),
AppPrivFile = filename:join([AppPrivDir, "code_archive.txt"]),
io:format("AppPrivFile: ~p\n", [AppPrivFile]),
- {ok, _Bin, _Path} =
+ {ok, _Bin, _} =
rpc:call(Node, erl_prim_loader, get_file, [AppPrivFile]),
+ %% Read back the other text file.
+ OtherDirPath = rpc:call(Node, code, lib_dir, [App,other]),
+ OtherFilePath = filename:join(OtherDirPath, "other.txt"),
+ io:format("OtherFilePath: ~p\n", [OtherFilePath]),
+ {ok, OtherContents, _} =
+ rpc:call(Node, erl_prim_loader, get_file, [OtherFilePath]),
+
%% Use the app
Tab = code_archive_tab,
Key = foo,
@@ -1272,7 +1134,7 @@ do_code_archive(Config, Root, StripVsn) when is_list(Config) ->
error = rpc:call(Node, App, find, [Tab, Key]),
ok = rpc:call(Node, App, erase, [Tab]),
- ?t:stop_node(Node),
+ test_server:stop_node(Node),
ok.
compile_app(TopDir, AppName) ->
@@ -1298,15 +1160,12 @@ compile_files([File | Files], SrcDir, OutDir) ->
compile_files([], _, _) ->
ok.
-big_boot_embedded(suite) ->
- [];
-big_boot_embedded(doc) ->
- ["Test that a boot file with (almost) all of OTP can be used to start an"
- " embeddedd system."];
+%% Test that a boot file with (almost) all of OTP can be used to start an
+%% embedded system.
big_boot_embedded(Config) when is_list(Config) ->
{BootArg,AppsInBoot} = create_big_boot(Config),
{ok, Node} =
- ?t:start_node(big_boot_embedded, slave,
+ test_server:start_node(big_boot_embedded, slave,
[{args,"-boot "++BootArg++" -mode embedded"}]),
RemoteNodeApps =
[ {X,Y} || {X,_,Y} <-
@@ -1317,7 +1176,7 @@ big_boot_embedded(Config) when is_list(Config) ->
on_load(Config) when is_list(Config) ->
Master = on_load_test_case_process,
- Data = filename:join([?config(data_dir, Config),"on_load"]),
+ Data = filename:join([proplists:get_value(data_dir, Config),"on_load"]),
ok = file:set_cwd(Data),
up_to_date = make:all([{d,'MASTER',Master}]),
@@ -1361,7 +1220,7 @@ on_load(Config) when is_list(Config) ->
on_load_wait_for_all(Refs),
receive
Any ->
- ?t:fail({unexpected,Any})
+ ct:fail({unexpected,Any})
after 10 ->
ok
end.
@@ -1385,22 +1244,17 @@ on_load_binary(_) ->
register(Master, self()),
%% Construct, compile and pretty-print.
- Mod = on_load_binary,
+ Mod = ?FUNCTION_NAME,
File = atom_to_list(Mod) ++ ".erl",
- Forms = [{attribute,1,file,{File,1}},
- {attribute,1,module,Mod},
- {attribute,2,export,[{ok,0}]},
- {attribute,3,on_load,{init,0}},
- {function,5,init,0,
- [{clause,5,[],[],
- [{op,6,'!',
- {atom,6,Master},
- {tuple,6,[{atom,6,Mod},{call,6,{atom,6,self},[]}]}},
- {'receive',7,[{clause,8,[{atom,8,go}],[],[{atom,8,ok}]}]}]}]},
- {function,11,ok,0,[{clause,11,[],[],[{atom,11,true}]}]}],
- Forms1 = erl_parse:new_anno(Forms),
- {ok,Mod,Bin} = compile:forms(Forms1, [report]),
- [io:put_chars(erl_pp:form(F)) || F <- Forms1],
+ Tree = ?Q(["-module('@Mod@').\n",
+ "-export([ok/0]).\n",
+ "-on_load({init,0}).\n",
+ "init() ->\n",
+ " '@Master@' ! {on_load_binary,self()},\n",
+ " receive go -> ok end.\n",
+ "ok() -> true.\n"]),
+ {ok,Mod,Bin} = merl:compile(Tree),
+ merl:print(Tree),
{Pid1,Ref1} = spawn_monitor(fun() ->
code:load_binary(Mod, File, Bin),
@@ -1429,17 +1283,19 @@ on_load_embedded(Config) when is_list(Config) ->
end.
on_load_embedded_1(Config) ->
- DataDir = ?config(data_dir, Config),
+ DataDir = proplists:get_value(data_dir, Config),
+ LinkName = proplists:get_value(link_name, Config),
%% Link the on_load_app application into the lib directory.
- LibRoot = code:lib_dir(),
- LinkName = filename:join(LibRoot, "on_load_app-1.0"),
OnLoadApp = filename:join(DataDir, "on_load_app-1.0"),
del_link(LinkName),
io:format("LinkName :~p, OnLoadApp: ~p~n",[LinkName,OnLoadApp]),
case file:make_symlink(OnLoadApp, LinkName) of
{error,enotsup} ->
throw({skip,"Support for symlinks required"});
+ {error,eperm} ->
+ %% On Windows, we may not have permissions to create symlinks.
+ throw({skip,"Support for symlinks required"});
ok -> ok
end,
@@ -1464,8 +1320,7 @@ on_load_embedded_1(Config) ->
ok = rpc:call(Node, on_load_embedded, status, []),
%% Clean up.
- stop_node(Node),
- ok = del_link(LinkName).
+ stop_node(Node).
del_link(LinkName) ->
case file:delete(LinkName) of
@@ -1484,7 +1339,7 @@ create_boot(Config, Options) ->
filename:join(LatestDir, LatestName).
create_script(Config) ->
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
Name = PrivDir ++ "on_load_test",
Apps = application_controller:which_applications(),
{value,{_,_,KernelVer}} = lists:keysearch(kernel, 1, Apps),
@@ -1511,33 +1366,45 @@ create_big_boot(Config) ->
ok = file:set_cwd(OldDir),
{filename:join(LatestDir, LatestName),Apps}.
-% The following apps cannot be loaded
-% hipe .app references (or can reference) files that have no
-% corresponding beam file (if hipe is not enabled)
+%% The following apps cannot be loaded.
+%% hipe .app references (or can reference) files that have no
+%% corresponding beam file (if hipe is not enabled).
filter_app("hipe",_) -> false;
-% Dialyzer and typer depends on hipe
+
+%% Dialyzer depends on hipe
filter_app("dialyzer",_) -> false;
-filter_app("typer",_) -> false;
-% Orber requires explicit configuration
+
+%% Orber requires explicit configuration
filter_app("orber",_) -> false;
-% cos* depends on orber
+
+%% cos* depends on orber
filter_app("cos"++_,_) -> false;
-% ic has a mod instruction in the app file but no corresponding start function
+
+%% ic has a mod instruction in the app file but no corresponding start
+%% function
filter_app("ic",_) -> false;
-% Netconf has some dependency that I really do not understand (maybe like orber)
+
+%% Netconf has some dependency that I really do not understand (maybe
+%% like orber)
filter_app("netconf",_) -> false;
-% Safe has the same kind of error in the .app file as ic
+
+%% Safe has the same kind of error in the .app file as ic
filter_app("safe",_) -> false;
-% Comte cannot be started in the "usual" way
+
+%% Comte cannot be started in the "usual" way
filter_app("comte",_) -> false;
-% OS_mon does not find it's port program when running cerl
+
+%% OS_mon does not find its port program when running cerl
filter_app("os_mon",true) -> false;
-% erts is not a "real" app either =/
+
+%% erts is not a "real" app either =/
filter_app("erts",_) -> false;
-% Other apps should be OK.
+
+%% Other apps should be OK.
filter_app(_,_) -> true.
+
create_big_script(Config,Local) ->
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
Name = filename:join(PrivDir,"full_script_test"),
InitialApplications=application:loaded_applications(),
%% Applications left loaded by the application suite, unload them!
@@ -1571,7 +1438,7 @@ on_load_errors(Config) when is_list(Config) ->
Master = on_load_error_test_case_process,
register(Master, self()),
- Data = filename:join([?config(data_dir, Config),"on_load_errors"]),
+ Data = filename:join([proplists:get_value(data_dir, Config),"on_load_errors"]),
ok = file:set_cwd(Data),
up_to_date = make:all([{d,'MASTER',Master}]),
@@ -1597,7 +1464,7 @@ on_load_errors(Config) when is_list(Config) ->
%% There should be no more messages.
receive
Unexpected ->
- ?t:fail({unexpected,Unexpected})
+ ct:fail({unexpected,Unexpected})
after 10 ->
ok
end,
@@ -1626,8 +1493,284 @@ do_on_load_error(ReturnValue) ->
{undef,[{on_load_error,main,[],_}|_]} = Exit
end.
-native_early_modules(suite) -> [];
-native_early_modules(doc) -> ["Test that the native code of early loaded modules is loaded"];
+on_load_update(Config) ->
+ {Mod,Code1} = on_load_update_code(1),
+ {module,Mod} = code:load_binary(Mod, "", Code1),
+ 42 = Mod:a(),
+ 100 = Mod:b(99),
+ 4 = erlang:trace_pattern({Mod,'_','_'}, true),
+
+ {Mod,Code2} = on_load_update_code(2),
+ {error,on_load_failure} = code:load_binary(Mod, "", Code2),
+ 42 = Mod:a(),
+ 78 = Mod:b(77),
+ {'EXIT',{undef,_}} = (catch Mod:never()),
+ 4 = erlang:trace_pattern({Mod,'_','_'}, false),
+
+ {Mod,Code3} = on_load_update_code(3),
+ {module,Mod} = code:load_binary(Mod, "", Code3),
+ 100 = Mod:c(),
+ {'EXIT',{undef,_}} = (catch Mod:a()),
+ {'EXIT',{undef,_}} = (catch Mod:b(10)),
+ {'EXIT',{undef,_}} = (catch Mod:never()),
+
+ code:purge(Mod),
+ code:delete(Mod),
+ code:purge(Mod),
+ ok.
+
+on_load_update_code(Version) ->
+ Mod = ?FUNCTION_NAME,
+ Tree = on_load_update_code_1(Version, Mod),
+ io:format("Version ~p", [Version]),
+ {ok,Mod,Code} = merl:compile(Tree),
+ merl:print(Tree),
+ io:nl(),
+ {Mod,Code}.
+
+on_load_update_code_1(1, Mod) ->
+ ?Q(["-module('@Mod@').\n",
+ "-export([a/0,b/1]).\n"
+ "-on_load(f/0).\n",
+ "f() -> ok.\n",
+ "a() -> 42.\n"
+ "b(I) -> I+1.\n"]);
+on_load_update_code_1(2, Mod) ->
+ ?Q(["-module('@Mod@').\n",
+ "-export([never/0]).\n"
+ "-on_load(f/0).\n",
+ "f() -> 42 = '@Mod@':a(), 1 = '@Mod@':b(0), fail.\n",
+ "never() -> never.\n"]);
+on_load_update_code_1(3, Mod) ->
+ ?Q(["-module('@Mod@').\n",
+ "-export([c/0]).\n"
+ "-on_load(f/0).\n",
+ "f() -> ok.\n",
+ "c() -> 100.\n"]).
+
+%% Test -on_load while trace feature 'on_load' is enabled (OTP-14612)
+on_load_trace_on_load(Config) ->
+ Papa = self(),
+ Tracer = spawn_link(fun F() -> receive M -> Papa ! M end, F() end),
+ {tracer,[]} = erlang:trace_info(self(),tracer),
+ erlang:trace(self(), true, [call, {tracer, Tracer}]),
+ erlang:trace_pattern(on_load, true, []),
+ on_load_update(Config),
+ erlang:trace_pattern(on_load, false, []),
+ erlang:trace(self(), false, [call]),
+
+ Ms = flush(),
+ [{trace, Papa, call, {on_load_update_code, a, []}},
+ {trace, Papa, call, {on_load_update_code, b, [99]}},
+ {trace, Papa, call, {on_load_update_code, c, []}}] = Ms,
+
+ exit(Tracer, normal),
+ ok.
+
+flush() ->
+ receive M -> [M | flush()]
+ after 100 -> []
+ end.
+
+
+on_load_purge(_Config) ->
+ Mod = ?FUNCTION_NAME,
+ register(Mod, self()),
+ Tree = ?Q(["-module('@Mod@').\n",
+ "-on_load(f/0).\n",
+ "loop() -> loop().\n",
+ "f() ->\n",
+ "'@Mod@' ! {self(),spawn(fun loop/0)},\n",
+ "receive Ack -> Ack end.\n"]),
+ merl:print(Tree),
+ {ok,Mod,Code} = merl:compile(Tree),
+ P = spawn(fun() ->
+ exit(code:load_binary(Mod, "", Code))
+ end),
+ monitor(process, P),
+ receive
+ {Pid1,Pid2} ->
+ monitor(process, Pid2),
+ Pid1 ! ack_and_failure,
+ receive
+ {'DOWN',_,process,P,Exit1} ->
+ {error,on_load_failure} = Exit1
+ end,
+ receive
+ {'DOWN',_,process,Pid2,Exit2} ->
+ io:format("~p\n", [Exit2])
+ after 10000 ->
+ ct:fail(no_down_message)
+ end
+ end.
+
+on_load_self_call(_Config) ->
+ Mod = ?FUNCTION_NAME,
+ register(Mod, self()),
+ Tree = ?Q(["-module('@Mod@').\n",
+ "-export([ext/0]).\n",
+ "-on_load(f/0).\n",
+ "f() ->\n",
+ " '@Mod@' ! (catch '@Mod@':ext()),\n",
+ " ok.\n",
+ "ext() -> good_work.\n"]),
+ merl:print(Tree),
+ {ok,Mod,Code} = merl:compile(Tree),
+
+ {'EXIT',{undef,_}} = on_load_do_load(Mod, Code),
+ good_work = on_load_do_load(Mod, Code),
+
+ ok.
+
+on_load_do_load(Mod, Code) ->
+ spawn(fun() ->
+ {module,Mod} = code:load_binary(Mod, "", Code)
+ end),
+ receive
+ Any -> Any
+ end.
+
+on_load_pending(_Config) ->
+ Mod = ?FUNCTION_NAME,
+ Tree1 = ?Q(["-module('@Mod@').\n",
+ "-on_load(f/0).\n",
+ "f() ->\n",
+ " register('@Mod@', self()),\n",
+ " receive _ -> ok end.\n"]),
+ merl:print(Tree1),
+ {ok,Mod,Code1} = merl:compile(Tree1),
+
+ Tree2 = ?Q(["-module('@Mod@').\n",
+ "-export([t/0]).\n",
+ "t() -> ok.\n"]),
+ merl:print(Tree2),
+ {ok,Mod,Code2} = merl:compile(Tree2),
+
+ Self = self(),
+ {_,Ref1} =
+ spawn_monitor(fun() ->
+ Self ! started1,
+ {module,Mod} = code:load_binary(Mod, "", Code1)
+ end),
+ receive started1 -> ok end,
+ timer:sleep(10),
+ {_,Ref2} =
+ spawn_monitor(fun() ->
+ Self ! started2,
+ {module,Mod} = code:load_binary(Mod, "", Code2),
+ ok = Mod:t()
+ end),
+ receive started2 -> ok end,
+ receive
+ Unexpected ->
+ ct:fail({unexpected,Unexpected})
+ after 100 ->
+ ok
+ end,
+ Mod ! go,
+ receive
+ {'DOWN',Ref1,process,_,normal} -> ok
+ end,
+ receive
+ {'DOWN',Ref2,process,_,normal} -> ok
+ end,
+ ok = Mod:t(),
+ ok.
+
+on_load_deleted(_Config) ->
+ Mod = ?FUNCTION_NAME,
+
+ R0 = fun() ->
+ Tree = ?Q(["-module('@Mod@').\n",
+ "-on_load(f/0).\n",
+ "f() -> ok.\n"]),
+ merl:print(Tree),
+ {ok,Mod,Code} = merl:compile(Tree),
+ {module,Mod} = code:load_binary(Mod, "", Code)
+ end,
+ delete_before_reload(Mod, R0),
+ delete_before_reload(Mod, R0),
+
+ R1 = fun() ->
+ Tree = ?Q(["-module('@Mod@').\n",
+ "-on_load(f/0).\n",
+ "f() -> fail.\n"]),
+ merl:print(Tree),
+ {ok,Mod,Code} = merl:compile(Tree),
+ {error,on_load_failure} = code:load_binary(Mod, "", Code)
+ end,
+ delete_before_reload(Mod, R1),
+ delete_before_reload(Mod, R1),
+
+ OtherMod = list_to_atom(lists:concat([Mod,"_42"])),
+ OtherTree = ?Q(["-module('@OtherMod@').\n"]),
+ merl:print(OtherTree),
+ {ok,OtherMod,OtherCode} = merl:compile(OtherTree),
+
+ R2 = fun() ->
+ RegName = 'on_load__registered_name',
+ Tree = ?Q(["-module('@Mod@').\n",
+ "-on_load(f/0).\n",
+ "f() ->\n",
+ " register('@RegName@', self()),\n",
+ " receive _ -> ok end.\n"]),
+ merl:print(Tree),
+ {ok,Mod,Code} = merl:compile(Tree),
+ spawn(fun() ->
+ {module,Mod} = code:load_binary(Mod, "", Code)
+ end),
+ receive after 1 -> ok end,
+ {module,OtherMod} = code:load_binary(OtherMod, "",
+ OtherCode),
+ RegName ! stop
+ end,
+ delete_before_reload(Mod, R2),
+
+ ok.
+
+delete_before_reload(Mod, Reload) ->
+ false = check_old_code(Mod),
+
+ Tree1 = ?Q(["-module('@Mod@').\n",
+ "-export([f/1]).\n",
+ "f(Parent) ->\n",
+ " register('@Mod@', self()),\n",
+ " Parent ! started,\n",
+ " receive _ -> ok end.\n"]),
+ merl:print(Tree1),
+ {ok,Mod,Code1} = merl:compile(Tree1),
+
+ Self = self(),
+ spawn(fun() ->
+ {module,Mod} = code:load_binary(Mod, "", Code1),
+ Mod:f(Self)
+ end),
+ receive started -> ok end,
+
+ true = code:delete(Mod),
+ true = check_old_code(Mod),
+
+ Reload(),
+
+    %% When loading the module with the -on_load() function,
+ %% the reference to the old code would be lost. Make sure that
+ %% the old code is remembered and is still preventing the
+ %% purge.
+ false = code:soft_purge(Mod),
+
+ %% Get rid of the old code.
+ Mod ! stop,
+ receive after 1 -> ok end,
+ true = code:soft_purge(Mod),
+
+ %% Unload the version of the module with the -on_load() function.
+ true = code:delete(Mod),
+ true = code:soft_purge(Mod),
+
+ ok.
+
+
+%% Test that the native code of early loaded modules is loaded.
native_early_modules(Config) when is_list(Config) ->
case erlang:system_info(hipe_architecture) of
undefined ->
@@ -1655,11 +1798,205 @@ native_early_modules_1(Architecture) ->
ok
end.
-get_mode(suite) -> [];
-get_mode(doc) -> ["Test that the mode of the code server is properly retrieved"];
+%% Test that the mode of the code server is properly retrieved.
get_mode(Config) when is_list(Config) ->
interactive = code:get_mode().
+%% Make sure that the paths for all loaded modules have been normalized.
+normalized_paths(_Config) ->
+ do_normalized_paths(erlang:loaded()).
+
+do_normalized_paths([M|Ms]) ->
+ case code:which(M) of
+ Special when is_atom(Special) ->
+ do_normalized_paths(Ms);
+ File when is_list(File) ->
+ File = filename:join([File]),
+ do_normalized_paths(Ms)
+ end;
+do_normalized_paths([]) ->
+ ok.
+
+%% Test that module_status/1 behaves as expected
+module_status(_Config) ->
+ case test_server:is_cover() of
+ true ->
+ module_status();
+ false ->
+ %% Make sure that we terminate the cover server.
+ try
+ module_status()
+ after
+ cover:stop()
+ end
+ end.
+
+module_status() ->
+ %% basics
+ not_loaded = code:module_status(fubar), % nonexisting
+ {file, preloaded} = code:is_loaded(erlang),
+ loaded = code:module_status(erlang), % preloaded
+ loaded = code:module_status(?MODULE), % normal known loaded
+
+ non_existing = code:which(?TESTMOD), % verify dummy name not in path
+ code:purge(?TESTMOD), % ensure no previous version in memory
+ code:delete(?TESTMOD),
+ code:purge(?TESTMOD),
+
+ %% generated code is detected as such
+ {ok,?TESTMOD,Bin} = compile:forms(dummy_ast(), []),
+ {module,?TESTMOD} = code:load_binary(?TESTMOD,"",Bin), % no source file
+ ok = ?TESTMOD:f(),
+ "" = code:which(?TESTMOD), % verify empty string for source file
+ loaded = code:module_status(?TESTMOD),
+
+ %% deleting generated code
+ true = code:delete(?TESTMOD),
+ non_existing = code:which(?TESTMOD), % verify still not in path
+ not_loaded = code:module_status(?TESTMOD),
+
+ %% beam file exists but not loaded
+ make_source_file(<<"0">>),
+ compile_beam(0),
+ true = (non_existing =/= code:which(?TESTMOD)), % verify in path
+ not_loaded = code:module_status(?TESTMOD),
+
+ %% loading code from disk makes it loaded
+ load_code(),
+ loaded = code:module_status(?TESTMOD), % loaded
+
+ %% cover compiling a module
+ {ok,?TESTMOD} = cover:compile(?TESTMOD),
+ {file, cover_compiled} = code:is_loaded(?TESTMOD), % verify cover compiled
+ modified = code:module_status(?TESTMOD), % loaded cover code but file exists
+ remove_code(),
+ removed = code:module_status(?TESTMOD), % removed
+ compile_beam(0),
+ modified = code:module_status(?TESTMOD), % recreated
+ load_code(),
+ loaded = code:module_status(?TESTMOD), % loading removes cover status
+ code:purge(?TESTMOD),
+ true = code:delete(?TESTMOD),
+ not_loaded = code:module_status(?TESTMOD), % deleted
+
+ %% recompilation ignores timestamps, only md5 matters
+ load_code(),
+ compile_beam(1100),
+ loaded = code:module_status(?TESTMOD),
+
+ %% modifying module detects different md5
+ make_source_file(<<"1">>),
+ compile_beam(0),
+ modified = code:module_status(?TESTMOD),
+
+ %% loading the modified code from disk makes it loaded
+ load_code(),
+ loaded = code:module_status(?TESTMOD),
+
+ %% removing and recreating a module with same md5
+ remove_code(),
+ removed = code:module_status(?TESTMOD),
+ compile_beam(0),
+ loaded = code:module_status(?TESTMOD),
+
+ case erlang:system_info(hipe_architecture) of
+ undefined ->
+ %% no native support
+ ok;
+ _ ->
+ %% native chunk is ignored if beam code is already loaded
+ load_code(),
+ loaded = code:module_status(?TESTMOD),
+ false = has_native(?TESTMOD),
+ compile_native(0),
+ BeamMD5 = erlang:get_module_info(?TESTMOD, md5),
+ {ok,{?TESTMOD,BeamMD5}} = beam_lib:md5(?TESTMODOBJ), % beam md5 unchanged
+ loaded = code:module_status(?TESTMOD),
+
+ %% native code reported as loaded, though different md5 from beam
+ load_code(),
+ true = has_native(?TESTMOD),
+ NativeMD5 = erlang:get_module_info(?TESTMOD, md5),
+ true = (BeamMD5 =/= NativeMD5),
+ loaded = code:module_status(?TESTMOD),
+
+ %% recompilation ignores timestamps, only md5 matters
+ compile_native(1100), % later timestamp
+ loaded = code:module_status(?TESTMOD),
+
+ %% modifying native module detects different md5
+ make_source_file(<<"2">>),
+ compile_native(0),
+ modified = code:module_status(?TESTMOD),
+
+ %% loading the modified native code from disk makes it loaded
+ load_code(),
+ true = has_native(?TESTMOD),
+ NativeMD5_2 = erlang:get_module_info(?TESTMOD, md5),
+ true = (NativeMD5 =/= NativeMD5_2), % verify native md5 changed
+ {ok,{?TESTMOD,BeamMD5_2}} = beam_lib:md5(?TESTMODOBJ),
+ true = (BeamMD5_2 =/= NativeMD5_2), % verify md5 differs from beam
+ loaded = code:module_status(?TESTMOD),
+
+ %% removing and recreating a native module with same md5
+ remove_code(),
+ removed = code:module_status(?TESTMOD),
+ compile_native(0),
+ loaded = code:module_status(?TESTMOD),
+
+ %% purging/deleting native module
+ code:purge(?TESTMOD),
+ true = code:delete(?TESTMOD),
+ not_loaded = code:module_status(?TESTMOD)
+ end,
+ ok.
+
+compile_beam(Sleep) ->
+ compile(Sleep, []).
+
+compile_native(Sleep) ->
+ compile(Sleep, [native]).
+
+compile(Sleep, Opts) ->
+ timer:sleep(Sleep), % increment compilation timestamp
+ {ok,?TESTMOD} = compile:file(?TESTMODSRC, Opts).
+
+load_code() ->
+ code:purge(?TESTMOD),
+ {module,?TESTMOD} = code:load_file(?TESTMOD).
+
+remove_code() ->
+ ok = file:delete(?TESTMODOBJ).
+
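+%% A module counts as native if it reports any native function addresses.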
+has_native(Module) ->
+ case erlang:get_module_info(Module, native_addresses) of
+ [] -> false;
+ [_|_] -> true
+ end.
+
+make_source_file(Body) ->
+ ok = file:write_file(?TESTMODSRC, dummy_source(Body)).
+
+dummy_source(Body) ->
+ [<<"-module(" ?TESTMODSTR ").\n"
+ "-export([f/0]).\n"
+ "f() -> ">>, Body, <<".\n">>].
+
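+%% Abstract forms for a minimal module that only exports f/0.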
+dummy_ast() ->
+ dummy_ast(?TESTMODSTR).
+
+dummy_ast(Mod) when is_atom(Mod) ->
+ dummy_ast(atom_to_list(Mod));
+dummy_ast(ModStr) ->
+ [scan_form("-module(" ++ ModStr ++ ")."),
+ scan_form("-export([f/0])."),
+ scan_form("f() -> ok.")].
+
+scan_form(String) ->
+ {ok,Ts,_} = erl_scan:string(String),
+ {ok,F} = erl_parse:parse_form(Ts),
+ F.
+
%%-----------------------------------------------------------------
%% error_logger handler.
%% (Copied from stdlib/test/proc_lib_SUITE.erl.)
@@ -1686,7 +2023,7 @@ terminate(_Reason, State) ->
%%%
start_node(Name, Param) ->
- ?t:start_node(Name, slave, [{args, Param}]).
+ test_server:start_node(Name, slave, [{args, Param}]).
stop_node(Node) ->
- ?t:stop_node(Node).
+ test_server:stop_node(Node).
diff --git a/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict.erl b/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict.erl
index 9fb5a31358..1fcf05a0a1 100644
--- a/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict.erl
+++ b/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict_app.erl b/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict_app.erl
index 8012ddafb4..d164db9794 100644
--- a/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict_app.erl
+++ b/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict_app.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict_sup.erl b/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict_sup.erl
index 8c7b49644a..e659202e08 100644
--- a/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict_sup.erl
+++ b/lib/kernel/test/code_SUITE_data/code_archive_dict-1.0/src/code_archive_dict_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/code_SUITE_data/mult_lib_roots/first_root/my_dummy_app-b/ebin/lists.erl b/lib/kernel/test/code_SUITE_data/mult_lib_roots/first_root/my_dummy_app-b/ebin/lists.erl
index 07a4ffc8b6..6accd00191 100644
--- a/lib/kernel/test/code_SUITE_data/mult_lib_roots/first_root/my_dummy_app-b/ebin/lists.erl
+++ b/lib/kernel/test/code_SUITE_data/mult_lib_roots/first_root/my_dummy_app-b/ebin/lists.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/code_SUITE_data/mult_lib_roots/first_root/my_dummy_app-c/ebin/code_SUITE_mult_root_module.erl b/lib/kernel/test/code_SUITE_data/mult_lib_roots/first_root/my_dummy_app-c/ebin/code_SUITE_mult_root_module.erl
index 15ff5d4896..f45e5b2dba 100644
--- a/lib/kernel/test/code_SUITE_data/mult_lib_roots/first_root/my_dummy_app-c/ebin/code_SUITE_mult_root_module.erl
+++ b/lib/kernel/test/code_SUITE_data/mult_lib_roots/first_root/my_dummy_app-c/ebin/code_SUITE_mult_root_module.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/code_SUITE_data/upgrade_client.erl b/lib/kernel/test/code_SUITE_data/upgrade_client.erl
index bb655e01d3..1c3c2def53 100644
--- a/lib/kernel/test/code_SUITE_data/upgrade_client.erl
+++ b/lib/kernel/test/code_SUITE_data/upgrade_client.erl
@@ -9,6 +9,8 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
%% Load version 1 of upgradee
code_SUITE:compile_load(upgradee, Dir, 1, Upgradee1),
+ Tracer = start_tracing(),
+
?line 1 = upgradee:exp1(),
?line 1 = upgradee:exp1exp2(),
?line 1 = upgradee:exp1loc2(),
@@ -56,6 +58,15 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc1),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc2),
+ Env1 = "Env1",
+ put(loc1_fun, upgradee:get_local_fun(Env1)),
+ ?line {1,Env1} = (get(loc1_fun))(),
+
+ put(exp1exp2_fun, upgradee:get_exp1exp2_fun()),
+ ?line 1 = (get(exp1exp2_fun))(),
+
+ ok = check_tracing(Tracer, 13),
+
%%
%% Load version 1 of other
%%
@@ -78,6 +89,8 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
?line {'EXIT',{undef,_}} = proxy_call(P, other, exp2),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc2),
+ ok = check_tracing(Tracer, 5),
+
%%
%% Load version 2 of upgradee
%%
@@ -130,6 +143,15 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
?line {'EXIT',{undef,_}} = proxy_call(P, other, exp2),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc2),
+ ?line {1,Env1} = (get(loc1_fun))(),
+ Env2 = "Env2",
+ put(loc2_fun, upgradee:get_local_fun(Env2)),
+ ?line {2,Env2} = (get(loc2_fun))(),
+
+ ?line 2 = (get(exp1exp2_fun))(),
+
+ ok = check_tracing(Tracer, 10),
+
%%
%% Load version 2 of other
%%
@@ -182,17 +204,26 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc1loc2),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc2),
+ ?line {1,Env1} = (get(loc1_fun))(),
+ ?line {2,Env2} = (get(loc2_fun))(),
+ ?line 2 = (get(exp1exp2_fun))(),
+
+ ok = check_tracing(Tracer, 10),
%%
%% Upgrade proxy to version 2
%%
P ! upgrade_order,
-
%%
- io:format("Delete version 2 of 'upgradee'\n",[]),
+ io:format("Purge version 1 of 'upgradee'\n",[]),
%%
+ put(loc1_fun,undefined),
code:purge(upgradee),
+
+ %%
+ io:format("Delete version 2 of 'upgradee'\n",[]),
+ %%
code:delete(upgradee),
?line {'EXIT',{undef,_}} = (catch upgradee:exp2()),
@@ -239,17 +270,24 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
?line {'EXIT',{undef,_}} = proxy_call(P, other, exp1loc2),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc1loc2),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc2),
+
+ ?line {'EXIT',{undef,_}} = (catch (get(exp1exp2_fun))()),
+ ok = check_tracing(Tracer, 14),
+
unlink(P),
exit(P, die_please),
io:format("Purge 'upgradee'\n",[]),
+ put(loc2_fun,undefined),
code:purge(upgradee),
io:format("Delete and purge 'other'\n",[]),
code:purge(other),
code:delete(other),
code:purge(other),
+
+ stop_tracing(Tracer),
ok.
proxy_call(Pid, CallType, Func) ->
@@ -257,3 +295,55 @@ proxy_call(Pid, CallType, Func) ->
receive
{Pid, call_result, Func, Ret} -> Ret
end.
+
+
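+%% Trace calls to error_handler:undefined_function/3 so that the number of
+%% attempted calls to undefined functions can be counted by check_tracing/2.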
+start_tracing() ->
+ Self = self(),
+ {Tracer,_} = spawn_opt(fun() -> tracer_loop(Self) end, [link,monitor]),
+ ?line 1 = erlang:trace_pattern({error_handler,undefined_function,3},
+ true, [global]),
+ ?line 1 = erlang:trace(Self, true, [call,{tracer,Tracer}]),
+ Tracer.
+
+
+tracer_loop(Receiver) ->
+ receive
+ die_please ->
+ ok;
+ {do_trace_delivered, Tracee} ->
+ _ = erlang:trace_delivered(Tracee),
+ tracer_loop(Receiver);
+
+ Msg ->
+ Receiver ! Msg,
+ tracer_loop(Receiver)
+ end.
+
+check_tracing(Tracer, Expected) ->
+ Tracer ! {do_trace_delivered, self()},
+ case check_tracing_loop(0,[]) of
+ {Expected,_} ->
+ ok;
+ {Got, MsgList} ->
+ io:format("Expected ~p trace msg, got ~p:\n~p\n",
+ [Expected, Got, lists:reverse(MsgList)]),
+ "Trace msg mismatch"
+ end.
+
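+%% Count received call trace messages until the trace_delivered marker arrives.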
+check_tracing_loop(N, MsgList) ->
+ Self = self(),
+ receive
+ {trace, _Pid, call, {_M, _F, _Args}} = Msg ->
+ check_tracing_loop(N+1, [Msg | MsgList]);
+ {trace_delivered, Self, _} ->
+ {N, MsgList}
+ end.
+
+
+stop_tracing(Tracer) ->
+ erlang:trace_pattern({error_handler,undefined_function,3}, false, [global]),
+ erlang:trace(self(), false, [call]),
+ Tracer ! die_please,
+ receive
+ {'DOWN', _, process, Tracer, _} -> ok
+ end.
diff --git a/lib/kernel/test/code_SUITE_data/upgradee.erl b/lib/kernel/test/code_SUITE_data/upgradee.erl
index 62b1d95e30..8ca660c19c 100644
--- a/lib/kernel/test/code_SUITE_data/upgradee.erl
+++ b/lib/kernel/test/code_SUITE_data/upgradee.erl
@@ -8,6 +8,9 @@
-export([exp1/0]). % only exported in v1
-export([exp1loc2/0]). % exported in v1, local in v2
-export([exp1exp2/0]). % exported in v1 and v2
+-export([get_local_fun/1]).
+-export([get_exp1exp2_fun/0]).
+-export([exp1exp2_fun/0]).
exp1() -> ?VERSION.
loc1() -> ?VERSION.
@@ -20,6 +23,9 @@ loc1() -> ?VERSION.
-export([exp2/0]).
-export([loc1exp2/0]).
-export([exp1exp2/0]).
+-export([get_local_fun/1]).
+-export([get_exp1exp2_fun/0]).
+-export([exp1exp2_fun/0]).
exp2() -> ?VERSION.
loc2() -> ?VERSION.
@@ -31,6 +37,12 @@ exp1loc2() -> ?VERSION.
loc1exp2() -> ?VERSION.
loc1loc2() -> ?VERSION.
+get_local_fun(Env) -> fun() -> {?VERSION,Env} end.
+get_exp1exp2_fun() -> fun ?MODULE:exp1exp2_fun/0.
+
+exp1exp2_fun() ->
+ ?VERSION.
+
dispatch_loop() ->
receive
upgrade_order ->
diff --git a/lib/kernel/test/code_a_test.erl b/lib/kernel/test/code_a_test.erl
index fa33c6f57a..bb5d814f04 100644
--- a/lib/kernel/test/code_a_test.erl
+++ b/lib/kernel/test/code_a_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/code_b_test.erl b/lib/kernel/test/code_b_test.erl
index 559698dd8e..9c362eb0d0 100644
--- a/lib/kernel/test/code_b_test.erl
+++ b/lib/kernel/test/code_b_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/disk_log_SUITE.erl b/lib/kernel/test/disk_log_SUITE.erl
index 9988347581..9704c3b28c 100644
--- a/lib/kernel/test/disk_log_SUITE.erl
+++ b/lib/kernel/test/disk_log_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%%
-module(disk_log_SUITE).
-%-define(debug, true).
+%%-define(debug, true).
-ifdef(debug).
-define(format(S, A), io:format(S, A)).
@@ -29,10 +29,10 @@
-define(config(X,Y), foo).
-define(t,test_server).
-else.
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-define(format(S, A), ok).
--define(privdir(Conf), ?config(priv_dir, Conf)).
--define(datadir(Conf), ?config(data_dir, Conf)).
+-define(privdir(Conf), proplists:get_value(priv_dir, Conf)).
+-define(datadir(Conf), proplists:get_value(data_dir, Conf)).
-endif.
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -89,8 +89,6 @@
dist_terminate/1, dist_accessible/1, dist_deadlock/1,
dist_open2/1, other_groups/1,
- evil/1,
-
otp_6278/1, otp_10131/1]).
-export([head_fun/1, hf/0, lserv/1,
@@ -102,8 +100,6 @@
-export([client/4]).
--define(default_timeout, ?t:minutes(1)).
-
%% error_logger
-export([init/1,
handle_event/2, handle_call/2, handle_info/2,
@@ -125,7 +121,7 @@
[halt_int, wrap_int, halt_ext, wrap_ext, read_mode, head,
notif, new_idx_vsn, reopen, block, unblock, open, close,
error, chunk, truncate, many_users, info, change_size,
- change_attribute, distribution, evil, otp_6278, otp_10131]).
+ change_attribute, distribution, otp_6278, otp_10131]).
%% These test cases should be skipped if the VxWorks card is
%% configured without NFS cache.
@@ -139,7 +135,9 @@
change_size_after, default_size]).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,2}}].
all() ->
[{group, halt_int}, {group, wrap_int},
@@ -149,7 +147,7 @@ all() ->
{group, open}, {group, close}, {group, error}, chunk,
truncate, many_users, {group, info},
{group, change_size}, change_attribute,
- {group, distribution}, evil, otp_6278, otp_10131].
+ {group, distribution}, otp_6278, otp_10131].
groups() ->
[{halt_int, [], [halt_int_inf, {group, halt_int_sz}]},
@@ -194,264 +192,234 @@ end_per_group(_GroupName, Config) ->
-init_per_testcase(Case, Config) ->
- case should_skip(Case,Config) of
- true ->
- CS = check_nfs(Config),
- {skipped, lists:flatten
- (io_lib:format
- ("The test does not work "
- "with current NFS cache size (~w),"
- " to get this test to run, "
- "~s the NFS cache size~n",
- [CS, case CS of
- 0 ->
- "enlarge";
- _ ->
- "zero"
- end]))};
- _ ->
- Dog=?t:timetrap(?t:minutes(2)),
- [{watchdog, Dog}|Config]
- end.
+init_per_testcase(_Case, Config) ->
+ Config.
-end_per_testcase(_Case, Config) ->
- Dog=?config(watchdog, Config),
- test_server:timetrap_cancel(Dog),
+end_per_testcase(_Case, _Config) ->
ok.
-halt_int_inf(suite) -> [];
-halt_int_inf(doc) -> ["Test simple halt disk log, size infinity"];
+%% Test simple halt disk log, size infinity.
halt_int_inf(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line ok = disk_log:start(),
+ ok = disk_log:start(),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,internal},
- {file, File}]),
- ?line simple_log(a),
- ?line ok = disk_log:close(a),
- ?line ok = file:delete(File).
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,internal},
+ {file, File}]),
+ simple_log(a),
+ ok = disk_log:close(a),
+ ok = file:delete(File).
-halt_int_sz_1(suite) -> [];
-halt_int_sz_1(doc) -> ["Test simple halt disk log, size defined"];
+%% Test simple halt disk log, size defined.
halt_int_sz_1(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,18000},
- {format,internal},
- {file, File}]),
- ?line simple_log(a),
- ?line ok = disk_log:truncate(a),
- ?line [] = get_all_terms(a),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,18000},
+ {format,internal},
+ {file, File}]),
+ simple_log(a),
+ ok = disk_log:truncate(a),
+ [] = get_all_terms(a),
T1 = mk_bytes(10000),
T2 = mk_bytes(5000),
- ?line ok = disk_log:log(a, T1),
- ?line case get_all_terms(a) of
- [T1] ->
- ok;
- E1 ->
- test_server_fail({bad_terms, E1, [T1]})
- end,
- ?line ok = disk_log:log(a, T2),
- ?line {error, {full, a}} = disk_log:log(a, T1),
- ?line ok = disk_log:alog(a, T1),
- ?line case get_all_terms(a) of
- [T1, T2] ->
- ok;
- E2 ->
- test_server_fail({bad_terms, E2, [T1, T2]})
- end,
- ?line ok = disk_log:close(a),
- ?line ok = file:delete(File).
+ ok = disk_log:log(a, T1),
+ case get_all_terms(a) of
+ [T1] ->
+ ok;
+ E1 ->
+ test_server_fail({bad_terms, E1, [T1]})
+ end,
+ ok = disk_log:log(a, T2),
+ {error, {full, a}} = disk_log:log(a, T1),
+ ok = disk_log:alog(a, T1),
+ case get_all_terms(a) of
+ [T1, T2] ->
+ ok;
+ E2 ->
+ test_server_fail({bad_terms, E2, [T1, T2]})
+ end,
+ ok = disk_log:close(a),
+ ok = file:delete(File).
-halt_int_sz_2(suite) -> [];
-halt_int_sz_2(doc) -> ["Test simple halt disk log, size ~8192"];
+%% Test simple halt disk log, size ~8192.
halt_int_sz_2(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File1 = filename:join(Dir, "a.LOG"),
File2 = filename:join(Dir, "b.LOG"),
File3 = filename:join(Dir, "c.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,8191},
- {format,internal},
- {file, File1}]),
- ?line {ok, b} = disk_log:open([{name,b}, {type,halt}, {size,8192},
- {format,internal},
- {file, File2}]),
- ?line {ok, c} = disk_log:open([{name,c}, {type,halt}, {size,8193},
- {format,internal},
- {file, File3}]),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,8191},
+ {format,internal},
+ {file, File1}]),
+ {ok, b} = disk_log:open([{name,b}, {type,halt}, {size,8192},
+ {format,internal},
+ {file, File2}]),
+ {ok, c} = disk_log:open([{name,c}, {type,halt}, {size,8193},
+ {format,internal},
+ {file, File3}]),
T1 = mk_bytes(8191-16), % 16 is size of header + magics for 1 item
T2 = mk_bytes(8192-16),
T3 = mk_bytes(8193-16),
- ?line ok = disk_log:log(a, T1),
- ?line ok = disk_log:log(b, T2),
- ?line ok = disk_log:log(c, T3),
- ?line case get_all_terms(a) of
- [T1] ->
- ok;
- E1 ->
- test_server_fail({bad_terms, E1, [T1]})
- end,
- ?line case get_all_terms(b) of
- [T2] ->
- ok;
- E2 ->
- test_server_fail({bad_terms, E2, [T2]})
- end,
- ?line case get_all_terms(c) of
- [T3] ->
- ok;
- E3 ->
- test_server_fail({bad_terms, E3, [T3]})
- end,
- ?line ok = disk_log:truncate(a),
- ?line ok = disk_log:truncate(b),
- ?line {error, {full, a}} = disk_log:log(a, T2),
- ?line {error, {full, b}} = disk_log:log(b, T3),
- ?line [] = get_all_terms(a),
- ?line [] = get_all_terms(b),
- ?line ok = disk_log:close(a),
- ?line ok = disk_log:close(b),
- ?line ok = disk_log:close(c),
- ?line ok = file:delete(File1),
- ?line ok = file:delete(File2),
- ?line ok = file:delete(File3),
+ ok = disk_log:log(a, T1),
+ ok = disk_log:log(b, T2),
+ ok = disk_log:log(c, T3),
+ case get_all_terms(a) of
+ [T1] ->
+ ok;
+ E1 ->
+ test_server_fail({bad_terms, E1, [T1]})
+ end,
+ case get_all_terms(b) of
+ [T2] ->
+ ok;
+ E2 ->
+ test_server_fail({bad_terms, E2, [T2]})
+ end,
+ case get_all_terms(c) of
+ [T3] ->
+ ok;
+ E3 ->
+ test_server_fail({bad_terms, E3, [T3]})
+ end,
+ ok = disk_log:truncate(a),
+ ok = disk_log:truncate(b),
+ {error, {full, a}} = disk_log:log(a, T2),
+ {error, {full, b}} = disk_log:log(b, T3),
+ [] = get_all_terms(a),
+ [] = get_all_terms(b),
+ ok = disk_log:close(a),
+ ok = disk_log:close(b),
+ ok = disk_log:close(c),
+ ok = file:delete(File1),
+ ok = file:delete(File2),
+ ok = file:delete(File3),
ok.
-halt_int_ro(suite) -> [];
-halt_int_ro(doc) -> ["Test simple halt disk log, read only, internal"];
+%% Test simple halt disk log, read only, internal.
halt_int_ro(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,internal}, {file, File}]),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,internal}, {file, File}]),
simple_log(a),
- ?line ok = disk_log:close(a),
+ ok = disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,internal}, {file, File},
- {mode,read_only}]),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,internal}, {file, File},
+ {mode,read_only}]),
T1 = "not allowed to write",
- ?line {error, {read_only_mode, a}} = disk_log:log(a, T1),
- ?line ok = disk_log:close(a),
- ?line ok = file:delete(File).
+ {error, {read_only_mode, a}} = disk_log:log(a, T1),
+ ok = disk_log:close(a),
+ ok = file:delete(File).
-halt_ext_ro(suite) -> [];
-halt_ext_ro(doc) -> ["Test simple halt disk log, read only, external"];
+%% Test simple halt disk log, read only, external.
halt_ext_ro(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,external}, {file, File}]),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,external}, {file, File}]),
xsimple_log(File, a),
- ?line ok = disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,external}, {file, File},
- {mode,read_only}]),
+ ok = disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,external}, {file, File},
+ {mode,read_only}]),
T1 = "not allowed to write",
- ?line {error, {read_only_mode, a}} = disk_log:blog(a, T1),
- ?line ok = disk_log:close(a),
- ?line ok = file:delete(File).
+ {error, {read_only_mode, a}} = disk_log:blog(a, T1),
+ ok = disk_log:close(a),
+ ok = file:delete(File).
-wrap_int_ro(suite) -> [];
-wrap_int_ro(doc) -> ["Test simple wrap disk log, read only, internal"];
+%% Test simple wrap disk log, read only, internal.
wrap_int_ro(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
- {format,internal}, {file, File}]),
+ {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
+ {format,internal}, {file, File}]),
simple_log(a),
- ?line ok = disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
- {format,internal}, {file, File}, {mode,read_only}]),
+ ok = disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
+ {format,internal}, {file, File}, {mode,read_only}]),
T1 = "not allowed to write",
- ?line {error, {read_only_mode, a}} = disk_log:log(a, T1),
- ?line ok = disk_log:close(a),
- ?line del(File, 4).
+ {error, {read_only_mode, a}} = disk_log:log(a, T1),
+ ok = disk_log:close(a),
+ del(File, 4).
-wrap_ext_ro(suite) -> [];
-wrap_ext_ro(doc) -> ["Test simple wrap disk log, read only, external"];
+%% Test simple wrap disk log, read only, external.
wrap_ext_ro(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
- {format,external}, {file, File}]),
+ {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
+ {format,external}, {file, File}]),
x2simple_log(File ++ ".1", a),
- ?line ok = disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
- {format,external}, {file, File},
- {mode,read_only}]),
+ ok = disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
+ {format,external}, {file, File},
+ {mode,read_only}]),
T1 = "not allowed to write",
- ?line {error, {read_only_mode, a}} = disk_log:blog(a, T1),
- ?line {error, {read_only_mode, a}} = disk_log:inc_wrap_file(a),
- ?line ok = disk_log:close(a),
+ {error, {read_only_mode, a}} = disk_log:blog(a, T1),
+ {error, {read_only_mode, a}} = disk_log:inc_wrap_file(a),
+ ok = disk_log:close(a),
del(File, 4).
-halt_trunc(suite) -> [];
-halt_trunc(doc) -> ["Test truncation of halt disk log"];
+%% Test truncation of halt disk log.
halt_trunc(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,internal}, {file, File}]),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,internal}, {file, File}]),
simple_log(a),
- ?line ok = disk_log:close(a),
- ?line {error,{badarg,repair_read_only}} =
+ ok = disk_log:close(a),
+ {error,{badarg,repair_read_only}} =
disk_log:open([{name,a}, {type,halt}, {size,infinity},
{repair, truncate}, {format,internal},
{file, File}, {mode,read_only}]),
- ?line ok = file:delete(File).
+ ok = file:delete(File).
-halt_misc(suite) -> [];
-halt_misc(doc) -> ["Test truncation of halt disk log"];
+%% Test that write operations fail on a halt disk log opened in read-only mode.
halt_misc(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,internal}, {file, File}]),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,internal}, {file, File}]),
simple_log(a),
- ?line ok = disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,internal}, {file, File},
- {mode,read_only}]),
+ ok = disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,internal}, {file, File},
+ {mode,read_only}]),
T1 = "not allowed to write",
- ?line {error, {read_only_mode, a}} = disk_log:log(a, T1),
- ?line {error, {read_only_mode, a}} = disk_log:sync(a),
- ?line {error, {read_only_mode, a}} = disk_log:reopen(a, "b.LOG"),
- ?line {error, {read_only_mode, a}} =
+ {error, {read_only_mode, a}} = disk_log:log(a, T1),
+ {error, {read_only_mode, a}} = disk_log:sync(a),
+ {error, {read_only_mode, a}} = disk_log:reopen(a, "b.LOG"),
+ {error, {read_only_mode, a}} =
disk_log:change_header(a, {head,header}),
- ?line {error, {read_only_mode, a}} =
+ {error, {read_only_mode, a}} =
disk_log:change_size(a, infinity),
- ?line ok = disk_log:close(a),
- ?line ok = file:delete(File).
+ ok = disk_log:close(a),
+ ok = file:delete(File).
-halt_ro_alog(suite) -> [];
-halt_ro_alog(doc) -> ["Test truncation of halt disk log, read only"];
+%% Test asynchronous logging (alog) on a halt disk log opened read-only.
halt_ro_alog(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,internal}, {file, File}]),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,internal}, {file, File}]),
simple_log(a),
- ?line ok = disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {notify,true}, {format,internal},
- {file, File}, {mode,read_only}]),
+ ok = disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {notify,true}, {format,internal},
+ {file, File}, {mode,read_only}]),
T1 = "not allowed to write",
- ?line ok = disk_log:alog(a, T1),
- ?line ok = halt_ro_alog_wait_notify(a, T1),
- ?line ok = disk_log:close(a),
- ?line ok = file:delete(File).
+ ok = disk_log:alog(a, T1),
+ ok = halt_ro_alog_wait_notify(a, T1),
+ ok = disk_log:close(a),
+ ok = file:delete(File).
halt_ro_alog_wait_notify(Log, T) ->
Term = term_to_binary(T),
receive
- {disk_log, _, Log,{read_only, Term}} ->
+ {disk_log, _, Log,{read_only, [Term]}} ->
ok;
Other ->
Other
@@ -459,28 +427,27 @@ halt_ro_alog_wait_notify(Log, T) ->
failed
end.
-halt_ro_balog(suite) -> [];
-halt_ro_balog(doc) -> ["Test truncation of halt disk log, read only"];
+%% Test asynchronous binary logging (balog) on a halt disk log opened read-only.
halt_ro_balog(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,internal}, {file, File}]),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,internal}, {file, File}]),
simple_log(a),
- ?line ok = disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {notify,true}, {format,external},
- {file, File}, {mode,read_only}]),
+ ok = disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {notify,true}, {format,external},
+ {file, File}, {mode,read_only}]),
T1 = "not allowed to write",
- ?line ok = disk_log:balog(a, T1),
- ?line ok = halt_ro_balog_wait_notify(a, T1),
- ?line ok = disk_log:close(a),
- ?line ok = file:delete(File).
+ ok = disk_log:balog(a, T1),
+ ok = halt_ro_balog_wait_notify(a, T1),
+ ok = disk_log:close(a),
+ ok = file:delete(File).
halt_ro_balog_wait_notify(Log, T) ->
Term = list_to_binary(T),
receive
- {disk_log, _, Log,{read_only, Term}} ->
+ {disk_log, _, Log,{read_only, [Term]}} ->
ok;
Other ->
Other
@@ -488,127 +455,123 @@ halt_ro_balog_wait_notify(Log, T) ->
failed
end.
-halt_ro_crash(suite) -> [];
-halt_ro_crash(doc) -> ["Test truncation of halt disk log, read only, repair"];
+%% Test reading a crashed halt disk log opened in read-only mode (repair not allowed).
halt_ro_crash(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line file:delete(File),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,internal},{file, File}]),
+ file:delete(File),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,internal},{file, File}]),
simple_log(a),
- ?line ok = disk_log:close(a),
+ ok = disk_log:close(a),
crash(File, 10),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {notify,true}, {format,internal},
- {file, File}, {mode,read_only}]),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {notify,true}, {format,internal},
+ {file, File}, {mode,read_only}]),
- ?line Error1 = {error, {read_only_mode, a}} = disk_log:truncate(a),
- ?line "The disk log" ++ _ = format_error(Error1),
+ Error1 = {error, {read_only_mode, a}} = disk_log:truncate(a),
+ "The disk log" ++ _ = format_error(Error1),
%% crash/1 sets the length of the first item to something big (2.5 kb).
%% In R6B, binary_to_term accepts garbage at the end of the binary,
%% which means that the first item is recognized!
%% This is how it was before R6B:
- %% ?line {C1,T1,15} = disk_log:chunk(a,start),
- %% ?line {C2,T2} = disk_log:chunk(a,C1),
+ %% {C1,T1,15} = disk_log:chunk(a,start),
+ %% {C2,T2} = disk_log:chunk(a,C1),
{C1,_OneItem,7478} = disk_log:chunk(a,start),
{C2, [], 7} = disk_log:chunk(a,C1),
- ?line eof = disk_log:chunk(a,C2),
- ?line ok = disk_log:close(a),
- ?line ok = file:delete(File).
+ eof = disk_log:chunk(a,C2),
+ ok = disk_log:close(a),
+ ok = file:delete(File).
-wrap_int_1(suite) -> [];
-wrap_int_1(doc) -> ["Test wrap disk log, internal"];
+%% Test wrap disk log, internal.
wrap_int_1(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
- {format,internal},
- {file, File}]),
- ?line [_] =
+ {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
+ {format,internal},
+ {file, File}]),
+ [_] =
lists:filter(fun(P) -> disk_log:pid2name(P) =/= undefined end,
erlang:processes()),
simple_log(a),
- ?line ok = disk_log:close(a),
+ ok = disk_log:close(a),
del(File, 4),
- ?line {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
- {format,internal},
- {file, File}]),
- ?line [] = get_all_terms(a),
+ {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
+ {format,internal},
+ {file, File}]),
+ [] = get_all_terms(a),
T1 = mk_bytes(10000), % file 2
T2 = mk_bytes(5000), % file 3
T3 = mk_bytes(4000), % file 4
T4 = mk_bytes(2000), % file 4
T5 = mk_bytes(5000), % file 1
T6 = mk_bytes(5000), % file 2
- ?line ok = disk_log:log(a, T1),
- ?line ok = disk_log:log(a, T2),
- ?line ok = disk_log:log(a, T3),
- ?line ok = disk_log:log_terms(a, [T4, T5, T6]),
- ?line case get_all_terms(a) of
- [T2,T3,T4,T5,T6] ->
- ok;
- E1 ->
- test_server_fail({bad_terms, E1, [T2,T3,T4,T5,T6]})
- end,
- ?line ok = disk_log:close(a),
+ ok = disk_log:log(a, T1),
+ ok = disk_log:log(a, T2),
+ ok = disk_log:log(a, T3),
+ ok = disk_log:log_terms(a, [T4, T5, T6]),
+ case get_all_terms(a) of
+ [T2,T3,T4,T5,T6] ->
+ ok;
+ E1 ->
+ test_server_fail({bad_terms, E1, [T2,T3,T4,T5,T6]})
+ end,
+ ok = disk_log:close(a),
del(File, 4).
-wrap_int_2(suite) -> [];
-wrap_int_2(doc) -> ["Test wrap disk log, internal"];
+%% Test wrap disk log, internal.
wrap_int_2(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File1 = filename:join(Dir, "a.LOG"),
File2 = filename:join(Dir, "b.LOG"),
File3 = filename:join(Dir, "c.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8191,3}},
- {format,internal},
- {file, File1}]),
- ?line {ok, b} = disk_log:open([{name,b}, {type,wrap}, {size,{8192,3}},
- {format,internal},
- {file, File2}]),
- ?line {ok, c} = disk_log:open([{name,c}, {type,wrap}, {size,{8193,3}},
- {format,internal},
- {file, File3}]),
+ {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8191,3}},
+ {format,internal},
+ {file, File1}]),
+ {ok, b} = disk_log:open([{name,b}, {type,wrap}, {size,{8192,3}},
+ {format,internal},
+ {file, File2}]),
+ {ok, c} = disk_log:open([{name,c}, {type,wrap}, {size,{8193,3}},
+ {format,internal},
+ {file, File3}]),
T1 = mk_bytes(8191-16), % 16 is size of header + magics for 1 item
T2 = mk_bytes(8192-16),
T3 = mk_bytes(8193-16),
- ?line ok = disk_log:log(a, T1),
- ?line ok = disk_log:log(b, T2),
- ?line ok = disk_log:log(c, T3),
- ?line case get_all_terms(a) of
- [T1] ->
- ok;
- E1 ->
- test_server_fail({bad_terms, E1, [T1]})
- end,
- ?line case get_all_terms(b) of
- [T2] ->
- ok;
- E2 ->
- test_server_fail({bad_terms, E2, [T2]})
- end,
- ?line case get_all_terms(c) of
- [T3] ->
- ok;
- E3 ->
- test_server_fail({bad_terms, E3, [T3]})
- end,
- ?line ok = disk_log:close(a),
- ?line ok = disk_log:close(b),
- ?line ok = disk_log:close(c),
+ ok = disk_log:log(a, T1),
+ ok = disk_log:log(b, T2),
+ ok = disk_log:log(c, T3),
+ case get_all_terms(a) of
+ [T1] ->
+ ok;
+ E1 ->
+ test_server_fail({bad_terms, E1, [T1]})
+ end,
+ case get_all_terms(b) of
+ [T2] ->
+ ok;
+ E2 ->
+ test_server_fail({bad_terms, E2, [T2]})
+ end,
+ case get_all_terms(c) of
+ [T3] ->
+ ok;
+ E3 ->
+ test_server_fail({bad_terms, E3, [T3]})
+ end,
+ ok = disk_log:close(a),
+ ok = disk_log:close(b),
+ ok = disk_log:close(c),
del(File1, 3),
del(File2, 3),
del(File3, 3).
-inc_wrap_file(suite) -> [];
-inc_wrap_file(doc) -> ["Test disk log, force a change to next file"];
+%% Test disk log, force a change to next file.
inc_wrap_file(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File1 = filename:join(Dir, "a.LOG"),
@@ -616,262 +579,257 @@ inc_wrap_file(Conf) when is_list(Conf) ->
File3 = filename:join(Dir, "c.LOG"),
     %% Test that halt logs get an error message
- ?line {ok, a} = disk_log:open([{name, a}, {type, halt},
- {format, internal},
- {file, File1}]),
- ?line ok = disk_log:log(a, "message one"),
- ?line {error, {halt_log, a}} = disk_log:inc_wrap_file(a),
+ {ok, a} = disk_log:open([{name, a}, {type, halt},
+ {format, internal},
+ {file, File1}]),
+ ok = disk_log:log(a, "message one"),
+ {error, {halt_log, a}} = disk_log:inc_wrap_file(a),
%% test an internally formatted wrap log file
- ?line {ok, b} = disk_log:open([{name, b}, {type, wrap}, {size, {100,3}},
- {format, internal}, {head, 'thisisahead'},
- {file, File2}]),
- ?line ok = disk_log:log(b, "message one"),
- ?line ok = disk_log:inc_wrap_file(b),
- ?line ok = disk_log:log(b, "message two"),
- ?line ok = disk_log:inc_wrap_file(b),
- ?line ok = disk_log:log(b, "message three"),
- ?line ok = disk_log:inc_wrap_file(b),
- ?line ok = disk_log:log(b, "message four"),
- ?line T1 = get_all_terms(b),
- ?line ['thisisahead', "message two",
- 'thisisahead', "message three",
- 'thisisahead', "message four"] = T1,
+ {ok, b} = disk_log:open([{name, b}, {type, wrap}, {size, {100,3}},
+ {format, internal}, {head, 'thisisahead'},
+ {file, File2}]),
+ ok = disk_log:log(b, "message one"),
+ ok = disk_log:inc_wrap_file(b),
+ ok = disk_log:log(b, "message two"),
+ ok = disk_log:inc_wrap_file(b),
+ ok = disk_log:log(b, "message three"),
+ ok = disk_log:inc_wrap_file(b),
+ ok = disk_log:log(b, "message four"),
+ T1 = get_all_terms(b),
+ ['thisisahead', "message two",
+ 'thisisahead', "message three",
+ 'thisisahead', "message four"] = T1,
%% test an externally formatted wrap log file
- ?line {ok, c} = disk_log:open([{name, c}, {type, wrap}, {size, {100,3}},
- {format,external}, {head,"this is a head "},
- {file, File3}]),
- ?line ok = disk_log:blog(c, "message one"),
- ?line ok = disk_log:inc_wrap_file(c),
- ?line ok = disk_log:blog(c, "message two"),
- ?line ok = disk_log:inc_wrap_file(c),
- ?line ok = disk_log:blog(c, "message three"),
- ?line ok = disk_log:inc_wrap_file(c),
- ?line ok = disk_log:blog(c, "message four"),
- ?line ok = disk_log:sync(c),
- ?line {ok, Fd31} = file:open(File3 ++ ".1", [read]),
- ?line {ok,"this is a head message four"} = file:read(Fd31, 200),
- ?line {ok, Fd32} = file:open(File3 ++ ".2", [read]),
- ?line {ok,"this is a head message two"} = file:read(Fd32, 200),
- ?line {ok, Fd33} = file:open(File3 ++ ".3", [read]),
- ?line {ok,"this is a head message three"} = file:read(Fd33, 200),
- ?line ok = file:close(Fd31),
- ?line ok = file:close(Fd32),
- ?line ok = file:close(Fd33),
-
- ?line ok = disk_log:close(a),
- ?line ok = disk_log:close(b),
- ?line ok = disk_log:close(c),
- ?line ok = file:delete(File1),
+ {ok, c} = disk_log:open([{name, c}, {type, wrap}, {size, {100,3}},
+ {format,external}, {head,"this is a head "},
+ {file, File3}]),
+ ok = disk_log:blog(c, "message one"),
+ ok = disk_log:inc_wrap_file(c),
+ ok = disk_log:blog(c, "message two"),
+ ok = disk_log:inc_wrap_file(c),
+ ok = disk_log:blog(c, "message three"),
+ ok = disk_log:inc_wrap_file(c),
+ ok = disk_log:blog(c, "message four"),
+ ok = disk_log:sync(c),
+ {ok, Fd31} = file:open(File3 ++ ".1", [read]),
+ {ok,"this is a head message four"} = file:read(Fd31, 200),
+ {ok, Fd32} = file:open(File3 ++ ".2", [read]),
+ {ok,"this is a head message two"} = file:read(Fd32, 200),
+ {ok, Fd33} = file:open(File3 ++ ".3", [read]),
+ {ok,"this is a head message three"} = file:read(Fd33, 200),
+ ok = file:close(Fd31),
+ ok = file:close(Fd32),
+ ok = file:close(Fd33),
+
+ ok = disk_log:close(a),
+ ok = disk_log:close(b),
+ ok = disk_log:close(c),
+ ok = file:delete(File1),
del(File2, 3),
del(File3, 3).
-halt_ext_inf(suite) -> [];
-halt_ext_inf(doc) -> ["Test halt disk log, external, infinity"];
+%% Test halt disk log, external, infinity.
halt_ext_inf(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
- {format,external},
- {file, File}]),
- ?line xsimple_log(File, a),
- ?line ok = disk_log:close(a),
- ?line ok = file:delete(File).
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,infinity},
+ {format,external},
+ {file, File}]),
+ xsimple_log(File, a),
+ ok = disk_log:close(a),
+ ok = file:delete(File).
-halt_ext_sz_1(suite) -> [];
-halt_ext_sz_1(doc) -> ["Test halt disk log, external, size defined"];
+%% Test halt disk log, external, size defined.
halt_ext_sz_1(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,18000},
- {format,external},
- {file, File}]),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,18000},
+ {format,external},
+ {file, File}]),
xsimple_log(File, a),
- ?line ok = disk_log:truncate(a),
- ?line [] = get_list(File, a),
+ ok = disk_log:truncate(a),
+ [] = get_list(File, a),
{B1, T1} = x_mk_bytes(10000),
{B2, T2} = x_mk_bytes(5000),
{B3, T3} = x_mk_bytes(1000),
- ?line ok = disk_log:blog(a, B1),
- ?line case get_list(File, a) of
- T1 ->
- ok;
- E1 ->
- test_server_fail({bad_terms, E1, T1})
- end,
- ?line ok = disk_log:blog(a, B2),
- ?line {error, {full, a}} = disk_log:blog_terms(a, [B3,B3,B1]),
- ?line ok = disk_log:balog(a, B1),
- ?line Tmp = T1 ++ T2 ++ T3 ++ T3,
- ?line case get_list(File, a) of
- Tmp ->
- ok;
- E2 ->
- test_server_fail({bad_terms, E2, Tmp})
- end,
- ?line ok = disk_log:close(a),
- ?line ok = file:delete(File).
+ ok = disk_log:blog(a, B1),
+ case get_list(File, a) of
+ T1 ->
+ ok;
+ E1 ->
+ test_server_fail({bad_terms, E1, T1})
+ end,
+ ok = disk_log:blog(a, B2),
+ {error, {full, a}} = disk_log:blog_terms(a, [B3,B3,B1]),
+ ok = disk_log:balog(a, B1),
+ Tmp = T1 ++ T2 ++ T3 ++ T3,
+ case get_list(File, a) of
+ Tmp ->
+ ok;
+ E2 ->
+ test_server_fail({bad_terms, E2, Tmp})
+ end,
+ ok = disk_log:close(a),
+ ok = file:delete(File).
-halt_ext_sz_2(suite) -> [];
-halt_ext_sz_2(doc) -> ["Test halt disk log, external, size defined"];
+%% Test halt disk log, external, size defined.
halt_ext_sz_2(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File1 = filename:join(Dir, "a.LOG"),
File2 = filename:join(Dir, "b.LOG"),
File3 = filename:join(Dir, "c.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,8191},
- {format,external},
- {file, File1}]),
- ?line {ok, b} = disk_log:open([{name,b}, {type,halt}, {size,8192},
- {format,external},
- {file, File2}]),
- ?line {ok, c} = disk_log:open([{name,c}, {type,halt}, {size,8193},
- {format,external},
- {file, File3}]),
+ {ok, a} = disk_log:open([{name,a}, {type,halt}, {size,8191},
+ {format,external},
+ {file, File1}]),
+ {ok, b} = disk_log:open([{name,b}, {type,halt}, {size,8192},
+ {format,external},
+ {file, File2}]),
+ {ok, c} = disk_log:open([{name,c}, {type,halt}, {size,8193},
+ {format,external},
+ {file, File3}]),
{B1, T1} = x_mk_bytes(8191),
{B2, T2} = x_mk_bytes(8192),
{B3, T3} = x_mk_bytes(8193),
- ?line ok = disk_log:blog(a, B1),
- ?line ok = disk_log:blog(b, B2),
- ?line ok = disk_log:blog(c, B3),
- ?line case get_list(File1, a) of
- T1 ->
- ok;
- E1 ->
- test_server_fail({bad_terms, E1, T1})
- end,
- ?line case get_list(File2, b) of
- T2 ->
- ok;
- E2 ->
- test_server_fail({bad_terms, E2, T2})
- end,
- ?line case get_list(File3, c) of
- T3 ->
- ok;
- E3 ->
- test_server_fail({bad_terms, E3, T3})
- end,
- ?line ok = disk_log:truncate(a),
- ?line ok = disk_log:truncate(b),
- ?line {error, {full, a}} = disk_log:blog(a, B2),
- ?line Error1 = {error, {full, b}} = disk_log:blog(b, B3),
- ?line "The halt log" ++ _ = format_error(Error1),
- ?line true = info(b, full, false),
- ?line [] = get_list(File1, a),
- ?line [] = get_list(File2, b),
- ?line ok = disk_log:close(a),
- ?line ok = disk_log:close(b),
- ?line ok = disk_log:close(c),
- ?line ok = file:delete(File1),
- ?line ok = file:delete(File2),
- ?line ok = file:delete(File3),
+ ok = disk_log:blog(a, B1),
+ ok = disk_log:blog(b, B2),
+ ok = disk_log:blog(c, B3),
+ case get_list(File1, a) of
+ T1 ->
+ ok;
+ E1 ->
+ test_server_fail({bad_terms, E1, T1})
+ end,
+ case get_list(File2, b) of
+ T2 ->
+ ok;
+ E2 ->
+ test_server_fail({bad_terms, E2, T2})
+ end,
+ case get_list(File3, c) of
+ T3 ->
+ ok;
+ E3 ->
+ test_server_fail({bad_terms, E3, T3})
+ end,
+ ok = disk_log:truncate(a),
+ ok = disk_log:truncate(b),
+ {error, {full, a}} = disk_log:blog(a, B2),
+ Error1 = {error, {full, b}} = disk_log:blog(b, B3),
+ "The halt log" ++ _ = format_error(Error1),
+ true = info(b, full, false),
+ [] = get_list(File1, a),
+ [] = get_list(File2, b),
+ ok = disk_log:close(a),
+ ok = disk_log:close(b),
+ ok = disk_log:close(c),
+ ok = file:delete(File1),
+ ok = file:delete(File2),
+ ok = file:delete(File3),
ok.
-wrap_ext_1(suite) -> [];
-wrap_ext_1(doc) -> ["Test wrap disk log, external, size defined"];
+%% Test wrap disk log, external, size defined.
wrap_ext_1(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
- {format,external},
- {file, File}]),
+ {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
+ {format,external},
+ {file, File}]),
x2simple_log(File ++ ".1", a),
- ?line ok = disk_log:close(a),
-% del(File, 4),
- ?line {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
- {format,external},
- {file, File}]),
+ ok = disk_log:close(a),
+ %% del(File, 4),
+ {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8000, 4}},
+ {format,external},
+ {file, File}]),
{B1, _T1} = x_mk_bytes(10000), % file 2
{B2, T2} = x_mk_bytes(5000), % file 3
{B3, T3} = x_mk_bytes(4000), % file 4
{B4, T4} = x_mk_bytes(2000), % file 4
{B5, T5} = x_mk_bytes(5000), % file 1
{B6, T6} = x_mk_bytes(5000), % file 2
- ?line ok = disk_log:blog(a, B1),
- ?line ok = disk_log:blog(a, B2),
- ?line ok = disk_log:blog(a, B3),
- ?line ok = disk_log:blog_terms(a, [B4, B5, B6]),
- ?line case get_list(File ++ ".3", a) of
- T2 ->
- ok;
- E2 ->
- test_server_fail({bad_terms, E2, T2})
- end,
- ?line T34 = T3 ++ T4,
- ?line case get_list(File ++ ".4", a) of
- T34 ->
- ok;
- E34 ->
- test_server_fail({bad_terms, E34, T34})
- end,
- ?line case get_list(File ++ ".1", a) of
- T5 ->
- ok;
- E5 ->
- test_server_fail({bad_terms, E5, T5})
- end,
- ?line case get_list(File ++ ".2", a) of
- T6 ->
- ok;
- E6 ->
- test_server_fail({bad_terms, E6, T6})
- end,
- ?line ok = disk_log:close(a),
+ ok = disk_log:blog(a, B1),
+ ok = disk_log:blog(a, B2),
+ ok = disk_log:blog(a, B3),
+ ok = disk_log:blog_terms(a, [B4, B5, B6]),
+ case get_list(File ++ ".3", a) of
+ T2 ->
+ ok;
+ E2 ->
+ test_server_fail({bad_terms, E2, T2})
+ end,
+ T34 = T3 ++ T4,
+ case get_list(File ++ ".4", a) of
+ T34 ->
+ ok;
+ E34 ->
+ test_server_fail({bad_terms, E34, T34})
+ end,
+ case get_list(File ++ ".1", a) of
+ T5 ->
+ ok;
+ E5 ->
+ test_server_fail({bad_terms, E5, T5})
+ end,
+ case get_list(File ++ ".2", a) of
+ T6 ->
+ ok;
+ E6 ->
+ test_server_fail({bad_terms, E6, T6})
+ end,
+ ok = disk_log:close(a),
del(File, 4).
-wrap_ext_2(suite) -> [];
-wrap_ext_2(doc) -> ["Test wrap disk log, external, size defined"];
+%% Test wrap disk log, external, size defined.
wrap_ext_2(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File1 = filename:join(Dir, "a.LOG"),
File2 = filename:join(Dir, "b.LOG"),
File3 = filename:join(Dir, "c.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8191,3}},
- {format,external},
- {file, File1}]),
- ?line {ok, b} = disk_log:open([{name,b}, {type,wrap}, {size,{8192,3}},
- {format,external},
- {file, File2}]),
- ?line {ok, c} = disk_log:open([{name,c}, {type,wrap}, {size,{8193,3}},
- {format,external},
- {file, File3}]),
+ {ok, a} = disk_log:open([{name,a}, {type,wrap}, {size,{8191,3}},
+ {format,external},
+ {file, File1}]),
+ {ok, b} = disk_log:open([{name,b}, {type,wrap}, {size,{8192,3}},
+ {format,external},
+ {file, File2}]),
+ {ok, c} = disk_log:open([{name,c}, {type,wrap}, {size,{8193,3}},
+ {format,external},
+ {file, File3}]),
{B1, T1} = x_mk_bytes(8191),
{B2, T2} = x_mk_bytes(8192),
{B3, T3} = x_mk_bytes(8193),
- ?line ok = disk_log:blog(a, B1),
- ?line ok = disk_log:blog(b, B2),
- ?line ok = disk_log:blog(c, B3),
- ?line case get_list(File1 ++ ".1", a) of
- T1 ->
- ok;
- E1 ->
- test_server_fail({bad_terms, E1, T1})
- end,
- ?line case get_list(File2 ++ ".1", b) of
- T2 ->
- ok;
- E2 ->
- test_server_fail({bad_terms, E2, T2})
- end,
- ?line case get_list(File3 ++ ".1", c) of
- T3 ->
- ok;
- E3 ->
- test_server_fail({bad_terms, E3, T3})
- end,
- ?line ok = disk_log:close(a),
- ?line ok = disk_log:close(b),
- ?line ok = disk_log:close(c),
- ?line del(File1, 3),
- ?line del(File2, 3),
- ?line del(File3, 3),
+ ok = disk_log:blog(a, B1),
+ ok = disk_log:blog(b, B2),
+ ok = disk_log:blog(c, B3),
+ case get_list(File1 ++ ".1", a) of
+ T1 ->
+ ok;
+ E1 ->
+ test_server_fail({bad_terms, E1, T1})
+ end,
+ case get_list(File2 ++ ".1", b) of
+ T2 ->
+ ok;
+ E2 ->
+ test_server_fail({bad_terms, E2, T2})
+ end,
+ case get_list(File3 ++ ".1", c) of
+ T3 ->
+ ok;
+ E3 ->
+ test_server_fail({bad_terms, E3, T3})
+ end,
+ ok = disk_log:close(a),
+ ok = disk_log:close(b),
+ ok = disk_log:close(c),
+ del(File1, 3),
+ del(File2, 3),
+ del(File3, 3),
ok.
simple_log(Log) ->
@@ -879,61 +837,61 @@ simple_log(Log) ->
T2 = hopp,
T3 = {tjena, 12},
T4 = mk_bytes(10000),
- ?line ok = disk_log:log(Log, T1),
- ?line ok = disk_log:log_terms(Log, [T2, T3]),
- ?line case get_all_terms(Log) of
- [T1, T2, T3] ->
- ok;
- E1 ->
- test_server_fail({bad_terms, E1, [T1, T2, T3]})
- end,
- ?line ok = disk_log:log(a, T4),
- ?line case get_all_terms(Log) of
- [T1, T2, T3, T4] ->
- ok;
- E2 ->
- test_server_fail({bad_terms, E2, [T1, T2, T3, T4]})
- end.
+ ok = disk_log:log(Log, T1),
+ ok = disk_log:log_terms(Log, [T2, T3]),
+ case get_all_terms(Log) of
+ [T1, T2, T3] ->
+ ok;
+ E1 ->
+ test_server_fail({bad_terms, E1, [T1, T2, T3]})
+ end,
+ ok = disk_log:log(a, T4),
+ case get_all_terms(Log) of
+ [T1, T2, T3, T4] ->
+ ok;
+ E2 ->
+ test_server_fail({bad_terms, E2, [T1, T2, T3, T4]})
+ end.
xsimple_log(File, Log) ->
T1 = "hej",
T2 = list_to_binary("hopp"),
T3 = list_to_binary(["sena", list_to_binary("sejer")]),
T4 = list_to_binary(By = mk_bytes(10000)),
- ?line ok = disk_log:blog(Log, T1),
- ?line ok = disk_log:blog_terms(Log, [T2, T3]),
- ?line X = "hejhoppsenasejer",
- ?line X2 = get_list(File, Log),
- ?line case X2 of
- X -> ok;
- Z1 -> test_server_fail({bad_terms, Z1, X2})
- end,
- ?line ok = disk_log:blog(Log, T4),
- ?line Tmp = get_list(File, Log),
- ?line case X ++ By of
- Tmp -> ok;
- Z2 -> test_server_fail({bad_terms, Z2, X ++ By})
- end.
+ ok = disk_log:blog(Log, T1),
+ ok = disk_log:blog_terms(Log, [T2, T3]),
+ X = "hejhoppsenasejer",
+ X2 = get_list(File, Log),
+ case X2 of
+ X -> ok;
+ Z1 -> test_server_fail({bad_terms, Z1, X2})
+ end,
+ ok = disk_log:blog(Log, T4),
+ Tmp = get_list(File, Log),
+ case X ++ By of
+ Tmp -> ok;
+ Z2 -> test_server_fail({bad_terms, Z2, X ++ By})
+ end.
x2simple_log(File, Log) ->
T1 = "hej",
T2 = list_to_binary("hopp"),
T3 = list_to_binary(["sena", list_to_binary("sejer")]),
T4 = list_to_binary(By = mk_bytes(1000)),
- ?line ok = disk_log:blog(Log, T1),
- ?line ok = disk_log:blog_terms(Log, [T2, T3]),
- ?line X = "hejhoppsenasejer",
- ?line X2 = get_list(File, Log),
- ?line case X2 of
- X -> ok;
- Z1 -> test_server_fail({bad_terms, Z1, X2})
- end,
- ?line ok = disk_log:blog(Log, T4),
- ?line Tmp = get_list(File, Log),
- ?line case X ++ By of
- Tmp -> ok;
- Z2 -> test_server_fail({bad_terms, Z2, X ++ By})
- end.
+ ok = disk_log:blog(Log, T1),
+ ok = disk_log:blog_terms(Log, [T2, T3]),
+ X = "hejhoppsenasejer",
+ X2 = get_list(File, Log),
+ case X2 of
+ X -> ok;
+ Z1 -> test_server_fail({bad_terms, Z1, X2})
+ end,
+ ok = disk_log:blog(Log, T4),
+ Tmp = get_list(File, Log),
+ case X ++ By of
+ Tmp -> ok;
+ Z2 -> test_server_fail({bad_terms, Z2, X ++ By})
+ end.
x_mk_bytes(N) ->
X = lists:duplicate(N, $a),
@@ -947,7 +905,7 @@ mk_bytes(N) when N > 4 ->
end.
get_list(File, Log) ->
- ?t:format(0, "File ~p~n",[File]),
+ ct:pal(?HI_VERBOSITY, "File ~p~n", [File]),
ok = disk_log:sync(Log),
{ok, B} = file:read_file(File),
binary_to_list(B).
@@ -955,7 +913,7 @@ get_list(File, Log) ->
get_all_terms(Log, File, Type) ->
{ok, _Log} = disk_log:open([{name,Log}, {type,Type}, {size,infinity},
- {format,internal}, {file, File},
+ {format,internal}, {file, File},
{mode, read_only}]),
Ts = get_all_terms(Log),
ok = disk_log:close(Log),
@@ -976,14 +934,14 @@ get_all_terms1(Log, Cont, Res) ->
get_all_terms_and_bad(Log, File, Type) ->
{ok, _Log} = disk_log:open([{name,Log}, {type,Type}, {size,infinity},
- {format,internal}, {file, File},
+ {format,internal}, {file, File},
{mode, read_only}]),
Ts = get_all_terms_and_bad(Log),
ok = disk_log:close(Log),
Ts.
get_all_terms_and_bad(Log) ->
- ?line read_only = info(Log, mode, foo),
+ read_only = info(Log, mode, foo),
get_all_terms_and_bad1(Log, start, [], 0).
%%
@@ -999,7 +957,7 @@ get_all_terms_and_bad1(Log, Cont, Res, Bad0) ->
get_all_binary_terms_and_bad(Log, File, Type) ->
{ok, _Log} = disk_log:open([{name,Log}, {type,Type}, {size,infinity},
- {format,internal}, {file, File},
+ {format,internal}, {file, File},
{mode, read_only}]),
Ts = get_all_binary_terms_and_bad(Log),
ok = disk_log:close(Log),
@@ -1037,7 +995,7 @@ xx() ->
{format,internal}, {file, File}]),
W = xwr(a, 400),
disk_log:close(a),
-% file:delete(File),
+ %% file:delete(File),
W.
%% old: 6150
@@ -1186,73 +1144,72 @@ end_times({T1,W1}) ->
{T2-T1, W2-W1}.
-head_func(suite) -> [];
-head_func(doc) -> ["Test head parameter"];
+%% Test head parameter.
head_func(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
ets:new(xxx, [named_table, set, public]),
ets:insert(xxx, {wrapc, 0}),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {100,4}},
- {head_func, {?MODULE, hf, []}}]),
- ?line B = mk_bytes(60),
- ?line disk_log:log(a, B),
- ?line disk_log:alog(a, B),
- ?line disk_log:alog(a, B),
- ?line disk_log:log(a, B),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {100,4}},
+ {head_func, {?MODULE, hf, []}}]),
+ B = mk_bytes(60),
+ disk_log:log(a, B),
+ disk_log:alog(a, B),
+ disk_log:alog(a, B),
+ disk_log:log(a, B),
H = [1,2,3],
- ?line [{wrapc, 4}] = ets:lookup(xxx, wrapc),
+ [{wrapc, 4}] = ets:lookup(xxx, wrapc),
ets:delete(xxx),
- ?line case get_all_terms(a) of
- [H,B,H,B,H,B,H,B] ->
- ok;
- E1 ->
- test_server_fail({bad_terms, E1,
- [H,B,H,B,H,B,H,B]})
- end,
- ?line 8 = no_written_items(a),
+ case get_all_terms(a) of
+ [H,B,H,B,H,B,H,B] ->
+ ok;
+ E1 ->
+ test_server_fail({bad_terms, E1,
+ [H,B,H,B,H,B,H,B]})
+ end,
+ 8 = no_written_items(a),
disk_log:close(a),
del(File, 4),
- % invalid header function
- ?line {error, {invalid_header, {_, {term}}}} =
+ %% invalid header function
+ {error, {invalid_header, {_, {term}}}} =
disk_log:open([{name, n}, {file, File}, {type, halt},
{format, external},
{head_func, {?MODULE, head_fun, [{term}]}}]),
file:delete(File),
- ?line {error, {invalid_header, _}} =
+ {error, {invalid_header, _}} =
disk_log:open([{name, n}, {file, File}, {type, halt},
{format, external},
{head_func, {?MODULE, head_fun, [{ok,{term}}]}}]),
file:delete(File),
- ?line {ok,n} =
+ {ok,n} =
disk_log:open([{name, n}, {file, File}, {type, halt},
{format, external},
{head_func, {?MODULE, head_fun, [{ok,<<"head">>}]}}]),
- ?line ok = disk_log:close(n),
- ?line {ok,<<"head">>} = file:read_file(File),
+ ok = disk_log:close(n),
+ {ok,<<"head">>} = file:read_file(File),
file:delete(File),
- ?line {ok,n} =
+ {ok,n} =
disk_log:open([{name, n}, {file, File}, {type, halt},
{format, external},
{head_func, {?MODULE, head_fun, [{ok,"head"}]}}]),
- ?line ok = disk_log:close(n),
- ?line {ok,<<"head">>} = file:read_file(File),
+ ok = disk_log:close(n),
+ {ok,<<"head">>} = file:read_file(File),
file:delete(File),
- ?line Error1 = {error, {badarg, _}} =
+ Error1 = {error, {badarg, _}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{head_func, {tjo,hej,san}},{size, {100, 4}}]),
- ?line "The argument " ++ _ = format_error(Error1),
-
- ?line Error2 = {error, {invalid_header, _}} =
+ "The argument " ++ _ = format_error(Error1),
+
+ Error2 = {error, {invalid_header, _}} =
disk_log:open([{name, n}, {file, File}, {type, halt},
{head_func, {tjo,hej,[san]}}]),
- ?line "The disk log header" ++ _ = format_error(Error2),
+ "The disk log header" ++ _ = format_error(Error2),
file:delete(File).
@@ -1263,194 +1220,186 @@ hf() ->
ets:update_counter(xxx, wrapc, 1),
{ok, [1,2,3]}.
-plain_head(suite) -> [];
-plain_head(doc) -> ["Test head parameter"];
+%% Test head parameter.
plain_head(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
H = [1,2,3],
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {100,4}}, {head, H}]),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {100,4}}, {head, H}]),
%% This one is not "counted".
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {100,4}}, {head, H}]),
- ?line B = mk_bytes(60),
- ?line disk_log:log(a, B),
- ?line disk_log:alog(a, B),
- ?line disk_log:alog(a, B),
- ?line disk_log:log(a, B),
- ?line case get_all_terms(a) of
- [H,B,H,B,H,B,H,B] ->
- ok;
- E1 ->
- test_server_fail({bad_terms, E1,
- [H,B,H,B,H,B,H,B]})
- end,
- ?line 8 = no_written_items(a),
- ?line ok = disk_log:close(a),
- ?line {error, no_such_log} = disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {100,4}}, {head, H}]),
+ B = mk_bytes(60),
+ disk_log:log(a, B),
+ disk_log:alog(a, B),
+ disk_log:alog(a, B),
+ disk_log:log(a, B),
+ case get_all_terms(a) of
+ [H,B,H,B,H,B,H,B] ->
+ ok;
+ E1 ->
+ test_server_fail({bad_terms, E1,
+ [H,B,H,B,H,B,H,B]})
+ end,
+ 8 = no_written_items(a),
+ ok = disk_log:close(a),
+ {error, no_such_log} = disk_log:close(a),
del(File, 4).
-one_header(suite) -> [];
-one_header(doc) -> ["Test that a header is just printed once in a log file"];
+%% Test that a header is just printed once in a log file.
one_header(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
H = [1,2,3],
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {100,4}}, {head, H}]),
- ?line B = mk_bytes(60),
- ?line ok = disk_log:log(a, B),
- ?line ok = disk_log:alog(a, B),
- ?line ok = disk_log:alog(a, B),
- ?line ok = disk_log:log(a, B),
- ?line case get_all_terms(a) of
- [H,B,H,B,H,B,H,B] ->
- ok;
- E1 ->
- test_server_fail({bad_terms, E1,
- [H,B,H,B,H,B,H,B]})
- end,
- ?line 8 = no_written_items(a),
- ?line ok = disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {100,4}}, {head, H}]),
+ B = mk_bytes(60),
+ ok = disk_log:log(a, B),
+ ok = disk_log:alog(a, B),
+ ok = disk_log:alog(a, B),
+ ok = disk_log:log(a, B),
+ case get_all_terms(a) of
+ [H,B,H,B,H,B,H,B] ->
+ ok;
+ E1 ->
+ test_server_fail({bad_terms, E1,
+ [H,B,H,B,H,B,H,B]})
+ end,
+ 8 = no_written_items(a),
+ ok = disk_log:close(a),
del(File, 4),
Fileb = filename:join(Dir, "b.LOG"),
- ?line {ok, b} = disk_log:open([{name,b}, {file, Fileb}, {head, H}]),
- ?line ok = disk_log:close(b),
- ?line {ok, b} = disk_log:open([{name,b}, {file, Fileb}, {head, H}]),
- ?line ok = disk_log:log(b, "first log"),
- ?line ok = disk_log:alog(b, "second log"),
- ?line ok = disk_log:close(b),
- ?line {ok, b} = disk_log:open([{name,b}, {file, Fileb}, {head, H}]),
- ?line ok = disk_log:alog(b, "3rd log"),
- ?line ok = disk_log:log(b, "4th log"),
- ?line case get_all_terms(b) of
- [H, "first log", "second log", "3rd log", "4th log"] ->
- ok;
- E2 ->
- test_server_fail({bad_terms, E2,
- [H, "first log", "second log",
- "3rd log", "4th log"]})
- end,
- ?line 2 = no_written_items(b),
- ?line ok = disk_log:close(b),
- ?line ok = file:delete(Fileb),
+ {ok, b} = disk_log:open([{name,b}, {file, Fileb}, {head, H}]),
+ ok = disk_log:close(b),
+ {ok, b} = disk_log:open([{name,b}, {file, Fileb}, {head, H}]),
+ ok = disk_log:log(b, "first log"),
+ ok = disk_log:alog(b, "second log"),
+ ok = disk_log:close(b),
+ {ok, b} = disk_log:open([{name,b}, {file, Fileb}, {head, H}]),
+ ok = disk_log:alog(b, "3rd log"),
+ ok = disk_log:log(b, "4th log"),
+ case get_all_terms(b) of
+ [H, "first log", "second log", "3rd log", "4th log"] ->
+ ok;
+ E2 ->
+ test_server_fail({bad_terms, E2,
+ [H, "first log", "second log",
+ "3rd log", "4th log"]})
+ end,
+ 2 = no_written_items(b),
+ ok = disk_log:close(b),
+ ok = file:delete(Fileb),
Filec = filename:join(Dir, "c.LOG"),
H2 = "this is a header ",
- ?line {ok, c} = disk_log:open([{name,c}, {format, external},
- {file, Filec}, {head, H2}]),
- ?line ok = disk_log:close(c),
- ?line {ok, c} = disk_log:open([{name,c}, {format, external},
- {file, Filec}, {head, H2}]),
- ?line ok = disk_log:blog(c, "first log"),
- ?line ok = disk_log:balog(c, "second log"),
- ?line ok = disk_log:close(c),
- ?line {ok, c} = disk_log:open([{name,c}, {format, external},
- {file, Filec}, {head, H2}]),
- ?line ok = disk_log:balog(c, "3rd log"),
- ?line ok = disk_log:blog(c, "4th log"),
- ?line ok = disk_log:sync(c),
- ?line {ok, Fdc} = file:open(Filec, [read]),
- ?line {ok,"this is a header first logsecond log3rd log4th log"} =
+ {ok, c} = disk_log:open([{name,c}, {format, external},
+ {file, Filec}, {head, H2}]),
+ ok = disk_log:close(c),
+ {ok, c} = disk_log:open([{name,c}, {format, external},
+ {file, Filec}, {head, H2}]),
+ ok = disk_log:blog(c, "first log"),
+ ok = disk_log:balog(c, "second log"),
+ ok = disk_log:close(c),
+ {ok, c} = disk_log:open([{name,c}, {format, external},
+ {file, Filec}, {head, H2}]),
+ ok = disk_log:balog(c, "3rd log"),
+ ok = disk_log:blog(c, "4th log"),
+ ok = disk_log:sync(c),
+ {ok, Fdc} = file:open(Filec, [read]),
+ {ok,"this is a header first logsecond log3rd log4th log"} =
file:read(Fdc, 200),
- ?line ok = file:close(Fdc),
- ?line 2 = no_written_items(c),
- ?line disk_log:close(c),
- ?line ok = file:delete(Filec),
+ ok = file:close(Fdc),
+ 2 = no_written_items(c),
+ disk_log:close(c),
+ ok = file:delete(Filec),
ok.
-wrap_notif(suite) -> [];
-wrap_notif(doc) -> ["Test notify parameter, wrap"];
+%% Test notify parameter, wrap.
wrap_notif(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {100,4}}, {notify, true}]),
- ?line B = mk_bytes(60),
- ?line disk_log:log(a, B),
- ?line disk_log:alog(a, B),
- ?line disk_log:alog(a, B),
- ?line disk_log:log(a, B),
- ?line disk_log:log(a, B),
- ?line rec(3, {disk_log, node(), a, {wrap, 0}}),
- ?line rec(1, {disk_log, node(), a, {wrap, 1}}),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {100,4}}, {notify, true}]),
+ B = mk_bytes(60),
+ disk_log:log(a, B),
+ disk_log:alog(a, B),
+ disk_log:alog(a, B),
+ disk_log:log(a, B),
+ disk_log:log(a, B),
+ rec(3, {disk_log, node(), a, {wrap, 0}}),
+ rec(1, {disk_log, node(), a, {wrap, 1}}),
disk_log:close(a),
del(File, 4).
-full_notif(suite) -> [];
-full_notif(doc) -> ["Test notify parameter, wrap, filled file"];
+%% Test notify parameter, wrap, filled file.
full_notif(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
file:delete(File),
- ?line {ok, a} = disk_log:open([{name, a}, {file, File}, {type, halt},
- {size, 100}, {notify, true}]),
- ?line B = mk_bytes(60),
- ?line disk_log:log(a, B),
- ?line disk_log:alog(a, B),
- ?line rec(1, {disk_log, node(), a, full}),
+ {ok, a} = disk_log:open([{name, a}, {file, File}, {type, halt},
+ {size, 100}, {notify, true}]),
+ B = mk_bytes(60),
+ disk_log:log(a, B),
+ disk_log:alog(a, B),
+ rec(1, {disk_log, node(), a, full}),
disk_log:close(a),
file:delete(File).
-trunc_notif(suite) -> [];
-trunc_notif(doc) -> ["Test notify parameter, wrap, truncated file"];
+%% Test notify parameter, wrap, truncated file.
trunc_notif(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
File2 = filename:join(Dir, "a.DUMP"),
- ?line {ok, a} = disk_log:open([{name, a}, {file, File}, {type, halt},
- {size, 100}, {notify, true}]),
- ?line B = mk_bytes(60),
- ?line disk_log:log(a, B),
- ?line disk_log:truncate(a),
- ?line rec(1, {disk_log, node(), a, {truncated, 1}}),
- ?line disk_log:log(a, B),
- ?line ok = disk_log:reopen(a, File2),
- ?line rec(1, {disk_log, node(), a, {truncated, 1}}),
+ {ok, a} = disk_log:open([{name, a}, {file, File}, {type, halt},
+ {size, 100}, {notify, true}]),
+ B = mk_bytes(60),
+ disk_log:log(a, B),
+ disk_log:truncate(a),
+ rec(1, {disk_log, node(), a, {truncated, 1}}),
+ disk_log:log(a, B),
+ ok = disk_log:reopen(a, File2),
+ rec(1, {disk_log, node(), a, {truncated, 1}}),
disk_log:close(a),
file:delete(File),
file:delete(File2).
-blocked_notif(suite) -> [];
-blocked_notif(doc) ->
- ["Test notify parameters 'format_external' and 'blocked_log"];
+%% Test notify parameters 'format_external' and 'blocked_log'.
blocked_notif(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "n.LOG"),
No = 4,
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {size, {100,No}}, {notify, true},
- {format, external}]),
- ?line B = mk_bytes(60),
- ?line Error1 = {error,{format_external,n}} = disk_log:log(n, B),
- ?line "The requested operation" ++ _ = format_error(Error1),
- ?line ok = disk_log:blog(n, B),
- ?line ok = disk_log:alog(n, B),
- ?line rec(1, {disk_log, node(), n, {format_external, term_to_binary(B)}}),
- ?line ok = disk_log:alog_terms(n, [B,B,B,B]),
- ?line rec(1, {disk_log, node(), n, {format_external,
- lists:map(fun term_to_binary/1, [B,B,B,B])}}),
- ?line ok = disk_log:block(n, false),
- ?line ok = disk_log:alog(n, B),
- ?line rec(1, {disk_log, node(), n, {blocked_log, term_to_binary(B)}}),
- ?line ok = disk_log:balog(n, B),
- ?line rec(1, {disk_log, node(), n, {blocked_log, list_to_binary(B)}}),
- ?line ok = disk_log:balog_terms(n, [B,B,B,B]),
- ?line disk_log:close(n),
- ?line rec(1, {disk_log, node(), n, {blocked_log,
- lists:map(fun list_to_binary/1, [B,B,B,B])}}),
- ?line del(File, No).
-
-
-new_idx_vsn(suite) -> [];
-new_idx_vsn(doc) -> ["Test the new version of the .idx file"];
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {size, {100,No}}, {notify, true},
+ {format, external}]),
+ B = mk_bytes(60),
+ Error1 = {error,{format_external,n}} = disk_log:log(n, B),
+ "The requested operation" ++ _ = format_error(Error1),
+ ok = disk_log:blog(n, B),
+ ok = disk_log:alog(n, B),
+ rec(1, {disk_log, node(), n, {format_external, [term_to_binary(B)]}}),
+ ok = disk_log:alog_terms(n, [B,B,B,B]),
+ rec(1, {disk_log, node(), n, {format_external,
+ lists:map(fun term_to_binary/1, [B,B,B,B])}}),
+ ok = disk_log:block(n, false),
+ ok = disk_log:alog(n, B),
+ rec(1, {disk_log, node(), n, {blocked_log, [term_to_binary(B)]}}),
+ ok = disk_log:balog(n, B),
+ rec(1, {disk_log, node(), n, {blocked_log, [list_to_binary(B)]}}),
+ ok = disk_log:balog_terms(n, [B,B,B,B]),
+ disk_log:close(n),
+ rec(1, {disk_log, node(), n, {blocked_log,
+ lists:map(fun list_to_binary/1, [B,B,B,B])}}),
+ del(File, No).
+
+
+%% Test the new version of the .idx file.
new_idx_vsn(Conf) when is_list(Conf) ->
DataDir = ?datadir(Conf),
PrivDir = ?privdir(Conf),
@@ -1459,201 +1408,197 @@ new_idx_vsn(Conf) when is_list(Conf) ->
Kurt2 = filename:join(PrivDir, "kurt2.LOG"),
%% Test that a wrap log file can have more than 255 files
- ?line {ok, new_vsn} = disk_log:open([{file, File}, {name, new_vsn},
- {type, wrap}, {size, {40, 270}}]),
- ?line ok = log(new_vsn, 280),
- ?line {ok, Bin} = file:read_file(add_ext(File, "idx")),
- ?line <<0,0:32,2,10:32,1:64,1:64,_/binary>> = Bin,
- ?line disk_log:close(new_vsn),
- ?line del(File, 270),
+ {ok, new_vsn} = disk_log:open([{file, File}, {name, new_vsn},
+ {type, wrap}, {size, {40, 270}}]),
+ ok = log(new_vsn, 280),
+ {ok, Bin} = file:read_file(add_ext(File, "idx")),
+ <<0,0:32,2,10:32,1:64,1:64,_/binary>> = Bin,
+ disk_log:close(new_vsn),
+ del(File, 270),
%% convert a very old version (0) of wrap log file to the new format (2)
copy_wrap_log("kurt.LOG", 4, DataDir, PrivDir),
- ?line {repaired, kurt, {recovered, 1}, {badbytes, 0}} =
+ {repaired, kurt, {recovered, 1}, {badbytes, 0}} =
disk_log:open([{file, Kurt}, {name, kurt},
{type, wrap}, {size, {40, 4}}]),
- ?line ok = disk_log:log(kurt, "this is a logged message number X"),
- ?line ok = disk_log:log(kurt, "this is a logged message number Y"),
- ?line {ok, BinK} = file:read_file(add_ext(Kurt, "idx")),
- ?line <<0,0:32,2,2:32,1:64,1:64,1:64,1:64>> = BinK,
- ?line {{40,4}, 2} = disk_log_1:read_size_file_version(Kurt),
+ ok = disk_log:log(kurt, "this is a logged message number X"),
+ ok = disk_log:log(kurt, "this is a logged message number Y"),
+ {ok, BinK} = file:read_file(add_ext(Kurt, "idx")),
+ <<0,0:32,2,2:32,1:64,1:64,1:64,1:64>> = BinK,
+ {{40,4}, 2} = disk_log_1:read_size_file_version(Kurt),
disk_log:close(kurt),
- ?line del(Kurt, 4),
+ del(Kurt, 4),
%% keep the old format (1)
copy_wrap_log("kurt2.LOG", 4, DataDir, PrivDir),
- ?line {repaired, kurt2, {recovered, 1}, {badbytes, 0}} =
+ {repaired, kurt2, {recovered, 1}, {badbytes, 0}} =
disk_log:open([{file, Kurt2}, {name, kurt2},
{type, wrap}, {size, {40, 4}}]),
- ?line ok = disk_log:log(kurt2, "this is a logged message number X"),
- ?line ok = disk_log:log(kurt2, "this is a logged message number Y"),
- ?line {ok, BinK2} = file:read_file(add_ext(Kurt2, "idx")),
- ?line <<0,2:32,1:32,1:32,1:32,1:32>> = BinK2,
- ?line {{40,4}, 1} = disk_log_1:read_size_file_version(Kurt2),
+ ok = disk_log:log(kurt2, "this is a logged message number X"),
+ ok = disk_log:log(kurt2, "this is a logged message number Y"),
+ {ok, BinK2} = file:read_file(add_ext(Kurt2, "idx")),
+ <<0,2:32,1:32,1:32,1:32,1:32>> = BinK2,
+ {{40,4}, 1} = disk_log_1:read_size_file_version(Kurt2),
disk_log:close(kurt2),
- ?line del(Kurt2, 4),
+ del(Kurt2, 4),
ok.
-reopen(suite) -> [];
-reopen(doc) ->
- ["Test reopen/1 on halt and wrap logs."];
+%% Test reopen/1 on halt and wrap logs.
reopen(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
- ?line NewFile = filename:join(Dir, "nn.LOG"),
- ?line B = mk_bytes(60),
-
- ?line file:delete(File), % cleanup
- ?line file:delete(NewFile), % cleanup
- ?line Q = qlen(),
+ File = filename:join(Dir, "n.LOG"),
+ NewFile = filename:join(Dir, "nn.LOG"),
+ B = mk_bytes(60),
+
+ file:delete(File), % cleanup
+ file:delete(NewFile), % cleanup
+ Q = qlen(),
%% External halt log.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {notify, true}, {head, "header"},
- {size, infinity},{format, external}]),
- ?line ok = disk_log:blog(n, B),
- ?line ok = disk_log:breopen(n, NewFile, "head"),
- ?line rec(1, {disk_log, node(), n, {truncated, 2}}),
- ?line ok = disk_log:blog(n, B),
- ?line ok = disk_log:blog(n, B),
- ?line ok = disk_log:breopen(n, NewFile, "head"),
- ?line rec(1, {disk_log, node(), n, {truncated, 3}}),
- ?line ok = disk_log:close(n),
- ?line {ok,BinaryFile} = file:read_file(File),
- ?line "head" = binary_to_list(BinaryFile),
- ?line file:delete(File),
- ?line file:delete(NewFile),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {notify, true}, {head, "header"},
+ {size, infinity},{format, external}]),
+ ok = disk_log:blog(n, B),
+ ok = disk_log:breopen(n, NewFile, "head"),
+ rec(1, {disk_log, node(), n, {truncated, 2}}),
+ ok = disk_log:blog(n, B),
+ ok = disk_log:blog(n, B),
+ ok = disk_log:breopen(n, NewFile, "head"),
+ rec(1, {disk_log, node(), n, {truncated, 3}}),
+ ok = disk_log:close(n),
+ {ok,BinaryFile} = file:read_file(File),
+ "head" = binary_to_list(BinaryFile),
+ file:delete(File),
+ file:delete(NewFile),
%% Internal halt log.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {notify, true}, {head, header},
- {size, infinity}]),
- ?line ok = disk_log:log(n, B),
- ?line Error1 = {error, {same_file_name, n}} = disk_log:reopen(n, File),
- ?line "Current and new" ++ _ = format_error(Error1),
- ?line ok = disk_log:reopen(n, NewFile),
- ?line rec(1, {disk_log, node(), n, {truncated, 2}}),
- ?line ok = disk_log:log(n, B),
- ?line ok = disk_log:log(n, B),
- ?line ok = disk_log:reopen(n, NewFile),
- ?line rec(1, {disk_log, node(), n, {truncated, 3}}),
- ?line ok = disk_log:close(n),
- ?line [header, _B, _B] = get_all_terms(nn, NewFile, halt),
- ?line file:delete(File),
- ?line file:delete(NewFile),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {notify, true}, {head, header},
+ {size, infinity}]),
+ ok = disk_log:log(n, B),
+ Error1 = {error, {same_file_name, n}} = disk_log:reopen(n, File),
+ "Current and new" ++ _ = format_error(Error1),
+ ok = disk_log:reopen(n, NewFile),
+ rec(1, {disk_log, node(), n, {truncated, 2}}),
+ ok = disk_log:log(n, B),
+ ok = disk_log:log(n, B),
+ ok = disk_log:reopen(n, NewFile),
+ rec(1, {disk_log, node(), n, {truncated, 3}}),
+ ok = disk_log:close(n),
+ [header, _B, _B] = get_all_terms(nn, NewFile, halt),
+ file:delete(File),
+ file:delete(NewFile),
%% Internal wrap log.
- ?line No = 4,
- ?line del(File, No), % cleanup
- ?line del(NewFile, No), % cleanup
-
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {notify, true},
- {head, header}, {size, {100, No}}]),
- ?line ok = disk_log:log(n, B),
- ?line ok = disk_log:log_terms(n, [B,B,B]),
+ No = 4,
+ del(File, No), % cleanup
+ del(NewFile, No), % cleanup
+
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {notify, true},
+ {head, header}, {size, {100, No}}]),
+ ok = disk_log:log(n, B),
+ ok = disk_log:log_terms(n, [B,B,B]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(3, {disk_log, node(), n, {wrap, 0}}),
- ?line ok = disk_log:log_terms(n, [B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 2}}),
- ?line ok = disk_log:log_terms(n, [B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 2}}),
- ?line ok = disk_log:reopen(n, NewFile, new_header),
- ?line rec(1, {disk_log, node(), n, {truncated, 8}}),
- ?line ok = disk_log:log_terms(n, [B,B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line ok = disk_log:log_terms(n, [B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line ok = disk_log:close(n),
- ?line [header, _, header, _, header, _, header, _] =
+ rec(3, {disk_log, node(), n, {wrap, 0}}),
+ ok = disk_log:log_terms(n, [B]),
+ rec(1, {disk_log, node(), n, {wrap, 2}}),
+ ok = disk_log:log_terms(n, [B]),
+ rec(1, {disk_log, node(), n, {wrap, 2}}),
+ ok = disk_log:reopen(n, NewFile, new_header),
+ rec(1, {disk_log, node(), n, {truncated, 8}}),
+ ok = disk_log:log_terms(n, [B,B]),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ ok = disk_log:log_terms(n, [B]),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ ok = disk_log:close(n),
+ [header, _, header, _, header, _, header, _] =
get_all_terms(nn, NewFile, wrap),
- ?line [new_header, _, header, _, header, _] = get_all_terms(n, File, wrap),
+ [new_header, _, header, _, header, _] = get_all_terms(n, File, wrap),
- ?line del(NewFile, No),
- ?line file:delete(File ++ ".2"),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {notify, true},
- {head, header}, {size, {100, No}}]),
+ del(NewFile, No),
+ file:delete(File ++ ".2"),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {notify, true},
+ {head, header}, {size, {100, No}}]),
%% One file is missing...
- ?line ok = disk_log:reopen(n, NewFile),
- ?line rec(1, {disk_log, node(), n, {truncated, 6}}),
- ?line ok = disk_log:close(n),
+ ok = disk_log:reopen(n, NewFile),
+ rec(1, {disk_log, node(), n, {truncated, 6}}),
+ ok = disk_log:close(n),
- ?line del(File, No),
- ?line del(NewFile, No),
- ?line Q = qlen(),
+ del(File, No),
+ del(NewFile, No),
+ Q = qlen(),
ok.
-block_blocked(suite) -> [];
-block_blocked(doc) ->
- ["Test block/1 on external and internal logs."];
+%% Test block/1 on external and internal logs.
block_blocked(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line B = mk_bytes(60),
+ B = mk_bytes(60),
Halt = join(Dir, "halt.LOG"),
- % External logs.
- ?line file:delete(Halt), % cleanup
- ?line {ok, halt} = disk_log:open([{name, halt}, {type, halt},
- {format, external}, {file, Halt}]),
- ?line ok = disk_log:sync(halt),
- ?line ok = disk_log:block(halt, false),
- ?line Error1 = {error, {blocked_log, halt}} = disk_log:block(halt),
- ?line "The blocked disk" ++ _ = format_error(Error1),
- ?line {error, {blocked_log, halt}} = disk_log:sync(halt),
- ?line {error, {blocked_log, halt}} = disk_log:truncate(halt),
- ?line {error, {blocked_log, halt}} = disk_log:change_size(halt, infinity),
- ?line {error, {blocked_log, halt}} =
+ %% External logs.
+ file:delete(Halt), % cleanup
+ {ok, halt} = disk_log:open([{name, halt}, {type, halt},
+ {format, external}, {file, Halt}]),
+ ok = disk_log:sync(halt),
+ ok = disk_log:block(halt, false),
+ Error1 = {error, {blocked_log, halt}} = disk_log:block(halt),
+ "The blocked disk" ++ _ = format_error(Error1),
+ {error, {blocked_log, halt}} = disk_log:sync(halt),
+ {error, {blocked_log, halt}} = disk_log:truncate(halt),
+ {error, {blocked_log, halt}} = disk_log:change_size(halt, infinity),
+ {error, {blocked_log, halt}} =
disk_log:change_notify(halt, self(), false),
- ?line {error, {blocked_log, halt}} =
+ {error, {blocked_log, halt}} =
disk_log:change_header(halt, {head, header}),
- ?line {error, {blocked_log, halt}} = disk_log:reopen(halt, "foo"),
- ?line ok = disk_log:close(halt),
-
- ?line {ok, halt} = disk_log:open([{name, halt}, {type, halt},
- {format, external}]),
- ?line ok = disk_log:sync(halt),
- ?line ok = disk_log:block(halt, true),
- ?line {error, {blocked_log, halt}} = disk_log:blog(halt, B),
- ?line {error, {blocked_log, halt}} = disk_log:blog(halt, B),
- ?line {error, {blocked_log, halt}} = disk_log:block(halt),
- ?line {error, {blocked_log, halt}} = disk_log:sync(halt),
- ?line {error, {blocked_log, halt}} = disk_log:truncate(halt),
- ?line {error, {blocked_log, halt}} = disk_log:change_size(halt, infinity),
- ?line {error, {blocked_log, halt}} =
+ {error, {blocked_log, halt}} = disk_log:reopen(halt, "foo"),
+ ok = disk_log:close(halt),
+
+ {ok, halt} = disk_log:open([{name, halt}, {type, halt},
+ {format, external}]),
+ ok = disk_log:sync(halt),
+ ok = disk_log:block(halt, true),
+ {error, {blocked_log, halt}} = disk_log:blog(halt, B),
+ {error, {blocked_log, halt}} = disk_log:blog(halt, B),
+ {error, {blocked_log, halt}} = disk_log:block(halt),
+ {error, {blocked_log, halt}} = disk_log:sync(halt),
+ {error, {blocked_log, halt}} = disk_log:truncate(halt),
+ {error, {blocked_log, halt}} = disk_log:change_size(halt, infinity),
+ {error, {blocked_log, halt}} =
disk_log:change_notify(halt, self(), false),
- ?line {error, {blocked_log, halt}} =
+ {error, {blocked_log, halt}} =
disk_log:change_header(halt, {head, header}),
- ?line {error, {blocked_log, halt}} = disk_log:reopen(halt, "foo"),
-
- ?line ok = disk_log:unblock(halt),
- ?line ok = disk_log:close(halt),
- ?line file:delete(Halt),
-
- % Internal logs.
- ?line File = filename:join(Dir, "n.LOG"),
- ?line No = 4,
- ?line del(File, No), % cleanup
- ?line {ok, halt} = disk_log:open([{name, halt}, {file, File}, {type, wrap},
- {size, {100, No}}]),
- ?line ok = disk_log:block(halt, true),
- ?line eof = disk_log:chunk(halt, start),
- ?line Error2 = {error, end_of_log} = disk_log:chunk_step(halt, start, 1),
- ?line "An attempt" ++ _ = format_error(Error2),
- ?line {error, {blocked_log, halt}} = disk_log:log(halt, B),
- ?line {error, {blocked_log, halt}} = disk_log:inc_wrap_file(halt),
- ?line ok = disk_log:unblock(halt),
- ?line ok = disk_log:block(halt, false),
- ?line {error, {blocked_log, halt}} = disk_log:log(halt, B),
- ?line {error, {blocked_log, halt}} = disk_log:inc_wrap_file(halt),
- ?line Parent = self(),
- ?line Pid =
+ {error, {blocked_log, halt}} = disk_log:reopen(halt, "foo"),
+
+ ok = disk_log:unblock(halt),
+ ok = disk_log:close(halt),
+ file:delete(Halt),
+
+ %% Internal logs.
+ File = filename:join(Dir, "n.LOG"),
+ No = 4,
+ del(File, No), % cleanup
+ {ok, halt} = disk_log:open([{name, halt}, {file, File}, {type, wrap},
+ {size, {100, No}}]),
+ ok = disk_log:block(halt, true),
+ eof = disk_log:chunk(halt, start),
+ Error2 = {error, end_of_log} = disk_log:chunk_step(halt, start, 1),
+ "An attempt" ++ _ = format_error(Error2),
+ {error, {blocked_log, halt}} = disk_log:log(halt, B),
+ {error, {blocked_log, halt}} = disk_log:inc_wrap_file(halt),
+ ok = disk_log:unblock(halt),
+ ok = disk_log:block(halt, false),
+ {error, {blocked_log, halt}} = disk_log:log(halt, B),
+ {error, {blocked_log, halt}} = disk_log:inc_wrap_file(halt),
+ Parent = self(),
+ Pid =
spawn_link(fun() ->
{error, {blocked_log, halt}} =
disk_log:chunk(halt, start),
@@ -1661,109 +1606,107 @@ block_blocked(Conf) when is_list(Conf) ->
disk_log:chunk_step(halt, start, 1),
Parent ! {self(), stopped}
end),
- ?line receive {Pid,stopped} -> ok end,
- ?line ok = disk_log:close(halt),
- ?line del(File, No).
+ receive {Pid,stopped} -> ok end,
+ ok = disk_log:close(halt),
+ del(File, No).
-block_queue(suite) -> [];
-block_queue(doc) ->
- ["Run commands from the queue by unblocking."];
+%% Run commands from the queue by unblocking.
block_queue(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line Q = qlen(),
- ?line File = filename:join(Dir, "n.LOG"),
- ?line No = 4,
- ?line del(File, No), % cleanup
- ?line B = mk_bytes(60),
-
- ?line Pid = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = sync_do(Pid, {open, File}),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, {blog, B}),
- ?line ok = disk_log:unblock(n),
- ?line ok = get_reply(),
- ?line 1 = no_written_items(n),
- ?line Error1 = {error,{not_blocked,n}} = disk_log:unblock(n),
- ?line "The disk log" ++ _ = format_error(Error1),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, {balog, "one string"}),
- ?line ok = disk_log:unblock(n),
- ?line ok = get_reply(),
- ?line 2 = no_written_items(n),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, sync),
- ?line ok = disk_log:unblock(n),
- ?line ok = get_reply(),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, truncate),
- ?line ok = disk_log:unblock(n),
- ?line ok = get_reply(),
- ?line 0 = no_items(n),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, {block, false}),
- ?line ok = disk_log:unblock(n),
- ?line ok = get_reply(),
- ?line {error, {blocked_log, _}} = disk_log:blog(n, B),
- ?line ok = sync_do(Pid, unblock),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, {change_notify, Pid, true}),
- ?line ok = disk_log:unblock(n),
- ?line ok = get_reply(),
- ?line [{_, true}] = owners(n),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, {change_notify, Pid, false}),
- ?line ok = disk_log:unblock(n),
- ?line ok = get_reply(),
- ?line [{_, false}] = owners(n),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, {change_header, {head, header}}),
- ?line ok = disk_log:unblock(n),
- ?line {error, {badarg, head}} = get_reply(),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, {change_size, 17}),
- ?line ok = disk_log:unblock(n),
- ?line {error, {badarg, size}} = get_reply(),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, inc_wrap_file),
- ?line ok = disk_log:unblock(n),
- ?line ok = get_reply(),
-
- ?line ok = sync_do(Pid, close),
- ?line del(File, No),
-
- ?line _Pid2 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = sync_do(Pid, {int_open, File}),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, {chunk, start}),
- ?line ok = disk_log:unblock(n),
- ?line eof = get_reply(),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, {chunk_step, start, 100}),
- ?line ok = disk_log:unblock(n),
- ?line {ok, _Cont} = get_reply(),
-
- ?line ok = disk_log:block(n, true),
- ?line async_do(Pid, {log,a_term}),
- ?line ok = disk_log:unblock(n),
- ?line ok = get_reply(),
- ?line 1 = no_written_items(n),
-
- ?line ok = sync_do(Pid, close),
- ?line sync_do(Pid, terminate),
- ?line del(File, No),
+ Q = qlen(),
+ File = filename:join(Dir, "n.LOG"),
+ No = 4,
+ del(File, No), % cleanup
+ B = mk_bytes(60),
+
+ Pid = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = sync_do(Pid, {open, File}),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, {blog, B}),
+ ok = disk_log:unblock(n),
+ ok = get_reply(),
+ 1 = no_written_items(n),
+ Error1 = {error,{not_blocked,n}} = disk_log:unblock(n),
+ "The disk log" ++ _ = format_error(Error1),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, {balog, "one string"}),
+ ok = disk_log:unblock(n),
+ ok = get_reply(),
+ 2 = no_written_items(n),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, sync),
+ ok = disk_log:unblock(n),
+ ok = get_reply(),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, truncate),
+ ok = disk_log:unblock(n),
+ ok = get_reply(),
+ 0 = no_items(n),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, {block, false}),
+ ok = disk_log:unblock(n),
+ ok = get_reply(),
+ {error, {blocked_log, _}} = disk_log:blog(n, B),
+ ok = sync_do(Pid, unblock),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, {change_notify, Pid, true}),
+ ok = disk_log:unblock(n),
+ ok = get_reply(),
+ [{_, true}] = owners(n),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, {change_notify, Pid, false}),
+ ok = disk_log:unblock(n),
+ ok = get_reply(),
+ [{_, false}] = owners(n),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, {change_header, {head, header}}),
+ ok = disk_log:unblock(n),
+ {error, {badarg, head}} = get_reply(),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, {change_size, 17}),
+ ok = disk_log:unblock(n),
+ {error, {badarg, size}} = get_reply(),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, inc_wrap_file),
+ ok = disk_log:unblock(n),
+ ok = get_reply(),
+
+ ok = sync_do(Pid, close),
+ del(File, No),
+
+ _Pid2 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = sync_do(Pid, {int_open, File}),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, {chunk, start}),
+ ok = disk_log:unblock(n),
+ eof = get_reply(),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, {chunk_step, start, 100}),
+ ok = disk_log:unblock(n),
+ {ok, _Cont} = get_reply(),
+
+ ok = disk_log:block(n, true),
+ async_do(Pid, {log,a_term}),
+ ok = disk_log:unblock(n),
+ ok = get_reply(),
+ 1 = no_written_items(n),
+
+ ok = sync_do(Pid, close),
+ sync_do(Pid, terminate),
+ del(File, No),
%% Test of the queue. Three processes involved here. Pid1's block
%% request is queued. Pid2's log requests are put in the queue.
@@ -1771,171 +1714,165 @@ block_queue(Conf) when is_list(Conf) ->
%% Pid2's log requests are executed when Pid1 unblocks.
%% (This example should show that the pair 'queue' and 'messages'
%% in State does the trick - one does not need a "real" queue.)
- ?line P0 = pps(),
+ P0 = pps(),
Name = n,
- ?line Pid1 = spawn_link(?MODULE, lserv, [Name]),
- ?line {ok, Name} = sync_do(Pid1, {int_open, File, {1000,2}}),
- ?line Pid2 = spawn_link(?MODULE, lserv, [Name]),
- ?line {ok, Name} = sync_do(Pid2, {int_open, File, {1000,2}}),
- ?line ok = disk_log:block(Name),
- ?line async_do(Pid1, {alog,{1,a}}),
- ?line ok = get_reply(),
- ?line async_do(Pid1, {alog,{2,b}}),
- ?line ok = get_reply(),
- ?line async_do(Pid1, {alog,{3,c}}),
- ?line ok = get_reply(),
- ?line async_do(Pid1, {alog,{4,d}}),
- ?line ok = get_reply(),
- ?line async_do(Pid1, block),
- ?line async_do(Pid2, {alog,{5,e}}),
- ?line ok = get_reply(),
- ?line async_do(Pid2, {alog,{6,f}}),
- ?line ok = get_reply(),
- ?line ok = disk_log:unblock(Name),
- ?line ok = get_reply(),
- ?line async_do(Pid2, {alog,{7,g}}),
- ?line ok = get_reply(),
- ?line async_do(Pid2, {alog,{8,h}}),
- ?line ok = get_reply(),
- ?line async_do(Pid1, unblock),
- ?line ok = get_reply(),
- ?line ok = sync_do(Pid1, close),
- ?line ok = sync_do(Pid2, close),
- ?line sync_do(Pid1, terminate),
- ?line sync_do(Pid2, terminate),
+ Pid1 = spawn_link(?MODULE, lserv, [Name]),
+ {ok, Name} = sync_do(Pid1, {int_open, File, {1000,2}}),
+ Pid2 = spawn_link(?MODULE, lserv, [Name]),
+ {ok, Name} = sync_do(Pid2, {int_open, File, {1000,2}}),
+ ok = disk_log:block(Name),
+ async_do(Pid1, {alog,{1,a}}),
+ ok = get_reply(),
+ async_do(Pid1, {alog,{2,b}}),
+ ok = get_reply(),
+ async_do(Pid1, {alog,{3,c}}),
+ ok = get_reply(),
+ async_do(Pid1, {alog,{4,d}}),
+ ok = get_reply(),
+ async_do(Pid1, block),
+ async_do(Pid2, {alog,{5,e}}),
+ ok = get_reply(),
+ async_do(Pid2, {alog,{6,f}}),
+ ok = get_reply(),
+ ok = disk_log:unblock(Name),
+ ok = get_reply(),
+ async_do(Pid2, {alog,{7,g}}),
+ ok = get_reply(),
+ async_do(Pid2, {alog,{8,h}}),
+ ok = get_reply(),
+ async_do(Pid1, unblock),
+ ok = get_reply(),
+ ok = sync_do(Pid1, close),
+ ok = sync_do(Pid2, close),
+ sync_do(Pid1, terminate),
+ sync_do(Pid2, terminate),
Terms = get_all_terms(Name, File, wrap),
- ?line true = [{1,a},{2,b},{3,c},{4,d},{5,e},{6,f},{7,g},{8,h}] == Terms,
+ true = [{1,a},{2,b},{3,c},{4,d},{5,e},{6,f},{7,g},{8,h}] == Terms,
del(File, 2),
- ?line Q = qlen(),
- ?line true = (P0 == pps()),
+ Q = qlen(),
+ check_pps(P0),
ok.
-block_queue2(suite) -> [];
-block_queue2(doc) ->
- ["OTP-4880. Blocked processes did not get disk_log_stopped message."];
+%% OTP-4880. Blocked processes did not get disk_log_stopped message.
block_queue2(Conf) when is_list(Conf) ->
- ?line Q = qlen(),
- ?line P0 = pps(),
+ Q = qlen(),
+ P0 = pps(),
Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
- ?line No = 4,
+ File = filename:join(Dir, "n.LOG"),
+ No = 4,
%% log requests are queued, and processed when the log is closed
- ?line Pid = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = sync_do(Pid, {open, File}),
- ?line ok = sync_do(Pid, block),
+ Pid = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = sync_do(Pid, {open, File}),
+ ok = sync_do(Pid, block),
%% Asynchronous stuff is ignored.
- ?line ok = disk_log:balog_terms(n, [<<"foo">>,<<"bar">>]),
- ?line ok = disk_log:balog_terms(n, [<<"more">>,<<"terms">>]),
+ ok = disk_log:balog_terms(n, [<<"foo">>,<<"bar">>]),
+ ok = disk_log:balog_terms(n, [<<"more">>,<<"terms">>]),
Parent = self(),
- ?line Fun =
+ Fun =
fun() ->
{error,no_such_log} = disk_log:sync(n),
receive {disk_log, _, {error, disk_log_stopped}} -> ok end,
Parent ! disk_log_stopped_ok
end,
- ?line spawn(Fun),
- ?line ok = sync_do(Pid, close),
- ?line receive disk_log_stopped_ok -> ok end,
- ?line sync_do(Pid, terminate),
- ?line {ok,<<>>} = file:read_file(File ++ ".1"),
- ?line del(File, No),
- ?line Q = qlen(),
- ?line true = (P0 == pps()),
+ spawn(Fun),
+ ok = sync_do(Pid, close),
+ receive disk_log_stopped_ok -> ok end,
+ sync_do(Pid, terminate),
+ {ok,<<>>} = file:read_file(File ++ ".1"),
+ del(File, No),
+ Q = qlen(),
+ check_pps(P0),
ok.
-unblock(suite) -> [];
-unblock(doc) ->
- ["Test unblock/1."];
+%% Test unblock/1.
unblock(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "n.LOG"),
No = 1,
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {size, {100,No}}, {notify, true},
- {format, external}]),
- ?line ok = disk_log:block(n),
- ?line spawn_link(?MODULE, try_unblock, [n]),
- ?line timer:sleep(100),
- ?line disk_log:close(n),
- ?line del(File, No).
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {size, {100,No}}, {notify, true},
+ {format, external}]),
+ ok = disk_log:block(n),
+ spawn_link(?MODULE, try_unblock, [n]),
+ timer:sleep(100),
+ disk_log:close(n),
+ del(File, No).
try_unblock(Log) ->
- ?line Error = {error, {not_blocked_by_pid, n}} = disk_log:unblock(Log),
- ?line "The disk log" ++ _ = format_error(Error).
+ Error = {error, {not_blocked_by_pid, n}} = disk_log:unblock(Log),
+ "The disk log" ++ _ = format_error(Error).
-open_overwrite(suite) -> [];
-open_overwrite(doc) ->
- ["Test open/1 when old files exist."];
+%% Test open/1 when old files exist.
open_overwrite(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
- ?line No = 4,
- ?line del(File, No), % cleanup
+ File = filename:join(Dir, "n.LOG"),
+ No = 4,
+ del(File, No), % cleanup
- % read write
- ?line First = "n.LOG.1",
- ?line make_file(Dir, First, 8),
+ %% read write
+ First = "n.LOG.1",
+ make_file(Dir, First, 8),
- ?line Error1 = {error, {not_a_log_file, _}} =
+ Error1 = {error, {not_a_log_file, _}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {100, No}}]),
- ?line "The file" ++ _ = format_error(Error1),
- ?line del(File, No),
+ "The file" ++ _ = format_error(Error1),
+ del(File, No),
- ?line make_file(Dir, First, 4),
+ make_file(Dir, First, 4),
- ?line {error, {not_a_log_file, _}} =
+ {error, {not_a_log_file, _}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {100, No}}]),
- ?line del(File, No),
+ del(File, No),
- ?line make_file(Dir, First, 0),
+ make_file(Dir, First, 0),
- ?line {error, {not_a_log_file, _}} =
+ {error, {not_a_log_file, _}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {100, No}}]),
- % read only
- ?line make_file(Dir, First, 6),
+ %% read only
+ make_file(Dir, First, 6),
- ?line {error, {not_a_log_file, _}} =
+ {error, {not_a_log_file, _}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},{mode, read_only},
{format, internal}, {size, {100, No}}]),
- ?line del(File, No),
+ del(File, No),
- ?line make_file(Dir, First, 0),
+ make_file(Dir, First, 0),
- ?line {error, {not_a_log_file, _}} =
+ {error, {not_a_log_file, _}} =
disk_log:open([{name, n}, {file, File},{type, wrap},
{mode, read_only}, {format, internal},
{size, {100, No}}]),
- ?line del(File, No),
-
- ?line {error, _} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {mode, read_only},
- {format, internal},{size, {100, No}}]),
+ del(File, No),
+
+ {error, _} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {mode, read_only},
+ {format, internal},{size, {100, No}}]),
file:delete(File),
- ?line {ok,n} = disk_log:open([{name,n},{file,File},
- {mode,read_write},{type,halt}]),
- ?line ok = disk_log:close(n),
- ?line ok = unwritable(File),
- ?line {error, {file_error, File, _}} =
- disk_log:open([{name,n},{file,File},{mode,read_write},{type,halt}]),
- ?line ok = writable(File),
+ {ok,n} = disk_log:open([{name,n},{file,File},
+ {mode,read_write},{type,halt}]),
+ ok = disk_log:close(n),
+ ok = unwritable(File),
+ {error, {file_error, File, _}} =
+ disk_log:open([{name,n},{file,File},{mode,read_write},{type,halt}]),
+ ok = writable(File),
file:delete(File),
- ?line {ok,n} = disk_log:open([{name,n},{file,File},{format,external},
- {mode,read_write},{type,halt}]),
- ?line ok = disk_log:close(n),
- ?line ok = unwritable(File),
- ?line {error, {file_error, File, _}} =
+ {ok,n} = disk_log:open([{name,n},{file,File},{format,external},
+ {mode,read_write},{type,halt}]),
+ ok = disk_log:close(n),
+ ok = unwritable(File),
+ {error, {file_error, File, _}} =
disk_log:open([{name,n},{file,File},{format,external},
{mode,read_write},{type,halt}]),
- ?line ok = writable(File),
+ ok = writable(File),
file:delete(File),
ok.
@@ -1953,424 +1890,412 @@ make_file(Dir, File, N) ->
end,
ok = file:close(F).
-open_size(suite) -> [];
-open_size(doc) ->
- ["Test open/1 option size."];
+%% Test open/1 option size.
open_size(Conf) when is_list(Conf) ->
- ?line Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
+ Dir = ?privdir(Conf),
+ File = filename:join(Dir, "n.LOG"),
- ?line No = 4,
- ?line file:delete(File),
- ?line del(File, No), % cleanup
+ No = 4,
+ file:delete(File),
+ del(File, No), % cleanup
%% missing size option
- ?line {error, {badarg, size}} =
+ {error, {badarg, size}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}]),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal},{size, {100, No}}]),
- ?line B = mk_bytes(60),
- ?line ok = disk_log:log_terms(n, [B, B, B, B]),
- ?line ok = disk_log:sync(n),
- ?line ok = disk_log:block(n),
+ B = mk_bytes(60),
+ ok = disk_log:log_terms(n, [B, B, B, B]),
+ ok = disk_log:sync(n),
+ ok = disk_log:block(n),
%% size option does not match existing size file, read_only
- ?line Error1 = {error, {size_mismatch, _, _}} =
+ Error1 = {error, {size_mismatch, _, _}} =
disk_log:open([{name, nn}, {file, File}, {type, wrap},
{mode, read_only}, {format, internal},
{size, {100, No + 1}}]),
- ?line "The given size" ++ _ = format_error(Error1),
- ?line {ok, nn} = disk_log:open([{name, nn}, {file, File}, {type, wrap},
+ "The given size" ++ _ = format_error(Error1),
+ {ok, nn} = disk_log:open([{name, nn}, {file, File}, {type, wrap},
{mode, read_only},
{format, internal},{size, {100, No}}]),
- ?line [_, _, _, _] = get_all_terms1(nn, start, []),
- ?line disk_log:close(nn),
+ [_, _, _, _] = get_all_terms1(nn, start, []),
+ disk_log:close(nn),
- ?line ok = disk_log:unblock(n),
- ?line ok = disk_log:close(n),
+ ok = disk_log:unblock(n),
+ ok = disk_log:close(n),
%% size option does not match existing size file, read_write
- ?line {error, {size_mismatch, _, _}} =
+ {error, {size_mismatch, _, _}} =
disk_log:open([{name, nn}, {file, File}, {type, wrap},
{format, internal}, {size, {100, No + 1}}]),
%% size option does not match existing size file, truncating
- ?line {ok, nn} =
+ {ok, nn} =
disk_log:open([{name, nn}, {file, File}, {type, wrap},
{repair, truncate}, {format, internal},
{size, {100, No + 1}}]),
- ?line ok = disk_log:close(nn),
+ ok = disk_log:close(nn),
- ?line del(File, No),
+ del(File, No),
ok.
-open_truncate(suite) -> [];
-open_truncate(doc) ->
- ["Test open/1 with {repair, truncate}."];
+%% Test open/1 with {repair, truncate}.
open_truncate(Conf) when is_list(Conf) ->
- ?line Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
- ?line No = 4,
- ?line del(File, No), % cleanup
-
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal},{size, {100, No}}]),
- ?line B = mk_bytes(60),
- ?line ok = disk_log:log_terms(n, [B, B, B, B]),
- ?line ok = disk_log:close(n),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {repair,truncate},
- {format, internal},{size, {100, No}}]),
- ?line ok = disk_log:close(n),
- ?line [] = get_all_terms(n, File, wrap),
- ?line del(File, No),
+ Dir = ?privdir(Conf),
+ File = filename:join(Dir, "n.LOG"),
+ No = 4,
+ del(File, No), % cleanup
+
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal},{size, {100, No}}]),
+ B = mk_bytes(60),
+ ok = disk_log:log_terms(n, [B, B, B, B]),
+ ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {repair,truncate},
+ {format, internal},{size, {100, No}}]),
+ ok = disk_log:close(n),
+ [] = get_all_terms(n, File, wrap),
+ del(File, No),
ok.
-
-open_error(suite) -> [];
-open_error(doc) ->
- ["Try some invalid open/1 options."];
+
+%% Try some invalid open/1 options.
open_error(Conf) when is_list(Conf) ->
- ?line Dir = ?privdir(Conf),
+ Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
- ?line No = 4,
- ?line del(File, No), % cleanup
+ File = filename:join(Dir, "n.LOG"),
+ No = 4,
+ del(File, No), % cleanup
- ?line {error, {badarg, name}} = disk_log:open([{file, File}]),
- ?line {error, {badarg, file}} = disk_log:open([{name,{foo,bar}}]),
- ?line {error, {badarg, [{foo,bar}]}} = disk_log:open([{foo,bar}]),
+ {error, {badarg, name}} = disk_log:open([{file, File}]),
+ {error, {badarg, file}} = disk_log:open([{name,{foo,bar}}]),
+ {error, {badarg, [{foo,bar}]}} = disk_log:open([{foo,bar}]),
%% external logs, read_only.
- ?line {error, {file_error, _, enoent}} =
+ {error, {file_error, _, enoent}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{size, {100,No}},
{format, external}, {mode, read_only}]),
- ?line Error5 = {error, {file_error, _, enoent}} =
+ Error5 = {error, {file_error, _, enoent}} =
disk_log:open([{name, n}, {file, File}, {type, halt},
{size, 100},
{format, external}, {mode, read_only}]),
- ?line true = lists:prefix("\"" ++ File, format_error(Error5)),
+ true = lists:prefix("\"" ++ File, format_error(Error5)),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, external},{size, {100, No}}]),
%% Already owner, ignored.
- ?line {ok, n} =
+ {ok, n} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, external}, {size, {100, No}}]),
- ?line Error2 = {error, {name_already_open, n}} =
+ Error2 = {error, {name_already_open, n}} =
disk_log:open([{name, n}, {file, another_file}, {type, wrap},
{format, external}, {size, {100, No}}]),
- ?line "The disk log" ++ _ = format_error(Error2),
- ?line Error1 = {error, {arg_mismatch, notify, false, true}} =
+ "The disk log" ++ _ = format_error(Error2),
+ Error1 = {error, {arg_mismatch, notify, false, true}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, external}, {size, {100, No}}, {notify, true}]),
- ?line "The value" ++ _ = format_error(Error1),
- ?line Error3 = {error, {open_read_write, n}} =
+ "The value" ++ _ = format_error(Error1),
+ Error3 = {error, {open_read_write, n}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{mode, read_only},
{format, external}, {size, {100, No}}]),
- ?line "The disk log" ++ _ = format_error(Error3),
- ?line {error, {badarg, size}} =
+ "The disk log" ++ _ = format_error(Error3),
+ {error, {badarg, size}} =
disk_log:open([{name, n}, {file, File}, {type, halt},
{format, external}, {size, {100, No}}]),
- ?line {error, {arg_mismatch, type, wrap, halt}} =
+ {error, {arg_mismatch, type, wrap, halt}} =
disk_log:open([{name, n}, {file, File}, {type, halt},
{format, external}]),
- ?line {error, {arg_mismatch, format, external, internal}} =
+ {error, {arg_mismatch, format, external, internal}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {100, No}}]),
- ?line {error, {arg_mismatch, repair, true, false}} =
+ {error, {arg_mismatch, repair, true, false}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, external}, {repair, false}]),
- ?line {error, {size_mismatch, {100,4}, {1000,4}}} =
+ {error, {size_mismatch, {100,4}, {1000,4}}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, external}, {size, {1000, No}}]),
- ?line {error, {arg_mismatch, head, none, _}} =
+ {error, {arg_mismatch, head, none, _}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{head, "header"},
{format, external}, {size, {100, No}}]),
- ?line {error, {badarg, size}} =
+ {error, {badarg, size}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, external}, {size, 100}]),
- ?line ok = disk_log:close(n),
+ ok = disk_log:close(n),
- ?line {ok, n} =
+ {ok, n} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{mode, read_only},
{format, external}, {size, {100, No}}]),
- ?line Error4 = {error, {open_read_only, n}} =
+ Error4 = {error, {open_read_only, n}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{mode, read_write},
{format, external}, {size, {100, No}}]),
- ?line "The disk log" ++ _ = format_error(Error4),
- ?line ok = disk_log:close(n),
+ "The disk log" ++ _ = format_error(Error4),
+ ok = disk_log:close(n),
- ?line del(File, No).
+ del(File, No).
-close_race(suite) -> [];
-close_race(doc) ->
- ["Do something quickly after close/1"];
+%% Do something quickly after close/1.
close_race(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
- ?line No = 1,
- ?line del(File, No), % cleanup
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {size, {100,No}}, {notify, true},
- {format, internal}]),
- ?line ok = disk_log:close(n),
- ?line Error1 = {error, no_such_log} = disk_log:close(n),
- ?line "There is no disk" ++ _ = format_error(Error1),
-
- % Pid1 blocks, Pid2 closes without being suspended.
- ?line Pid1 = spawn_link(?MODULE, lserv, [n]),
- ?line Pid2 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = sync_do(Pid1, {open, File}),
- ?line {ok, n} = sync_do(Pid2, {open, File}),
- ?line ok = sync_do(Pid1, block),
- ?line [{_, false}, {_, false}] = sync_do(Pid1, owners),
- ?line ok = sync_do(Pid2, close),
- ?line [{_, false}] = sync_do(Pid1, owners),
- ?line ok = sync_do(Pid1, close),
- ?line sync_do(Pid1, terminate),
- ?line sync_do(Pid2, terminate),
- ?line {error, no_such_log} = disk_log:info(n),
-
- % Pid3 blocks, Pid3 closes. Pid4 should still be ablo to use log.
- ?line Pid3 = spawn_link(?MODULE, lserv, [n]),
- ?line Pid4 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = sync_do(Pid3, {open, File}),
- ?line {ok, n} = sync_do(Pid4, {open, File}),
- ?line ok = sync_do(Pid3, block),
- ?line ok = sync_do(Pid3, close),
- ?line [{_Pid4, false}] = sync_do(Pid4, owners),
- ?line sync_do(Pid3, terminate),
- ?line sync_do(Pid4, terminate),
- ?line {error, no_such_log} = disk_log:info(n),
-
- % Pid5 blocks, Pid5 terminates. Pid6 should still be ablo to use log.
- ?line Pid5 = spawn_link(?MODULE, lserv, [n]),
- ?line Pid6 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = sync_do(Pid5, {open, File}),
- ?line {ok, n} = sync_do(Pid6, {open, File}),
- ?line ok = sync_do(Pid5, block),
- ?line sync_do(Pid5, terminate),
- ?line [{_Pid6, false}] = sync_do(Pid6, owners),
- ?line sync_do(Pid6, terminate),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line del(File, No), % cleanup
+ File = filename:join(Dir, "n.LOG"),
+ No = 1,
+ del(File, No), % cleanup
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {size, {100,No}}, {notify, true},
+ {format, internal}]),
+ ok = disk_log:close(n),
+ Error1 = {error, no_such_log} = disk_log:close(n),
+ "There is no disk" ++ _ = format_error(Error1),
+
+ %% Pid1 blocks, Pid2 closes without being suspended.
+ Pid1 = spawn_link(?MODULE, lserv, [n]),
+ Pid2 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = sync_do(Pid1, {open, File}),
+ {ok, n} = sync_do(Pid2, {open, File}),
+ ok = sync_do(Pid1, block),
+ [{_, false}, {_, false}] = sync_do(Pid1, owners),
+ ok = sync_do(Pid2, close),
+ [{_, false}] = sync_do(Pid1, owners),
+ ok = sync_do(Pid1, close),
+ sync_do(Pid1, terminate),
+ sync_do(Pid2, terminate),
+ {error, no_such_log} = disk_log:info(n),
+
+    %% Pid3 blocks, Pid3 closes. Pid4 should still be able to use the log.
+ Pid3 = spawn_link(?MODULE, lserv, [n]),
+ Pid4 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = sync_do(Pid3, {open, File}),
+ {ok, n} = sync_do(Pid4, {open, File}),
+ ok = sync_do(Pid3, block),
+ ok = sync_do(Pid3, close),
+ [{_Pid4, false}] = sync_do(Pid4, owners),
+ sync_do(Pid3, terminate),
+ sync_do(Pid4, terminate),
+ {error, no_such_log} = disk_log:info(n),
+
+    %% Pid5 blocks, Pid5 terminates. Pid6 should still be able to use the log.
+ Pid5 = spawn_link(?MODULE, lserv, [n]),
+ Pid6 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = sync_do(Pid5, {open, File}),
+ {ok, n} = sync_do(Pid6, {open, File}),
+ ok = sync_do(Pid5, block),
+ sync_do(Pid5, terminate),
+ [{_Pid6, false}] = sync_do(Pid6, owners),
+ sync_do(Pid6, terminate),
+ {error, no_such_log} = disk_log:info(n),
+ del(File, No), % cleanup
ok.
-close_block(suite) -> [];
-close_block(doc) ->
- ["Block, unblock, close, terminate."];
+%% Block, unblock, close, terminate.
close_block(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
+ File = filename:join(Dir, "n.LOG"),
No = 1,
del(File, No), % cleanup
P0 = pps(),
%% One of two owners terminates.
- ?line Pid1 = spawn_link(?MODULE, lserv, [n]),
- ?line Pid2 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = sync_do(Pid1, {open, File}),
- ?line {ok, n} = sync_do(Pid2, {open, File}),
- ?line [_, _] = sync_do(Pid1, owners),
- ?line [_, _] = sync_do(Pid2, owners),
- ?line 0 = sync_do(Pid1, users),
- ?line 0 = sync_do(Pid2, users),
- ?line sync_do(Pid1, terminate),
- ?line [_] = sync_do(Pid2, owners),
- ?line 0 = sync_do(Pid2, users),
- ?line sync_do(Pid2, terminate),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line true = (P0 == pps()),
+ Pid1 = spawn_link(?MODULE, lserv, [n]),
+ Pid2 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = sync_do(Pid1, {open, File}),
+ {ok, n} = sync_do(Pid2, {open, File}),
+ [_, _] = sync_do(Pid1, owners),
+ [_, _] = sync_do(Pid2, owners),
+ 0 = sync_do(Pid1, users),
+ 0 = sync_do(Pid2, users),
+ sync_do(Pid1, terminate),
+ [_] = sync_do(Pid2, owners),
+ 0 = sync_do(Pid2, users),
+ sync_do(Pid2, terminate),
+ {error, no_such_log} = disk_log:info(n),
+ check_pps(P0),
%% Users terminate (no link...).
- ?line Pid3 = spawn_link(?MODULE, lserv, [n]),
- ?line Pid4 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = sync_do(Pid3, {open, File, none}),
- ?line {ok, n} = sync_do(Pid4, {open, File, none}),
- ?line [] = sync_do(Pid3, owners),
- ?line [] = sync_do(Pid4, owners),
- ?line 2 = sync_do(Pid3, users),
- ?line 2 = sync_do(Pid4, users),
- ?line sync_do(Pid3, terminate),
- ?line [] = sync_do(Pid4, owners),
- ?line 2 = sync_do(Pid4, users),
- ?line sync_do(Pid4, terminate),
- ?line disk_log:close(n),
- ?line disk_log:close(n),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line true = (P0 == pps()),
-
- % Blocking owner terminates.
- ?line Pid5 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {linkto, none},{size, {100,No}},
- {format, external}]),
- ?line {ok, n} = sync_do(Pid5, {open, File}),
- ?line ok = sync_do(Pid5, block),
- ?line {blocked, true} = status(n),
- ?line [_] = owners(n),
- ?line sync_do(Pid5, terminate),
- ?line ok = status(n),
- ?line [] = owners(n),
- ?line 1 = users(n),
- ?line ok = disk_log:close(n),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line true = (P0 == pps()),
-
- % Blocking user terminates.
- ?line Pid6 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {size, {100,No}}, {format, external}]),
- ?line {ok, n} = sync_do(Pid6, {open, File, none}),
- ?line ok = sync_do(Pid6, block),
- ?line {blocked, true} = status(n),
- ?line [_] = owners(n),
- ?line 1 = users(n),
- ?line sync_do(Pid6, terminate), % very silently...
- ?line ok = status(n),
- ?line [_] = owners(n),
- ?line 1 = users(n),
- ?line ok = disk_log:close(n),
- ?line [] = owners(n),
- ?line 1 = users(n),
- ?line ok = disk_log:close(n),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line true = (P0 == pps()),
-
- % Blocking owner terminates.
- ?line Pid7 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {linkto, none},
- {size, {100,No}}, {format, external}]),
- ?line {ok, n} = sync_do(Pid7, {open, File}),
- ?line ok = sync_do(Pid7, block),
- ?line {blocked, true} = status(n),
- ?line [_] = owners(n),
- ?line 1 = users(n),
- ?line sync_do(Pid7, terminate),
- ?line ok = status(n),
- ?line [] = owners(n),
- ?line 1 = users(n),
- ?line ok = disk_log:close(n),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line true = (P0 == pps()),
+ Pid3 = spawn_link(?MODULE, lserv, [n]),
+ Pid4 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = sync_do(Pid3, {open, File, none}),
+ {ok, n} = sync_do(Pid4, {open, File, none}),
+ [] = sync_do(Pid3, owners),
+ [] = sync_do(Pid4, owners),
+ 2 = sync_do(Pid3, users),
+ 2 = sync_do(Pid4, users),
+ sync_do(Pid3, terminate),
+ [] = sync_do(Pid4, owners),
+ 2 = sync_do(Pid4, users),
+ sync_do(Pid4, terminate),
+ disk_log:close(n),
+ disk_log:close(n),
+ {error, no_such_log} = disk_log:info(n),
+ check_pps(P0),
+
+ %% Blocking owner terminates.
+ Pid5 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {linkto, none},{size, {100,No}},
+ {format, external}]),
+ {ok, n} = sync_do(Pid5, {open, File}),
+ ok = sync_do(Pid5, block),
+ {blocked, true} = status(n),
+ [_] = owners(n),
+ sync_do(Pid5, terminate),
+ ok = status(n),
+ [] = owners(n),
+ 1 = users(n),
+ ok = disk_log:close(n),
+ {error, no_such_log} = disk_log:info(n),
+ check_pps(P0),
+
+ %% Blocking user terminates.
+ Pid6 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {size, {100,No}}, {format, external}]),
+ {ok, n} = sync_do(Pid6, {open, File, none}),
+ ok = sync_do(Pid6, block),
+ {blocked, true} = status(n),
+ [_] = owners(n),
+ 1 = users(n),
+ sync_do(Pid6, terminate), % very silently...
+ ok = status(n),
+ [_] = owners(n),
+ 1 = users(n),
+ ok = disk_log:close(n),
+ [] = owners(n),
+ 1 = users(n),
+ ok = disk_log:close(n),
+ {error, no_such_log} = disk_log:info(n),
+ check_pps(P0),
+
+ %% Blocking owner terminates.
+ Pid7 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {linkto, none},
+ {size, {100,No}}, {format, external}]),
+ {ok, n} = sync_do(Pid7, {open, File}),
+ ok = sync_do(Pid7, block),
+ {blocked, true} = status(n),
+ [_] = owners(n),
+ 1 = users(n),
+ sync_do(Pid7, terminate),
+ ok = status(n),
+ [] = owners(n),
+ 1 = users(n),
+ ok = disk_log:close(n),
+ {error, no_such_log} = disk_log:info(n),
+ check_pps(P0),
%% Two owners, the blocking one terminates.
- ?line Pid8 = spawn_link(?MODULE, lserv, [n]),
- ?line Pid9 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = sync_do(Pid8, {open, File}),
- ?line {ok, n} = sync_do(Pid9, {open, File}),
- ?line ok = sync_do(Pid8, block),
- ?line {blocked, true} = status(n),
- ?line sync_do(Pid8, terminate),
- ?line ok = status(n),
- ?line [_] = sync_do(Pid9, owners),
- ?line 0 = sync_do(Pid9, users),
- ?line sync_do(Pid9, terminate),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line true = (P0 == pps()),
-
- % Blocking user closes.
- ?line Pid10 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {size, {100,No}}, {format, external}]),
- ?line {ok, n} = sync_do(Pid10, {open, File, none}),
- ?line ok = sync_do(Pid10, block),
- ?line {blocked, true} = status(n),
- ?line [_] = owners(n),
- ?line 1 = users(n),
- ?line ok = sync_do(Pid10, close),
- ?line ok = status(n),
- ?line [_] = owners(n),
- ?line 0 = users(n),
- ?line ok = disk_log:close(n),
- ?line sync_do(Pid10, terminate),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line true = (P0 == pps()),
-
- % Blocking user unblocks and closes.
- ?line Pid11 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {size, {100,No}}, {format, external}]),
- ?line {ok, n} = sync_do(Pid11, {open, File, none}),
- ?line ok = sync_do(Pid11, block),
- ?line {blocked, true} = status(n),
- ?line [_] = owners(n),
- ?line 1 = users(n),
- ?line ok = sync_do(Pid11, unblock),
- ?line ok = sync_do(Pid11, close),
- ?line ok = status(n),
- ?line [_] = owners(n),
- ?line 0 = users(n),
- ?line ok = disk_log:close(n),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line sync_do(Pid11, terminate),
- ?line true = (P0 == pps()),
-
- % Blocking owner closes.
- ?line Pid12 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {linkto, none},
- {size, {100,No}}, {format, external}]),
- ?line {ok, n} = sync_do(Pid12, {open, File}),
- ?line ok = sync_do(Pid12, block),
- ?line {blocked, true} = status(n),
- ?line [_] = owners(n),
- ?line 1 = users(n),
- ?line ok = sync_do(Pid12, close),
- ?line ok = status(n),
- ?line [] = owners(n),
- ?line 1 = users(n),
- ?line ok = disk_log:close(n),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line sync_do(Pid12, terminate),
- ?line true = (P0 == pps()),
-
- % Blocking owner unblocks and closes.
- ?line Pid13 = spawn_link(?MODULE, lserv, [n]),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {linkto, none},
- {size, {100,No}}, {format, external}]),
- ?line {ok, n} = sync_do(Pid13, {open, File}),
- ?line ok = sync_do(Pid13, block),
- ?line {blocked, true} = status(n),
- ?line [_] = owners(n),
- ?line 1 = users(n),
- ?line ok = sync_do(Pid13, unblock),
- ?line ok = sync_do(Pid13, close),
- ?line ok = status(n),
- ?line [] = owners(n),
- ?line 1 = users(n),
- ?line ok = disk_log:close(n),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line sync_do(Pid13, terminate),
- ?line true = (P0 == pps()),
+ Pid8 = spawn_link(?MODULE, lserv, [n]),
+ Pid9 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = sync_do(Pid8, {open, File}),
+ {ok, n} = sync_do(Pid9, {open, File}),
+ ok = sync_do(Pid8, block),
+ {blocked, true} = status(n),
+ sync_do(Pid8, terminate),
+ ok = status(n),
+ [_] = sync_do(Pid9, owners),
+ 0 = sync_do(Pid9, users),
+ sync_do(Pid9, terminate),
+ {error, no_such_log} = disk_log:info(n),
+ check_pps(P0),
+
+ %% Blocking user closes.
+ Pid10 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {size, {100,No}}, {format, external}]),
+ {ok, n} = sync_do(Pid10, {open, File, none}),
+ ok = sync_do(Pid10, block),
+ {blocked, true} = status(n),
+ [_] = owners(n),
+ 1 = users(n),
+ ok = sync_do(Pid10, close),
+ ok = status(n),
+ [_] = owners(n),
+ 0 = users(n),
+ ok = disk_log:close(n),
+ sync_do(Pid10, terminate),
+ {error, no_such_log} = disk_log:info(n),
+ check_pps(P0),
+
+ %% Blocking user unblocks and closes.
+ Pid11 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {size, {100,No}}, {format, external}]),
+ {ok, n} = sync_do(Pid11, {open, File, none}),
+ ok = sync_do(Pid11, block),
+ {blocked, true} = status(n),
+ [_] = owners(n),
+ 1 = users(n),
+ ok = sync_do(Pid11, unblock),
+ ok = sync_do(Pid11, close),
+ ok = status(n),
+ [_] = owners(n),
+ 0 = users(n),
+ ok = disk_log:close(n),
+ {error, no_such_log} = disk_log:info(n),
+ sync_do(Pid11, terminate),
+ check_pps(P0),
+
+ %% Blocking owner closes.
+ Pid12 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {linkto, none},
+ {size, {100,No}}, {format, external}]),
+ {ok, n} = sync_do(Pid12, {open, File}),
+ ok = sync_do(Pid12, block),
+ {blocked, true} = status(n),
+ [_] = owners(n),
+ 1 = users(n),
+ ok = sync_do(Pid12, close),
+ ok = status(n),
+ [] = owners(n),
+ 1 = users(n),
+ ok = disk_log:close(n),
+ {error, no_such_log} = disk_log:info(n),
+ sync_do(Pid12, terminate),
+ check_pps(P0),
+
+ %% Blocking owner unblocks and closes.
+ Pid13 = spawn_link(?MODULE, lserv, [n]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {linkto, none},
+ {size, {100,No}}, {format, external}]),
+ {ok, n} = sync_do(Pid13, {open, File}),
+ ok = sync_do(Pid13, block),
+ {blocked, true} = status(n),
+ [_] = owners(n),
+ 1 = users(n),
+ ok = sync_do(Pid13, unblock),
+ ok = sync_do(Pid13, close),
+ ok = status(n),
+ [] = owners(n),
+ 1 = users(n),
+ ok = disk_log:close(n),
+ {error, no_such_log} = disk_log:info(n),
+ sync_do(Pid13, terminate),
+ check_pps(P0),
del(File, No), % cleanup
ok.
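%% Illustrative sketch (editor's note, not part of the suite): the owners/1
%% and users/1 helpers used above are defined elsewhere in this file; they
%% can be built on the documented disk_log:info/1, roughly as below (the
%% names owners_of/users_of are made up, and the log is assumed to be open).
owners_of(Log) ->
    {owners, Owners} = lists:keyfind(owners, 1, disk_log:info(Log)),
    Owners.

users_of(Log) ->
    {users, N} = lists:keyfind(users, 1, disk_log:info(Log)),
    N.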
-close_deadlock(suite) -> [];
-close_deadlock(doc) ->
- ["OTP-4745. Deadlock with just an ordinary log could happen."];
+%% OTP-4745. Deadlock with just an ordinary log could happen.
close_deadlock(Conf) when is_list(Conf) ->
- ?line true = is_alive(),
+ true = is_alive(),
- ?line PrivDir = ?privdir(Conf),
+ PrivDir = ?privdir(Conf),
- ?line F1 = filename:join(PrivDir, "a.LOG"),
- ?line file:delete(F1),
+ F1 = filename:join(PrivDir, "a.LOG"),
+ file:delete(F1),
Self = self(),
%% One process opens the log at the same time as another process
@@ -2378,11 +2303,11 @@ close_deadlock(Conf) when is_list(Conf) ->
Name = a,
Fun = fun() -> open_close(Self, Name, F1) end,
P = spawn(Fun),
- ?line receive {P, Name} -> ok end,
- ?line {ok, L} = disk_log:open([{name,Name},{file,F1}]),
- ?line ok = disk_log:close(L),
- ?line receive {P, done} -> ok end,
- ?line file:delete(F1),
+ receive {P, Name} -> ok end,
+ {ok, L} = disk_log:open([{name,Name},{file,F1}]),
+ ok = disk_log:close(L),
+ receive {P, done} -> ok end,
+ file:delete(F1),
%% One process opens the log at the same time as another process
%% closes the log due to file error while truncating.
@@ -2390,38 +2315,38 @@ close_deadlock(Conf) when is_list(Conf) ->
%% "work". When it works, as it seems to do right now :), the
%% disk_log_server gets {error, no_such_log}, receives the EXIT
%% message caused by truncate, and tries to open the log again.
- ?line No = 4,
- ?line LDir = F1 ++ ".2",
- ?line file:del_dir(LDir),
- ?line del(F1, No),
- ?line ok = file:make_dir(LDir),
+ No = 4,
+ LDir = F1 ++ ".2",
+ file:del_dir(LDir),
+ del(F1, No),
+ ok = file:make_dir(LDir),
Fun2 = fun() -> open_truncate(Self, Name, F1, No) end,
P2 = spawn(Fun2),
- ?line receive {P2, Name} -> ok end,
- ?line {ok, L} = disk_log:open([{name, Name}, {file, F1}, {type, wrap},
- {format, external}]),
+ receive {P2, Name} -> ok end,
+ {ok, L} = disk_log:open([{name, Name}, {file, F1}, {type, wrap},
+ {format, external}]),
%% Note: truncate causes the disk log process to terminate. One
%% cannot say if open above happened before, after, or during the
%% termination. The link to the owner is removed before termination.
- ?line case disk_log:close(L) of
- ok -> ok;
- {error,no_such_log} ->
- ok
- end,
- ?line receive {P2, done} -> ok end,
- ?line del(F1, No),
- ?line file:del_dir(LDir),
+ case disk_log:close(L) of
+ ok -> ok;
+ {error,no_such_log} ->
+ ok
+ end,
+ receive {P2, done} -> ok end,
+ del(F1, No),
+ file:del_dir(LDir),
%% Do the same thing, this time using distributed logs.
%% (Does not seem to work very well, unfortunately.)
FunD = fun() -> open_close_dist(Self, Name, F1) end,
PD = spawn(FunD),
receive {PD, Name} -> ok end,
- ?line {[_], []} = disk_log:open([{name,Name},{file,F1},
- {distributed,[node()]}]),
- ?line ok = disk_log:close(L),
+ {[_], []} = disk_log:open([{name,Name},{file,F1},
+ {distributed,[node()]}]),
+ ok = disk_log:close(L),
receive {PD, done} -> ok end,
- ?line file:delete(F1),
+ file:delete(F1),
ok.
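%% Illustrative sketch (editor's note, not part of the suite): the open/close
%% race that close_deadlock/1 provokes can be reproduced with a helper such as
%% the one below. The name race_open_close/2 is made up; only documented
%% disk_log calls are used, and either process may end up closing the log last.
race_open_close(LogName, FileName) ->
    Parent = self(),
    Opts = [{name, LogName}, {file, FileName}],
    Racer = spawn_link(fun() ->
                               {ok, LogName} = disk_log:open(Opts),
                               Parent ! {self(), opened},
                               ok = disk_log:close(LogName),
                               Parent ! {self(), done}
                       end),
    receive {Racer, opened} -> ok end,
    %% This open may run before, during, or after the close in the racer.
    {ok, LogName} = disk_log:open(Opts),
    ok = disk_log:close(LogName),
    receive {Racer, done} -> ok end,
    ok.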
@@ -2467,7 +2392,7 @@ sync_do(Pid, Req) ->
end.
lserv(Log) ->
- ?line receive
+ receive
{From, {open, File}} ->
From ! disk_log:open([{name, Log}, {file, File}, {type, wrap},
{size, {100,1}}, {format, external}]);
@@ -2534,664 +2459,662 @@ lserv(Log) ->
lserv(Log).
-error_repair(suite) -> [];
-error_repair(doc) ->
- ["Error while repairing."];
+%% Error while repairing.
error_repair(Conf) when is_list(Conf) ->
- % not all error situations are covered by this test
+ %% not all error situations are covered by this test
DataDir = ?datadir(Conf),
PrivDir = ?privdir(Conf),
- ?line File = filename:join(PrivDir, "n.LOG"),
- ?line No = 4,
- ?line file:delete(File),
- ?line del(File, No), % cleanup
+ File = filename:join(PrivDir, "n.LOG"),
+ No = 4,
+ file:delete(File),
+ del(File, No), % cleanup
- % kurt.LOG is not closed and has four logged items, one is recovered
- ?line copy_wrap_log("kurt.LOG", "n.LOG", No, DataDir, PrivDir),
- ?line {repaired,n,{recovered,1},{badbytes,0}} =
+ %% kurt.LOG is not closed and has four logged items, one is recovered
+ copy_wrap_log("kurt.LOG", "n.LOG", No, DataDir, PrivDir),
+ {repaired,n,{recovered,1},{badbytes,0}} =
disk_log:open([{name, n}, {file, File}, {type, wrap}, {size,{40,No}}]),
- ?line 1 = cur_cnt(n),
- ?line 53 = curb(n),
- ?line 4 = no_items(n),
- ?line ok = disk_log:close(n),
-
- % temporary repair file cannot be created
- ?line copy_wrap_log("kurt.LOG", "n.LOG", No, DataDir, PrivDir),
- ?line Dir = File ++ ".4" ++ ".TMP",
- ?line ok = file:make_dir(Dir),
- ?line P0 = pps(),
- ?line {error, {file_error, _, _}} =
+ 1 = cur_cnt(n),
+ 53 = curb(n),
+ 4 = no_items(n),
+ ok = disk_log:close(n),
+
+ %% temporary repair file cannot be created
+ copy_wrap_log("kurt.LOG", "n.LOG", No, DataDir, PrivDir),
+ Dir = File ++ ".4" ++ ".TMP",
+ ok = file:make_dir(Dir),
+ P0 = pps(),
+ {error, {file_error, _, _}} =
disk_log:open([{name, n}, {file, File}, {type, wrap}, {size,{40,4}}]),
- ?line true = (P0 == pps()),
- ?line del(File, No),
- ?line ok = file:del_dir(Dir),
+ check_pps(P0),
+ del(File, No),
+ ok = file:del_dir(Dir),
+ error_logger:add_report_handler(?MODULE, self()),
%% repair a file
- ?line P1 = pps(),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal}, {size, {40,No}}]),
- ?line ok = disk_log:log_terms(n, [{this,is}]), % first file full
- ?line ok = disk_log:log_terms(n, [{some,terms}]), % second file full
- ?line ok = disk_log:close(n),
- ?line BadFile = add_ext(File, 2), % current file
- ?line set_opened(BadFile),
- ?line crash(BadFile, 28), % the binary is now invalid
- ?line {repaired,n,{recovered,0},{badbytes,26}} =
- disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal}, {size, {40,No}}]),
- ?line ok = disk_log:close(n),
- ?line true = (P1 == pps()),
- ?line del(File, No),
+ P1 = pps(),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal}, {size, {40,No}}]),
+ ok = disk_log:log_terms(n, [{this,is}]), % first file full
+ ok = disk_log:log_terms(n, [{some,terms}]), % second file full
+ ok = disk_log:close(n),
+ BadFile = add_ext(File, 2), % current file
+ set_opened(BadFile),
+ crash(BadFile, 28), % the binary is now invalid
+ {repaired,n,{recovered,0},{badbytes,26}} =
+ disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal}, {size, {40,No}}]),
+ ok = disk_log:close(n),
+ check_pps(P1),
+ del(File, No),
+ receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
+ after 1000 -> ct:fail(failed) end,
%% yet another repair
- ?line P2 = pps(),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal}, {size, {4000,No}}]),
- ?line ok = disk_log:log_terms(n, [{this,is},{some,terms}]),
- ?line ok = disk_log:close(n),
- ?line BadFile2 = add_ext(File, 1), % current file
- ?line set_opened(BadFile2),
- ?line crash(BadFile2, 51), % the second binary is now invalid
- ?line {repaired,n,{recovered,1},{badbytes,26}} =
+ P2 = pps(),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal}, {size, {4000,No}}]),
+ ok = disk_log:log_terms(n, [{this,is},{some,terms}]),
+ ok = disk_log:close(n),
+ BadFile2 = add_ext(File, 1), % current file
+ set_opened(BadFile2),
+ crash(BadFile2, 51), % the second binary is now invalid
+ {repaired,n,{recovered,1},{badbytes,26}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {4000,No}}]),
- ?line ok = disk_log:close(n),
- ?line true = (P2 == pps()),
- ?line del(File, No),
+ ok = disk_log:close(n),
+ check_pps(P2),
+ del(File, No),
+ receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
+ after 1000 -> ct:fail(failed) end,
%% Repair, large term
- ?line Big = term_to_binary(lists:duplicate(66000,$a)),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal}, {size, {40,No}}]),
- ?line ok = disk_log:log_terms(n, [Big]),
- ?line ok = disk_log:close(n),
- ?line set_opened(add_ext(File, 1)),
- ?line {repaired,n,{recovered,1},{badbytes,0}} =
+ Big = term_to_binary(lists:duplicate(66000,$a)),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal}, {size, {40,No}}]),
+ ok = disk_log:log_terms(n, [Big]),
+ ok = disk_log:close(n),
+ set_opened(add_ext(File, 1)),
+ {repaired,n,{recovered,1},{badbytes,0}} =
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {40,No}}]),
- ?line {_, [Got]} = disk_log:chunk(n, start),
- ?line ok = disk_log:close(n),
- ?line Got = Big,
- ?line del(File, No),
+ {_, [Got]} = disk_log:chunk(n, start),
+ ok = disk_log:close(n),
+ Got = Big,
+ del(File, No),
+ receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
+ after 1000 -> ct:fail(failed) end,
%% A term a little smaller than a chunk, then big terms.
- ?line BigSmall = mk_bytes(1024*64-8-12),
- ?line file:delete(File),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}]),
- ?line ok = disk_log:log_terms(n, [BigSmall, Big, Big]),
- ?line ok = disk_log:close(n),
- ?line set_opened(File),
- ?line FileSize = file_size(File),
- ?line crash(File, FileSize-byte_size(Big)-4),
- ?line Error1 = {error, {need_repair, _}} =
+ BigSmall = mk_bytes(1024*64-8-12),
+ file:delete(File),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}]),
+ ok = disk_log:log_terms(n, [BigSmall, Big, Big]),
+ ok = disk_log:close(n),
+ set_opened(File),
+ FileSize = file_size(File),
+ crash(File, FileSize-byte_size(Big)-4),
+ Error1 = {error, {need_repair, _}} =
disk_log:open([{name, n}, {file, File}, {repair, false},
{type, halt}, {format, internal}]),
- ?line "The disk log" ++ _ = format_error(Error1),
- ?line {repaired,n,{recovered,2},{badbytes,132013}} =
+ "The disk log" ++ _ = format_error(Error1),
+ {repaired,n,{recovered,2},{badbytes,132013}} =
disk_log:open([{name, n}, {file, File}, {repair, true},
{type, halt}, {format, internal}]),
- ?line ok = disk_log:close(n),
- ?line file:delete(File),
+ ok = disk_log:close(n),
+ file:delete(File),
+ receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
+ after 1000 -> ct:fail(failed) end,
%% The header is recovered.
- ?line {ok,n} =
+ {ok,n} =
disk_log:open([{name, n}, {file, File}, {type, halt},
{format, internal},
{head_func, {?MODULE, head_fun, [{ok,"head"}]}}]),
- ?line ok = disk_log:log_terms(n, [list,'of',terms]),
- ?line ["head",list,'of',terms] = get_all_terms(n),
- ?line ok = disk_log:close(n),
- ?line set_opened(File),
- ?line crash(File, 30),
- ?line {repaired,n,{recovered,3},{badbytes,16}} =
+ ok = disk_log:log_terms(n, [list,'of',terms]),
+ ["head",list,'of',terms] = get_all_terms(n),
+ ok = disk_log:close(n),
+ set_opened(File),
+ crash(File, 30),
+ {repaired,n,{recovered,3},{badbytes,16}} =
disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal},{repair,true},
+ {format, internal},{repair,true}, {quiet, true},
{head_func, {?MODULE, head_fun, [{ok,"head"}]}}]),
- ?line ["head",'of',terms] = get_all_terms(n),
- ?line ok = disk_log:close(n),
-
+ ["head",'of',terms] = get_all_terms(n),
+ ok = disk_log:close(n),
+ error_logger:delete_report_handler(?MODULE),
file:delete(File),
+ {messages, []} = process_info(self(), messages),
ok.
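%% Illustrative sketch (editor's note, not part of the suite): code that
%% reopens a possibly crashed internal wrap log normally accepts both return
%% forms exercised above. The wrapper name open_or_repair/3 is made up; the
%% option list mirrors the tests.
open_or_repair(Log, File, No) ->
    case disk_log:open([{name, Log}, {file, File}, {type, wrap},
                        {format, internal}, {size, {40, No}}]) of
        {ok, Log} ->
            {ok, Log};
        {repaired, Log, {recovered, Rec}, {badbytes, Bad}} ->
            %% Rec terms were salvaged, Bad bytes were skipped.
            io:format("repaired ~p: recovered=~p badbytes=~p~n", [Log, Rec, Bad]),
            {ok, Log}
    end.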
-
+
set_opened(File) ->
{ok, Fd} = file:open(File, [raw, binary, read, write]),
ok = file:write(Fd, [?LOGMAGIC, ?OPENED]),
ok = file:close(Fd).
-error_log(suite) -> [];
-error_log(doc) ->
- ["Error while repairing."];
+%% File errors while logging, wrapping, truncating, and reopening.
error_log(Conf) when is_list(Conf) ->
- ?line Dir = ?privdir(Conf),
-
- ?line File = filename:join(Dir, "n.LOG"),
- ?line No = 4,
- ?line file:delete(File),
- ?line del(File, No), % cleanup
- ?line LDir = File ++ ".2",
-
- ?line Q = qlen(),
- % dummy just to get all processes "above" disk_log going
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, external},{size, {100, No}}]),
- ?line ok = disk_log:close(n),
- ?line del(File, No),
-
- % inc_wrap_file fails, the external log is not terminated
- ?line P0 = pps(),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, external},{size, {100, No}}]),
- ?line ok = file:make_dir(LDir),
- ?line {error, {file_error, _, _}} = disk_log:inc_wrap_file(n),
- ?line timer:sleep(500),
- ?line ok = disk_log:close(n),
- ?line del(File, No),
-
- % inc_wrap_file fails, the internal log is not terminated, ./File.2/ exists
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal},{size, {100, No}}]),
- ?line {error, {file_error, _, _}} = disk_log:inc_wrap_file(n),
- ?line ok = disk_log:close(n),
- ?line del(File, No),
-
- % truncate fails, the log is terminated, ./File.2/ exists
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, external},{size, {100, No}}]),
- ?line {error, {file_error, _, _}} = disk_log:truncate(n),
- ?line true = (P0 == pps()),
- ?line del(File, No),
+ Dir = ?privdir(Conf),
+
+ File = filename:join(Dir, "n.LOG"),
+ No = 4,
+ file:delete(File),
+ del(File, No), % cleanup
+ LDir = File ++ ".2",
+
+ Q = qlen(),
+ %% dummy just to get all processes "above" disk_log going
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, external},{size, {100, No}}]),
+ ok = disk_log:close(n),
+ del(File, No),
+
+ %% inc_wrap_file fails, the external log is not terminated
+ P0 = pps(),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, external},{size, {100, No}}]),
+ ok = file:make_dir(LDir),
+ {error, {file_error, _, _}} = disk_log:inc_wrap_file(n),
+ timer:sleep(500),
+ ok = disk_log:close(n),
+ del(File, No),
+
+ %% inc_wrap_file fails, the internal log is not terminated, ./File.2/ exists
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal},{size, {100, No}}]),
+ {error, {file_error, _, _}} = disk_log:inc_wrap_file(n),
+ ok = disk_log:close(n),
+ del(File, No),
+
+ %% truncate fails, the log is terminated, ./File.2/ exists
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, external},{size, {100, No}}]),
+ {error, {file_error, _, _}} = disk_log:truncate(n),
+ check_pps(P0),
+ del(File, No),
%% OTP-4880.
- % reopen (rename) fails, the log is terminated, ./File.2/ exists
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, external},{size, 100000}]),
- ?line {error, {file_error, _, eisdir}} = disk_log:reopen(n, LDir),
- ?line true = (P0 == pps()),
- ?line file:delete(File),
+ %% reopen (rename) fails, the log is terminated, ./File.2/ exists
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, external},{size, 100000}]),
+ {error, {file_error, _, eisdir}} = disk_log:reopen(n, LDir),
+ check_pps(P0),
+ file:delete(File),
- ?line B = mk_bytes(60),
+ B = mk_bytes(60),
%% OTP-4880. reopen a wrap log, rename fails
- ?line File2 = filename:join(Dir, "n.LOG2"),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File2}, {type, wrap},
- {format, external},{size, {100, No}}]),
- ?line ok = disk_log:blog_terms(n, [B,B,B]),
- ?line {error, {file_error, _, eisdir}} = disk_log:reopen(n, File),
- ?line {error, no_such_log} = disk_log:close(n),
- ?line del(File2, No),
- ?line del(File, No),
-
- % log, external wrap log, ./File.2/ exists
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, external},{size, {100, No}}]),
- ?line {error, {file_error, _, _}} = disk_log:blog_terms(n, [B,B,B]),
- ?line ok = disk_log:close(n),
- ?line del(File, No),
-
- % log, internal wrap log, ./File.2/ exists
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal},{size, {100, No}}]),
- ?line {error, {file_error, _, _}} = disk_log:log_terms(n, [B,B,B]),
- ?line ok = disk_log:close(n),
- ?line del(File, No),
-
- ?line ok = file:del_dir(LDir),
-
- % can't remove file when changing size
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal},{size, {100, No}}]),
- ?line ok = disk_log:log_terms(n, [B,B,B,B]),
- ?line ok = disk_log:change_size(n, {100, No-2}),
- ?line Three = File ++ ".3",
- ?line ok = file:delete(Three),
- ?line ok = file:make_dir(Three),
- ?line {error, {file_error, _, _}} = disk_log:log_terms(n, [B,B,B]),
- ?line timer:sleep(500),
- ?line ok = disk_log:close(n),
- ?line ok = file:del_dir(Three),
- ?line del(File, No),
- ?line Q = qlen(),
+ File2 = filename:join(Dir, "n.LOG2"),
+ {ok, n} = disk_log:open([{name, n}, {file, File2}, {type, wrap},
+ {format, external},{size, {100, No}}]),
+ ok = disk_log:blog_terms(n, [B,B,B]),
+ {error, {file_error, _, eisdir}} = disk_log:reopen(n, File),
+ {error, no_such_log} = disk_log:close(n),
+ del(File2, No),
+ del(File, No),
+
+ %% log, external wrap log, ./File.2/ exists
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, external},{size, {100, No}}]),
+ {error, {file_error, _, _}} = disk_log:blog_terms(n, [B,B,B]),
+ ok = disk_log:close(n),
+ del(File, No),
+
+ %% log, internal wrap log, ./File.2/ exists
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal},{size, {100, No}}]),
+ {error, {file_error, _, _}} = disk_log:log_terms(n, [B,B,B]),
+ ok = disk_log:close(n),
+ del(File, No),
+
+ ok = file:del_dir(LDir),
+
+ %% can't remove file when changing size
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal},{size, {100, No}}]),
+ ok = disk_log:log_terms(n, [B,B,B,B]),
+ ok = disk_log:change_size(n, {100, No-2}),
+ Three = File ++ ".3",
+ ok = file:delete(Three),
+ ok = file:make_dir(Three),
+ {error, {file_error, _, _}} = disk_log:log_terms(n, [B,B,B]),
+ timer:sleep(500),
+ ok = disk_log:close(n),
+ ok = file:del_dir(Three),
+ del(File, No),
+ Q = qlen(),
ok.
-
-chunk(suite) -> [];
-chunk(doc) ->
- ["Test chunk and chunk_step."];
+
+%% Test chunk and chunk_step.
chunk(Conf) when is_list(Conf) ->
%% See also halt_ro_crash/1 above.
Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
+ File = filename:join(Dir, "n.LOG"),
No = 4,
- ?line B = mk_bytes(60),
- ?line BB = mk_bytes(64000), % 64 kB chunks
- ?line del(File, No),% cleanup
+ B = mk_bytes(60),
+ BB = mk_bytes(64000), % 64 kB chunks
+ del(File, No),% cleanup
%% Make sure chunk_step skips the rest of the binary.
%% OTP-3716. This was a bug...
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal}, {size, {50,No}}]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal}, {size, {50,No}}]),
%% 1, 2 and 3 on file one, 4 on file two.
- ?line ok = disk_log:log_terms(n, [1,2,3,4]),
- ?line {I1, [1]} = disk_log:chunk(n, start, 1),
- ?line [{node,Node}] = disk_log:chunk_info(I1),
- ?line Node = node(),
- ?line Error1 = {error, {no_continuation, foobar}} =
+ ok = disk_log:log_terms(n, [1,2,3,4]),
+ {I1, [1]} = disk_log:chunk(n, start, 1),
+ [{node,Node}] = disk_log:chunk_info(I1),
+ Node = node(),
+ Error1 = {error, {no_continuation, foobar}} =
disk_log:chunk_info(foobar),
- ?line "The term" ++ _ = format_error(Error1),
- ?line {ok, I2} = disk_log:chunk_step(n, I1, 1),
- ?line {error, {badarg, continuation}} = disk_log:chunk_step(n, foobar, 1),
- ?line {I3, [4]} = disk_log:chunk(n, I2, 1),
- ?line {ok, I4} = disk_log:chunk_step(n, I3, -1),
- ?line {_, [1]} = disk_log:chunk(n, I4, 1),
- ?line {error, {badarg, continuation}} = disk_log:bchunk(n, 'begin'),
- ?line {Ib1, [Bin1,Bin2]} = disk_log:bchunk(n, start, 2),
- ?line 1 = binary_to_term(Bin1),
- ?line 2 = binary_to_term(Bin2),
- ?line {ok, Ib2} = disk_log:chunk_step(n, Ib1, 1),
- ?line {Ib3, [Bin3]} = disk_log:bchunk(n, Ib2, 1),
- ?line 4 = binary_to_term(Bin3),
- ?line {ok, Ib4} = disk_log:chunk_step(n, Ib3, -1),
- ?line {_, [Bin4]} = disk_log:bchunk(n, Ib4, 1),
- ?line 1 = binary_to_term(Bin4),
- ?line {Ib5, [Bin1, Bin2, Bin17]} = disk_log:bchunk(n, start),
- ?line 3 = binary_to_term(Bin17),
- ?line {Ib6, [Bin3]} = disk_log:bchunk(n, Ib5, infinity),
- ?line eof = disk_log:bchunk(n, Ib6, infinity),
- ?line ok = disk_log:close(n),
- ?line del(File, No), % cleanup
+ "The term" ++ _ = format_error(Error1),
+ {ok, I2} = disk_log:chunk_step(n, I1, 1),
+ {error, {badarg, continuation}} = disk_log:chunk_step(n, foobar, 1),
+ {I3, [4]} = disk_log:chunk(n, I2, 1),
+ {ok, I4} = disk_log:chunk_step(n, I3, -1),
+ {_, [1]} = disk_log:chunk(n, I4, 1),
+ {error, {badarg, continuation}} = disk_log:bchunk(n, 'begin'),
+ {Ib1, [Bin1,Bin2]} = disk_log:bchunk(n, start, 2),
+ 1 = binary_to_term(Bin1),
+ 2 = binary_to_term(Bin2),
+ {ok, Ib2} = disk_log:chunk_step(n, Ib1, 1),
+ {Ib3, [Bin3]} = disk_log:bchunk(n, Ib2, 1),
+ 4 = binary_to_term(Bin3),
+ {ok, Ib4} = disk_log:chunk_step(n, Ib3, -1),
+ {_, [Bin4]} = disk_log:bchunk(n, Ib4, 1),
+ 1 = binary_to_term(Bin4),
+ {Ib5, [Bin1, Bin2, Bin17]} = disk_log:bchunk(n, start),
+ 3 = binary_to_term(Bin17),
+ {Ib6, [Bin3]} = disk_log:bchunk(n, Ib5, infinity),
+ eof = disk_log:bchunk(n, Ib6, infinity),
+ ok = disk_log:close(n),
+ del(File, No), % cleanup
%% external log, cannot read chunks
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, external}, {size, {100,No}}]),
- ?line {error, {badarg, continuation}} = disk_log:chunk(n, 'begin'),
- ?line {error, {format_external, n}} = disk_log:chunk(n, start),
- ?line Error2 = {error, {not_internal_wrap, n}} =
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, external}, {size, {100,No}}]),
+ {error, {badarg, continuation}} = disk_log:chunk(n, 'begin'),
+ {error, {format_external, n}} = disk_log:chunk(n, start),
+ Error2 = {error, {not_internal_wrap, n}} =
disk_log:chunk_step(n, start, 1),
- ?line "The requested" ++ _ = format_error(Error2),
- ?line ok = disk_log:close(n),
- ?line del(File, No),
+ "The requested" ++ _ = format_error(Error2),
+ ok = disk_log:close(n),
+ del(File, No),
%% wrap, read_write
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal}, {size, {100,No}}]),
- ?line ok = disk_log:log_terms(n, [B,B,B,B]),
- ?line {C1, [_]} = disk_log:chunk(n, start),
- ?line {C2, [_]} = disk_log:chunk(n, C1),
- ?line {C3, [_]} = disk_log:chunk(n, C2),
- ?line {C4, [_]} = disk_log:chunk(n, C3, 1),
- ?line eof = disk_log:chunk(n, C4),
- ?line {C5, [_]} = disk_log:chunk(n, start),
- ?line {ok, C6} = disk_log:chunk_step(n, C5, 1),
- ?line {C7, [_]} = disk_log:chunk(n, C6),
- ?line {ok, C8} = disk_log:chunk_step(n, C7, 1),
- ?line {_, [_]} = disk_log:chunk(n, C8),
- ?line ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal}, {size, {100,No}}]),
+ ok = disk_log:log_terms(n, [B,B,B,B]),
+ {C1, [_]} = disk_log:chunk(n, start),
+ {C2, [_]} = disk_log:chunk(n, C1),
+ {C3, [_]} = disk_log:chunk(n, C2),
+ {C4, [_]} = disk_log:chunk(n, C3, 1),
+ eof = disk_log:chunk(n, C4),
+ {C5, [_]} = disk_log:chunk(n, start),
+ {ok, C6} = disk_log:chunk_step(n, C5, 1),
+ {C7, [_]} = disk_log:chunk(n, C6),
+ {ok, C8} = disk_log:chunk_step(n, C7, 1),
+ {_, [_]} = disk_log:chunk(n, C8),
+ ok = disk_log:close(n),
%% wrap, read_only
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {mode, read_only},
- {format, internal}, {size, {100,No}}]),
- ?line {CC1, [_]} = disk_log:chunk(n, start),
- ?line {CC2, [_]} = disk_log:chunk(n, CC1),
- ?line {CC3, [_]} = disk_log:chunk(n, CC2),
- ?line {CC4, [_]} = disk_log:chunk(n, CC3, 1),
- ?line eof = disk_log:chunk(n, CC4),
- ?line {CC5, [_]} = disk_log:chunk(n, start),
- ?line {ok, CC6} = disk_log:chunk_step(n, CC5, 1),
- ?line {CC7, [_]} = disk_log:chunk(n, CC6),
- ?line {ok, CC8} = disk_log:chunk_step(n, CC7, 1),
- ?line {_, [_]} = disk_log:chunk(n, CC8),
- ?line ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {mode, read_only},
+ {format, internal}, {size, {100,No}}]),
+ {CC1, [_]} = disk_log:chunk(n, start),
+ {CC2, [_]} = disk_log:chunk(n, CC1),
+ {CC3, [_]} = disk_log:chunk(n, CC2),
+ {CC4, [_]} = disk_log:chunk(n, CC3, 1),
+ eof = disk_log:chunk(n, CC4),
+ {CC5, [_]} = disk_log:chunk(n, start),
+ {ok, CC6} = disk_log:chunk_step(n, CC5, 1),
+ {CC7, [_]} = disk_log:chunk(n, CC6),
+ {ok, CC8} = disk_log:chunk_step(n, CC7, 1),
+ {_, [_]} = disk_log:chunk(n, CC8),
+ ok = disk_log:close(n),
%% OTP-3716. A bug: {Error, List} and {Error, List, Bad} could be
%% returned from chunk/2.
%% Magic bytes not OK.
%% File header (8 bytes) OK, item header not OK.
- ?line InvalidFile = add_ext(File, 1),
- ?line crash(InvalidFile, 15),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {mode, read_only},
- {format, internal}, {size, {100,No}}]),
- ?line {_, [], 61} = disk_log:chunk(n, start),
- ?line ok = disk_log:close(n),
+ InvalidFile = add_ext(File, 1),
+ crash(InvalidFile, 15),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {mode, read_only},
+ {format, internal}, {size, {100,No}}]),
+ {_, [], 61} = disk_log:chunk(n, start),
+ ok = disk_log:close(n),
%% read_write...
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal}, {size, {100,No}}]),
- ?line Error3 = {error, {corrupt_log_file, Culprit}} =
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal}, {size, {100,No}}]),
+ Error3 = {error, {corrupt_log_file, Culprit}} =
disk_log:chunk(n, start),
- ?line "The disk log file" ++ _ = format_error(Error3),
- ?line Culprit = InvalidFile,
- ?line ok = disk_log:close(n),
- ?line del(File, No),
+ "The disk log file" ++ _ = format_error(Error3),
+ Culprit = InvalidFile,
+ ok = disk_log:close(n),
+ del(File, No),
%% Two wrap log files, writing the second one, then reading the first
%% one, where a bogus term resides.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal}, {size, {40,No}}]),
- ?line ok = disk_log:log_terms(n, [{this,is}]), % first file full
- ?line ok = disk_log:log_terms(n, [{some,terms}]), % second file full
- ?line 2 = curf(n),
- ?line BadFile = add_ext(File, 1),
- ?line crash(BadFile, 28), % the _binary_ is now invalid
- ?line {error, {corrupt_log_file, BFile}} = disk_log:chunk(n, start, 1),
- ?line BadFile = BFile,
- ?line ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal}, {size, {40,No}}]),
+ ok = disk_log:log_terms(n, [{this,is}]), % first file full
+ ok = disk_log:log_terms(n, [{some,terms}]), % second file full
+ 2 = curf(n),
+ BadFile = add_ext(File, 1),
+ crash(BadFile, 28), % the _binary_ is now invalid
+ {error, {corrupt_log_file, BFile}} = disk_log:chunk(n, start, 1),
+ BadFile = BFile,
+ ok = disk_log:close(n),
%% The same, with a halt log.
- ?line file:delete(File), % cleanup
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}]),
- ?line ok = disk_log:log_terms(n, [{this,is}]),
- ?line ok = disk_log:sync(n),
- ?line crash(File, 28), % the _binary_ is now invalid
- ?line {error, {corrupt_log_file, File2}} = disk_log:chunk(n, start, 1),
- ?line crash(File, 10),
- ?line {error,{corrupt_log_file,_}} = disk_log:bchunk(n, start, 1),
- ?line true = File == File2,
- ?line ok = disk_log:close(n),
- ?line del(File, No),
+ file:delete(File), % cleanup
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}]),
+ ok = disk_log:log_terms(n, [{this,is}]),
+ ok = disk_log:sync(n),
+ crash(File, 28), % the _binary_ is now invalid
+ {error, {corrupt_log_file, File2}} = disk_log:chunk(n, start, 1),
+ crash(File, 10),
+ {error,{corrupt_log_file,_}} = disk_log:bchunk(n, start, 1),
+ true = File == File2,
+ ok = disk_log:close(n),
+ del(File, No),
%% halt, read_write
- ?line file:delete(File), % cleanup
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}]),
- ?line ok = disk_log:log_terms(n, [BB,BB,BB,BB]),
- ?line {D1, [Ch1]} = disk_log:chunk(n, start, 1),
- ?line Ch1 = BB,
- ?line {D2, [Ch2]} = disk_log:chunk(n, D1, 1),
- ?line Ch2 = BB,
- ?line {D3, [Ch3]} = disk_log:chunk(n, D2, 1),
- ?line Ch3 = BB,
- ?line {D4, [Ch4]} = disk_log:chunk(n, D3, 1),
- ?line Ch4 = BB,
- ?line eof = disk_log:chunk(n, D4),
- ?line ok = disk_log:close(n),
+ file:delete(File), % cleanup
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}]),
+ ok = disk_log:log_terms(n, [BB,BB,BB,BB]),
+ {D1, [Ch1]} = disk_log:chunk(n, start, 1),
+ Ch1 = BB,
+ {D2, [Ch2]} = disk_log:chunk(n, D1, 1),
+ Ch2 = BB,
+ {D3, [Ch3]} = disk_log:chunk(n, D2, 1),
+ Ch3 = BB,
+ {D4, [Ch4]} = disk_log:chunk(n, D3, 1),
+ Ch4 = BB,
+ eof = disk_log:chunk(n, D4),
+ ok = disk_log:close(n),
%% halt, read_only
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal},{mode,read_only}]),
- ?line {E1, [Ch5]} = disk_log:chunk(n, start, 1),
- ?line Ch5 = BB,
- ?line {E2, [Ch6]} = disk_log:chunk(n, E1, 1),
- ?line Ch6 = BB,
- ?line {E3, [Ch7]} = disk_log:chunk(n, E2, 1),
- ?line Ch7 = BB,
- ?line {E4, [Ch8]} = disk_log:chunk(n, E3, 1),
- ?line Ch8 = BB,
- ?line eof = disk_log:chunk(n, E4),
- ?line ok = disk_log:close(n),
- ?line file:delete(File), % cleanup
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal},{mode,read_only}]),
+ {E1, [Ch5]} = disk_log:chunk(n, start, 1),
+ Ch5 = BB,
+ {E2, [Ch6]} = disk_log:chunk(n, E1, 1),
+ Ch6 = BB,
+ {E3, [Ch7]} = disk_log:chunk(n, E2, 1),
+ Ch7 = BB,
+ {E4, [Ch8]} = disk_log:chunk(n, E3, 1),
+ Ch8 = BB,
+ eof = disk_log:chunk(n, E4),
+ ok = disk_log:close(n),
+ file:delete(File), % cleanup
%% More than 64 kB term.
- ?line BBB = term_to_binary(lists:duplicate(66000,$a)),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}]),
- ?line ok = disk_log:log_terms(n, [BBB]),
- ?line {F1, [BBB1]} = disk_log:chunk(n, start),
- ?line BBB1 = BBB,
- ?line eof = disk_log:chunk(n, F1),
- ?line ok = disk_log:close(n),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}, {mode, read_only}]),
- ?line {F1r, [BBB2]} = disk_log:chunk(n, start),
- ?line BBB2 = BBB,
- ?line eof = disk_log:chunk(n, F1r),
- ?line ok = disk_log:close(n),
-
- ?line truncate(File, 8192),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}]),
- ?line {error, {corrupt_log_file, _}} = disk_log:chunk(n, start),
- ?line ok = disk_log:close(n),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}, {mode, read_only}]),
- ?line {K1, [], 8176} = disk_log:chunk(n, start),
- ?line eof = disk_log:chunk(n, K1),
- ?line ok = disk_log:close(n),
- ?line file:delete(File), % cleanup
+ BBB = term_to_binary(lists:duplicate(66000,$a)),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}]),
+ ok = disk_log:log_terms(n, [BBB]),
+ {F1, [BBB1]} = disk_log:chunk(n, start),
+ BBB1 = BBB,
+ eof = disk_log:chunk(n, F1),
+ ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}, {mode, read_only}]),
+ {F1r, [BBB2]} = disk_log:chunk(n, start),
+ BBB2 = BBB,
+ eof = disk_log:chunk(n, F1r),
+ ok = disk_log:close(n),
+
+ truncate(File, 8192),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}]),
+ {error, {corrupt_log_file, _}} = disk_log:chunk(n, start),
+ ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}, {mode, read_only}]),
+ {K1, [], 8176} = disk_log:chunk(n, start),
+ eof = disk_log:chunk(n, K1),
+ ok = disk_log:close(n),
+ file:delete(File), % cleanup
%% OTP-3716. A bug: eof in the middle of the last element is not ok.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}]),
- ?line ok = disk_log:log_terms(n, [B,BB]),
- ?line ok = disk_log:close(n),
- ?line truncate(File, 80),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}]),
- ?line {G1, [_]} = disk_log:chunk(n, start, 1),
- ?line {error, {corrupt_log_file, _}} = disk_log:chunk(n, G1, 1),
- ?line ok = disk_log:close(n),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}, {mode, read_only}]),
- ?line {G1r, [_]} = disk_log:chunk(n, start, 1),
- ?line {_, [], 4} = disk_log:chunk(n, G1r, 1),
- ?line ok = disk_log:close(n),
- ?line file:delete(File), % cleanup
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}]),
+ ok = disk_log:log_terms(n, [B,BB]),
+ ok = disk_log:close(n),
+ truncate(File, 80),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}]),
+ {G1, [_]} = disk_log:chunk(n, start, 1),
+ {error, {corrupt_log_file, _}} = disk_log:chunk(n, G1, 1),
+ ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}, {mode, read_only}]),
+ {G1r, [_]} = disk_log:chunk(n, start, 1),
+ {_, [], 4} = disk_log:chunk(n, G1r, 1),
+ ok = disk_log:close(n),
+ file:delete(File), % cleanup
%% Opening a wrap log read-only. The second of four terms is destroyed.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal}, {size, {4000,No}}]),
- ?line ok = disk_log:log_terms(n,
- [{this,is},{some,terms},{on,a},{wrap,file}]),
- ?line ok = disk_log:close(n),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, internal}, {mode, read_only}]),
- ?line CrashFile = add_ext(File, 1),
- ?line crash(CrashFile, 51), % the binary term {some,terms} is now bad
- ?line {H1, [{this,is}], 18} = disk_log:chunk(n, start, 10),
- ?line {H2, [{on,a},{wrap,file}]} = disk_log:chunk(n, H1),
- ?line eof = disk_log:chunk(n, H2),
- ?line ok = disk_log:close(n),
- ?line del(File, No),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal}, {size, {4000,No}}]),
+ ok = disk_log:log_terms(n,
+ [{this,is},{some,terms},{on,a},{wrap,file}]),
+ ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, internal}, {mode, read_only}]),
+ CrashFile = add_ext(File, 1),
+ crash(CrashFile, 51), % the binary term {some,terms} is now bad
+ {H1, [{this,is}], 18} = disk_log:chunk(n, start, 10),
+ {H2, [{on,a},{wrap,file}]} = disk_log:chunk(n, H1),
+ eof = disk_log:chunk(n, H2),
+ ok = disk_log:close(n),
+ del(File, No),
%% The same as last, but with a halt log.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}, {mode, read_write}]),
- ?line ok = disk_log:alog_terms(n, [{this,is},{some,terms}]),
- ?line ok = disk_log:log_terms(n, [{on,a},{halt,file}]),
- ?line ok = disk_log:close(n),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}, {mode, read_only}]),
- ?line crash(File, 51), % the binary term {some,terms} is now bad
- ?line {J1, [{this,is}], 18} = disk_log:chunk(n, start, 10),
- ?line {J2, [{on,a},{halt,file}]} = disk_log:chunk(n, J1),
- ?line eof = disk_log:chunk(n, J2),
- ?line ok = disk_log:close(n),
- ?line file:delete(File),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}, {mode, read_write}]),
+ ok = disk_log:alog_terms(n, [{this,is},{some,terms}]),
+ ok = disk_log:log_terms(n, [{on,a},{halt,file}]),
+ ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}, {mode, read_only}]),
+ crash(File, 51), % the binary term {some,terms} is now bad
+ {J1, [{this,is}], 18} = disk_log:chunk(n, start, 10),
+ {J2, [{on,a},{halt,file}]} = disk_log:chunk(n, J1),
+ eof = disk_log:chunk(n, J2),
+ ok = disk_log:close(n),
+ file:delete(File),
%% OTP-7641. Same as last one, but the size of the bad term is
%% less than ?HEADERSZ (8) bytes.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}, {mode, read_write}]),
- ?line ok = disk_log:alog_terms(n, [{this,is},{s}]),
- ?line ok = disk_log:log_terms(n, [{on,a},{halt,file}]),
- ?line ok = disk_log:close(n),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}, {mode, read_only}]),
- ?line crash(File, 44), % the binary term {s} is now bad
- ?line {J11, [{this,is}], 7} = disk_log:chunk(n, start, 10),
- ?line {J21, [{on,a},{halt,file}]} = disk_log:chunk(n, J11),
- ?line eof = disk_log:chunk(n, J21),
- ?line ok = disk_log:close(n),
- ?line file:delete(File),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}, {mode, read_write}]),
+ ok = disk_log:alog_terms(n, [{this,is},{s}]),
+ ok = disk_log:log_terms(n, [{on,a},{halt,file}]),
+ ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}, {mode, read_only}]),
+ crash(File, 44), % the binary term {s} is now bad
+ {J11, [{this,is}], 7} = disk_log:chunk(n, start, 10),
+ {J21, [{on,a},{halt,file}]} = disk_log:chunk(n, J11),
+ eof = disk_log:chunk(n, J21),
+ ok = disk_log:close(n),
+ file:delete(File),
%% Minimal MD5-protected term, and maximal unprotected term.
%% A chunk ends in the middle of the MD5-sum.
- ?line MD5term = mk_bytes(64*1024-8),
- ?line NotMD5term = mk_bytes((64*1024-8)-1),
- ?line Term2 = mk_bytes((64*1024-8)-16),
- ?line MD5L = [MD5term,NotMD5term,Term2,MD5term,MD5term,NotMD5term],
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}]),
- ?line ok = disk_log:log_terms(n, MD5L),
- ?line true = MD5L == get_all_terms(n),
- ?line ok = disk_log:close(n),
- ?line true = MD5L == get_all_terms(n, File, halt),
- ?line crash(File, 21), % the MD5-sum of the first term is now bad
- ?line true = {tl(MD5L),64*1024-8} == get_all_terms_and_bad(n, File, halt),
- ?line {_,64*1024-8} = get_all_binary_terms_and_bad(n, File, halt),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal}]),
- ?line {error, {corrupt_log_file, _}} = disk_log:chunk(n, start),
- ?line ok = disk_log:close(n),
- ?line file:delete(File),
+ MD5term = mk_bytes(64*1024-8),
+ NotMD5term = mk_bytes((64*1024-8)-1),
+ Term2 = mk_bytes((64*1024-8)-16),
+ MD5L = [MD5term,NotMD5term,Term2,MD5term,MD5term,NotMD5term],
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}]),
+ ok = disk_log:log_terms(n, MD5L),
+ true = MD5L == get_all_terms(n),
+ ok = disk_log:close(n),
+ true = MD5L == get_all_terms(n, File, halt),
+ crash(File, 21), % the MD5-sum of the first term is now bad
+ true = {tl(MD5L),64*1024-8} == get_all_terms_and_bad(n, File, halt),
+ {_,64*1024-8} = get_all_binary_terms_and_bad(n, File, halt),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {format, internal}]),
+ {error, {corrupt_log_file, _}} = disk_log:chunk(n, start),
+ ok = disk_log:close(n),
+ file:delete(File),
%% A file with "old" terms (magic word is MAGICINT).
DataDir = ?datadir(Conf),
OldTermsFileOrig = filename:join(DataDir, "old_terms.LOG"),
OldTermsFile = filename:join(Dir, "old_terms.LOG"),
- ?line copy_file(OldTermsFileOrig, OldTermsFile),
- ?line {[_,_,_,_],0} = get_all_terms_and_bad(n, OldTermsFile, halt),
- ?line {ok, n} = disk_log:open([{name, n}, {file, OldTermsFile},
- {type, halt}, {format, internal}]),
- ?line [_,_,_,_] = get_all_terms(n),
- ?line ok = disk_log:close(n),
- ?line file:delete(OldTermsFile),
+ copy_file(OldTermsFileOrig, OldTermsFile),
+ {[_,_,_,_],0} = get_all_terms_and_bad(n, OldTermsFile, halt),
+ {ok, n} = disk_log:open([{name, n}, {file, OldTermsFile},
+ {type, halt}, {format, internal}]),
+ [_,_,_,_] = get_all_terms(n),
+ ok = disk_log:close(n),
+ file:delete(OldTermsFile),
ok.
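%% Illustrative sketch (editor's note, not part of the suite): reading a whole
%% internal log with chunk/2, tolerating skipped bytes the way the read_only
%% cases above do. The name read_all/1 is made up.
read_all(Log) ->
    read_all(Log, start, []).

read_all(Log, Cont, Acc) ->
    case disk_log:chunk(Log, Cont) of
        eof ->
            lists:reverse(Acc);
        {Cont2, Terms} ->
            read_all(Log, Cont2, lists:reverse(Terms, Acc));
        {Cont2, Terms, _BadBytes} ->
            %% Returned for read_only logs when corrupt bytes were skipped.
            read_all(Log, Cont2, lists:reverse(Terms, Acc));
        {error, _Reason} = Error ->
            Error
    end.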
-error_index(suite) -> [];
-error_index(doc) ->
- ["OTP-5558. Keep the contents of index files after disk crash."];
+%% OTP-5558. Keep the contents of index files after disk crash.
error_index(Conf) when is_list(Conf) ->
- ?line Dir = ?privdir(Conf),
+ Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
- ?line IdxFile = File ++ ".idx",
- ?line No = 4,
- ?line file:delete(File),
- ?line del(File, No), % cleanup
+ File = filename:join(Dir, "n.LOG"),
+ IdxFile = File ++ ".idx",
+ No = 4,
+ file:delete(File),
+ del(File, No), % cleanup
Args = [{name,n},{type,wrap},{size,{100,No}},{file,File}],
- ?line {ok, n} = disk_log:open(Args),
- ?line ok = disk_log:close(n),
- ?line Q = qlen(),
+ {ok, n} = disk_log:open(Args),
+ ok = disk_log:close(n),
+ Q = qlen(),
P0 = pps(),
- ?line ok = file:write_file(IdxFile, <<"abc">>),
- ?line {error, {invalid_index_file, _}} = disk_log:open(Args),
- ?line {error, {invalid_index_file, _}} = disk_log:open(Args),
- ?line {error, {invalid_index_file, _}} = disk_log:open(Args),
-
- ?line del(File, No),
- ?line true = (P0 == pps()),
- ?line true = (Q == qlen()),
+ ok = file:write_file(IdxFile, <<"abc">>),
+ {error, {invalid_index_file, _}} = disk_log:open(Args),
+ {error, {invalid_index_file, _}} = disk_log:open(Args),
+ {error, {invalid_index_file, _}} = disk_log:open(Args),
+
+ del(File, No),
+ check_pps(P0),
+ true = (Q == qlen()),
ok.
-
-truncate(suite) -> [];
-truncate(doc) ->
- ["Test truncate/1 on halt and wrap logs."];
+
+%% Test truncate/1 on halt and wrap logs.
truncate(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line Q = qlen(),
+ Q = qlen(),
Halt = join(Dir, "halt.LOG"),
- % Halt logs.
-
- ?line file:delete(Halt), % cleanup
- ?line {ok, halt} = disk_log:open([{name, halt}, {type, halt}, {file, Halt},
- {head, header}, {notify, true}]),
- ?line infinity = sz(halt),
- ?line ok = disk_log:truncate(halt, tjohej),
- ?line rec(1, {disk_log, node(), halt, {truncated, 1}}),
- ?line ok = disk_log:change_size(halt, 10000),
- ?line 10000 = sz(halt),
- ?line disk_log:close(halt),
- ?line [tjohej] = get_all_terms(halt, Halt, halt),
- ?line file:delete(Halt),
-
- ?line {ok, halt} = disk_log:open([{name, halt}, {type, halt}, {file, Halt},
- {head, header}, {notify, true}]),
- ?line ok = disk_log:truncate(halt),
- ?line rec(1, {disk_log, node(), halt, {truncated, 1}}),
- ?line disk_log:close(halt),
- ?line [header] = get_all_terms(halt, Halt, halt),
- ?line file:delete(Halt),
-
- ?line {ok, halt} = disk_log:open([{name, halt}, {type, halt},
- {file, Halt}, {format, external},
- {head, "header"}, {notify, false}]),
- ?line ok = disk_log:btruncate(halt, "apa"),
- ?line disk_log:close(halt),
- ?line 3 = file_size(Halt),
- ?line file:delete(Halt),
-
+ %% Halt logs.
+
+ file:delete(Halt), % cleanup
+ {ok, halt} = disk_log:open([{name, halt}, {type, halt}, {file, Halt},
+ {head, header}, {notify, true}]),
+ infinity = sz(halt),
+ ok = disk_log:truncate(halt, tjohej),
+ rec(1, {disk_log, node(), halt, {truncated, 1}}),
+ ok = disk_log:change_size(halt, 10000),
+ 10000 = sz(halt),
+ disk_log:close(halt),
+ [tjohej] = get_all_terms(halt, Halt, halt),
+ file:delete(Halt),
+
+ {ok, halt} = disk_log:open([{name, halt}, {type, halt}, {file, Halt},
+ {head, header}, {notify, true}]),
+ ok = disk_log:truncate(halt),
+ rec(1, {disk_log, node(), halt, {truncated, 1}}),
+ disk_log:close(halt),
+ [header] = get_all_terms(halt, Halt, halt),
+ file:delete(Halt),
+
+ {ok, halt} = disk_log:open([{name, halt}, {type, halt},
+ {file, Halt}, {format, external},
+ {head, "header"}, {notify, false}]),
+ ok = disk_log:btruncate(halt, "apa"),
+ disk_log:close(halt),
+ 3 = file_size(Halt),
+ file:delete(Halt),
+
%% Wrap logs.
- ?line File = filename:join(Dir, "n.LOG"),
- ?line No = 4,
- ?line B = mk_bytes(60),
- ?line del(File, No), % cleanup
+ File = filename:join(Dir, "n.LOG"),
+ No = 4,
+ B = mk_bytes(60),
+ del(File, No), % cleanup
%% Internal with header.
- ?line Size = {100, No},
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {head, header}, {notify, true},
- {size, Size}]),
- ?line ok = disk_log:log_terms(n, [B,B,B]),
+ Size = {100, No},
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {head, header}, {notify, true},
+ {size, Size}]),
+ ok = disk_log:log_terms(n, [B,B,B]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(2, {disk_log, node(), n, {wrap, 0}}),
- ?line ok = disk_log:truncate(n, apa),
- ?line rec(1, {disk_log, node(), n, {truncated, 6}}),
- ?line {0, 0} = no_overflows(n),
- ?line 23 = curb(n),
- ?line 1 = curf(n),
- ?line 1 = cur_cnt(n),
- ?line true = (Size == sz(n)),
-
- ?line ok = disk_log:log_terms(n, [B, B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line ok = disk_log:close(n),
- ?line [apa, _, header, _] = get_all_terms(n, File, wrap),
- ?line del(File, No),
-
+ rec(2, {disk_log, node(), n, {wrap, 0}}),
+ ok = disk_log:truncate(n, apa),
+ rec(1, {disk_log, node(), n, {truncated, 6}}),
+ {0, 0} = no_overflows(n),
+ 23 = curb(n),
+ 1 = curf(n),
+ 1 = cur_cnt(n),
+ true = (Size == sz(n)),
+
+ ok = disk_log:log_terms(n, [B, B]),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ ok = disk_log:close(n),
+ [apa, _, header, _] = get_all_terms(n, File, wrap),
+ del(File, No),
+
%% Internal without general header.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {notify, true},
- {size, {100, No}}]),
- ?line ok = disk_log:log_terms(n, [B,B,B]),
- ?line rec(2, {disk_log, node(), n, {wrap, 0}}),
- ?line ok = disk_log:truncate(n, apa),
- ?line rec(1, {disk_log, node(), n, {truncated, 3}}),
- ?line {0, 0} = no_overflows(n),
- ?line 23 = curb(n),
- ?line 1 = curf(n),
- ?line 1 = cur_cnt(n),
- ?line true = (Size == sz(n)),
-
- ?line ok = disk_log:log_terms(n, [B, B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line ok = disk_log:close(n),
- ?line [apa, _, _] = get_all_terms(n, File, wrap),
- ?line del(File, No),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {notify, true},
+ {size, {100, No}}]),
+ ok = disk_log:log_terms(n, [B,B,B]),
+ rec(2, {disk_log, node(), n, {wrap, 0}}),
+ ok = disk_log:truncate(n, apa),
+ rec(1, {disk_log, node(), n, {truncated, 3}}),
+ {0, 0} = no_overflows(n),
+ 23 = curb(n),
+ 1 = curf(n),
+ 1 = cur_cnt(n),
+ true = (Size == sz(n)),
+
+ ok = disk_log:log_terms(n, [B, B]),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ ok = disk_log:close(n),
+ [apa, _, _] = get_all_terms(n, File, wrap),
+ del(File, No),
%% Internal without any header.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {notify, true},
- {size, {100, No}}]),
- ?line ok = disk_log:log_terms(n, [B,B,B]),
- ?line rec(2, {disk_log, node(), n, {wrap, 0}}),
- ?line ok = disk_log:truncate(n),
- ?line rec(1, {disk_log, node(), n, {truncated, 3}}),
- ?line {0, 0} = no_overflows(n),
- ?line 8 = curb(n),
- ?line 1 = curf(n),
- ?line 0 = cur_cnt(n),
- ?line true = (Size == sz(n)),
-
- ?line ok = disk_log:log_terms(n, [B, B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line ok = disk_log:close(n),
- ?line [_, _] = get_all_terms(n, File, wrap),
- ?line del(File, No),
- ?line Q = qlen(),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {notify, true},
+ {size, {100, No}}]),
+ ok = disk_log:log_terms(n, [B,B,B]),
+ rec(2, {disk_log, node(), n, {wrap, 0}}),
+ ok = disk_log:truncate(n),
+ rec(1, {disk_log, node(), n, {truncated, 3}}),
+ {0, 0} = no_overflows(n),
+ 8 = curb(n),
+ 1 = curf(n),
+ 0 = cur_cnt(n),
+ true = (Size == sz(n)),
+
+ ok = disk_log:log_terms(n, [B, B]),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ ok = disk_log:close(n),
+ [_, _] = get_all_terms(n, File, wrap),
+ del(File, No),
+ Q = qlen(),
ok.
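%% Illustrative sketch (editor's note, not part of the suite): with
%% {notify, true} the truncation notifications matched by rec/2 above arrive
%% as ordinary messages. The name await_truncated/1 is made up, and the caller
%% is assumed to have opened Log as an owner with {notify, true}.
await_truncated(Log) ->
    ok = disk_log:truncate(Log, new_head),
    receive
        {disk_log, _Node, Log, {truncated, NoLostItems}} ->
            {ok, NoLostItems}
    after 5000 ->
            timeout
    end.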
-many_users(suite) -> [];
-many_users(doc) ->
- ["Test many users logging and sync:ing at the same time."];
+%% Test many users logging and syncing at the same time.
many_users(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
N = 100,
@@ -3199,32 +3122,32 @@ many_users(Conf) when is_list(Conf) ->
Fun1 = fun(Name, Pid, I) -> disk_log:log(Name, {Pid, I}) end,
Fun2 = fun(Name, Pid, I) -> ok = disk_log:log(Name, {Pid, I}),
disk_log:sync(Name) end,
- ?line {C1, T1} = many(Fun2, NoClients, N, halt, internal, infinity, Dir),
- ?line true = lists:duplicate(NoClients, ok) == C1,
- ?line true = length(T1) == N*NoClients,
- ?line {C2, T2} = many(Fun1, NoClients, N, halt, internal, 1000, Dir),
- ?line true = lists:duplicate(NoClients, {error, {full,"log.LOG"}}) == C2,
- ?line true = length(T2) > 0,
- ?line {C3, T3} = many(Fun2, NoClients, N, wrap, internal,
- {300*NoClients,200}, Dir),
- ?line true = lists:duplicate(NoClients, ok) == C3,
- ?line true = length(T3) == N*NoClients,
+ {C1, T1} = many(Fun2, NoClients, N, halt, internal, infinity, Dir),
+ true = lists:duplicate(NoClients, ok) == C1,
+ true = length(T1) == N*NoClients,
+ {C2, T2} = many(Fun1, NoClients, N, halt, internal, 1000, Dir),
+ true = lists:duplicate(NoClients, {error, {full,"log.LOG"}}) == C2,
+ true = length(T2) > 0,
+ {C3, T3} = many(Fun2, NoClients, N, wrap, internal,
+ {300*NoClients,200}, Dir),
+ true = lists:duplicate(NoClients, ok) == C3,
+ true = length(T3) == N*NoClients,
ok.
many(Fun, NoClients, N, Type, Format, Size, Dir) ->
Name = "log.LOG",
File = filename:join(Dir, Name),
del_files(Size, File),
- ?line Q = qlen(),
- ?line {ok, _} = disk_log:open([{name,Name}, {type,Type}, {size,Size},
- {format,Format}, {file,File}]),
- ?line Pids = spawn_clients(NoClients, client, [self(), Name, N, Fun]),
- ?line Checked = check_clients(Pids),
- ?line ok = disk_log:close(Name),
- ?line Terms = get_all_terms(Name, File, Type),
- ?line del_files(Size, File),
- ?line Q = qlen(),
- ?line {Checked, Terms}.
+ Q = qlen(),
+ {ok, _} = disk_log:open([{name,Name}, {type,Type}, {size,Size},
+ {format,Format}, {file,File}]),
+ Pids = spawn_clients(NoClients, client, [self(), Name, N, Fun]),
+ Checked = check_clients(Pids),
+ ok = disk_log:close(Name),
+ Terms = get_all_terms(Name, File, Type),
+ del_files(Size, File),
+ Q = qlen(),
+ {Checked, Terms}.
spawn_clients(0, _F, _A) ->
[];
@@ -3251,211 +3174,208 @@ del_files(_Size, File) ->
-info_current(suite) -> [];
-info_current(doc) ->
- ["Test no_current_{bytes, items} as returned by info/0."];
+%% Test no_current_{bytes, items} as returned by info/0.
info_current(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
+ File = filename:join(Dir, "n.LOG"),
No = 4,
B = mk_bytes(60),
BB = mk_bytes(160), % bigger than a single wrap log file
SB = mk_bytes(10), % much smaller than a single wrap log file
- ?line del(File, No),% cleanup
+ del(File, No),% cleanup
- ?line Q = qlen(),
+ Q = qlen(),
%% Internal with header.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {head, header}, {size, {100,No}}]),
- ?line {26, 1} = {curb(n), cur_cnt(n)},
- ?line {1, 1} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log(n, B),
- ?line {94, 2} = {curb(n), cur_cnt(n)},
- ?line {2, 2} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:close(n),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {notify, true},
- {head, header}, {size, {100,No}}]),
- ?line {94, 2} = {curb(n), cur_cnt(n)},
- ?line {0, 2} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log(n, B),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line {94, 2} = {curb(n), cur_cnt(n)},
- ?line {2, 4} = {no_written_items(n), no_items(n)},
- ?line disk_log:inc_wrap_file(n),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line {26, 1} = {curb(n), cur_cnt(n)},
- ?line {3, 4} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log_terms(n, [B,B,B]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {head, header}, {size, {100,No}}]),
+ {26, 1} = {curb(n), cur_cnt(n)},
+ {1, 1} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log(n, B),
+ {94, 2} = {curb(n), cur_cnt(n)},
+ {2, 2} = {no_written_items(n), no_items(n)},
+ ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {notify, true},
+ {head, header}, {size, {100,No}}]),
+ {94, 2} = {curb(n), cur_cnt(n)},
+ {0, 2} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log(n, B),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ {94, 2} = {curb(n), cur_cnt(n)},
+ {2, 4} = {no_written_items(n), no_items(n)},
+ disk_log:inc_wrap_file(n),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ {26, 1} = {curb(n), cur_cnt(n)},
+ {3, 4} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log_terms(n, [B,B,B]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line rec(1, {disk_log, node(), n, {wrap, 2}}),
- ?line {94, 2} = {curb(n), cur_cnt(n)},
- ?line {8, 7} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log_terms(n, [B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 2}}),
- ?line ok = disk_log:log_terms(n, [B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 2}}),
- ?line {94, 2} = {curb(n), cur_cnt(n)},
- ?line {12, 7} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log_terms(n, [BB,BB]),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ rec(1, {disk_log, node(), n, {wrap, 2}}),
+ {94, 2} = {curb(n), cur_cnt(n)},
+ {8, 7} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log_terms(n, [B]),
+ rec(1, {disk_log, node(), n, {wrap, 2}}),
+ ok = disk_log:log_terms(n, [B]),
+ rec(1, {disk_log, node(), n, {wrap, 2}}),
+ {94, 2} = {curb(n), cur_cnt(n)},
+ {12, 7} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log_terms(n, [BB,BB]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(2, {disk_log, node(), n, {wrap, 2}}),
- ?line {194, 2} = {curb(n), cur_cnt(n)},
- ?line {16, 7} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log_terms(n, [SB,SB,SB]),
- ?line rec(1, {disk_log, node(), n, {wrap, 2}}),
- ?line {80, 4} = {curb(n), cur_cnt(n)},
- ?line {20, 9} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:close(n),
- ?line del(File, No),
+ rec(2, {disk_log, node(), n, {wrap, 2}}),
+ {194, 2} = {curb(n), cur_cnt(n)},
+ {16, 7} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log_terms(n, [SB,SB,SB]),
+ rec(1, {disk_log, node(), n, {wrap, 2}}),
+ {80, 4} = {curb(n), cur_cnt(n)},
+ {20, 9} = {no_written_items(n), no_items(n)},
+ ok = disk_log:close(n),
+ del(File, No),
%% Internal without header.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {size, {100,No}}]),
- ?line {8, 0} = {curb(n), cur_cnt(n)},
- ?line {0, 0} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log(n, B),
- ?line {76, 1} = {curb(n), cur_cnt(n)},
- ?line {1, 1} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:close(n),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {notify, true}, {size, {100,No}}]),
- ?line {76, 1} = {curb(n), cur_cnt(n)},
- ?line {0, 1} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log(n, B),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line {76, 1} = {curb(n), cur_cnt(n)},
- ?line {1, 2} = {no_written_items(n), no_items(n)},
- ?line disk_log:inc_wrap_file(n),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line {8, 0} = {curb(n), cur_cnt(n)},
- ?line {1, 2} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log_terms(n, [B,B,B]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {size, {100,No}}]),
+ {8, 0} = {curb(n), cur_cnt(n)},
+ {0, 0} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log(n, B),
+ {76, 1} = {curb(n), cur_cnt(n)},
+ {1, 1} = {no_written_items(n), no_items(n)},
+ ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {notify, true}, {size, {100,No}}]),
+ {76, 1} = {curb(n), cur_cnt(n)},
+ {0, 1} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log(n, B),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ {76, 1} = {curb(n), cur_cnt(n)},
+ {1, 2} = {no_written_items(n), no_items(n)},
+ disk_log:inc_wrap_file(n),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ {8, 0} = {curb(n), cur_cnt(n)},
+ {1, 2} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log_terms(n, [B,B,B]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line rec(1, {disk_log, node(), n, {wrap, 1}}),
- ?line {76, 1} = {curb(n), cur_cnt(n)},
- ?line {4, 4} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log_terms(n, [B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 1}}),
- ?line ok = disk_log:log_terms(n, [B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 1}}),
- ?line {76, 1} = {curb(n), cur_cnt(n)},
- ?line {6, 4} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log_terms(n, [BB,BB]),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ rec(1, {disk_log, node(), n, {wrap, 1}}),
+ {76, 1} = {curb(n), cur_cnt(n)},
+ {4, 4} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log_terms(n, [B]),
+ rec(1, {disk_log, node(), n, {wrap, 1}}),
+ ok = disk_log:log_terms(n, [B]),
+ rec(1, {disk_log, node(), n, {wrap, 1}}),
+ {76, 1} = {curb(n), cur_cnt(n)},
+ {6, 4} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log_terms(n, [BB,BB]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(2, {disk_log, node(), n, {wrap, 1}}),
- ?line {176, 1} = {curb(n), cur_cnt(n)},
- ?line {8, 4} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:log_terms(n, [SB,SB,SB]),
- ?line rec(1, {disk_log, node(), n, {wrap, 1}}),
- ?line {62, 3} = {curb(n), cur_cnt(n)},
- ?line {11, 6} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:close(n),
- ?line del(File, No),
+ rec(2, {disk_log, node(), n, {wrap, 1}}),
+ {176, 1} = {curb(n), cur_cnt(n)},
+ {8, 4} = {no_written_items(n), no_items(n)},
+ ok = disk_log:log_terms(n, [SB,SB,SB]),
+ rec(1, {disk_log, node(), n, {wrap, 1}}),
+ {62, 3} = {curb(n), cur_cnt(n)},
+ {11, 6} = {no_written_items(n), no_items(n)},
+ ok = disk_log:close(n),
+ del(File, No),
%% External with header.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, external}, {head, "header"},
- {size, {100,No}}]),
- ?line {6, 1} = {curb(n), cur_cnt(n)},
- ?line {1, 1} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog(n, B),
- ?line {62, 2} = {curb(n), cur_cnt(n)},
- ?line {2, 2} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:close(n),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, external}, {head, "header"},
- {notify, true}, {size, {100,No}}]),
- ?line {62, 2} = {curb(n), cur_cnt(n)},
- ?line {0, 2} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog(n, B),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line {62, 2} = {curb(n), cur_cnt(n)},
- ?line {2, 4} = {no_written_items(n), no_items(n)},
- ?line disk_log:inc_wrap_file(n),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line {6, 1} = {curb(n), cur_cnt(n)},
- ?line {3, 4} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog_terms(n, [B,B,B]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, external}, {head, "header"},
+ {size, {100,No}}]),
+ {6, 1} = {curb(n), cur_cnt(n)},
+ {1, 1} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog(n, B),
+ {62, 2} = {curb(n), cur_cnt(n)},
+ {2, 2} = {no_written_items(n), no_items(n)},
+ ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, external}, {head, "header"},
+ {notify, true}, {size, {100,No}}]),
+ {62, 2} = {curb(n), cur_cnt(n)},
+ {0, 2} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog(n, B),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ {62, 2} = {curb(n), cur_cnt(n)},
+ {2, 4} = {no_written_items(n), no_items(n)},
+ disk_log:inc_wrap_file(n),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ {6, 1} = {curb(n), cur_cnt(n)},
+ {3, 4} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog_terms(n, [B,B,B]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line rec(1, {disk_log, node(), n, {wrap, 2}}),
- ?line {62, 2} = {curb(n), cur_cnt(n)},
- ?line {8, 7} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog_terms(n, [B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 2}}),
- ?line ok = disk_log:blog_terms(n, [B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 2}}),
- ?line {62, 2} = {curb(n), cur_cnt(n)},
- ?line {12, 7} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog_terms(n, [BB,BB]),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ rec(1, {disk_log, node(), n, {wrap, 2}}),
+ {62, 2} = {curb(n), cur_cnt(n)},
+ {8, 7} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog_terms(n, [B]),
+ rec(1, {disk_log, node(), n, {wrap, 2}}),
+ ok = disk_log:blog_terms(n, [B]),
+ rec(1, {disk_log, node(), n, {wrap, 2}}),
+ {62, 2} = {curb(n), cur_cnt(n)},
+ {12, 7} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog_terms(n, [BB,BB]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(2, {disk_log, node(), n, {wrap, 2}}),
- ?line {162, 2} = {curb(n), cur_cnt(n)},
- ?line {16, 7} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog_terms(n, [SB,SB,SB]),
-
- ?line rec(1, {disk_log, node(), n, {wrap, 2}}),
- ?line {24, 4} = {curb(n), cur_cnt(n)},
- ?line {20, 9} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:close(n),
- ?line del(File, No),
+ rec(2, {disk_log, node(), n, {wrap, 2}}),
+ {162, 2} = {curb(n), cur_cnt(n)},
+ {16, 7} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog_terms(n, [SB,SB,SB]),
+
+ rec(1, {disk_log, node(), n, {wrap, 2}}),
+ {24, 4} = {curb(n), cur_cnt(n)},
+ {20, 9} = {no_written_items(n), no_items(n)},
+ ok = disk_log:close(n),
+ del(File, No),
%% External without header.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {format, external}, {size, {100,No}}]),
- ?line {0, 0} = {curb(n), cur_cnt(n)},
- ?line {0, 0} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog(n, B),
- ?line {56, 1} = {curb(n), cur_cnt(n)},
- ?line {1, 1} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:close(n),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
- {notify, true},
- {format, external}, {size, {100,No}}]),
- ?line {56, 1} = {curb(n), cur_cnt(n)},
- ?line {0, 1} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog(n, B),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line {56, 1} = {curb(n), cur_cnt(n)},
- ?line {1, 2} = {no_written_items(n), no_items(n)},
- ?line disk_log:inc_wrap_file(n),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line {0, 0} = {curb(n), cur_cnt(n)},
- ?line {1, 2} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog_terms(n, [B,B,B]),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {format, external}, {size, {100,No}}]),
+ {0, 0} = {curb(n), cur_cnt(n)},
+ {0, 0} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog(n, B),
+ {56, 1} = {curb(n), cur_cnt(n)},
+ {1, 1} = {no_written_items(n), no_items(n)},
+ ok = disk_log:close(n),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {notify, true},
+ {format, external}, {size, {100,No}}]),
+ {56, 1} = {curb(n), cur_cnt(n)},
+ {0, 1} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog(n, B),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ {56, 1} = {curb(n), cur_cnt(n)},
+ {1, 2} = {no_written_items(n), no_items(n)},
+ disk_log:inc_wrap_file(n),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ {0, 0} = {curb(n), cur_cnt(n)},
+ {1, 2} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog_terms(n, [B,B,B]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line rec(1, {disk_log, node(), n, {wrap, 1}}),
- ?line {56, 1} = {curb(n), cur_cnt(n)},
- ?line {4, 4} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog_terms(n, [B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 1}}),
- ?line ok = disk_log:blog_terms(n, [B]),
- ?line rec(1, {disk_log, node(), n, {wrap, 1}}),
- ?line {56, 1} = {curb(n), cur_cnt(n)},
- ?line {6, 4} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog_terms(n, [BB,BB]),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ rec(1, {disk_log, node(), n, {wrap, 1}}),
+ {56, 1} = {curb(n), cur_cnt(n)},
+ {4, 4} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog_terms(n, [B]),
+ rec(1, {disk_log, node(), n, {wrap, 1}}),
+ ok = disk_log:blog_terms(n, [B]),
+ rec(1, {disk_log, node(), n, {wrap, 1}}),
+ {56, 1} = {curb(n), cur_cnt(n)},
+ {6, 4} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog_terms(n, [BB,BB]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(2, {disk_log, node(), n, {wrap, 1}}),
- ?line {156, 1} = {curb(n), cur_cnt(n)},
- ?line {8, 4} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:blog_terms(n, [SB,SB,SB]),
- ?line rec(1, {disk_log, node(), n, {wrap, 1}}),
- ?line {18, 3} = {curb(n), cur_cnt(n)},
- ?line {11, 6} = {no_written_items(n), no_items(n)},
- ?line ok = disk_log:close(n),
- ?line del(File, No),
-
- ?line Q = qlen(),
+ rec(2, {disk_log, node(), n, {wrap, 1}}),
+ {156, 1} = {curb(n), cur_cnt(n)},
+ {8, 4} = {no_written_items(n), no_items(n)},
+ ok = disk_log:blog_terms(n, [SB,SB,SB]),
+ rec(1, {disk_log, node(), n, {wrap, 1}}),
+ {18, 3} = {curb(n), cur_cnt(n)},
+ {11, 6} = {no_written_items(n), no_items(n)},
+ ok = disk_log:close(n),
+ del(File, No),
+
+ Q = qlen(),
ok.
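
Aside (not part of the patch): the counter checks above rely on suite helpers such as curb/1, cur_cnt/1, no_written_items/1 and no_items/1. A minimal sketch of how such helpers could be written on top of disk_log:info/1 follows; the helper names are assumptions, only disk_log:info/1 and its documented keys are real API.

info_value(Log, Key) ->
    %% disk_log:info/1 returns a property list; pick out one counter.
    {Key, Value} = lists:keyfind(Key, 1, disk_log:info(Log)),
    Value.

curb(Log)             -> info_value(Log, no_current_bytes).
cur_cnt(Log)          -> info_value(Log, no_current_items).
no_written_items(Log) -> info_value(Log, no_written_items).
no_items(Log)         -> info_value(Log, no_items).
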
-change_size_before(suite) -> [];
change_size_before(doc) ->
["Change size of a wrap log file before we have reached "
"to the file index corresponding to the new size"];
@@ -3478,138 +3398,136 @@ change_size_before(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
del(File, 5),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File},
- {type, wrap}, {size, {100,5}}]),
- ?line disk_log:log(a, Log_1_1),
- ?line disk_log:log(a, Log_1_2),
- ?line disk_log:log(a, Log_2_1),
- ?line disk_log:log(a, Log_2_2),
- ?line disk_log:change_size(a, {100, 3}),
- ?line [Log_1_1, Log_1_2,
- Log_2_1, Log_2_2] = get_all_terms(a),
- ?line disk_log:log(a, Log_3_1),
- ?line disk_log:log(a, Log_3_2),
- ?line disk_log:log(a, Log_1_2_1),
- ?line disk_log:log(a, Log_1_2_2),
- ?line [Log_2_1, Log_2_2,
- Log_3_1, Log_3_2,
- Log_1_2_1, Log_1_2_2] = get_all_terms(a),
-
- ?line disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {100,3}}]),
- ?line [Log_2_1, Log_2_2,
- Log_3_1, Log_3_2,
- Log_1_2_1, Log_1_2_2] = get_all_terms(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File},
+ {type, wrap}, {size, {100,5}}]),
+ disk_log:log(a, Log_1_1),
+ disk_log:log(a, Log_1_2),
+ disk_log:log(a, Log_2_1),
+ disk_log:log(a, Log_2_2),
+ disk_log:change_size(a, {100, 3}),
+ [Log_1_1, Log_1_2,
+ Log_2_1, Log_2_2] = get_all_terms(a),
+ disk_log:log(a, Log_3_1),
+ disk_log:log(a, Log_3_2),
+ disk_log:log(a, Log_1_2_1),
+ disk_log:log(a, Log_1_2_2),
+ [Log_2_1, Log_2_2,
+ Log_3_1, Log_3_2,
+ Log_1_2_1, Log_1_2_2] = get_all_terms(a),
+
+ disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {100,3}}]),
+ [Log_2_1, Log_2_2,
+ Log_3_1, Log_3_2,
+ Log_1_2_1, Log_1_2_2] = get_all_terms(a),
disk_log:close(a),
del(File, 5),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {60,5}}, {format, external}]),
- ?line disk_log:blog(a, Log_1_1),
- ?line disk_log:blog(a, Log_1_2),
- ?line disk_log:blog(a, Log_2_1),
- ?line disk_log:blog(a, Log_2_2),
- ?line disk_log:change_size(a, {60, 3}),
- ?line ok = disk_log:sync(a),
- ?line {ok, Fd1} = file:open(File ++ ".1", [read]),
- ?line Log11_12 = Log_1_1 ++ Log_1_2,
- ?line {ok,Log11_12} = file:read(Fd1, 200),
- ?line ok = file:close(Fd1),
- ?line {ok, Fd2} = file:open(File ++ ".2", [read]),
-% ?t:format(0, "~p~n",[file:read(Fd2, 200)]),
- ?line Log21_22 = Log_2_1 ++ Log_2_2,
- ?line {ok,Log21_22} = file:read(Fd2, 200),
- ?line ok = file:close(Fd2),
- ?line disk_log:blog(a, Log_3_1),
- ?line disk_log:blog(a, Log_3_2),
- ?line disk_log:blog(a, Log_1_2_1),
- ?line disk_log:blog(a, Log_1_2_2),
- ?line ok = disk_log:sync(a),
- ?line {ok, Fd2a} = file:open(File ++ ".2", [read]),
- ?line {ok,Log21_22} = file:read(Fd2a, 200),
- ?line ok = file:close(Fd2a),
- ?line {ok, Fd3a} = file:open(File ++ ".3", [read]),
- ?line Log31_32 = Log_3_1 ++ Log_3_2,
- ?line {ok,Log31_32} = file:read(Fd3a, 200),
- ?line ok = file:close(Fd3a),
- ?line {ok, Fd1a} = file:open(File ++ ".1", [read]),
- ?line Log121_122 = Log_1_2_1 ++ Log_1_2_2,
- ?line {ok,Log121_122} = file:read(Fd1a, 200),
- ?line ok = file:close(Fd1a),
-
- ?line disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {60,3}}, {format, external}]),
- ?line {ok, Fd2b} = file:open(File ++ ".2", [read]),
- ?line {ok,Log21_22} = file:read(Fd2b, 200),
- ?line ok = file:close(Fd2b),
- ?line {ok, Fd3b} = file:open(File ++ ".3", [read]),
- ?line {ok,Log31_32} = file:read(Fd3b, 200),
- ?line ok = file:close(Fd3b),
- ?line {ok, Fd1b} = file:open(File ++ ".1", [read]),
- ?line {ok,Log121_122} = file:read(Fd1b, 200),
- ?line ok = file:close(Fd1b),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {60,5}}, {format, external}]),
+ disk_log:blog(a, Log_1_1),
+ disk_log:blog(a, Log_1_2),
+ disk_log:blog(a, Log_2_1),
+ disk_log:blog(a, Log_2_2),
+ disk_log:change_size(a, {60, 3}),
+ ok = disk_log:sync(a),
+ {ok, Fd1} = file:open(File ++ ".1", [read]),
+ Log11_12 = Log_1_1 ++ Log_1_2,
+ {ok,Log11_12} = file:read(Fd1, 200),
+ ok = file:close(Fd1),
+ {ok, Fd2} = file:open(File ++ ".2", [read]),
+ Log21_22 = Log_2_1 ++ Log_2_2,
+ {ok,Log21_22} = file:read(Fd2, 200),
+ ok = file:close(Fd2),
+ disk_log:blog(a, Log_3_1),
+ disk_log:blog(a, Log_3_2),
+ disk_log:blog(a, Log_1_2_1),
+ disk_log:blog(a, Log_1_2_2),
+ ok = disk_log:sync(a),
+ {ok, Fd2a} = file:open(File ++ ".2", [read]),
+ {ok,Log21_22} = file:read(Fd2a, 200),
+ ok = file:close(Fd2a),
+ {ok, Fd3a} = file:open(File ++ ".3", [read]),
+ Log31_32 = Log_3_1 ++ Log_3_2,
+ {ok,Log31_32} = file:read(Fd3a, 200),
+ ok = file:close(Fd3a),
+ {ok, Fd1a} = file:open(File ++ ".1", [read]),
+ Log121_122 = Log_1_2_1 ++ Log_1_2_2,
+ {ok,Log121_122} = file:read(Fd1a, 200),
+ ok = file:close(Fd1a),
+
+ disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {60,3}}, {format, external}]),
+ {ok, Fd2b} = file:open(File ++ ".2", [read]),
+ {ok,Log21_22} = file:read(Fd2b, 200),
+ ok = file:close(Fd2b),
+ {ok, Fd3b} = file:open(File ++ ".3", [read]),
+ {ok,Log31_32} = file:read(Fd3b, 200),
+ ok = file:close(Fd3b),
+ {ok, Fd1b} = file:open(File ++ ".1", [read]),
+ {ok,Log121_122} = file:read(Fd1b, 200),
+ ok = file:close(Fd1b),
disk_log:close(a),
del(File, 5),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,5}}]),
- ?line disk_log:log(a, Log_1_1),
- ?line disk_log:log(a, Log_1_2),
- ?line disk_log:log(a, Log_2_1),
- ?line disk_log:log(a, Log_2_2),
- ?line disk_log:change_size(a, {60, 3}),
- ?line [Log_1_1, Log_1_2,
- Log_2_1, Log_2_2] = get_all_terms(a),
- ?line disk_log:log(a, Log_3_1),
- ?line disk_log:log(a, Log_1_2_1),
- ?line [Log_2_1, Log_2_2,
- Log_3_1,
- Log_1_2_1] = get_all_terms(a),
-
- ?line disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {60,3}}]),
- ?line [Log_2_1, Log_2_2,
- Log_3_1,
- Log_1_2_1] = get_all_terms(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,5}}]),
+ disk_log:log(a, Log_1_1),
+ disk_log:log(a, Log_1_2),
+ disk_log:log(a, Log_2_1),
+ disk_log:log(a, Log_2_2),
+ disk_log:change_size(a, {60, 3}),
+ [Log_1_1, Log_1_2,
+ Log_2_1, Log_2_2] = get_all_terms(a),
+ disk_log:log(a, Log_3_1),
+ disk_log:log(a, Log_1_2_1),
+ [Log_2_1, Log_2_2,
+ Log_3_1,
+ Log_1_2_1] = get_all_terms(a),
+
+ disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {60,3}}]),
+ [Log_2_1, Log_2_2,
+ Log_3_1,
+ Log_1_2_1] = get_all_terms(a),
disk_log:close(a),
del(File, 5),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {60, 3}}]),
- ?line disk_log:log(a, Log_1_1),
- ?line disk_log:log(a, Log_2_1),
- ?line disk_log:change_size(a, {100, 5}),
- ?line [Log_1_1,
- Log_2_1] = get_all_terms(a),
- ?line disk_log:log(a, Log_2_2),
- ?line disk_log:log(a, Log_3_1),
- ?line disk_log:log(a, Log_3_2),
- ?line disk_log:log(a, Log_4_1),
- ?line disk_log:log(a, Log_4_2),
- ?line disk_log:log(a, Log_5_1),
- ?line disk_log:log(a, Log_5_2),
- ?line disk_log:log(a, Log_1_2_1),
- ?line [Log_2_1, Log_2_2,
- Log_3_1, Log_3_2,
- Log_4_1, Log_4_2,
- Log_5_1, Log_5_2,
- Log_1_2_1] = get_all_terms(a),
-
- ?line disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100, 5}}]),
- ?line [Log_2_1, Log_2_2,
- Log_3_1, Log_3_2,
- Log_4_1, Log_4_2,
- Log_5_1, Log_5_2,
- Log_1_2_1] = get_all_terms(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {60, 3}}]),
+ disk_log:log(a, Log_1_1),
+ disk_log:log(a, Log_2_1),
+ disk_log:change_size(a, {100, 5}),
+ [Log_1_1,
+ Log_2_1] = get_all_terms(a),
+ disk_log:log(a, Log_2_2),
+ disk_log:log(a, Log_3_1),
+ disk_log:log(a, Log_3_2),
+ disk_log:log(a, Log_4_1),
+ disk_log:log(a, Log_4_2),
+ disk_log:log(a, Log_5_1),
+ disk_log:log(a, Log_5_2),
+ disk_log:log(a, Log_1_2_1),
+ [Log_2_1, Log_2_2,
+ Log_3_1, Log_3_2,
+ Log_4_1, Log_4_2,
+ Log_5_1, Log_5_2,
+ Log_1_2_1] = get_all_terms(a),
+
+ disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100, 5}}]),
+ [Log_2_1, Log_2_2,
+ Log_3_1, Log_3_2,
+ Log_4_1, Log_4_2,
+ Log_5_1, Log_5_2,
+ Log_1_2_1] = get_all_terms(a),
disk_log:close(a),
del(File, 5).
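
Aside (not part of the patch): the assertions above read back everything that was logged with a get_all_terms/1 helper. A minimal sketch of how such a helper can be built on disk_log:chunk/2 is shown below; get_terms/3 is a hypothetical internal name, disk_log:chunk/2 is the real API (first call with the continuation atom start).

get_all_terms(Log) ->
    get_terms(Log, start, []).

get_terms(Log, Cont, Acc) ->
    case disk_log:chunk(Log, Cont) of
        eof ->
            %% Chunks were accumulated in reverse order.
            lists:append(lists:reverse(Acc));
        {NewCont, Terms} ->
            get_terms(Log, NewCont, [Terms | Acc]);
        {NewCont, Terms, _BadBytes} ->
            %% Returned for (repaired) logs with corrupt bytes.
            get_terms(Log, NewCont, [Terms | Acc])
    end.
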
-change_size_during(suite) -> [];
-change_size_during(doc) -> ["Change size of a wrap log file while logging "
- "to a file index between the old and the new size"];
+%% Change size of a wrap log file while logging to a file index
+%% between the old and the new size.
change_size_during(Conf) when is_list(Conf) ->
Log_1_1 = "first log first message",
@@ -3633,114 +3551,111 @@ change_size_during(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,5}}]),
- ?line disk_log:log(a, Log_1_1),
- ?line disk_log:log(a, Log_1_2),
- ?line disk_log:log(a, Log_2_1),
- ?line disk_log:log(a, Log_2_2),
- ?line disk_log:log(a, Log_3_1),
- ?line disk_log:log(a, Log_3_2),
- ?line disk_log:log(a, Log_4_1),
- ?line disk_log:log(a, Log_4_2),
- ?line disk_log:log(a, Log_5_1),
- ?line disk_log:log(a, Log_5_2),
- ?line disk_log:log(a, Log_1_1),
- ?line disk_log:log(a, Log_1_2),
- ?line disk_log:log(a, Log_2_1),
- ?line disk_log:log(a, Log_2_2),
- ?line disk_log:log(a, Log_3_1),
- ?line disk_log:log(a, Log_3_2),
- ?line disk_log:log(a, Log_4_1),
- ?line disk_log:log(a, Log_4_2),
- ?line disk_log:change_size(a, {100, 3}),
- ?line [Log_5_1, Log_5_2,
- Log_1_1, Log_1_2,
- Log_2_1, Log_2_2,
- Log_3_1, Log_3_2,
- Log_4_1, Log_4_2] = get_all_terms(a),
- ?line disk_log:log(a, Log_1_2_1),
- ?line disk_log:log(a, Log_1_2_2),
- ?line [Log_2_1, Log_2_2,
- Log_3_1, Log_3_2,
- Log_4_1, Log_4_2,
- Log_1_2_1, Log_1_2_2] = get_all_terms(a),
-
- ?line disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,3}}]),
- ?line [Log_2_1, Log_2_2,
- Log_3_1, Log_3_2,
- Log_4_1, Log_4_2,
- Log_1_2_1, Log_1_2_2] = get_all_terms(a),
- ?line disk_log:log(a, Log_2_2_1),
- ?line disk_log:log(a, Log_2_2_2),
- ?line disk_log:log(a, Log_3_2_1),
- ?line disk_log:log(a, Log_3_2_2),
- ?line disk_log:log(a, Log_1_3_1),
- ?line disk_log:log(a, Log_1_3_2),
- ?line [Log_2_2_1, Log_2_2_2,
- Log_3_2_1, Log_3_2_2,
- Log_1_3_1, Log_1_3_2] = get_all_terms(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,5}}]),
+ disk_log:log(a, Log_1_1),
+ disk_log:log(a, Log_1_2),
+ disk_log:log(a, Log_2_1),
+ disk_log:log(a, Log_2_2),
+ disk_log:log(a, Log_3_1),
+ disk_log:log(a, Log_3_2),
+ disk_log:log(a, Log_4_1),
+ disk_log:log(a, Log_4_2),
+ disk_log:log(a, Log_5_1),
+ disk_log:log(a, Log_5_2),
+ disk_log:log(a, Log_1_1),
+ disk_log:log(a, Log_1_2),
+ disk_log:log(a, Log_2_1),
+ disk_log:log(a, Log_2_2),
+ disk_log:log(a, Log_3_1),
+ disk_log:log(a, Log_3_2),
+ disk_log:log(a, Log_4_1),
+ disk_log:log(a, Log_4_2),
+ disk_log:change_size(a, {100, 3}),
+ [Log_5_1, Log_5_2,
+ Log_1_1, Log_1_2,
+ Log_2_1, Log_2_2,
+ Log_3_1, Log_3_2,
+ Log_4_1, Log_4_2] = get_all_terms(a),
+ disk_log:log(a, Log_1_2_1),
+ disk_log:log(a, Log_1_2_2),
+ [Log_2_1, Log_2_2,
+ Log_3_1, Log_3_2,
+ Log_4_1, Log_4_2,
+ Log_1_2_1, Log_1_2_2] = get_all_terms(a),
+
+ disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,3}}]),
+ [Log_2_1, Log_2_2,
+ Log_3_1, Log_3_2,
+ Log_4_1, Log_4_2,
+ Log_1_2_1, Log_1_2_2] = get_all_terms(a),
+ disk_log:log(a, Log_2_2_1),
+ disk_log:log(a, Log_2_2_2),
+ disk_log:log(a, Log_3_2_1),
+ disk_log:log(a, Log_3_2_2),
+ disk_log:log(a, Log_1_3_1),
+ disk_log:log(a, Log_1_3_2),
+ [Log_2_2_1, Log_2_2_2,
+ Log_3_2_1, Log_3_2_2,
+ Log_1_3_1, Log_1_3_2] = get_all_terms(a),
disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,3}}]),
- ?line [Log_2_2_1, Log_2_2_2,
- Log_3_2_1, Log_3_2_2,
- Log_1_3_1, Log_1_3_2] = get_all_terms(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,3}}]),
+ [Log_2_2_1, Log_2_2_2,
+ Log_3_2_1, Log_3_2_2,
+ Log_1_3_1, Log_1_3_2] = get_all_terms(a),
disk_log:close(a),
del(File, 5),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,5}}]),
- ?line disk_log:log(a, Log_1_1),
- ?line disk_log:log(a, Log_1_2),
- ?line disk_log:log(a, Log_2_1),
- ?line disk_log:log(a, Log_2_2),
- ?line disk_log:log(a, Log_3_1),
- ?line disk_log:log(a, Log_3_2),
- ?line disk_log:log(a, Log_4_1),
- ?line disk_log:log(a, Log_4_2),
- ?line disk_log:log(a, Log_5_1),
- ?line disk_log:log(a, Log_5_2),
- ?line disk_log:log(a, Log_1_1),
- ?line disk_log:log(a, Log_1_2),
- ?line disk_log:log(a, Log_2_1),
- ?line disk_log:log(a, Log_2_2),
- ?line disk_log:log(a, Log_3_1),
- ?line disk_log:log(a, Log_3_2),
- ?line disk_log:log(a, Log_4_1),
- ?line disk_log:log(a, Log_4_2),
- ?line disk_log:log(a, Log_5_1),
- ?line disk_log:log(a, Log_5_2),
- ?line disk_log:change_size(a, {100, 3}),
- ?line [Log_1_1, Log_1_2,
- Log_2_1, Log_2_2,
- Log_3_1, Log_3_2,
- Log_4_1, Log_4_2,
- Log_5_1, Log_5_2] = get_all_terms(a),
- ?line disk_log:log(a, Log_1_2_1),
- ?line disk_log:log(a, Log_1_2_2),
- ?line disk_log:log(a, Log_2_2_1),
- ?line disk_log:log(a, Log_2_2_2),
- ?line disk_log:log(a, Log_3_2_1),
- ?line disk_log:log(a, Log_3_2_2),
- ?line disk_log:log(a, Log_1_3_1),
- ?line disk_log:log(a, Log_1_3_2),
- ?line [Log_2_2_1, Log_2_2_2,
- Log_3_2_1, Log_3_2_2,
- Log_1_3_1, Log_1_3_2] = get_all_terms(a),
-
- ?line disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,3}}]),
- ?line [Log_2_2_1, Log_2_2_2,
- Log_3_2_1, Log_3_2_2,
- Log_1_3_1, Log_1_3_2] = get_all_terms(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,5}}]),
+ disk_log:log(a, Log_1_1),
+ disk_log:log(a, Log_1_2),
+ disk_log:log(a, Log_2_1),
+ disk_log:log(a, Log_2_2),
+ disk_log:log(a, Log_3_1),
+ disk_log:log(a, Log_3_2),
+ disk_log:log(a, Log_4_1),
+ disk_log:log(a, Log_4_2),
+ disk_log:log(a, Log_5_1),
+ disk_log:log(a, Log_5_2),
+ disk_log:log(a, Log_1_1),
+ disk_log:log(a, Log_1_2),
+ disk_log:log(a, Log_2_1),
+ disk_log:log(a, Log_2_2),
+ disk_log:log(a, Log_3_1),
+ disk_log:log(a, Log_3_2),
+ disk_log:log(a, Log_4_1),
+ disk_log:log(a, Log_4_2),
+ disk_log:log(a, Log_5_1),
+ disk_log:log(a, Log_5_2),
+ disk_log:change_size(a, {100, 3}),
+ [Log_1_1, Log_1_2,
+ Log_2_1, Log_2_2,
+ Log_3_1, Log_3_2,
+ Log_4_1, Log_4_2,
+ Log_5_1, Log_5_2] = get_all_terms(a),
+ disk_log:log(a, Log_1_2_1),
+ disk_log:log(a, Log_1_2_2),
+ disk_log:log(a, Log_2_2_1),
+ disk_log:log(a, Log_2_2_2),
+ disk_log:log(a, Log_3_2_1),
+ disk_log:log(a, Log_3_2_2),
+ disk_log:log(a, Log_1_3_1),
+ disk_log:log(a, Log_1_3_2),
+ [Log_2_2_1, Log_2_2_2,
+ Log_3_2_1, Log_3_2_2,
+ Log_1_3_1, Log_1_3_2] = get_all_terms(a),
+
+ disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}, {size, {100,3}}]),
+ [Log_2_2_1, Log_2_2_2,
+ Log_3_2_1, Log_3_2_2,
+ Log_1_3_1, Log_1_3_2] = get_all_terms(a),
disk_log:close(a),
del(File, 5).
-change_size_after(suite) -> [];
-change_size_after(doc) ->
- ["Change size of a wrap log file before we have reached "
- "(on the second round) "
- "to the file index corresponding to the new size"];
+%% Change size of a wrap log file before we have reached (on the
+%% second round) the file index corresponding to the new size.
change_size_after(Conf) when is_list(Conf) ->
Log_1_1 = "first log first message",
@@ -3758,172 +3673,169 @@ change_size_after(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "a.LOG"),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {100,5}}]),
- ?line disk_log:log(a, Log_1_1),
- ?line disk_log:log(a, Log_1_2),
- ?line disk_log:log(a, Log_2_1),
- ?line disk_log:log(a, Log_2_2),
- ?line disk_log:log(a, Log_3_1),
- ?line disk_log:log(a, Log_3_2),
- ?line disk_log:log(a, Log_4_1),
- ?line disk_log:log(a, Log_4_2),
- ?line disk_log:log(a, Log_5_1),
- ?line disk_log:log(a, Log_5_2),
- ?line disk_log:log(a, Log_1_1),
- ?line disk_log:log(a, Log_1_2),
- ?line disk_log:log(a, Log_2_1),
- ?line disk_log:log(a, Log_2_2),
- ?line disk_log:change_size(a, {100, 3}),
- ?line [Log_3_1,Log_3_2,
- Log_4_1, Log_4_2,
- Log_5_1, Log_5_2,
- Log_1_1, Log_1_2,
- Log_2_1, Log_2_2] = get_all_terms(a),
- ?line disk_log:log(a, Log_3_1),
- ?line disk_log:log(a, Log_3_2),
- ?line disk_log:log(a, Log_1_2_1),
- ?line disk_log:log(a, Log_1_2_2),
- ?line [Log_2_1, Log_2_2,
- Log_3_1, Log_3_2,
- Log_1_2_1, Log_1_2_2] = get_all_terms(a),
-
- ?line disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {100,3}}]),
- ?line [Log_2_1, Log_2_2,
- Log_3_1, Log_3_2,
- Log_1_2_1, Log_1_2_2] = get_all_terms(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {100,5}}]),
+ disk_log:log(a, Log_1_1),
+ disk_log:log(a, Log_1_2),
+ disk_log:log(a, Log_2_1),
+ disk_log:log(a, Log_2_2),
+ disk_log:log(a, Log_3_1),
+ disk_log:log(a, Log_3_2),
+ disk_log:log(a, Log_4_1),
+ disk_log:log(a, Log_4_2),
+ disk_log:log(a, Log_5_1),
+ disk_log:log(a, Log_5_2),
+ disk_log:log(a, Log_1_1),
+ disk_log:log(a, Log_1_2),
+ disk_log:log(a, Log_2_1),
+ disk_log:log(a, Log_2_2),
+ disk_log:change_size(a, {100, 3}),
+ [Log_3_1,Log_3_2,
+ Log_4_1, Log_4_2,
+ Log_5_1, Log_5_2,
+ Log_1_1, Log_1_2,
+ Log_2_1, Log_2_2] = get_all_terms(a),
+ disk_log:log(a, Log_3_1),
+ disk_log:log(a, Log_3_2),
+ disk_log:log(a, Log_1_2_1),
+ disk_log:log(a, Log_1_2_2),
+ [Log_2_1, Log_2_2,
+ Log_3_1, Log_3_2,
+ Log_1_2_1, Log_1_2_2] = get_all_terms(a),
+
+ disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {100,3}}]),
+ [Log_2_1, Log_2_2,
+ Log_3_1, Log_3_2,
+ Log_1_2_1, Log_1_2_2] = get_all_terms(a),
disk_log:close(a),
del(File, 5),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {100,5}}]),
- ?line disk_log:log(a, Log_1_1),
- ?line disk_log:log(a, Log_1_2),
- ?line disk_log:log(a, Log_2_1),
- ?line disk_log:log(a, Log_2_2),
- ?line disk_log:log(a, Log_3_1),
- ?line disk_log:log(a, Log_3_2),
- ?line disk_log:log(a, Log_4_1),
- ?line disk_log:log(a, Log_4_2),
- ?line disk_log:log(a, Log_5_1),
- ?line disk_log:log(a, Log_5_2),
- ?line disk_log:log(a, Log_1_1),
- ?line disk_log:log(a, Log_1_2),
- ?line disk_log:log(a, Log_2_1),
- ?line disk_log:log(a, Log_2_2),
- ?line disk_log:change_size(a, {60, 3}),
- ?line [Log_3_1,Log_3_2,
- Log_4_1, Log_4_2,
- Log_5_1, Log_5_2,
- Log_1_1, Log_1_2,
- Log_2_1, Log_2_2] = get_all_terms(a),
- ?line disk_log:log(a, Log_3_1),
- ?line disk_log:log(a, Log_1_2_1),
- ?line [Log_2_1, Log_2_2,
- Log_3_1,
- Log_1_2_1] = get_all_terms(a),
-
- ?line disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
- {size, {60,3}}]),
- ?line [Log_2_1, Log_2_2,
- Log_3_1,
- Log_1_2_1] = get_all_terms(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {100,5}}]),
+ disk_log:log(a, Log_1_1),
+ disk_log:log(a, Log_1_2),
+ disk_log:log(a, Log_2_1),
+ disk_log:log(a, Log_2_2),
+ disk_log:log(a, Log_3_1),
+ disk_log:log(a, Log_3_2),
+ disk_log:log(a, Log_4_1),
+ disk_log:log(a, Log_4_2),
+ disk_log:log(a, Log_5_1),
+ disk_log:log(a, Log_5_2),
+ disk_log:log(a, Log_1_1),
+ disk_log:log(a, Log_1_2),
+ disk_log:log(a, Log_2_1),
+ disk_log:log(a, Log_2_2),
+ disk_log:change_size(a, {60, 3}),
+ [Log_3_1,Log_3_2,
+ Log_4_1, Log_4_2,
+ Log_5_1, Log_5_2,
+ Log_1_1, Log_1_2,
+ Log_2_1, Log_2_2] = get_all_terms(a),
+ disk_log:log(a, Log_3_1),
+ disk_log:log(a, Log_1_2_1),
+ [Log_2_1, Log_2_2,
+ Log_3_1,
+ Log_1_2_1] = get_all_terms(a),
+
+ disk_log:close(a),
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {size, {60,3}}]),
+ [Log_2_1, Log_2_2,
+ Log_3_1,
+ Log_1_2_1] = get_all_terms(a),
disk_log:close(a),
del(File, 5).
-default_size(suite) -> [];
-default_size(doc) -> ["Open an existing wrap log without size option "];
+%% Open an existing wrap log without the size option.
default_size(Conf) when is_list(Conf) ->
- ?line Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "a.LOG"),
- ?line {error, {badarg, size}} = disk_log:open([{name,a}, {file, File},
+ Dir = ?privdir(Conf),
+ File = filename:join(Dir, "a.LOG"),
+ {error, {badarg, size}} = disk_log:open([{name,a}, {file, File},
{type, wrap}]),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap},
{size, {100,5}}]),
- ?line disk_log:close(a),
+ disk_log:close(a),
- ?line {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}]),
- ?line {100, 5} = disk_log_1:read_size_file(File),
- ?line ok = disk_log:close(a),
- ?line del(File, 5).
+ {ok, a} = disk_log:open([{name,a}, {file, File}, {type, wrap}]),
+ {100, 5} = disk_log_1:read_size_file(File),
+ ok = disk_log:close(a),
+ del(File, 5).
-change_size2(suite) -> [];
-change_size2(doc) -> ["Testing change_size/2 a bit more..."];
+%% Testing change_size/2 a bit more...
change_size2(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
- ?line No = 4,
- ?line del(File, No), % cleanup
+ File = filename:join(Dir, "n.LOG"),
+ No = 4,
+ del(File, No), % cleanup
%% External halt.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {size, 100000},
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {size, 100000},
{format, external}, {type, halt}]),
- ?line B = mk_bytes(60), % 56 actually...
- ?line ok = disk_log:blog_terms(n, [B,list_to_binary(B),B]),
- ?line Error1 = {error, {new_size_too_small,n,168}} =
+ B = mk_bytes(60), % 56 actually...
+ ok = disk_log:blog_terms(n, [B,list_to_binary(B),B]),
+ Error1 = {error, {new_size_too_small,n,168}} =
disk_log:change_size(n, 167),
- ?line "The current size" ++ _ = format_error(Error1),
- ?line ok = disk_log:change_size(n, infinity),
- ?line ok = disk_log:change_size(n, 168),
- ?line ok = disk_log:close(n),
- ?line file:delete(File), % cleanup
+ "The current size" ++ _ = format_error(Error1),
+ ok = disk_log:change_size(n, infinity),
+ ok = disk_log:change_size(n, 168),
+ ok = disk_log:close(n),
+ file:delete(File), % cleanup
%% External wrap.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
{size, {100,No}}, {notify, true},
{format, external}]),
- ?line BB = mk_bytes(160),
- ?line ok = disk_log:blog_terms(n, [BB, BB, BB, BB]), % create all files
+ BB = mk_bytes(160),
+ ok = disk_log:blog_terms(n, [BB, BB, BB, BB]), % create all files
%% Used to be one message, but now one per wrapped file.
- ?line rec(3, {disk_log, node(), n, {wrap, 0}}),
- ?line ok = disk_log:blog_terms(n, [BB, BB]),
+ rec(3, {disk_log, node(), n, {wrap, 0}}),
+ ok = disk_log:blog_terms(n, [BB, BB]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(2, {disk_log, node(), n, {wrap, 1}}),
- ?line ok = disk_log:change_size(n, {100, 2}),
- ?line ok = disk_log:change_size(n, {100, 2}),
- ?line {100, 2} = sz(n),
- ?line ok = disk_log:balog_terms(n, [BB, BB]),
- ?line ok = disk_log:balog_terms(n, [BB]),
- ?line ok = disk_log:blog_terms(n, [BB]),
+ rec(2, {disk_log, node(), n, {wrap, 1}}),
+ ok = disk_log:change_size(n, {100, 2}),
+ ok = disk_log:change_size(n, {100, 2}),
+ {100, 2} = sz(n),
+ ok = disk_log:balog_terms(n, [BB, BB]),
+ ok = disk_log:balog_terms(n, [BB]),
+ ok = disk_log:blog_terms(n, [BB]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(4, {disk_log, node(), n, {wrap, 1}}),
- ?line ok = disk_log:change_size(n, {100, 4}),
- ?line ok = disk_log:close(n),
- ?line del(File, No),
+ rec(4, {disk_log, node(), n, {wrap, 1}}),
+ ok = disk_log:change_size(n, {100, 4}),
+ ok = disk_log:close(n),
+ del(File, No),
%% Internal wrap.
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
{size, {100,No}}, {notify, true},
{format, internal}]),
- ?line ok = disk_log:blog_terms(n, [BB, BB, BB, BB]), % create all files
+ ok = disk_log:blog_terms(n, [BB, BB, BB, BB]), % create all files
%% Used to be one message, but now one per wrapped file.
- ?line rec(3, {disk_log, node(), n, {wrap, 0}}),
- ?line ok = disk_log:blog_terms(n, [BB, BB]),
+ rec(3, {disk_log, node(), n, {wrap, 0}}),
+ ok = disk_log:blog_terms(n, [BB, BB]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(2, {disk_log, node(), n, {wrap, 1}}),
- ?line ok = disk_log:change_size(n, {100, 2}),
- ?line {100, 2} = sz(n),
- ?line ok = disk_log:blog_terms(n, [BB, BB, BB, BB]),
+ rec(2, {disk_log, node(), n, {wrap, 1}}),
+ ok = disk_log:change_size(n, {100, 2}),
+ {100, 2} = sz(n),
+ ok = disk_log:blog_terms(n, [BB, BB, BB, BB]),
%% Used to be one message, but now one per wrapped file.
- ?line rec(4, {disk_log, node(), n, {wrap, 1}}),
- ?line ok = disk_log:close(n),
- ?line del(File, No).
+ rec(4, {disk_log, node(), n, {wrap, 1}}),
+ ok = disk_log:close(n),
+ del(File, No).
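
Aside (not part of the patch): change_size2 and the neighbouring tests wait for the {wrap, NoLostItems} messages that disk_log sends to owners opened with {notify, true}, one message per wrapped file. A minimal sketch of a rec/2 helper with the semantics the suite appears to assume (receive N copies of an expected message or fail) could look like this; the 5000 ms timeout is an arbitrary assumption.

rec(0, _Msg) ->
    ok;
rec(N, Msg) ->
    receive
        Msg ->
            rec(N - 1, Msg)
    after 5000 ->
            erlang:error({missing_message, Msg})
    end.
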
-change_size_truncate(suite) -> [];
-change_size_truncate(doc) -> ["OTP-3484: truncating index file"];
+%% OTP-3484: truncating index file.
change_size_truncate(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "bert.LOG"),
- ?line No = 3,
- ?line B = mk_bytes(60),
+ File = filename:join(Dir, "bert.LOG"),
+ No = 3,
+ B = mk_bytes(60),
%% The problem here is truncation of the index file. One cannot easily
%% check that the index file is correctly updated, but print_index_file()
@@ -3933,541 +3845,527 @@ change_size_truncate(Conf) when is_list(Conf) ->
%% Change the size immediately after creating the log, while there
%% are no log files. This used to write stuff at a negative offset
%% from the beginning of the file.
- ?line del(File, No+1),
- ?line {ok, bert} = disk_log:open([{name,bert}, {type,wrap}, {file, File},
+ del(File, No+1),
+ {ok, bert} = disk_log:open([{name,bert}, {type,wrap}, {file, File},
{notify, true}, {size,{1000,255}}]),
- ?line ok = disk_log:change_size(bert,{100,No}),
- ?line ok = disk_log:blog(bert, B),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 0}}),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 0}}),
- ?line 3 = curf(bert),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
- ?line 1 = curf(bert),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
-
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
-
- % Three items expected.
- % disk_log_1:print_index_file("bert.LOG.idx"),
- ?line 3 = curf(bert),
- ?line ok = disk_log:change_size(bert,{100,1}),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
- % Three items expected.
- % disk_log_1:print_index_file("bert.LOG.idx"),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
- % One item expected.
- % disk_log_1:print_index_file("bert.LOG.idx"),
-
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
- ?line ok = disk_log:close(bert),
- ?line del(File, No),
+ ok = disk_log:change_size(bert,{100,No}),
+ ok = disk_log:blog(bert, B),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 0}}),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 0}}),
+ 3 = curf(bert),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
+ 1 = curf(bert),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
+
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
+
+ %% Three items expected.
+ %% disk_log_1:print_index_file("bert.LOG.idx"),
+ 3 = curf(bert),
+ ok = disk_log:change_size(bert,{100,1}),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
+ %% Three items expected.
+ %% disk_log_1:print_index_file("bert.LOG.idx"),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
+ %% One item expected.
+ %% disk_log_1:print_index_file("bert.LOG.idx"),
+
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
+ ok = disk_log:close(bert),
+ del(File, No),
%% Part 2.
%% Change the size twice, the second time while the effects of
%% the first change have not yet been handled. Finally close before
%% the index file has been truncated.
- ?line del(File, No),
- ?line {ok, bert} = disk_log:open([{name,bert}, {type,wrap}, {file, File},
+ del(File, No),
+ {ok, bert} = disk_log:open([{name,bert}, {type,wrap}, {file, File},
{notify, true}, {size,{100,No}}]),
- ?line ok = disk_log:blog(bert, B),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 0}}),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 0}}),
+ ok = disk_log:blog(bert, B),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 0}}),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 0}}),
- ?line 3 = curf(bert),
- ?line ok = disk_log:change_size(bert,{100,No-1}),
+ 3 = curf(bert),
+ ok = disk_log:change_size(bert,{100,No-1}),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
- ?line 1 = curf(bert),
- ?line ok = disk_log:change_size(bert,{100,No+1}),
+ 1 = curf(bert),
+ ok = disk_log:change_size(bert,{100,No+1}),
- % Three items expected.
- % disk_log_1:print_index_file("bert.LOG.idx"),
+ %% Three items expected.
+ %% disk_log_1:print_index_file("bert.LOG.idx"),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
- % Three items expected.
- % disk_log_1:print_index_file("bert.LOG.idx"),
+ %% Three items expected.
+ %% disk_log_1:print_index_file("bert.LOG.idx"),
- ?line 2 = curf(bert),
- ?line ok = disk_log:change_size(bert,{100,1}),
+ 2 = curf(bert),
+ ok = disk_log:change_size(bert,{100,1}),
- % Three items expected.
- % disk_log_1:print_index_file("bert.LOG.idx"),
+ %% Three items expected.
+ %% disk_log_1:print_index_file("bert.LOG.idx"),
- ?line ok = disk_log:close(bert),
+ ok = disk_log:close(bert),
- % State: .siz is 1, current file is 2, index file size is 3...
+ %% State: .siz is 1, current file is 2, index file size is 3...
- ?line {ok, bert} = disk_log:open([{name,bert}, {file, File},
+ {ok, bert} = disk_log:open([{name,bert}, {file, File},
{type,wrap}, {notify, true}]),
- % Three items expected.
- % disk_log_1:print_index_file("bert.LOG.idx"),
+ %% Three items expected.
+ %% disk_log_1:print_index_file("bert.LOG.idx"),
- ?line 2 = curf(bert),
- ?line ok = disk_log:blog(bert, B),
- ?line rec(1, {disk_log, node(), bert, {wrap, 1}}),
- ?line ok = disk_log:close(bert),
+ 2 = curf(bert),
+ ok = disk_log:blog(bert, B),
+ rec(1, {disk_log, node(), bert, {wrap, 1}}),
+ ok = disk_log:close(bert),
- ?line {ok, bert} = disk_log:open([{name,bert}, {file, File},
+ {ok, bert} = disk_log:open([{name,bert}, {file, File},
{type,wrap}, {notify, true}]),
- % Two items expected.
- % disk_log_1:print_index_file("bert.LOG.idx"),
+ %% Two items expected.
+ %% disk_log_1:print_index_file("bert.LOG.idx"),
- ?line 1 = curf(bert),
- ?line ok = disk_log:blog(bert, B),
+ 1 = curf(bert),
+ ok = disk_log:blog(bert, B),
%% Expect {wrap 0}. Nothing lost now, last wrap notification
%% reported one lost item.
- ?line rec(1, {disk_log, node(), bert, {wrap, 0}}),
+ rec(1, {disk_log, node(), bert, {wrap, 0}}),
- % One item expected.
- % disk_log_1:print_index_file("bert.LOG.idx"),
- ?line ok = disk_log:close(bert),
+ %% One item expected.
+ %% disk_log_1:print_index_file("bert.LOG.idx"),
+ ok = disk_log:close(bert),
- ?line del(File, No),
+ del(File, No),
ok.
-change_attribute(suite) -> [];
-change_attribute(doc) ->
- ["Change notify and head"];
+%% Change notify and head.
change_attribute(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
- ?line File = filename:join(Dir, "n.LOG"),
- ?line No = 4,
- ?line del(File, No), % cleanup
- ?line B = mk_bytes(60),
+ File = filename:join(Dir, "n.LOG"),
+ No = 4,
+ del(File, No), % cleanup
+ B = mk_bytes(60),
- ?line Q = qlen(),
+ Q = qlen(),
- % test change_notify
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ %% test change_notify
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
{size, {100,No}}]),
- ?line {ok, n} = disk_log:open([{name, n}]), % ignored...
- ?line ok = disk_log:log_terms(n, [B,B]),
- ?line {error, {badarg, notify}} = disk_log:change_notify(n, self(), wrong),
- ?line ok = disk_log:change_notify(n, self(), false),
- ?line ok = disk_log:change_notify(n, self(), true),
- ?line Error1 = {error, {not_owner, _}} =
+ {ok, n} = disk_log:open([{name, n}]), % ignored...
+ ok = disk_log:log_terms(n, [B,B]),
+ {error, {badarg, notify}} = disk_log:change_notify(n, self(), wrong),
+ ok = disk_log:change_notify(n, self(), false),
+ ok = disk_log:change_notify(n, self(), true),
+ Error1 = {error, {not_owner, _}} =
disk_log:change_notify(n, none, true),
- ?line "The pid" ++ _ = format_error(Error1),
- ?line 2 = no_written_items(n),
- ?line 0 = users(n),
- ?line Parent = self(),
- ?line Pid = spawn(fun() -> disk_log:close(n), Parent ! {self(),done} end),
- ?line receive {Pid, done} -> ok end,
- ?line 0 = users(n),
- ?line 1 = length(owners(n)),
-
- % test change_header
- ?line {error, {badarg, head}} = disk_log:change_header(n, none),
- ?line {error, {badarg, head}} =
+ "The pid" ++ _ = format_error(Error1),
+ 2 = no_written_items(n),
+ 0 = users(n),
+ Parent = self(),
+ Pid = spawn(fun() -> disk_log:close(n), Parent ! {self(),done} end),
+ receive {Pid, done} -> ok end,
+ 0 = users(n),
+ 1 = length(owners(n)),
+
+ %% test change_header
+ {error, {badarg, head}} = disk_log:change_header(n, none),
+ {error, {badarg, head}} =
disk_log:change_header(n, {head_func, {1,2,3}}),
- ?line ok = disk_log:change_header(n, {head, header}),
- ?line ok = disk_log:log(n, B),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line 4 = no_written_items(n),
- ?line ok = disk_log:change_header(n, {head, none}),
- ?line ok = disk_log:log(n, B),
- ?line rec(1, {disk_log, node(), n, {wrap, 0}}),
- ?line 5 = no_written_items(n),
- ?line ok = disk_log:change_header(n,
+ ok = disk_log:change_header(n, {head, header}),
+ ok = disk_log:log(n, B),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ 4 = no_written_items(n),
+ ok = disk_log:change_header(n, {head, none}),
+ ok = disk_log:log(n, B),
+ rec(1, {disk_log, node(), n, {wrap, 0}}),
+ 5 = no_written_items(n),
+ ok = disk_log:change_header(n,
{head_func, {?MODULE, head_fun, [{ok,header}]}}),
- ?line ok = disk_log:log(n, B),
- ?line rec(1, {disk_log, node(), n, {wrap, 1}}),
- ?line 7 = no_written_items(n),
- ?line ok = disk_log:close(n),
- ?line {error, no_such_log} = disk_log:close(n),
- ?line del(File, No),
- ?line file:delete(File), % cleanup
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {format, external},
+ ok = disk_log:log(n, B),
+ rec(1, {disk_log, node(), n, {wrap, 1}}),
+ 7 = no_written_items(n),
+ ok = disk_log:close(n),
+ {error, no_such_log} = disk_log:close(n),
+ del(File, No),
+ file:delete(File), % cleanup
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {format, external},
{type, halt}]),
- ?line {error, {badarg, head}} = disk_log:change_header(n, {head, header}),
- ?line ok = disk_log:change_header(n, {head, "header"}),
- ?line ok = disk_log:close(n),
- ?line file:delete(File),
+ {error, {badarg, head}} = disk_log:change_header(n, {head, header}),
+ ok = disk_log:change_header(n, {head, "header"}),
+ ok = disk_log:close(n),
+ file:delete(File),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
{size, {100,No}}]),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
{size, {100,No}}]),
- ?line ok = disk_log:change_notify(n, self(), true),
- ?line ok = disk_log:change_header(n, {head, tjolahopp}),
- ?line {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
+ ok = disk_log:change_notify(n, self(), true),
+ ok = disk_log:change_header(n, {head, tjolahopp}),
+ {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
{size, {100,No}}, {notify, true}]),
- ?line ok = disk_log:close(n),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line Q = qlen(),
- ?line del(File, No).
+ ok = disk_log:close(n),
+ {error, no_such_log} = disk_log:info(n),
+ Q = qlen(),
+ del(File, No).
-dist_open(suite) -> [];
-dist_open(doc) ->
- ["Open a distributed log"];
+%% Open a distributed log.
dist_open(Conf) when is_list(Conf) ->
- ?line PrivDir = ?privdir(Conf),
- ?line true = is_alive(),
-
- ?line Q = qlen(),
- ?line File = filename:join(PrivDir, "n.LOG"),
- ?line File1 = filename:join(PrivDir, "n1.LOG"),
- ?line No = 3,
- ?line file:delete(File),
- ?line del(File, No), % cleanup
- ?line del(File1, No), % cleanup
- ?line B = mk_bytes(60),
-
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line {ok, Node} = start_node(disk_log, "-pa " ++ PA),
- ?line wait_for_ready_net(),
+ PrivDir = ?privdir(Conf),
+ true = is_alive(),
+
+ Q = qlen(),
+ File = filename:join(PrivDir, "n.LOG"),
+ File1 = filename:join(PrivDir, "n1.LOG"),
+ No = 3,
+ file:delete(File),
+ del(File, No), % cleanup
+ del(File1, No), % cleanup
+ B = mk_bytes(60),
+
+ PA = filename:dirname(code:which(?MODULE)),
+ {ok, Node} = start_node(disk_log, "-pa " ++ PA),
+ wait_for_ready_net(),
%% open non-distributed on this node:
- ?line {ok,n} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {ok,n} = disk_log:open([{name, n}, {file, File}, {type, halt},
{distributed, []}]),
- ?line Error1 = {error, {halt_log, n}} = disk_log:inc_wrap_file(n),
- ?line "The halt log" ++ _ = format_error(Error1),
- ?line ok = disk_log:lclose(n),
- ?line file:delete(File),
+ Error1 = {error, {halt_log, n}} = disk_log:inc_wrap_file(n),
+ "The halt log" ++ _ = format_error(Error1),
+ ok = disk_log:lclose(n),
+ file:delete(File),
%% open distributed on this node:
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {[_],[]} = disk_log:open([{name, n}, {file, File}, {type, halt},
{distributed, [node()]}]),
%% the error message is ignored:
- ?line ok = disk_log:inc_wrap_file(n),
- ?line ok = disk_log:close(n),
- ?line file:delete(File),
+ ok = disk_log:inc_wrap_file(n),
+ ok = disk_log:close(n),
+ file:delete(File),
%% open a wrap log on this node, write something on this node
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File},
+ {[_],[]} = disk_log:open([{name, n}, {file, File},
{type, wrap}, {size, {50, No}},
{distributed, [node()]}]),
- ?line ok = disk_log:log(n, B),
- ?line ok = disk_log:close(n),
+ ok = disk_log:log(n, B),
+ ok = disk_log:close(n),
%% open a wrap log on this node and another node, write something
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File},
+ {[_],[]} = disk_log:open([{name, n}, {file, File},
{type, wrap}, {size, {50, No}},
{distributed, [node()]}]),
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File1},
+ {[_],[]} = disk_log:open([{name, n}, {file, File1},
{type, wrap}, {size, {50, No}},
{distributed, [Node]}]),
- ?line ok = disk_log:log(n, B),
- ?line ok = rpc:call(Node, disk_log, log, [n, B]),
- ?line ok = disk_log:close(n),
- ?line del(File, No),
- ?line del(File1, No),
- ?line file:delete(File),
+ ok = disk_log:log(n, B),
+ ok = rpc:call(Node, disk_log, log, [n, B]),
+ ok = disk_log:close(n),
+ del(File, No),
+ del(File1, No),
+ file:delete(File),
%% open a wrap log on this node and another node, use lclose
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File},
+ {[_],[]} = disk_log:open([{name, n}, {file, File},
{type, wrap}, {size, {50, No}},
{distributed, [node()]}]),
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File},
+ {[_],[]} = disk_log:open([{name, n}, {file, File},
{type, wrap}, {size, {50, No}},
{distributed, [node()]},
{linkto,none}]),
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File1},
+ {[_],[]} = disk_log:open([{name, n}, {file, File1},
{type, wrap}, {size, {50, No}},
{distributed, [Node]}]),
- ?line [_, _] = distributed(n),
- ?line ok = disk_log:lclose(n, Node),
- ?line [_] = distributed(n),
- ?line ok = disk_log:lclose(n),
- ?line ok = disk_log:lclose(n),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line del(File, No),
- ?line del(File1, No),
- ?line file:delete(File),
-
- % open an invalid log file, and see how error are handled
- ?line First = "n.LOG.1",
- ?line make_file(PrivDir, First, 8),
-
- ?line {[], [_,_]} = disk_log:open([{name, n}, {file, File},
+ [_, _] = distributed(n),
+ ok = disk_log:lclose(n, Node),
+ [_] = distributed(n),
+ ok = disk_log:lclose(n),
+ ok = disk_log:lclose(n),
+ {error, no_such_log} = disk_log:info(n),
+ del(File, No),
+ del(File1, No),
+ file:delete(File),
+
+    %% open an invalid log file, and see how errors are handled
+ First = "n.LOG.1",
+ make_file(PrivDir, First, 8),
+
+ {[], [_,_]} = disk_log:open([{name, n}, {file, File},
{type, wrap}, {size, {50, No}},
{distributed, [Node,node()]}]),
- ?line del(File, No),
- ?line file:delete(File),
+ del(File, No),
+ file:delete(File),
- % open a wrap on one other node (not on this node)
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File},
+    %% open a wrap log on one other node (not on this node)
+ {[_],[]} = disk_log:open([{name, n}, {file, File},
{type, wrap}, {size, {50, No}},
{distributed, [Node]}]),
- ?line ok = rpc:call(Node, disk_log, log, [n, B]),
- ?line {error, no_such_log} = disk_log:lclose(n),
- ?line ok = disk_log:close(n),
+ ok = rpc:call(Node, disk_log, log, [n, B]),
+ {error, no_such_log} = disk_log:lclose(n),
+ ok = disk_log:close(n),
- ?line Q = qlen(),
+ Q = qlen(),
- ?line {error, no_such_log} = disk_log:info(n),
- ?line del(File, No),
- ?line file:delete(File),
- ?line stop_node(Node),
+ {error, no_such_log} = disk_log:info(n),
+ del(File, No),
+ file:delete(File),
+ stop_node(Node),
ok.
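
Aside (not part of the patch): with the distributed option, disk_log:open/1 returns a pair of success and failure lists rather than {ok, Log}, which is what the {[_],[]} matches above assert. A minimal sketch of opening one log on the local node and a peer, assuming Peer is an already connected node and that open_on_two_nodes/2, d and the file path are illustrative names only:

open_on_two_nodes(Peer, File) ->
    %% Each node opens a local file with the same path; assumes the
    %% directory exists and is writable on both nodes.
    {[_, _] = _Ok, []} =
        disk_log:open([{name, d}, {file, File}, {type, wrap},
                       {size, {50, 3}}, {distributed, [node(), Peer]}]),
    ok = disk_log:log(d, {event, erlang:timestamp()}),
    ok = disk_log:close(d).
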
-dist_error_open(suite) -> [];
-dist_error_open(doc) ->
- ["Open a log distributed and not distributed"];
+%% Open a log distributed and not distributed.
dist_error_open(Conf) when is_list(Conf) ->
- ?line PrivDir = ?privdir(Conf),
- ?line true = is_alive(),
-
- ?line Q = qlen(),
- ?line File = filename:join(PrivDir, "bert.LOG"),
- ?line File1 = filename:join(PrivDir, "bert1.LOG"),
- ?line No = 3,
- ?line file:delete(File),
- ?line del(File, No), % cleanup
- ?line del(File1, No), % cleanup
-
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line {ok, Node} = start_node(disk_log, "-pa " ++ PA),
- ?line wait_for_ready_net(),
-
- % open non-distributed on this node:
- ?line {ok,n} = disk_log:open([{name, n}, {file, File},
+ PrivDir = ?privdir(Conf),
+ true = is_alive(),
+
+ Q = qlen(),
+ File = filename:join(PrivDir, "bert.LOG"),
+ File1 = filename:join(PrivDir, "bert1.LOG"),
+ No = 3,
+ file:delete(File),
+ del(File, No), % cleanup
+ del(File1, No), % cleanup
+
+ PA = filename:dirname(code:which(?MODULE)),
+ {ok, Node} = start_node(disk_log, "-pa " ++ PA),
+ wait_for_ready_net(),
+
+ %% open non-distributed on this node:
+ {ok,n} = disk_log:open([{name, n}, {file, File},
{type, wrap}, {size, {50, No}}]),
- % trying to open distributed on this node (error):
- ?line {[],[Error1={ENode,{error,{node_already_open,n}}}]} =
+ %% trying to open distributed on this node (error):
+ {[],[Error1={ENode,{error,{node_already_open,n}}}]} =
disk_log:open([{name, n}, {file, File},
{type, wrap}, {size, {50, No}},
{distributed, [node()]}]),
- ?line true =
+ true =
lists:prefix(lists:flatten(io_lib:format("~p: The distribution",
[ENode])),
format_error(Error1)),
- ?line ok = disk_log:lclose(n),
+ ok = disk_log:lclose(n),
- % open distributed on this node:
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File},
+ %% open distributed on this node:
+ {[_],[]} = disk_log:open([{name, n}, {file, File},
{type, wrap}, {size, {50, No}},
{distributed, [node()]}]),
- % trying to open non-distributed on this node (error):
- ?line {_,{node_already_open,n}} =
+ %% trying to open non-distributed on this node (error):
+ {_,{node_already_open,n}} =
disk_log:open([{name, n}, {file, File},
{type, wrap}, {size, {50, No}}]),
- ?line ok = disk_log:close(n),
- ?line Q = qlen(),
+ ok = disk_log:close(n),
+ Q = qlen(),
- ?line del(File, No),
- ?line del(File1, No),
- ?line file:delete(File),
- ?line stop_node(Node),
+ del(File, No),
+ del(File1, No),
+ file:delete(File),
+ stop_node(Node),
ok.
-dist_notify(suite) -> [];
-dist_notify(doc) ->
- ["Notification from other node"];
+%% Notification from other node.
dist_notify(Conf) when is_list(Conf) ->
- ?line PrivDir = ?privdir(Conf),
- ?line true = is_alive(),
+ PrivDir = ?privdir(Conf),
+ true = is_alive(),
- ?line File = filename:join(PrivDir, "bert.LOG"),
- ?line File1 = filename:join(PrivDir, "bert1.LOG"),
- ?line No = 3,
- ?line B = mk_bytes(60),
- ?line file:delete(File),
- ?line file:delete(File1),
- ?line del(File, No), % cleanup
- ?line del(File1, No),
-
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line {ok, Node} = start_node(disk_log, "-pa " ++ PA),
- ?line wait_for_ready_net(),
-
- % opening distributed on this node:
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File}, {notify, false},
+ File = filename:join(PrivDir, "bert.LOG"),
+ File1 = filename:join(PrivDir, "bert1.LOG"),
+ No = 3,
+ B = mk_bytes(60),
+ file:delete(File),
+ file:delete(File1),
+ del(File, No), % cleanup
+ del(File1, No),
+
+ PA = filename:dirname(code:which(?MODULE)),
+ {ok, Node} = start_node(disk_log, "-pa " ++ PA),
+ wait_for_ready_net(),
+
+ %% opening distributed on this node:
+ {[_],[]} = disk_log:open([{name, n}, {file, File}, {notify, false},
{type, wrap}, {size, {50, No}},
{distributed, [node()]}]),
- % opening distributed on other node:
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File1},
+ %% opening distributed on other node:
+ {[_],[]} = disk_log:open([{name, n}, {file, File1},
{notify, true}, {linkto, self()},
{type, wrap}, {size, {50, No}},
{distributed, [Node]}]),
- ?line disk_log:alog(n, B),
- ?line disk_log:alog(n, B),
- ?line ok = disk_log:sync(n),
- ?line rec(1, {disk_log, Node, n, {wrap, 0}}),
- ?line ok = disk_log:close(n),
-
- ?line del(File, No),
- ?line del(File1, No),
- ?line file:delete(File),
- ?line stop_node(Node),
+ disk_log:alog(n, B),
+ disk_log:alog(n, B),
+ ok = disk_log:sync(n),
+ rec(1, {disk_log, Node, n, {wrap, 0}}),
+ ok = disk_log:close(n),
+
+ del(File, No),
+ del(File1, No),
+ file:delete(File),
+ stop_node(Node),
ok.
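%% Sketch of the notification dist_notify waits for above: with {notify, true}
%% and {linkto, self()} the owner receives a message from the node where the
%% wrap happened. Names are illustrative; a 0 count means no items were lost.
wait_for_wrap(Node, Log) ->
    receive
        {disk_log, Node, Log, {wrap, Lost}} -> {ok, Lost}
    after 5000 -> timeout
    end.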
-dist_terminate(suite) -> [];
-dist_terminate(doc) ->
- ["Terminating nodes with distributed logs"];
+%% Terminating nodes with distributed logs.
dist_terminate(Conf) when is_list(Conf) ->
- ?line Dir = ?privdir(Conf),
- ?line true = is_alive(),
+ Dir = ?privdir(Conf),
+ true = is_alive(),
- ?line File = filename:join(Dir, "n.LOG"),
- ?line File1 = filename:join(Dir, "n1.LOG"),
+ File = filename:join(Dir, "n.LOG"),
+ File1 = filename:join(Dir, "n1.LOG"),
No = 1,
del(File, No), % cleanup
del(File1, No), % cleanup
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line {ok, Node} = start_node(disk_log, "-pa " ++ PA),
- ?line wait_for_ready_net(),
+ PA = filename:dirname(code:which(?MODULE)),
+ {ok, Node} = start_node(disk_log, "-pa " ++ PA),
+ wait_for_ready_net(),
%% Distributed versions of two of the situations in close_block/1.
%% One of two owners terminates.
- ?line Pid1 = spawn_link(?MODULE, lserv, [n]),
- ?line Pid2 = spawn_link(?MODULE, lserv, [n]),
- ?line {[{_, {ok, n}}], []} = sync_do(Pid1, {dist_open, File, node()}),
- ?line {[{_, {ok, n}}], []} = sync_do(Pid2, {dist_open, File1, Node}),
- ?line [_] = sync_do(Pid1, owners),
- ?line [_] = sync_do(Pid2, owners),
- ?line 0 = sync_do(Pid1, users),
- ?line 0 = sync_do(Pid2, users),
- ?line sync_do(Pid1, terminate),
- ?line [_] = sync_do(Pid2, owners),
- ?line 0 = sync_do(Pid2, users),
- ?line sync_do(Pid2, terminate),
- ?line {error, no_such_log} = disk_log:info(n),
+ Pid1 = spawn_link(?MODULE, lserv, [n]),
+ Pid2 = spawn_link(?MODULE, lserv, [n]),
+ {[{_, {ok, n}}], []} = sync_do(Pid1, {dist_open, File, node()}),
+ {[{_, {ok, n}}], []} = sync_do(Pid2, {dist_open, File1, Node}),
+ [_] = sync_do(Pid1, owners),
+ [_] = sync_do(Pid2, owners),
+ 0 = sync_do(Pid1, users),
+ 0 = sync_do(Pid2, users),
+ sync_do(Pid1, terminate),
+ [_] = sync_do(Pid2, owners),
+ 0 = sync_do(Pid2, users),
+ sync_do(Pid2, terminate),
+ {error, no_such_log} = disk_log:info(n),
%% Users terminate (no link...).
- ?line Pid3 = spawn_link(?MODULE, lserv, [n]),
- ?line Pid4 = spawn_link(?MODULE, lserv, [n]),
- ?line {[{_, {ok, n}}], []} =
+ Pid3 = spawn_link(?MODULE, lserv, [n]),
+ Pid4 = spawn_link(?MODULE, lserv, [n]),
+ {[{_, {ok, n}}], []} =
sync_do(Pid3, {dist_open, File, none, node()}),
- ?line {[{_, {ok, n}}], []} =
+ {[{_, {ok, n}}], []} =
sync_do(Pid4, {dist_open, File1, none, Node}),
- ?line [] = sync_do(Pid3, owners),
- ?line [] = sync_do(Pid4, owners),
- ?line 1 = sync_do(Pid3, users),
- ?line 1 = sync_do(Pid4, users),
- ?line sync_do(Pid3, terminate),
- ?line [] = sync_do(Pid4, owners),
- ?line 1 = sync_do(Pid4, users),
- ?line sync_do(Pid4, terminate),
- ?line ok = disk_log:close(n), % closing all nodes
- ?line {error, no_such_log} = disk_log:info(n),
+ [] = sync_do(Pid3, owners),
+ [] = sync_do(Pid4, owners),
+ 1 = sync_do(Pid3, users),
+ 1 = sync_do(Pid4, users),
+ sync_do(Pid3, terminate),
+ [] = sync_do(Pid4, owners),
+ 1 = sync_do(Pid4, users),
+ sync_do(Pid4, terminate),
+ ok = disk_log:close(n), % closing all nodes
+ {error, no_such_log} = disk_log:info(n),
- ?line del(File, No),
- ?line del(File1, No),
- ?line stop_node(Node),
+ del(File, No),
+ del(File1, No),
+ stop_node(Node),
ok.
-dist_accessible(suite) -> [];
-dist_accessible(doc) ->
- ["Accessible logs on nodes"];
+%% Accessible logs on nodes.
dist_accessible(Conf) when is_list(Conf) ->
- ?line PrivDir = ?privdir(Conf),
-
- ?line true = is_alive(),
-
- ?line F1 = filename:join(PrivDir, "a.LOG"),
- ?line file:delete(F1),
- ?line F2 = filename:join(PrivDir, "b.LOG"),
- ?line file:delete(F2),
- ?line F3 = filename:join(PrivDir, "c.LOG"),
- ?line file:delete(F3),
- ?line F4 = filename:join(PrivDir, "d.LOG"),
- ?line file:delete(F1),
- ?line F5 = filename:join(PrivDir, "e.LOG"),
- ?line file:delete(F2),
- ?line F6 = filename:join(PrivDir, "f.LOG"),
- ?line file:delete(F3),
-
- ?line {[],[]} = disk_log:accessible_logs(),
- ?line {ok, a} = disk_log:open([{name, a}, {type, halt}, {file, F1}]),
- ?line {[a],[]} = disk_log:accessible_logs(),
- ?line {ok, b} = disk_log:open([{name, b}, {type, halt}, {file, F2}]),
- ?line {[a,b],[]} = disk_log:accessible_logs(),
- ?line {ok, c} = disk_log:open([{name, c}, {type, halt}, {file, F3}]),
- ?line {[a,b,c],[]} = disk_log:accessible_logs(),
-
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line {ok, Node} = start_node(disk_log, "-pa " ++ PA),
- ?line wait_for_ready_net(),
-
- ?line {[_],[]} = disk_log:open([{name, a}, {file, F4}, {type, halt},
+ PrivDir = ?privdir(Conf),
+
+ true = is_alive(),
+
+ F1 = filename:join(PrivDir, "a.LOG"),
+ file:delete(F1),
+ F2 = filename:join(PrivDir, "b.LOG"),
+ file:delete(F2),
+ F3 = filename:join(PrivDir, "c.LOG"),
+ file:delete(F3),
+ F4 = filename:join(PrivDir, "d.LOG"),
+ file:delete(F1),
+ F5 = filename:join(PrivDir, "e.LOG"),
+ file:delete(F2),
+ F6 = filename:join(PrivDir, "f.LOG"),
+ file:delete(F3),
+
+ {[],[]} = disk_log:accessible_logs(),
+ {ok, a} = disk_log:open([{name, a}, {type, halt}, {file, F1}]),
+ {[a],[]} = disk_log:accessible_logs(),
+ {ok, b} = disk_log:open([{name, b}, {type, halt}, {file, F2}]),
+ {[a,b],[]} = disk_log:accessible_logs(),
+ {ok, c} = disk_log:open([{name, c}, {type, halt}, {file, F3}]),
+ {[a,b,c],[]} = disk_log:accessible_logs(),
+
+ PA = filename:dirname(code:which(?MODULE)),
+ {ok, Node} = start_node(disk_log, "-pa " ++ PA),
+ wait_for_ready_net(),
+
+ {[_],[]} = disk_log:open([{name, a}, {file, F4}, {type, halt},
{distributed, [Node]}]),
- ?line {[a,b,c],[]} = disk_log:accessible_logs(),
- ?line {[],[a]} = rpc:call(Node, disk_log, accessible_logs, []),
- ?line {[_],[]} = disk_log:open([{name, b}, {file, F5}, {type, halt},
+ {[a,b,c],[]} = disk_log:accessible_logs(),
+ {[],[a]} = rpc:call(Node, disk_log, accessible_logs, []),
+ {[_],[]} = disk_log:open([{name, b}, {file, F5}, {type, halt},
{distributed, [Node]}]),
- ?line {[],[a,b]} = rpc:call(Node, disk_log, accessible_logs, []),
- ?line {[_],[]} = disk_log:open([{name, c}, {file, F6}, {type, halt},
+ {[],[a,b]} = rpc:call(Node, disk_log, accessible_logs, []),
+ {[_],[]} = disk_log:open([{name, c}, {file, F6}, {type, halt},
{distributed, [Node]}]),
- ?line {[],[a,b,c]} = rpc:call(Node, disk_log, accessible_logs, []),
- ?line {[a,b,c],[]} = disk_log:accessible_logs(),
- ?line ok = disk_log:close(a),
- ?line {[b,c],[a]} = disk_log:accessible_logs(),
- ?line ok = disk_log:close(b),
- ?line {[c],[a,b]} = disk_log:accessible_logs(),
- ?line ok = disk_log:close(b),
- ?line {[c],[a]} = disk_log:accessible_logs(),
- ?line {[],[a,c]} = rpc:call(Node, disk_log, accessible_logs, []),
- ?line ok = disk_log:close(c),
- ?line {[],[a,c]} = disk_log:accessible_logs(),
- ?line ok = disk_log:close(c),
- ?line {[],[a]} = disk_log:accessible_logs(),
- ?line {[],[a]} = rpc:call(Node, disk_log, accessible_logs, []),
- ?line ok = disk_log:close(a),
- ?line {[],[]} = disk_log:accessible_logs(),
- ?line {[],[]} = rpc:call(Node, disk_log, accessible_logs, []),
-
- ?line file:delete(F1),
- ?line file:delete(F2),
- ?line file:delete(F3),
- ?line file:delete(F4),
- ?line file:delete(F5),
- ?line file:delete(F6),
-
- ?line stop_node(Node),
+ {[],[a,b,c]} = rpc:call(Node, disk_log, accessible_logs, []),
+ {[a,b,c],[]} = disk_log:accessible_logs(),
+ ok = disk_log:close(a),
+ {[b,c],[a]} = disk_log:accessible_logs(),
+ ok = disk_log:close(b),
+ {[c],[a,b]} = disk_log:accessible_logs(),
+ ok = disk_log:close(b),
+ {[c],[a]} = disk_log:accessible_logs(),
+ {[],[a,c]} = rpc:call(Node, disk_log, accessible_logs, []),
+ ok = disk_log:close(c),
+ {[],[a,c]} = disk_log:accessible_logs(),
+ ok = disk_log:close(c),
+ {[],[a]} = disk_log:accessible_logs(),
+ {[],[a]} = rpc:call(Node, disk_log, accessible_logs, []),
+ ok = disk_log:close(a),
+ {[],[]} = disk_log:accessible_logs(),
+ {[],[]} = rpc:call(Node, disk_log, accessible_logs, []),
+
+ file:delete(F1),
+ file:delete(F2),
+ file:delete(F3),
+ file:delete(F4),
+ file:delete(F5),
+ file:delete(F6),
+
+ stop_node(Node),
ok.
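%% Shape of the accessible_logs/0 result checked above (illustrative helper):
%% the first list holds logs opened on this node, the second holds logs that
%% are only reachable here as distributed logs opened on other nodes.
count_accessible() ->
    {Local, Distributed} = disk_log:accessible_logs(),
    {length(Local), length(Distributed)}.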
-dist_deadlock(suite) -> [];
-dist_deadlock(doc) ->
- ["OTP-4405. Deadlock between two nodes could happen."];
+%% OTP-4405. Deadlock between two nodes could happen.
dist_deadlock(Conf) when is_list(Conf) ->
- ?line PrivDir = ?privdir(Conf),
+ PrivDir = ?privdir(Conf),
- ?line true = is_alive(),
+ true = is_alive(),
- ?line F1 = filename:join(PrivDir, "a.LOG"),
- ?line file:delete(F1),
- ?line F2 = filename:join(PrivDir, "b.LOG"),
- ?line file:delete(F2),
+ F1 = filename:join(PrivDir, "a.LOG"),
+ file:delete(F1),
+ F2 = filename:join(PrivDir, "b.LOG"),
+ file:delete(F2),
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line {ok, Node1} = start_node(disk_log_node1, "-pa " ++ PA),
- ?line {ok, Node2} = start_node(disk_log_node2, "-pa " ++ PA),
- ?line wait_for_ready_net(),
+ PA = filename:dirname(code:which(?MODULE)),
+ {ok, Node1} = start_node(disk_log_node1, "-pa " ++ PA),
+ {ok, Node2} = start_node(disk_log_node2, "-pa " ++ PA),
+ wait_for_ready_net(),
Self = self(),
Fun1 = fun() -> dist_dl(Node2, a, F1, Self) end,
@@ -4477,11 +4375,11 @@ dist_deadlock(Conf) when is_list(Conf) ->
receive {P1, a} -> ok end,
receive {P2, b} -> ok end,
- ?line stop_node(Node1),
- ?line stop_node(Node2),
+ stop_node(Node1),
+ stop_node(Node2),
- ?line file:delete(F1),
- ?line file:delete(F2),
+ file:delete(F1),
+ file:delete(F2),
ok.
dist_dl(Node, Name, File, Pid) ->
@@ -4492,12 +4390,10 @@ dist_dl(Node, Name, File, Pid) ->
Pid ! {self(), Name},
ok.
-dist_open2(suite) -> [];
-dist_open2(doc) ->
- ["OTP-4480. Opening several logs simultaneously."];
+%% OTP-4480. Opening several logs simultaneously.
dist_open2(Conf) when is_list(Conf) ->
- ?line true = is_alive(),
- ?line {ok, _Pg2} = pg2:start(),
+ true = is_alive(),
+ {ok, _Pg2} = pg2:start(),
dist_open2_1(Conf, 0),
dist_open2_1(Conf, 100),
@@ -4513,9 +4409,9 @@ dist_open2(Conf) when is_list(Conf) ->
%% to open the log. The second one succeeds, and the third one is
%% attached.
P0 = pps(),
- ?line File0 = "n.LOG",
- ?line File = filename:join(PrivDir, File0),
- ?line make_file(PrivDir, File0, 8),
+ File0 = "n.LOG",
+ File = filename:join(PrivDir, File0),
+ make_file(PrivDir, File0, 8),
Parent = self(),
F1 = fun() -> R = disk_log:open([{name, Log}, {file, File},
@@ -4529,18 +4425,18 @@ dist_open2(Conf) when is_list(Conf) ->
Parent ! {self(), R},
timer:sleep(300)
end,
- ?line Pid1 = spawn(F1),
+ Pid1 = spawn(F1),
timer:sleep(10),
- ?line Pid2 = spawn(F2),
- ?line Pid3 = spawn(F2),
+ Pid2 = spawn(F2),
+ Pid3 = spawn(F2),
- ?line receive {Pid1,R1} -> {[],[_]} = R1 end,
- ?line receive {Pid2,R2} -> {[_],[]} = R2 end,
- ?line receive {Pid3,R3} -> {[_],[]} = R3 end,
+ receive {Pid1,R1} -> {[],[_]} = R1 end,
+ receive {Pid2,R2} -> {[_],[]} = R2 end,
+ receive {Pid3,R3} -> {[_],[]} = R3 end,
timer:sleep(500),
- ?line file:delete(File),
- ?line true = (P0 == pps()),
+ file:delete(File),
+ check_pps(P0),
%% This time the first process has a naughty head_func. This test
%% does not add very much. Perhaps it should be removed. However,
@@ -4561,15 +4457,15 @@ dist_open2(Conf) when is_list(Conf) ->
{type,halt}]),
Parent ! {self(), R}
end,
- ?line Pid4 = spawn(F3),
+ Pid4 = spawn(F3),
timer:sleep(10),
- ?line Pid5 = spawn(F4),
- ?line Pid6 = spawn(F4),
+ Pid5 = spawn(F4),
+ Pid6 = spawn(F4),
%% The timing is crucial here.
- ?line R = case receive {Pid4,R4} -> R4 end of
+ R = case receive {Pid4,R4} -> R4 end of
{error, no_such_log} ->
- ?line R5 = receive {Pid5, R5a} -> R5a end,
- ?line R6 = receive {Pid6, R6a} -> R6a end,
+ R5 = receive {Pid5, R5a} -> R5a end,
+ R6 = receive {Pid6, R6a} -> R6a end,
case {R5, R6} of
{{repaired, _, _, _}, {ok, Log}} -> ok;
{{ok, Log}, {repaired, _, _, _}} -> ok;
@@ -4577,16 +4473,16 @@ dist_open2(Conf) when is_list(Conf) ->
end,
ok;
{ok, Log} -> % uninteresting case
- ?line receive {Pid5,_R5} -> ok end,
- ?line receive {Pid6,_R6} -> ok end,
+ receive {Pid5,_R5} -> ok end,
+ receive {Pid6,_R6} -> ok end,
{comment,
"Timing dependent test did not check anything."}
end,
timer:sleep(100),
- ?line {error, no_such_log} = disk_log:close(Log),
+ {error, no_such_log} = disk_log:close(Log),
file:delete(File),
- ?line true = (P0 == pps()),
+ check_pps(P0),
No = 2,
Log2 = n2,
@@ -4598,7 +4494,7 @@ dist_open2(Conf) when is_list(Conf) ->
%% processes should be able to attach to other log without having to
%% wait.
- ?line {ok,Log} =
+ {ok,Log} =
disk_log:open([{name,Log},{file,File},{type,wrap},{size,{100,No}}]),
Pid = spawn(fun() ->
receive {HeadPid, start} -> ok end,
@@ -4607,15 +4503,15 @@ dist_open2(Conf) when is_list(Conf) ->
HeadPid ! {self(), done}
end),
HeadFunc = {?MODULE, slow_header, [Pid]},
- ?line ok = disk_log:change_header(Log, {head_func, HeadFunc}),
- ?line ok = disk_log:inc_wrap_file(Log), % header is written
+ ok = disk_log:change_header(Log, {head_func, HeadFunc}),
+ ok = disk_log:inc_wrap_file(Log), % header is written
timer:sleep(100),
- ?line ok = disk_log:close(Log),
+ ok = disk_log:close(Log),
file:delete(File2),
del(File, No),
- ?line true = (P0 == pps()),
+ check_pps(P0),
R.
@@ -4625,13 +4521,13 @@ dist_open2_1(Conf, Delay) ->
Log = n,
A0 = [{name,Log},{file,File},{type,halt}],
- ?line create_opened_log(File, A0),
+ create_opened_log(File, A0),
P0 = pps(),
Log2 = log2,
File2 = "log2.LOG",
- ?line file:delete(File2),
- ?line {ok,Log2} = disk_log:open([{name,Log2},{file,File2},{type,halt}]),
+ file:delete(File2),
+ {ok,Log2} = disk_log:open([{name,Log2},{file,File2},{type,halt}]),
Parent = self(),
F = fun() ->
@@ -4639,28 +4535,28 @@ dist_open2_1(Conf, Delay) ->
timer:sleep(Delay),
Parent ! {self(), R}
end,
- ?line Pid1 = spawn(F),
+ Pid1 = spawn(F),
timer:sleep(10),
- ?line Pid2 = spawn(F),
- ?line Pid3 = spawn(F),
- ?line {error, no_such_log} = disk_log:log(Log, term), % is repairing now
- ?line 0 = qlen(),
+ Pid2 = spawn(F),
+ Pid3 = spawn(F),
+ {error, no_such_log} = disk_log:log(Log, term), % is repairing now
+ 0 = qlen(),
%% The file is already open, so this will not take long.
- ?line {ok,Log2} = disk_log:open([{name,Log2},{file,File2},{type,halt}]),
- ?line 0 = qlen(), % still repairing
- ?line ok = disk_log:close(Log2),
- ?line {error, no_such_log} = disk_log:close(Log2),
- ?line file:delete(File2),
-
- ?line receive {Pid1,R1} -> {repaired,_,_,_} = R1 end,
- ?line receive {Pid2,R2} -> {ok,_} = R2 end,
- ?line receive {Pid3,R3} -> {ok,_} = R3 end,
+ {ok,Log2} = disk_log:open([{name,Log2},{file,File2},{type,halt}]),
+ 0 = qlen(), % still repairing
+ ok = disk_log:close(Log2),
+ {error, no_such_log} = disk_log:close(Log2),
+ file:delete(File2),
+
+ receive {Pid1,R1} -> {repaired,_,_,_} = R1 end,
+ receive {Pid2,R2} -> {ok,_} = R2 end,
+ receive {Pid3,R3} -> {ok,_} = R3 end,
timer:sleep(500),
- ?line {error, no_such_log} = disk_log:info(Log),
+ {error, no_such_log} = disk_log:info(Log),
file:delete(File),
- ?line true = (P0 == pps()),
+ check_pps(P0),
ok.
@@ -4669,18 +4565,18 @@ dist_open2_2(Conf, Delay) ->
File = filename:join(Dir, "n.LOG"),
Log = n,
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line {ok, Node1} = start_node(disk_log_node2, "-pa " ++ PA),
- ?line wait_for_ready_net(),
+ PA = filename:dirname(code:which(?MODULE)),
+ {ok, Node1} = start_node(disk_log_node2, "-pa " ++ PA),
+ wait_for_ready_net(),
P0 = pps(),
A0 = [{name,Log},{file,File},{type,halt}],
- ?line create_opened_log(File, A0),
+ create_opened_log(File, A0),
Log2 = log2,
File2 = "log2.LOG",
- ?line file:delete(File2),
- ?line {[{Node1,{ok,Log2}}],[]} =
+ file:delete(File2),
+ {[{Node1,{ok,Log2}}],[]} =
disk_log:open([{name,Log2},{file,File2},{type,halt},
{distributed,[Node1]}]),
@@ -4693,32 +4589,32 @@ dist_open2_2(Conf, Delay) ->
Parent ! {self(), R}
end,
%% And {priority, ...} probably has no effect either.
- ?line Pid1 = spawn_opt(F, [{priority, low}]),
- % timer:sleep(1), % no guarantee that Pid1 will return {repaired, ...}
- ?line Pid2 = spawn_opt(F, [{priority, low}]),
- ?line {error, no_such_log} =
+ Pid1 = spawn_opt(F, [{priority, low}]),
+ %% timer:sleep(1), % no guarantee that Pid1 will return {repaired, ...}
+ Pid2 = spawn_opt(F, [{priority, low}]),
+ {error, no_such_log} =
disk_log:log(Log, term), % maybe repairing now
- ?line 0 = qlen(),
+ 0 = qlen(),
%% The file is already open, so this will not take long.
- ?line {[{Node1,{ok,Log2}}],[]} =
+ {[{Node1,{ok,Log2}}],[]} =
disk_log:open([{name,Log2},{file,File2},{type,halt},
{distributed,[Node1]}]),
- ?line 0 = qlen(), % probably still repairing
- ?line ok = disk_log:close(Log2),
- ?line file:delete(File2),
+ 0 = qlen(), % probably still repairing
+ ok = disk_log:close(Log2),
+ file:delete(File2),
- ?line receive {Pid1,R1} -> R1 end,
- ?line receive {Pid2,R2} -> R2 end,
- ?line case {R1, R2} of
+ receive {Pid1,R1} -> R1 end,
+ receive {Pid2,R2} -> R2 end,
+ case {R1, R2} of
{{[{Node1,{repaired,_,_,_}}],[]},
{[{Node1,{ok,Log}}],[]}} -> ok;
{{[{Node1,{ok,Log}}],[]},
{[{Node1,{repaired,_,_,_}}],[]}} -> ok
end,
- ?line true = (P0 == pps()),
- ?line stop_node(Node1),
+ check_pps(P0),
+ stop_node(Node1),
file:delete(File),
ok.
@@ -4751,168 +4647,50 @@ log_terms(Log, N) ->
ok = disk_log:log(Log, {term, N}),
log_terms(Log, N-1).
-other_groups(suite) -> [];
-other_groups(doc) ->
- ["OTP-5810. Cope with pg2 groups that are not disk logs."];
+%% OTP-5810. Cope with pg2 groups that are not disk logs.
other_groups(Conf) when is_list(Conf) ->
- ?line true = is_alive(),
- ?line PrivDir = ?privdir(Conf),
+ true = is_alive(),
+ PrivDir = ?privdir(Conf),
- ?line File = filename:join(PrivDir, "n.LOG"),
- ?line file:delete(File),
+ File = filename:join(PrivDir, "n.LOG"),
+ file:delete(File),
- ?line {[],[]} = disk_log:accessible_logs(),
- ?line {[_],[]} = disk_log:open([{name, n}, {file, File}, {type, halt},
+ {[],[]} = disk_log:accessible_logs(),
+ {[_],[]} = disk_log:open([{name, n}, {file, File}, {type, halt},
{distributed, [node()]}]),
- ?line {[],[n]} = disk_log:accessible_logs(),
+ {[],[n]} = disk_log:accessible_logs(),
Group = grupp,
- ?line pg2:create(Group),
- ?line ok = pg2:join(Group, self()),
- ?line {[],[n]} = disk_log:accessible_logs(),
- ?line [_] =
+ pg2:create(Group),
+ ok = pg2:join(Group, self()),
+ {[],[n]} = disk_log:accessible_logs(),
+ [_] =
lists:filter(fun(P) -> disk_log:pid2name(P) =/= undefined end,
erlang:processes()),
- ?line pg2:delete(Group),
- ?line {[],[n]} = disk_log:accessible_logs(),
- ?line ok = disk_log:close(n),
- ?line {[],[]} = disk_log:accessible_logs(),
- ?line file:delete(File),
-
- ok.
-
--define(MAX, 16384). % MAX in disk_log_1.erl
-evil(suite) -> [];
-evil(doc) -> ["Evil cases such as closed file descriptor port."];
-evil(Conf) when is_list(Conf) ->
- Dir = ?privdir(Conf),
- File = filename:join(Dir, "n.LOG"),
- Log = n,
-
- %% Not a very thorough test.
-
- ?line ok = setup_evil_filled_cache_wrap(Log, Dir),
- ?line {error, {file_error,_,einval}} = disk_log:log(Log, apa),
- ?line ok = disk_log:close(Log),
-
- ?line ok = setup_evil_filled_cache_halt(Log, Dir),
- ?line {error, {file_error,_,einval}} = disk_log:truncate(Log, apa),
- ?line ok = stop_evil(Log),
-
- %% White box test.
- file:delete(File),
- ?line Ports0 = erlang:ports(),
- ?line {ok, Log} = disk_log:open([{name,Log},{file,File},{type,halt},
- {size,?MAX+50},{format,external}]),
- ?line [Fd] = erlang:ports() -- Ports0,
- ?line {B,_} = x_mk_bytes(30),
- ?line ok = disk_log:blog(Log, <<0:(?MAX+1)/unit:8>>),
- ?line exit(Fd, kill),
- ?line {error, {file_error,_,einval}} = disk_log:blog_terms(Log, [B,B]),
- ?line ok= disk_log:close(Log),
+ pg2:delete(Group),
+ {[],[n]} = disk_log:accessible_logs(),
+ ok = disk_log:close(n),
+ {[],[]} = disk_log:accessible_logs(),
file:delete(File),
- ?line ok = setup_evil_wrap(Log, Dir),
- ?line {error, {file_error,_,einval}} = disk_log:close(Log),
-
- ?line ok = setup_evil_wrap(Log, Dir),
- ?line {error, {file_error,_,einval}} = disk_log:log(Log, apa),
- ?line ok = stop_evil(Log),
-
- ?line ok = setup_evil_halt(Log, Dir),
- ?line {error, {file_error,_,einval}} = disk_log:log(Log, apa),
- ?line ok = stop_evil(Log),
-
- ?line ok = setup_evil_wrap(Log, Dir),
- ?line {error, {file_error,_,einval}} = disk_log:reopen(Log, apa),
- ?line {error, {file_error,_,einval}} = disk_log:reopen(Log, apa),
- ?line ok = stop_evil(Log),
-
- ?line ok = setup_evil_wrap(Log, Dir),
- ?line {error, {file_error,_,einval}} = disk_log:reopen(Log, apa),
- ?line ok = stop_evil(Log),
-
- ?line ok = setup_evil_wrap(Log, Dir),
- ?line {error, {file_error,_,einval}} = disk_log:inc_wrap_file(Log),
- ?line ok = stop_evil(Log),
-
- ?line ok = setup_evil_wrap(Log, Dir),
- ?line {error, {file_error,_,einval}} = disk_log:chunk(Log, start),
- ?line ok = stop_evil(Log),
-
- ?line ok = setup_evil_wrap(Log, Dir),
- ?line {error, {file_error,_,einval}} = disk_log:truncate(Log),
- ?line ok = stop_evil(Log),
-
- ?line ok = setup_evil_wrap(Log, Dir),
- ?line {error, {file_error,_,einval}} = disk_log:chunk_step(Log, start, 1),
- ?line ok = stop_evil(Log),
-
- io:format("messages: ~p~n", [erlang:process_info(self(), messages)]),
- del(File, 2),
- file:delete(File),
- ok.
-
-setup_evil_wrap(Log, Dir) ->
- setup_evil(Log, [{type,wrap},{size,{100,2}}], Dir).
-
-setup_evil_halt(Log, Dir) ->
- setup_evil(Log, [{type,halt},{size,10000}], Dir).
-
-setup_evil(Log, Args, Dir) ->
- File = filename:join(Dir, lists:concat([Log, ".LOG"])),
- file:delete(File),
- del(File, 2),
- ok = disk_log:start(),
- Ports0 = erlang:ports(),
- {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]),
- [Fd] = erlang:ports() -- Ports0,
- exit(Fd, kill),
- ok = disk_log:log_terms(n, [<<0:10/unit:8>>]),
- timer:sleep(2500), % TIMEOUT in disk_log_1.erl is 2000
- ok.
-
-stop_evil(Log) ->
- {error, _} = disk_log:close(Log),
ok.
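%% pid2name/1, used by other_groups above, maps an owner or user pid back to
%% its disk log and returns undefined for unrelated pids; a small illustrative
%% helper:
logs_owned_by(Pids) ->
    [{P, L} || P <- Pids, {ok, L} <- [disk_log:pid2name(P)]].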
-setup_evil_filled_cache_wrap(Log, Dir) ->
- setup_evil_filled_cache(Log, [{type,wrap},{size,{?MAX,2}}], Dir).
-
-setup_evil_filled_cache_halt(Log, Dir) ->
- setup_evil_filled_cache(Log, [{type,halt},{size,infinity}], Dir).
-
-%% The cache is filled, and the file descriptor port gone.
-setup_evil_filled_cache(Log, Args, Dir) ->
- File = filename:join(Dir, lists:concat([Log, ".LOG"])),
- file:delete(File),
- del(File, 2),
- ok = disk_log:start(),
- Ports0 = erlang:ports(),
- {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]),
- [Fd] = erlang:ports() -- Ports0,
- ok = disk_log:log_terms(n, [<<0:?MAX/unit:8>>]),
- exit(Fd, kill),
- ok.
-
-otp_6278(suite) -> [];
-otp_6278(doc) -> ["OTP-6278. open/1 creates no status or crash report."];
+%% OTP-6278. open/1 creates no status or crash report.
otp_6278(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = filename:join(Dir, "no_such_dir/no_such_file"),
- ?line error_logger:add_report_handler(?MODULE, self()),
- ?line {error, {file_error, _, _}} =
+ error_logger:add_report_handler(?MODULE, self()),
+ {error, {file_error, _, _}} =
disk_log:open([{name,n},{file,File}]),
receive
{crash_report,_Pid,Report} ->
- ?line io:format("Unexpected: ~p\n", [Report]),
- ?line ?t:fail()
+ io:format("Unexpected: ~p\n", [Report]),
+ ct:fail(failed)
after 1000 ->
ok
end,
- ?line error_logger:delete_report_handler(?MODULE).
+ error_logger:delete_report_handler(?MODULE).
-otp_10131(suite) -> [];
-otp_10131(doc) -> ["OTP-10131. head_func type."];
+%% OTP-10131. head_func type.
otp_10131(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
Log = otp_10131,
@@ -4979,7 +4757,7 @@ copy_wrap_log(FromName, ToName, N, FromDir, ToDir) ->
-define(BUFSIZE, 8192).
copy_file(Src, Dest) ->
- % ?t:format("copying from ~p to ~p~n", [Src, Dest]),
+ %% io:format("copying from ~p to ~p~n", [Src, Dest]),
{ok, InFd} = file:open(Src, [raw, binary, read]),
{ok, OutFd} = file:open(Dest, [raw, binary, write]),
ok = copy_file1(InFd, OutFd),
@@ -5013,10 +4791,59 @@ log(Name, N) ->
format_error(E) ->
lists:flatten(disk_log:format_error(E)).
+check_pps({Ports0,Procs0} = P0) ->
+ case pps() of
+ P0 ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ case pps() of
+ P0 ->
+ ok;
+ {Ports1,Procs1} = P1 ->
+ case {Ports1 -- Ports0, Procs1 -- Procs0} of
+ {[], []} -> ok;
+ {PortsDiff,ProcsDiff} ->
+ io:format("failure, got ~p~n, expected ~p\n", [P1, P0]),
+ show("Old port", Ports0 -- Ports1),
+ show("New port", PortsDiff),
+ show("Old proc", Procs0 -- Procs1),
+ show("New proc", ProcsDiff),
+ ct:fail(failed)
+ end
+ end
+ end.
+
+show(_S, []) ->
+ ok;
+show(S, [{Pid, Name, InitCall}|Pids]) when is_pid(Pid) ->
+ io:format("~s: ~w (~w), ~w: ~p~n",
+ [S, Pid, proc_reg_name(Name), InitCall,
+ erlang:process_info(Pid)]),
+ show(S, Pids);
+show(S, [{Port, _}|Ports]) when is_port(Port)->
+ io:format("~s: ~w: ~p~n", [S, Port, erlang:port_info(Port)]),
+ show(S, Ports).
+
pps() ->
timer:sleep(100),
- {erlang:ports(), lists:filter(fun(P) -> erlang:is_process_alive(P) end,
- processes())}.
+ {port_list(), process_list()}.
+
+port_list() ->
+ [{P,safe_second_element(erlang:port_info(P, name))} ||
+ P <- erlang:ports()].
+
+process_list() ->
+ [{P,process_info(P, registered_name),
+ safe_second_element(process_info(P, initial_call))} ||
+ P <- processes(), erlang:is_process_alive(P)].
+
+proc_reg_name({registered_name, Name}) -> Name;
+proc_reg_name([]) -> no_reg_name.
+
+safe_second_element({_,Info}) -> Info;
+safe_second_element(Other) -> Other.
+
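%% How pps/0 and check_pps/1 above are combined in the test cases (a sketch of
%% the pattern, with an illustrative wrapper name): snapshot ports and
%% processes before the test body and verify afterwards that nothing leaked.
leak_checked(TestFun) ->
    P0 = pps(),
    ok = TestFun(),
    check_pps(P0),
    ok.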
qlen() ->
{_, {_, N}} = lists:keysearch(message_queue_len, 1, process_info(self())),
@@ -5104,50 +4931,15 @@ get_known(Node) ->
%% Copied from erl_distribution_SUITE.erl:
start_node(Name, Param) ->
- ?t:start_node(Name, slave, [{args, Param}]).
+ test_server:start_node(Name, slave, [{args, Param}]).
stop_node(Node) ->
- ?t:stop_node(Node).
-
-%from(H, [H | T]) -> T;
-%from(H, [_ | T]) -> from(H, T);
-%from(_H, []) -> [].
-
-
-%% Check for NFS cache size, this is called from init_per_testcase() and
-%% makes different tests run depending on the size of the NFS cache on
-%% VxWorks. Possibly this could be adopted to Windows too, but we seldom use
-%% NFS when testing on windows, so I can find better things to do.
-%% The port program used simply reads the nfsCacheSize variable on the board.
-%% If the board is configured without NFS, the port program will fail to load
-%% and this will return 0, which may or may not be the wrong thing to do.
-
-check_nfs(_Config) ->
- 0.
+ test_server:stop_node(Node).
-skip_expand([]) ->
- [];
-skip_expand([Case | T]) ->
- case (catch apply(?MODULE, Case, [suite])) of
- {'EXIT', _} ->
- [Case | skip_expand(T)];
- [] ->
- [Case | skip_expand(T)];
- Res ->
- skip_expand(Res) ++ skip_expand(T)
- end.
-
-
-skip_list(Config) ->
- case check_nfs(Config) of
- 0 ->
- skip_expand(?SKIP_NO_CACHE);
- _ ->
- skip_expand(?SKIP_LARGE_CACHE)
- end.
+%% from(H, [H | T]) -> T;
+%% from(H, [_ | T]) -> from(H, T);
+%% from(_H, []) -> [].
-should_skip(_Test,_Config) ->
- false.
%%-----------------------------------------------------------------
%% The error_logger handler used.
@@ -5159,6 +4951,9 @@ init(Tester) ->
handle_event({error_report, _GL, {Pid, crash_report, Report}}, Tester) ->
Tester ! {crash_report, Pid, Report},
{ok, Tester};
+handle_event({info_msg, _GL, {Pid, F,A}}, Tester) ->
+ Tester ! {info_msg, Pid, F, A},
+ {ok, Tester};
handle_event(_Event, State) ->
{ok, State}.
diff --git a/lib/kernel/test/disk_log_SUITE_data/wrap_log_test.erl b/lib/kernel/test/disk_log_SUITE_data/wrap_log_test.erl
index 1cd1a4b0a4..38449b6bb3 100644
--- a/lib/kernel/test/disk_log_SUITE_data/wrap_log_test.erl
+++ b/lib/kernel/test/disk_log_SUITE_data/wrap_log_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/erl_boot_server_SUITE.erl b/lib/kernel/test/erl_boot_server_SUITE.erl
index 954880e252..1eaa2cf500 100644
--- a/lib/kernel/test/erl_boot_server_SUITE.erl
+++ b/lib/kernel/test/erl_boot_server_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%%
-module(erl_boot_server_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2]).
@@ -34,7 +34,9 @@
%% Changed for the new erl_boot_server for R3A by Bjorn Gustavsson.
%%-----------------------------------------------------------------
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[start, start_link, stop, add, delete, responses].
@@ -57,274 +59,262 @@ end_per_group(_GroupName, Config) ->
-define(all_ones, {255, 255, 255, 255}).
-start(doc) -> "Tests the erl_boot_server:start/1 function.";
+%% Tests the erl_boot_server:start/1 function.
start(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(50)),
- ?line [Host1, Host2|_] = good_hosts(Config),
+ [Host1, Host2|_] = good_hosts(Config),
%% Bad arguments.
BadHost = "bad__host",
- ?line {error, {badarg, {}}} = erl_boot_server:start({}),
- ?line {error, {badarg, atom}} = erl_boot_server:start(atom),
- ?line {error, {badarg, [atom, BadHost]}} =
+ {error, {badarg, {}}} = erl_boot_server:start({}),
+ {error, {badarg, atom}} = erl_boot_server:start(atom),
+ {error, {badarg, [atom, BadHost]}} =
erl_boot_server:start([atom, BadHost]),
- ?line {error, {badarg, [Host1, BadHost]}} =
+ {error, {badarg, [Host1, BadHost]}} =
erl_boot_server:start([Host1, BadHost]),
%% Test once.
- ?line {ok, Pid1} = erl_boot_server:start([Host1]),
- ?line {error, {already_started, Pid1}} =
+ {ok, Pid1} = erl_boot_server:start([Host1]),
+ {error, {already_started, Pid1}} =
erl_boot_server:start([Host1]),
- ?line exit(Pid1, kill),
+ exit(Pid1, kill),
%% Test again.
- test_server:sleep(1),
- ?line {ok, Pid2} = erl_boot_server:start([Host1, Host2]),
- ?line {error, {already_started, Pid2}} =
+ ct:sleep(1),
+ {ok, Pid2} = erl_boot_server:start([Host1, Host2]),
+ {error, {already_started, Pid2}} =
erl_boot_server:start([Host1, Host2]),
- ?line exit(Pid2, kill),
- test_server:sleep(1),
+ exit(Pid2, kill),
+ ct:sleep(1),
- ?line test_server:timetrap_cancel(Dog),
ok.
-start_link(doc) -> "Tests the erl_boot_server:start_link/1 function.";
+%% Tests the erl_boot_server:start_link/1 function.
start_link(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line [Host1, Host2|_] = good_hosts(Config),
+ [Host1, Host2|_] = good_hosts(Config),
OldFlag = process_flag(trap_exit, true),
- ?line {error, {badarg, {}}} = erl_boot_server:start_link({}),
- ?line {error, {badarg, atom}} = erl_boot_server:start_link(atom),
- ?line BadHost = "bad__host",
- ?line {error, {badarg, [atom, BadHost]}} =
+ {error, {badarg, {}}} = erl_boot_server:start_link({}),
+ {error, {badarg, atom}} = erl_boot_server:start_link(atom),
+ BadHost = "bad__host",
+ {error, {badarg, [atom, BadHost]}} =
erl_boot_server:start_link([atom, BadHost]),
- ?line {ok, Pid1} = erl_boot_server:start_link([Host1]),
- ?line {error, {already_started, Pid1}} =
+ {ok, Pid1} = erl_boot_server:start_link([Host1]),
+ {error, {already_started, Pid1}} =
erl_boot_server:start_link([Host1]),
- ?line shutdown(Pid1),
+ shutdown(Pid1),
- ?line {ok, Pid2} = erl_boot_server:start_link([Host1, Host2]),
- ?line {error, {already_started, Pid2}} =
+ {ok, Pid2} = erl_boot_server:start_link([Host1, Host2]),
+ {error, {already_started, Pid2}} =
erl_boot_server:start_link([Host1, Host2]),
- ?line shutdown(Pid2),
+ shutdown(Pid2),
process_flag(trap_exit, OldFlag),
- ?line test_server:timetrap_cancel(Dog),
ok.
-stop(doc) -> "Tests that no processes are left if a boot server is killed.";
+%% Tests that no processes are left if a boot server is killed.
stop(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(50)),
- ?line [Host1|_] = good_hosts(Config),
+ [Host1|_] = good_hosts(Config),
%% Start a boot server and kill it. Make sure that any helper processes
%% dies.
- % Make sure the inet_gethost_native server is already started,
- % otherwise it will make this test fail:
- ?line inet:getaddr(localhost, inet),
- ?line Before = processes(),
- ?line {ok, Pid} = erl_boot_server:start([Host1]),
- ?line New = processes() -- [Pid|Before],
- ?line exit(Pid, kill),
- ?line receive after 100 -> ok end,
- ?line case [P || P <- New, is_process_alive(P)] of
- [] ->
- ok;
- NotKilled ->
- test_server:fail({not_killed, NotKilled})
- end,
- ?line test_server:timetrap_cancel(Dog),
+ %% Make sure the inet_gethost_native server is already started,
+ %% otherwise it will make this test fail:
+ inet:getaddr(localhost, inet),
+ Before = processes(),
+ {ok, Pid} = erl_boot_server:start([Host1]),
+ New = processes() -- [Pid|Before],
+ exit(Pid, kill),
+ receive after 100 -> ok end,
+ case [P || P <- New, is_process_alive(P)] of
+ [] ->
+ ok;
+ NotKilled ->
+ ct:fail({not_killed, NotKilled})
+ end,
ok.
-add(doc) -> "Tests the erl_boot_server:add/1 function.";
+%% Tests the erl_boot_server:add/1 function.
add(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line OldFlag = process_flag(trap_exit, true),
- ?line {ok, Pid1} = erl_boot_server:start_link([]),
- ?line [] = erl_boot_server:which_slaves(),
- ?line [Host1, Host2, Host3|_] = good_hosts(Config),
+ OldFlag = process_flag(trap_exit, true),
+ {ok, Pid1} = erl_boot_server:start_link([]),
+ [] = erl_boot_server:which_slaves(),
+ [Host1, Host2, Host3|_] = good_hosts(Config),
%% Try bad values.
- ?line {error, {badarg, {}}} = erl_boot_server:add_slave({}),
- ?line {error, {badarg, [atom]}} = erl_boot_server:add_slave([atom]),
- ?line BadHost = "bad__host",
- ?line {error, {badarg, BadHost}} = erl_boot_server:add_slave(BadHost),
- ?line [] = erl_boot_server:which_slaves(),
+ {error, {badarg, {}}} = erl_boot_server:add_slave({}),
+ {error, {badarg, [atom]}} = erl_boot_server:add_slave([atom]),
+ BadHost = "bad__host",
+ {error, {badarg, BadHost}} = erl_boot_server:add_slave(BadHost),
+ [] = erl_boot_server:which_slaves(),
%% Add good host names.
- ?line {ok, Ip1} = inet:getaddr(Host1, inet),
- ?line {ok, Ip2} = inet:getaddr(Host2, inet),
- ?line {ok, Ip3} = inet:getaddr(Host3, inet),
- ?line MIp1 = {?all_ones, Ip1},
- ?line MIp2 = {?all_ones, Ip2},
- ?line MIp3 = {?all_ones, Ip3},
- ?line ok = erl_boot_server:add_slave(Host1),
- ?line [MIp1] = erl_boot_server:which_slaves(),
- ?line ok = erl_boot_server:add_slave(Host2),
- ?line M_Ip1_Ip2 = lists:sort([MIp1, MIp2]),
- ?line M_Ip1_Ip2 = lists:sort(erl_boot_server:which_slaves()),
- ?line ok = erl_boot_server:add_slave(Host3),
- ?line M_Ip1_Ip2_Ip3 = lists:sort([MIp3|M_Ip1_Ip2]),
- ?line M_Ip1_Ip2_Ip3 = erl_boot_server:which_slaves(),
+ {ok, Ip1} = inet:getaddr(Host1, inet),
+ {ok, Ip2} = inet:getaddr(Host2, inet),
+ {ok, Ip3} = inet:getaddr(Host3, inet),
+ MIp1 = {?all_ones, Ip1},
+ MIp2 = {?all_ones, Ip2},
+ MIp3 = {?all_ones, Ip3},
+ ok = erl_boot_server:add_slave(Host1),
+ [MIp1] = erl_boot_server:which_slaves(),
+ ok = erl_boot_server:add_slave(Host2),
+ M_Ip1_Ip2 = lists:sort([MIp1, MIp2]),
+ M_Ip1_Ip2 = lists:sort(erl_boot_server:which_slaves()),
+ ok = erl_boot_server:add_slave(Host3),
+ M_Ip1_Ip2_Ip3 = lists:sort([MIp3|M_Ip1_Ip2]),
+ M_Ip1_Ip2_Ip3 = erl_boot_server:which_slaves(),
%% Add duplicate names.
- ?line ok = erl_boot_server:add_slave(Host3),
- ?line M_Ip1_Ip2_Ip3 = erl_boot_server:which_slaves(),
+ ok = erl_boot_server:add_slave(Host3),
+ M_Ip1_Ip2_Ip3 = erl_boot_server:which_slaves(),
%% More bad names.
- ?line {error, {badarg, BadHost}} = erl_boot_server:add_slave(BadHost),
- ?line M_Ip1_Ip2_Ip3 = erl_boot_server:which_slaves(),
+ {error, {badarg, BadHost}} = erl_boot_server:add_slave(BadHost),
+ M_Ip1_Ip2_Ip3 = erl_boot_server:which_slaves(),
%% Cleanup.
- ?line shutdown(Pid1),
- ?line process_flag(trap_exit, OldFlag),
- ?line test_server:timetrap_cancel(Dog),
+ shutdown(Pid1),
+ process_flag(trap_exit, OldFlag),
ok.
-delete(doc) -> "Tests the erl_boot_server:delete/1 function.";
+%% Tests the erl_boot_server:delete/1 function.
delete(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line OldFlag = process_flag(trap_exit, true),
+ OldFlag = process_flag(trap_exit, true),
- ?line [Host1, Host2, Host3|_] = good_hosts(Config),
- ?line {ok, Ip1} = inet:getaddr(Host1, inet),
- ?line {ok, Ip2} = inet:getaddr(Host2, inet),
- ?line {ok, Ip3} = inet:getaddr(Host3, inet),
- ?line MIp1 = {?all_ones, Ip1},
- ?line MIp2 = {?all_ones, Ip2},
- ?line MIp3 = {?all_ones, Ip3},
+ [Host1, Host2, Host3|_] = good_hosts(Config),
+ {ok, Ip1} = inet:getaddr(Host1, inet),
+ {ok, Ip2} = inet:getaddr(Host2, inet),
+ {ok, Ip3} = inet:getaddr(Host3, inet),
+ MIp1 = {?all_ones, Ip1},
+ MIp2 = {?all_ones, Ip2},
+ MIp3 = {?all_ones, Ip3},
- ?line {ok, Pid1} = erl_boot_server:start_link([Host1, Host2, Host3]),
- ?line M_Ip123 = lists:sort([MIp1, MIp2, MIp3]),
- ?line M_Ip123 = erl_boot_server:which_slaves(),
+ {ok, Pid1} = erl_boot_server:start_link([Host1, Host2, Host3]),
+ M_Ip123 = lists:sort([MIp1, MIp2, MIp3]),
+ M_Ip123 = erl_boot_server:which_slaves(),
%% Do some bad attempts and check that the list of slaves is intact.
- ?line {error, {badarg, {}}} = erl_boot_server:delete_slave({}),
- ?line {error, {badarg, [atom]}} = erl_boot_server:delete_slave([atom]),
- ?line BadHost = "bad__host",
- ?line {error, {badarg, BadHost}} = erl_boot_server:delete_slave(BadHost),
- ?line M_Ip123 = erl_boot_server:which_slaves(),
+ {error, {badarg, {}}} = erl_boot_server:delete_slave({}),
+ {error, {badarg, [atom]}} = erl_boot_server:delete_slave([atom]),
+ BadHost = "bad__host",
+ {error, {badarg, BadHost}} = erl_boot_server:delete_slave(BadHost),
+ M_Ip123 = erl_boot_server:which_slaves(),
%% Delete Host2 and make sure it's gone.
- ?line ok = erl_boot_server:delete_slave(Host2),
- ?line M_Ip13 = lists:sort([MIp1, MIp3]),
- ?line M_Ip13 = erl_boot_server:which_slaves(),
-
- ?line ok = erl_boot_server:delete_slave(Host1),
- ?line [MIp3] = erl_boot_server:which_slaves(),
- ?line ok = erl_boot_server:delete_slave(Host1),
- ?line [MIp3] = erl_boot_server:which_slaves(),
-
- ?line {error, {badarg, BadHost}} = erl_boot_server:delete_slave(BadHost),
- ?line [MIp3] = erl_boot_server:which_slaves(),
-
- ?line ok = erl_boot_server:delete_slave(Ip3),
- ?line [] = erl_boot_server:which_slaves(),
- ?line ok = erl_boot_server:delete_slave(Ip3),
- ?line [] = erl_boot_server:which_slaves(),
-
- ?line shutdown(Pid1),
- ?line process_flag(trap_exit, OldFlag),
- ?line test_server:timetrap_cancel(Dog),
+ ok = erl_boot_server:delete_slave(Host2),
+ M_Ip13 = lists:sort([MIp1, MIp3]),
+ M_Ip13 = erl_boot_server:which_slaves(),
+
+ ok = erl_boot_server:delete_slave(Host1),
+ [MIp3] = erl_boot_server:which_slaves(),
+ ok = erl_boot_server:delete_slave(Host1),
+ [MIp3] = erl_boot_server:which_slaves(),
+
+ {error, {badarg, BadHost}} = erl_boot_server:delete_slave(BadHost),
+ [MIp3] = erl_boot_server:which_slaves(),
+
+ ok = erl_boot_server:delete_slave(Ip3),
+ [] = erl_boot_server:which_slaves(),
+ ok = erl_boot_server:delete_slave(Ip3),
+ [] = erl_boot_server:which_slaves(),
+
+ shutdown(Pid1),
+ process_flag(trap_exit, OldFlag),
ok.
-responses(doc) -> "Tests erl_boot_server responses to slave requests.";
+%% Tests erl_boot_server responses to slave requests.
responses(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(30)),
- ?line process_flag(trap_exit, true),
+ process_flag(trap_exit, true),
%% Copy from inet_boot.hrl
EBOOT_PORT = 4368,
EBOOT_REQUEST = "EBOOTQ",
EBOOT_REPLY = "EBOOTR",
- ?line {ok,Host} = inet:gethostname(),
- ?line {ok,Ip} = inet:getaddr(Host, inet),
+ {ok,Host} = inet:gethostname(),
+ {ok,Ip} = inet:getaddr(Host, inet),
ThisVer = erlang:system_info(version),
- ?line {ok,BootPid} = erl_boot_server:start_link([Host]),
+ {ok,BootPid} = erl_boot_server:start_link([Host]),
%% Send junk
- ?line S1 = open_udp(),
- ?line prim_inet:sendto(S1, Ip, EBOOT_PORT, ["0"]),
+ S1 = open_udp(),
+ prim_inet:sendto(S1, Ip, EBOOT_PORT, ["0"]),
receive
What ->
- ?line close_udp(S1),
- ?line ?t:fail({"got unexpected response",What})
+ close_udp(S1),
+ ct:fail({"got unexpected response",What})
after 100 ->
ok
end,
%% Req from a slave with same erlang vsn.
- ?line S2 = open_udp(),
- ?line prim_inet:sendto(S2, Ip, EBOOT_PORT, [EBOOT_REQUEST,ThisVer]),
+ S2 = open_udp(),
+ prim_inet:sendto(S2, Ip, EBOOT_PORT, [EBOOT_REQUEST,ThisVer]),
receive
{udp,S2,Ip,_Port1,Resp1} ->
- ?line close_udp(S2),
- ?line EBOOT_REPLY = string:substr(Resp1, 1, length(EBOOT_REPLY)),
- ?line Rest1 = string:substr(Resp1, length(EBOOT_REPLY)+1, length(Resp1)),
- ?line [_,_,_ | ThisVer] = Rest1
+ close_udp(S2),
+ EBOOT_REPLY = string:substr(Resp1, 1, length(EBOOT_REPLY)),
+ Rest1 = string:substr(Resp1, length(EBOOT_REPLY)+1, length(Resp1)),
+ [_,_,_ | ThisVer] = Rest1
after 2000 ->
- ?line close_udp(S2),
- ?line ?t:fail("no boot server response; same vsn")
+ close_udp(S2),
+ ct:fail("no boot server response; same vsn")
end,
-
+
%% Req from a slave with other erlang vsn.
- ?line S3 = open_udp(),
- ?line prim_inet:sendto(S3, Ip, EBOOT_PORT, [EBOOT_REQUEST,"1.0"]),
+ S3 = open_udp(),
+ prim_inet:sendto(S3, Ip, EBOOT_PORT, [EBOOT_REQUEST,"1.0"]),
receive
Anything ->
- ?line close_udp(S3),
- ?line ?t:fail({"got unexpected response",Anything})
+ close_udp(S3),
+ ct:fail({"got unexpected response",Anything})
after 100 ->
ok
end,
%% Kill the boot server and wait for it to disappear.
- ?line unlink(BootPid),
- ?line BootPidMref = erlang:monitor(process, BootPid),
- ?line exit(BootPid, kill),
+ unlink(BootPid),
+ BootPidMref = erlang:monitor(process, BootPid),
+ exit(BootPid, kill),
receive
{'DOWN',BootPidMref,_,_,_} -> ok
end,
- ?line {ok,BootPid2} = erl_boot_server:start_link(["127.0.0.1"]),
+ {ok,BootPid2} = erl_boot_server:start_link(["127.0.0.1"]),
%% Req from slave with invalid ip address.
- ?line S4 = open_udp(),
+ S4 = open_udp(),
Ret =
case Ip of
{127,0,0,1} ->
{comment,"IP address for this host is 127.0.0.1"};
_ ->
- ?line prim_inet:sendto(S4, Ip, EBOOT_PORT,
- [EBOOT_REQUEST,ThisVer]),
+ prim_inet:sendto(S4, Ip, EBOOT_PORT,
+ [EBOOT_REQUEST,ThisVer]),
receive
Huh ->
- ?line close_udp(S4),
- ?line ?t:fail({"got unexpected response",Huh})
+ close_udp(S4),
+ ct:fail({"got unexpected response",Huh})
after 100 ->
ok
end
end,
- ?line unlink(BootPid2),
- ?line exit(BootPid2, kill),
+ unlink(BootPid2),
+ exit(BootPid2, kill),
%% Now wait for any late unexpected messages.
receive
Whatever ->
- ?line ?t:fail({unexpected_message,Whatever})
+ ct:fail({unexpected_message,Whatever})
after 4000 ->
- ?line close_udp(S1),
- ?line close_udp(S3),
- ?line close_udp(S4),
+ close_udp(S1),
+ close_udp(S3),
+ close_udp(S4),
ok
end,
- ?line test_server:timetrap_cancel(Dog),
Ret.
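%% Sketch of the boot-server query that responses/1 sends by hand, written
%% here with plain gen_udp instead of prim_inet; port 4368 and the
%% EBOOTQ/EBOOTR strings come from the test, the helper name is illustrative.
boot_query(Ip, Version) ->
    {ok, S} = gen_udp:open(0, [list, {active, false}, {broadcast, true}]),
    ok = gen_udp:send(S, Ip, 4368, ["EBOOTQ", Version]),
    Reply = gen_udp:recv(S, 0, 2000), % {ok, {Addr, Port, "EBOOTR" ++ _}} on success
    gen_udp:close(S),
    Reply.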
shutdown(Pid) ->
@@ -335,7 +325,7 @@ shutdown(Pid) ->
after 1000 ->
%% The timeout used to be 1 ms, which could be too short time for the
%% SMP emulator on a slow computer with one CPU.
- test_server:fail(shutdown)
+ ct:fail(shutdown)
end.
good_hosts(_Config) ->
@@ -347,10 +337,10 @@ good_hosts(_Config) ->
[GoodHost1, GoodHost2, GoodHost3].
open_udp() ->
- ?line {ok, S} = prim_inet:open(udp, inet, dgram),
- ?line ok = prim_inet:setopts(S, [{mode,list},{active,true},
- {deliver,term},{broadcast,true}]),
- ?line {ok,_} = prim_inet:bind(S, {0,0,0,0}, 0),
+ {ok, S} = prim_inet:open(udp, inet, dgram),
+ ok = prim_inet:setopts(S, [{mode,list},{active,true},
+ {deliver,term},{broadcast,true}]),
+ {ok,_} = prim_inet:bind(S, {0,0,0,0}, 0),
S.
close_udp(S) ->
diff --git a/lib/kernel/test/erl_distribution_SUITE.erl b/lib/kernel/test/erl_distribution_SUITE.erl
index 2f73ab170a..5a8bbd56c4 100644
--- a/lib/kernel/test/erl_distribution_SUITE.erl
+++ b/lib/kernel/test/erl_distribution_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2015. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,13 +19,16 @@
%%
-module(erl_distribution_SUITE).
-%-define(line_trace, 1).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2]).
--export([tick/1, tick_change/1, illegal_nodenames/1, hidden_node/1,
+-export([tick/1, tick_change/1,
+ connect_node/1,
+ nodenames/1, hostnames/1,
+ illegal_nodenames/1, hidden_node/1,
+ setopts/1,
table_waste/1, net_setuptime/1,
inet_dist_options_options/1,
@@ -43,6 +46,8 @@
-export([get_socket_priorities/0,
tick_cli_test/1, tick_cli_test1/1,
tick_serv_test/2, tick_serv_test1/1,
+ run_remote_test/1,
+ setopts_do/2,
keep_conn/1, time_ping/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
@@ -51,7 +56,6 @@
-export([pinger/1]).
-
-define(DUMMY_NODE,dummy@test01).
%%-----------------------------------------------------------------
@@ -61,10 +65,14 @@
%% erl -sname master -rsh ctrsh
%%-----------------------------------------------------------------
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,4}}].
all() ->
- [tick, tick_change, illegal_nodenames, hidden_node,
+ [tick, tick_change, nodenames, hostnames, illegal_nodenames,
+ connect_node,
+ hidden_node, setopts,
table_waste, net_setuptime, inet_dist_options_options,
{group, monitor_nodes}].
@@ -81,6 +89,7 @@ init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
+ [slave:stop(N) || N <- nodes()],
ok.
init_per_group(_GroupName, Config) ->
@@ -89,29 +98,34 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
-
+init_per_testcase(TC, Config) when TC == hostnames;
+ TC == nodenames ->
+ file:make_dir("hostnames_nodedir"),
+ file:write_file("hostnames_nodedir/ignore_core_files",""),
+ Config;
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
- Dog=?t:timetrap(?t:minutes(4)),
- [{watchdog, Dog}|Config].
+ Config.
-end_per_testcase(_Func, Config) ->
- Dog=?config(watchdog, Config),
- ?t:timetrap_cancel(Dog).
+end_per_testcase(_Func, _Config) ->
+ ok.
+
+connect_node(Config) when is_list(Config) ->
+ Connected = nodes(connected),
+ true = net_kernel:connect_node(node()),
+ Connected = nodes(connected),
+ ok.
-tick(suite) -> [];
-tick(doc) -> [];
tick(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(120)),
PaDir = filename:dirname(code:which(erl_distribution_SUITE)),
-
+
%% First check that the normal case is OK!
- ?line {ok, Node} = start_node(dist_test, "-pa " ++ PaDir),
+ {ok, Node} = start_node(dist_test, "-pa " ++ PaDir),
rpc:call(Node, erl_distribution_SUITE, tick_cli_test, [node()]),
-
+
erlang:monitor_node(Node, true),
receive
{nodedown, Node} ->
- test_server:fail("nodedown from other node")
+ ct:fail("nodedown from other node")
after 30000 ->
erlang:monitor_node(Node, false),
stop_node(Node)
@@ -129,20 +143,20 @@ tick(Config) when is_list(Config) ->
%% Set the ticktime on the server node to 100 secs so the server
%% node doesn't tick the client node within the interval ...
- ?line {ok, ServNode} = start_node(dist_test_server,
- "-kernel net_ticktime 100 "
- "-pa " ++ PaDir),
+ {ok, ServNode} = start_node(dist_test_server,
+ "-kernel net_ticktime 100 "
+ "-pa " ++ PaDir),
rpc:call(ServNode, erl_distribution_SUITE, tick_serv_test, [Node, node()]),
- ?line {ok, _} = start_node(dist_test,
- "-kernel net_ticktime 12 "
- "-pa " ++ PaDir),
+ {ok, _} = start_node(dist_test,
+ "-kernel net_ticktime 12 "
+ "-pa " ++ PaDir),
rpc:call(Node, erl_distribution_SUITE, tick_cli_test, [ServNode]),
-
+
spawn_link(erl_distribution_SUITE, keep_conn, [Node]),
{tick_serv, ServNode} ! {i_want_the_result, self()},
-
+
monitor_node(ServNode, true),
monitor_node(Node, true),
@@ -154,56 +168,145 @@ tick(Config) when is_list(Config) ->
{tick_test, Error} ->
stop_node(ServNode),
stop_node(Node),
- test_server:fail(Error);
+ ct:fail(Error);
{nodedown, Node} ->
stop_node(ServNode),
- test_server:fail("client node died");
+ ct:fail("client node died");
{nodedown, ServNode} ->
stop_node(Node),
- test_server:fail("server node died")
+ ct:fail("server node died")
end,
- ?line test_server:timetrap_cancel(Dog),
ok.
-table_waste(doc) ->
- ["Checks that pinging nonexistyent nodes does not waste space in distribution table"];
-table_waste(suite) ->
- [];
+%% Checks that pinging nonexistent nodes does not waste space in the distribution table.
table_waste(Config) when is_list(Config) ->
- ?line {ok, HName} = inet:gethostname(),
+ {ok, HName} = inet:gethostname(),
F = fun(0,_F) -> [];
(N,F) ->
- ?line Name = list_to_atom("erl_distribution_"++integer_to_list(N)++
- "@"++HName),
- ?line pang = net_adm:ping(Name),
- ?line F(N-1,F)
+ Name = list_to_atom("erl_distribution_"++integer_to_list(N)++
+ "@"++HName),
+ pang = net_adm:ping(Name),
+ F(N-1,F)
end,
- ?line F(256,F),
- ?line {ok, N} = start_node(erl_distribution_300,""),
- ?line stop_node(N),
+ F(256,F),
+ {ok, N} = start_node(erl_distribution_300,""),
+ stop_node(N),
ok.
-
-
-illegal_nodenames(doc) ->
- ["Test that pinging an illegal nodename does not kill the node"];
-illegal_nodenames(suite) ->
- [];
+%% Test that starting nodes with different legal name parts works, and that
+%% illegal ones are filtered out.
+nodenames(Config) when is_list(Config) ->
+ legal("a1@b"),
+ legal("a-1@b"),
+ legal("a_1@b"),
+
+ illegal("cdé@a"),
+ illegal("te欢st@a").
+
+%% Test that starting nodes with different legal host parts works, and that
+%% illegal ones are filtered out.
+hostnames(Config) when is_list(Config) ->
+ Host = gethostname(),
+ legal([$a,$@|atom_to_list(Host)]),
+ legal("1@b1"),
+ legal("b@b1-c"),
+ legal("c@b1_c"),
+ legal("d@b1#c"),
+ legal("f@::1"),
+ legal("g@1:bc3:4e3f:f20:0:1"),
+
+ case file:native_name_encoding() of
+ latin1 -> ignore;
+ _ -> legal("e@b1é")
+ end,
+ long_hostnames(net_kernel:longnames()),
+
+ illegal("h@testالع"),
+ illegal("i@языtest"),
+ illegal("j@te欢st").
+
+long_hostnames(true) ->
+ legal("[email protected]"),
+ legal("[email protected]"),
+ legal("[email protected]_c.d"),
+ legal("[email protected]"),
+ legal("[email protected]");
+long_hostnames(false) ->
+ illegal("[email protected]").
+
+legal(Name) ->
+ case test_node(Name) of
+ started ->
+ ok;
+ not_started ->
+ ct:fail("no ~p node started", [Name])
+ end.
+
+illegal(Name) ->
+ case test_node(Name, true) of
+ not_started ->
+ ok;
+ started ->
+ ct:fail("~p node started with illegal name", [Name])
+ end.
+
+test_node(Name) ->
+ test_node(Name, false).
+test_node(Name, Illegal) ->
+ ProgName = ct:get_progname(),
+ Command = ProgName ++ " -noinput " ++ long_or_short() ++ Name ++
+ " -eval \"net_adm:ping('" ++ atom_to_list(node()) ++ "')\"" ++
+        case Illegal of
+ true ->
+ " -eval \"timer:sleep(10000),init:stop().\"";
+ false ->
+ ""
+ end,
+ net_kernel:monitor_nodes(true),
+ BinCommand = unicode:characters_to_binary(Command, utf8),
+ Prt = open_port({spawn, BinCommand}, [stream,{cd,"hostnames_nodedir"}]),
+ Node = list_to_atom(Name),
+ receive
+ {nodeup, Node} ->
+ net_kernel:monitor_nodes(false),
+ slave:stop(Node),
+ started
+ after 5000 ->
+ net_kernel:monitor_nodes(false),
+ not_started
+ end.
+
+long_or_short() ->
+ case net_kernel:longnames() of
+ true -> " -name ";
+ false -> " -sname "
+ end.
+
+%% Get the local host's name, depending on the name policy in use.
+gethostname() ->
+ Hostname = case net_kernel:longnames() of
+ true->
+ net_adm:localhost();
+ _->
+ {ok, Name}=inet:gethostname(),
+ Name
+ end,
+ list_to_atom(Hostname).
+
+%% Test that pinging an illegal nodename does not kill the node.
illegal_nodenames(Config) when is_list(Config) ->
- ?line Dog=?t:timetrap(?t:minutes(2)),
PaDir = filename:dirname(code:which(erl_distribution_SUITE)),
- ?line {ok, Node}=start_node(illegal_nodenames, "-pa " ++ PaDir),
+ {ok, Node}=start_node(illegal_nodenames, "-pa " ++ PaDir),
monitor_node(Node, true),
- ?line RPid=rpc:call(Node, erlang, spawn,
- [?MODULE, pinger, [self()]]),
+ RPid=rpc:call(Node, erlang, spawn,
+ [?MODULE, pinger, [self()]]),
receive
{RPid, pinged} ->
ok;
{nodedown, Node} ->
- ?t:fail("Remote node died.")
+ ct:fail("Remote node died.")
end,
stop_node(Node),
- ?t:timetrap_cancel(Dog),
ok.
pinger(Starter) ->
@@ -213,7 +316,7 @@ pinger(Starter) ->
ok.
-net_setuptime(doc) -> ["Test that you can set the net_setuptime properly"];
+%% Test that you can set the net_setuptime properly.
net_setuptime(Config) when is_list(Config) ->
%% In this test case, we reluctantly accept shorter times than the given
%% setup time, because the connection attempt can end in a
@@ -221,28 +324,28 @@ net_setuptime(Config) when is_list(Config) ->
Res0 = do_test_setuptime("2"),
io:format("Res0 = ~p", [Res0]),
- ?line true = (Res0 =< 4000),
+ true = (Res0 =< 4000),
Res1 = do_test_setuptime("0.3"),
io:format("Res1 = ~p", [Res1]),
- ?line true = (Res1 =< 500),
+ true = (Res1 =< 500),
ok.
do_test_setuptime(Setuptime) when is_list(Setuptime) ->
- ?line PaDir = filename:dirname(code:which(?MODULE)),
- ?line {ok, Node} = start_node(dist_setuptime_test, "-pa " ++ PaDir ++
- " -kernel net_setuptime " ++ Setuptime),
- ?line Res = rpc:call(Node,?MODULE,time_ping,[?DUMMY_NODE]),
- ?line stop_node(Node),
+ PaDir = filename:dirname(code:which(?MODULE)),
+ {ok, Node} = start_node(dist_setuptime_test, "-pa " ++ PaDir ++
+ " -kernel net_setuptime " ++ Setuptime),
+ Res = rpc:call(Node,?MODULE,time_ping,[?DUMMY_NODE]),
+ stop_node(Node),
Res.
time_ping(Node) ->
T0 = erlang:monotonic_time(),
pang = net_adm:ping(Node),
T1 = erlang:monotonic_time(),
- erlang:convert_time_unit(T1 - T0, native, milli_seconds).
+ erlang:convert_time_unit(T1 - T0, native, millisecond).
%% Keep the connection with the client node up.
-%% This is neccessary as the client node runs with much shorter
+%% This is necessary as the client node runs with much shorter
%% tick time !!
keep_conn(Node) ->
sleep(1),
@@ -283,7 +386,7 @@ tick_cli_test1(Node) ->
receive
{whats_the_result, From} ->
Diff = erlang:convert_time_unit(T2-T1, native,
- milli_seconds),
+ millisecond),
case Diff of
T when T > 8000, T < 16000 ->
From ! {tick_test, T};
@@ -295,161 +398,319 @@ tick_cli_test1(Node) ->
end
end.
+setopts(Config) when is_list(Config) ->
+ register(setopts_regname, self()),
+ [N1,N2,N3,N4] = get_nodenames(4, setopts),
+
+ {_N1F,Port1} = start_node_unconnected(N1, ?MODULE, run_remote_test,
+ ["setopts_do", atom_to_list(node()), "1", "ping"]),
+ 0 = wait_for_port_exit(Port1),
+
+ {_N2F,Port2} = start_node_unconnected(N2, ?MODULE, run_remote_test,
+ ["setopts_do", atom_to_list(node()), "2", "ping"]),
+ 0 = wait_for_port_exit(Port2),
+
+ {ok, LSock} = gen_tcp:listen(0, [{packet,2}, {active,false}]),
+ {ok, LTcpPort} = inet:port(LSock),
-tick_change(doc) -> ["OTP-4255"];
-tick_change(suite) -> [];
+ {N3F,Port3} = start_node_unconnected(N3, ?MODULE, run_remote_test,
+ ["setopts_do", atom_to_list(node()),
+ "1", integer_to_list(LTcpPort)]),
+ wait_and_connect(LSock, N3F, Port3),
+ 0 = wait_for_port_exit(Port3),
+
+ {N4F,Port4} = start_node_unconnected(N4, ?MODULE, run_remote_test,
+ ["setopts_do", atom_to_list(node()),
+ "2", integer_to_list(LTcpPort)]),
+ wait_and_connect(LSock, N4F, Port4),
+ 0 = wait_for_port_exit(Port4),
+
+ ok.
+
+wait_and_connect(LSock, NodeName, NodePort) ->
+ {ok, Sock} = gen_tcp:accept(LSock),
+ {ok, "Connect please"} = gen_tcp:recv(Sock, 0),
+ flush_from_port(NodePort),
+ pong = net_adm:ping(NodeName),
+ gen_tcp:send(Sock, "Connect done"),
+ gen_tcp:close(Sock).
+
+
+flush_from_port(Port) ->
+ flush_from_port(Port, 10).
+
+flush_from_port(Port, Timeout) ->
+ receive
+ {Port,{data,String}} ->
+ io:format("~p: ~s\n", [Port, String]),
+ flush_from_port(Port, Timeout)
+ after Timeout ->
+ timeout
+ end.
+
+wait_for_port_exit(Port) ->
+ case (receive M -> M end) of
+ {Port,{exit_status,Status}} ->
+ Status;
+ {Port,{data,String}} ->
+ io:format("~p: ~s\n", [Port, String]),
+ wait_for_port_exit(Port)
+ end.
+
+run_remote_test([FuncStr, TestNodeStr | Args]) ->
+ Status = try
+ io:format("Node ~p started~n", [node()]),
+ TestNode = list_to_atom(TestNodeStr),
+ io:format("Node ~p spawning function ~p~n", [node(), FuncStr]),
+ {Pid,Ref} = spawn_monitor(?MODULE, list_to_atom(FuncStr), [TestNode, Args]),
+ io:format("Node ~p waiting for function ~p~n", [node(), FuncStr]),
+ receive
+ {'DOWN', Ref, process, Pid, normal} ->
+ 0;
+ Other ->
+ io:format("Node ~p got unexpected msg: ~p\n",[node(), Other]),
+ 1
+ end
+ catch
+ C:E:S ->
+ io:format("Node ~p got EXCEPTION ~p:~p\nat ~p\n",
+ [node(), C, E, S]),
+ 2
+ end,
+ io:format("Node ~p doing halt(~p).\n",[node(), Status]),
+ erlang:halt(Status).
+
+%% Do the actual test on the remote node.
+setopts_do(TestNode, [OptNr, ConnectData]) ->
+ [] = nodes(),
+ {Opt, Val} = opt_from_nr(OptNr),
+ ok = net_kernel:setopts(new, [{Opt, Val}]),
+
+ [] = nodes(),
+ {error, noconnection} = net_kernel:getopts(TestNode, [Opt]),
+
+ case ConnectData of
+ "ping" -> % We connect
+ net_adm:ping(TestNode);
+ TcpPort -> % Other connect
+ {ok, Sock} = gen_tcp:connect("localhost", list_to_integer(TcpPort),
+ [{active,false},{packet,2}]),
+ ok = gen_tcp:send(Sock, "Connect please"),
+ {ok, "Connect done"} = gen_tcp:recv(Sock, 0),
+ gen_tcp:close(Sock)
+ end,
+ [TestNode] = nodes(),
+ {ok, [{Opt,Val}]} = net_kernel:getopts(TestNode, [Opt]),
+ {error, noconnection} = net_kernel:getopts('pixie@fairyland', [Opt]),
+
+ NewVal = change_val(Val),
+ ok = net_kernel:setopts(TestNode, [{Opt, NewVal}]),
+ {ok, [{Opt,NewVal}]} = net_kernel:getopts(TestNode, [Opt]),
+
+ ok = net_kernel:setopts(TestNode, [{Opt, Val}]),
+ {ok, [{Opt,Val}]} = net_kernel:getopts(TestNode, [Opt]),
+
+ ok.
+
+opt_from_nr("1") -> {nodelay, true};
+opt_from_nr("2") -> {nodelay, false}.
+
+change_val(true) -> false;
+change_val(false) -> true.
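%% A minimal usage sketch of the net_kernel option API exercised by
%% setopts_do/2 above; it is not called from the suite, and OtherNode is a
%% placeholder for a node we are (or will become) connected to. 'new' sets
%% the default for connections established later, while a node name changes
%% an already established connection.
setopts_sketch(OtherNode) ->
    %% Default for future connections.
    ok = net_kernel:setopts(new, [{nodelay, true}]),
    %% Read the option back on an existing connection...
    {ok, [{nodelay, _}]} = net_kernel:getopts(OtherNode, [nodelay]),
    %% ...and override it for that connection only.
    ok = net_kernel:setopts(OtherNode, [{nodelay, false}]).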
+
+start_node_unconnected(Name, Mod, Func, Args) ->
+ FullName = full_node_name(Name),
+ CmdLine = mk_node_cmdline(Name,Mod,Func,Args),
+ io:format("Starting node ~p: ~s~n", [FullName, CmdLine]),
+ case open_port({spawn, CmdLine}, [exit_status]) of
+ Port when is_port(Port) ->
+ {FullName, Port};
+ Error ->
+ exit({failed_to_start_node, FullName, Error})
+ end.
+
+full_node_name(PreName) ->
+ HostSuffix = lists:dropwhile(fun ($@) -> false; (_) -> true end,
+ atom_to_list(node())),
+ list_to_atom(atom_to_list(PreName) ++ HostSuffix).
+
+mk_node_cmdline(Name,Mod,Func,Args) ->
+ Static = "-noinput",
+ Pa = filename:dirname(code:which(?MODULE)),
+ Prog = case catch init:get_argument(progname) of
+ {ok,[[P]]} -> P;
+ _ -> exit(no_progname_argument_found)
+ end,
+ NameSw = case net_kernel:longnames() of
+ false -> "-sname ";
+ true -> "-name ";
+ _ -> exit(not_distributed_node)
+ end,
+ {ok, Pwd} = file:get_cwd(),
+ NameStr = atom_to_list(Name),
+ Prog ++ " "
+ ++ Static ++ " "
+ ++ NameSw ++ " " ++ NameStr
+ ++ " -pa " ++ Pa
+ ++ " -env ERL_CRASH_DUMP " ++ Pwd ++ "/erl_crash_dump." ++ NameStr
+ ++ " -setcookie " ++ atom_to_list(erlang:get_cookie())
+ ++ " -run " ++ atom_to_list(Mod) ++ " " ++ atom_to_list(Func)
+ ++ " " ++ string:join(Args, " ").
+
+
+%% OTP-4255.
tick_change(Config) when is_list(Config) ->
- ?line PaDir = filename:dirname(code:which(?MODULE)),
- ?line [BN, CN] = get_nodenames(2, tick_change),
- ?line DefaultTT = net_kernel:get_net_ticktime(),
- ?line unchanged = net_kernel:set_net_ticktime(DefaultTT, 60),
- ?line case DefaultTT of
- I when is_integer(I) -> ?line ok;
- _ -> ?line ?t:fail(DefaultTT)
- end,
-
- % In case other nodes are connected
+ PaDir = filename:dirname(code:which(?MODULE)),
+ [BN, CN] = get_nodenames(2, tick_change),
+ DefaultTT = net_kernel:get_net_ticktime(),
+ unchanged = net_kernel:set_net_ticktime(DefaultTT, 60),
+ case DefaultTT of
+ I when is_integer(I) -> ok;
+ _ -> ct:fail(DefaultTT)
+ end,
+
+ %% In case other nodes are connected
case nodes(connected) of
- [] -> ?line net_kernel:set_net_ticktime(10, 0);
- _ -> ?line rpc:multicall(nodes([this, connected]), net_kernel,
- set_net_ticktime, [10, 5])
+ [] -> net_kernel:set_net_ticktime(10, 0);
+ _ -> rpc:multicall(nodes([this, connected]), net_kernel,
+ set_net_ticktime, [10, 5])
end,
- ?line wait_until(fun () -> 10 == net_kernel:get_net_ticktime() end),
- ?line {ok, B} = start_node(BN, "-kernel net_ticktime 10 -pa " ++ PaDir),
- ?line {ok, C} = start_node(CN, "-kernel net_ticktime 10 -hidden -pa "
- ++ PaDir),
+ wait_until(fun () -> 10 == net_kernel:get_net_ticktime() end),
+ {ok, B} = start_node(BN, "-kernel net_ticktime 10 -pa " ++ PaDir),
+ {ok, C} = start_node(CN, "-kernel net_ticktime 10 -hidden -pa "
+ ++ PaDir),
- ?line OTE = process_flag(trap_exit, true),
+ OTE = process_flag(trap_exit, true),
case catch begin
- ?line run_tick_change_test(B, C, 10, 1, PaDir),
- ?line run_tick_change_test(B, C, 1, 10, PaDir)
+ run_tick_change_test(B, C, 10, 1, PaDir),
+ run_tick_change_test(B, C, 1, 10, PaDir)
end of
{'EXIT', Reason} ->
- ?line stop_node(B),
- ?line stop_node(C),
+ stop_node(B),
+ stop_node(C),
%% In case other nodes are connected
case nodes(connected) of
- [] -> ?line net_kernel:set_net_ticktime(DefaultTT, 0);
- _ -> ?line rpc:multicall(nodes([this, connected]), net_kernel,
- set_net_ticktime, [DefaultTT, 10])
+ [] -> net_kernel:set_net_ticktime(DefaultTT, 0);
+ _ -> rpc:multicall(nodes([this, connected]), net_kernel,
+ set_net_ticktime, [DefaultTT, 10])
end,
- ?line wait_until(fun () ->
- DefaultTT == net_kernel:get_net_ticktime()
- end),
- ?line process_flag(trap_exit, OTE),
- ?t:fail(Reason);
+ wait_until(fun () ->
+ DefaultTT == net_kernel:get_net_ticktime()
+ end),
+ process_flag(trap_exit, OTE),
+ ct:fail(Reason);
_ ->
ok
end,
- ?line process_flag(trap_exit, OTE),
- ?line stop_node(B),
- ?line stop_node(C),
+ process_flag(trap_exit, OTE),
+ stop_node(B),
+ stop_node(C),
- % In case other nodes are connected
+ %% In case other nodes are connected
case nodes(connected) of
- [] -> ?line net_kernel:set_net_ticktime(DefaultTT, 0);
- _ -> ?line rpc:multicall(nodes([this, connected]), net_kernel,
- set_net_ticktime, [DefaultTT, 5])
+ [] -> net_kernel:set_net_ticktime(DefaultTT, 0);
+ _ -> rpc:multicall(nodes([this, connected]), net_kernel,
+ set_net_ticktime, [DefaultTT, 5])
end,
- ?line wait_until(fun () -> DefaultTT == net_kernel:get_net_ticktime() end),
- ?line ok.
+ wait_until(fun () -> DefaultTT == net_kernel:get_net_ticktime() end),
+ ok.
wait_for_nodedowns(Tester, Ref) ->
receive
{nodedown, Node} ->
- ?t:format("~p~n", [{node(), {nodedown, Node}}]),
- ?line Tester ! {Ref, {node(), {nodedown, Node}}}
+ io:format("~p~n", [{node(), {nodedown, Node}}]),
+ Tester ! {Ref, {node(), {nodedown, Node}}}
end,
wait_for_nodedowns(Tester, Ref).
run_tick_change_test(B, C, PrevTT, TT, PaDir) ->
- ?line [DN, EN] = get_nodenames(2, tick_change),
-
- ?line Tester = self(),
- ?line Ref = make_ref(),
- ?line MonitorNodes = fun (Nodes) ->
- ?line lists:foreach(
- fun (N) ->
- ?line monitor_node(N,true)
- end,
- Nodes),
- wait_for_nodedowns(Tester, Ref)
- end,
-
- ?line {ok, D} = start_node(DN, "-kernel net_ticktime "
- ++ integer_to_list(PrevTT) ++ " -pa " ++ PaDir),
-
- ?line NMA = spawn_link(fun () -> MonitorNodes([B, C, D]) end),
- ?line NMB = spawn_link(B, fun () -> MonitorNodes([node(), C, D]) end),
- ?line NMC = spawn_link(C, fun () -> MonitorNodes([node(), B, D]) end),
-
- ?line MaxTT = case PrevTT > TT of
- true -> ?line PrevTT;
- false -> ?line TT
- end,
+ [DN, EN] = get_nodenames(2, tick_change),
+
+ Tester = self(),
+ Ref = make_ref(),
+ MonitorNodes = fun (Nodes) ->
+ lists:foreach(
+ fun (N) ->
+ monitor_node(N,true)
+ end,
+ Nodes),
+ wait_for_nodedowns(Tester, Ref)
+ end,
+
+ {ok, D} = start_node(DN, "-kernel net_ticktime "
+ ++ integer_to_list(PrevTT) ++ " -pa " ++ PaDir),
+
+ NMA = spawn_link(fun () -> MonitorNodes([B, C, D]) end),
+ NMB = spawn_link(B, fun () -> MonitorNodes([node(), C, D]) end),
+ NMC = spawn_link(C, fun () -> MonitorNodes([node(), B, D]) end),
+
+ MaxTT = case PrevTT > TT of
+ true -> PrevTT;
+ false -> TT
+ end,
- ?line CheckResult = make_ref(),
- ?line spawn_link(fun () ->
- receive
- after (25 + MaxTT)*1000 ->
- Tester ! CheckResult
- end
- end),
+ CheckResult = make_ref(),
+ spawn_link(fun () ->
+ receive
+ after (25 + MaxTT)*1000 ->
+ Tester ! CheckResult
+ end
+ end),
- % In case other nodes than these are connected
+ %% In case other nodes than these are connected
case nodes(connected) -- [B, C, D] of
- [] -> ?line ok;
- OtherNodes -> ?line rpc:multicall(OtherNodes, net_kernel,
- set_net_ticktime, [TT, 20])
+ [] -> ok;
+ OtherNodes -> rpc:multicall(OtherNodes, net_kernel,
+ set_net_ticktime, [TT, 20])
end,
- ?line change_initiated = net_kernel:set_net_ticktime(TT,20),
- ?line {ongoing_change_to,_} = net_kernel:set_net_ticktime(TT,20),
- ?line sleep(3),
- ?line change_initiated = rpc:call(B,net_kernel,set_net_ticktime,[TT,15]),
- ?line sleep(7),
- ?line change_initiated = rpc:call(C,net_kernel,set_net_ticktime,[TT,10]),
-
- ?line {ok, E} = start_node(EN, "-kernel net_ticktime "
- ++ integer_to_list(TT) ++ " -pa " ++ PaDir),
- ?line NME = spawn_link(E, fun () -> MonitorNodes([node(), B, C, D]) end),
- ?line NMA2 = spawn_link(fun () -> MonitorNodes([E]) end),
- ?line NMB2 = spawn_link(B, fun () -> MonitorNodes([E]) end),
- ?line NMC2 = spawn_link(C, fun () -> MonitorNodes([E]) end),
-
- receive CheckResult -> ?line ok end,
-
- ?line unlink(NMA), exit(NMA, kill),
- ?line unlink(NMB), exit(NMB, kill),
- ?line unlink(NMC), exit(NMC, kill),
- ?line unlink(NME), exit(NME, kill),
- ?line unlink(NMA2), exit(NMA2, kill),
- ?line unlink(NMB2), exit(NMB2, kill),
- ?line unlink(NMC2), exit(NMC2, kill),
+ change_initiated = net_kernel:set_net_ticktime(TT,20),
+ {ongoing_change_to,_} = net_kernel:set_net_ticktime(TT,20),
+ sleep(3),
+ change_initiated = rpc:call(B,net_kernel,set_net_ticktime,[TT,15]),
+ sleep(7),
+ change_initiated = rpc:call(C,net_kernel,set_net_ticktime,[TT,10]),
+
+ {ok, E} = start_node(EN, "-kernel net_ticktime "
+ ++ integer_to_list(TT) ++ " -pa " ++ PaDir),
+ NME = spawn_link(E, fun () -> MonitorNodes([node(), B, C, D]) end),
+ NMA2 = spawn_link(fun () -> MonitorNodes([E]) end),
+ NMB2 = spawn_link(B, fun () -> MonitorNodes([E]) end),
+ NMC2 = spawn_link(C, fun () -> MonitorNodes([E]) end),
+
+ receive CheckResult -> ok end,
+
+ unlink(NMA), exit(NMA, kill),
+ unlink(NMB), exit(NMB, kill),
+ unlink(NMC), exit(NMC, kill),
+ unlink(NME), exit(NME, kill),
+ unlink(NMA2), exit(NMA2, kill),
+ unlink(NMB2), exit(NMB2, kill),
+ unlink(NMC2), exit(NMC2, kill),
%% The node not changing ticktime should have been disconnected from the
%% other nodes
- receive {Ref, {Node, {nodedown, D}}} when Node == node() -> ?line ok
- after 0 -> ?line exit({?LINE, no_nodedown})
+ receive {Ref, {Node, {nodedown, D}}} when Node == node() -> ok
+ after 0 -> exit({?LINE, no_nodedown})
end,
- receive {Ref, {B, {nodedown, D}}} -> ?line ok
- after 0 -> ?line exit({?LINE, no_nodedown})
+ receive {Ref, {B, {nodedown, D}}} -> ok
+ after 0 -> exit({?LINE, no_nodedown})
end,
- receive {Ref, {C, {nodedown, D}}} -> ?line ok
- after 0 -> ?line exit({?LINE, no_nodedown})
+ receive {Ref, {C, {nodedown, D}}} -> ok
+ after 0 -> exit({?LINE, no_nodedown})
end,
- receive {Ref, {E, {nodedown, D}}} -> ?line ok
- after 0 -> ?line exit({?LINE, no_nodedown})
+ receive {Ref, {E, {nodedown, D}}} -> ok
+ after 0 -> exit({?LINE, no_nodedown})
end,
%% No other connections should have been broken
receive
{Ref, Reason} ->
- ?line stop_node(E),
- ?line exit({?LINE, Reason});
+ stop_node(E),
+ exit({?LINE, Reason});
{'EXIT', Pid, Reason} when Pid == NMA;
Pid == NMB;
Pid == NMC;
@@ -457,70 +718,65 @@ run_tick_change_test(B, C, PrevTT, TT, PaDir) ->
Pid == NMA2;
Pid == NMB2;
Pid == NMC2 ->
- ?line stop_node(E),
+ stop_node(E),
- ?line exit({?LINE, {node(Pid), Reason}})
+ exit({?LINE, {node(Pid), Reason}})
after 0 ->
- ?line TT = net_kernel:get_net_ticktime(),
- ?line TT = rpc:call(B, net_kernel, get_net_ticktime, []),
- ?line TT = rpc:call(C, net_kernel, get_net_ticktime, []),
- ?line TT = rpc:call(E, net_kernel, get_net_ticktime, []),
- ?line stop_node(E),
- ?line ok
+ TT = net_kernel:get_net_ticktime(),
+ TT = rpc:call(B, net_kernel, get_net_ticktime, []),
+ TT = rpc:call(C, net_kernel, get_net_ticktime, []),
+ TT = rpc:call(E, net_kernel, get_net_ticktime, []),
+ stop_node(E),
+ ok
end.
%%
%% Basic tests of hidden node.
%%
-hidden_node(doc) ->
- ["Basic test of hidden node"];
-hidden_node(suite) ->
- [];
+%% Basic test of hidden node.
hidden_node(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(40)),
PaDir = filename:dirname(code:which(?MODULE)),
VArgs = "-pa " ++ PaDir,
HArgs = "-hidden -pa " ++ PaDir,
- ?line {ok, V} = start_node(visible_node, VArgs),
+ {ok, V} = start_node(visible_node, VArgs),
VMN = start_monitor_nodes_proc(V),
- ?line {ok, H} = start_node(hidden_node, HArgs),
- % Connect visible_node -> hidden_node
+ {ok, H} = start_node(hidden_node, HArgs),
+ %% Connect visible_node -> hidden_node
connect_nodes(V, H),
test_nodes(V, H),
stop_node(H),
sleep(5),
check_monitor_nodes_res(VMN, H),
stop_node(V),
- ?line {ok, H} = start_node(hidden_node, HArgs),
+ {ok, H} = start_node(hidden_node, HArgs),
HMN = start_monitor_nodes_proc(H),
- ?line {ok, V} = start_node(visible_node, VArgs),
- % Connect hidden_node -> visible_node
+ {ok, V} = start_node(visible_node, VArgs),
+ %% Connect hidden_node -> visible_node
connect_nodes(H, V),
test_nodes(V, H),
stop_node(V),
sleep(5),
check_monitor_nodes_res(HMN, V),
stop_node(H),
- ?line ?t:timetrap_cancel(Dog),
ok.
connect_nodes(A, B) ->
- % Check that they haven't already connected.
- ?line false = lists:member(A, rpc:call(B, erlang, nodes, [connected])),
- ?line false = lists:member(B, rpc:call(A, erlang, nodes, [connected])),
- % Connect them.
- ?line pong = rpc:call(A, net_adm, ping, [B]).
-
+ %% Check that they haven't already connected.
+ false = lists:member(A, rpc:call(B, erlang, nodes, [connected])),
+ false = lists:member(B, rpc:call(A, erlang, nodes, [connected])),
+ %% Connect them.
+ pong = rpc:call(A, net_adm, ping, [B]).
+
test_nodes(V, H) ->
- % No nodes should be visible on hidden_node
- ?line [] = rpc:call(H, erlang, nodes, []),
- % visible_node should be hidden on hidden_node
- ?line true = lists:member(V, rpc:call(H, erlang, nodes, [hidden])),
- % hidden_node node shouldn't be visible on visible_node
- ?line false = lists:member(H, rpc:call(V, erlang, nodes, [])),
- % hidden_node should be hidden on visible_node
- ?line true = lists:member(H, rpc:call(V, erlang, nodes, [hidden])).
+ %% No nodes should be visible on hidden_node
+ [] = rpc:call(H, erlang, nodes, []),
+ %% visible_node should be hidden on hidden_node
+ true = lists:member(V, rpc:call(H, erlang, nodes, [hidden])),
+ %% hidden_node node shouldn't be visible on visible_node
+ false = lists:member(H, rpc:call(V, erlang, nodes, [])),
+ %% hidden_node should be hidden on visible_node
+ true = lists:member(H, rpc:call(V, erlang, nodes, [hidden])).
mn_loop(MNs) ->
receive
@@ -548,21 +804,19 @@ start_monitor_nodes_proc(Node) ->
ok
end,
Pid.
-
+
check_monitor_nodes_res(Pid, Node) ->
Ref = make_ref(),
Pid ! {monitor_nodes_result, Ref, self()},
receive
{Ref, MNs} ->
- ?line false = lists:keysearch(Node, 2, MNs)
+ false = lists:keysearch(Node, 2, MNs)
end.
-inet_dist_options_options(suite) -> [];
-inet_dist_options_options(doc) ->
- ["Check the kernel inet_dist_{listen,connect}_options options"];
+%% Check the kernel inet_dist_{listen,connect}_options options.
inet_dist_options_options(Config) when is_list(Config) ->
Prio = 1,
case gen_udp:open(0, [{priority,Prio}]) of
@@ -571,7 +825,7 @@ inet_dist_options_options(Config) when is_list(Config) ->
{ok,[{priority,Prio}]} ->
ok = gen_udp:close(Socket),
do_inet_dist_options_options(Prio);
- _ ->
+ _ ->
ok = gen_udp:close(Socket),
{skip,
"Can not set priority "++integer_to_list(Prio)++
@@ -595,25 +849,25 @@ do_inet_dist_options_options(Prio) ->
"-hidden "
"-kernel inet_dist_connect_options "++PriorityString++" "
"-kernel inet_dist_listen_options "++PriorityString,
- ?line {ok,Node1} =
+ {ok,Node1} =
start_node(inet_dist_options_1, InetDistOptions),
- ?line {ok,Node2} =
+ {ok,Node2} =
start_node(inet_dist_options_2, InetDistOptions),
%%
- ?line pong =
+ pong =
rpc:call(Node1, net_adm, ping, [Node2]),
- ?line PrioritiesNode1 =
+ PrioritiesNode1 =
rpc:call(Node1, ?MODULE, get_socket_priorities, []),
- ?line PrioritiesNode2 =
+ PrioritiesNode2 =
rpc:call(Node2, ?MODULE, get_socket_priorities, []),
- ?line ?t:format("PrioritiesNode1 = ~p", [PrioritiesNode1]),
- ?line ?t:format("PrioritiesNode2 = ~p", [PrioritiesNode2]),
- ?line Elevated = [P || P <- PrioritiesNode1, P =:= Prio],
- ?line Elevated = [P || P <- PrioritiesNode2, P =:= Prio],
- ?line [_|_] = Elevated,
+ io:format("PrioritiesNode1 = ~p", [PrioritiesNode1]),
+ io:format("PrioritiesNode2 = ~p", [PrioritiesNode2]),
+ Elevated = [P || P <- PrioritiesNode1, P =:= Prio],
+ Elevated = [P || P <- PrioritiesNode2, P =:= Prio],
+ [_|_] = Elevated,
%%
- ?line stop_node(Node2),
- ?line stop_node(Node1),
+ stop_node(Node2),
+ stop_node(Node1),
ok.
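%% Outside of a test, the same kernel parameters would more typically be set
%% in a sys.config than on the command line; an equivalent entry for the
%% values used above would be
%%
%%   [{kernel, [{inet_dist_connect_options, [{priority,1}]},
%%              {inet_dist_listen_options,  [{priority,1}]}]}].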
get_socket_priorities() ->
@@ -624,185 +878,178 @@ get_socket_priorities() ->
element(2, erlang:port_info(Port, name)) =:= "tcp_inet"]].
-
+
%%
%% Testcase:
%% monitor_nodes_nodedown_reason
%%
-monitor_nodes_nodedown_reason(doc) -> [];
-monitor_nodes_nodedown_reason(suite) -> [];
monitor_nodes_nodedown_reason(Config) when is_list(Config) ->
- ?line MonNodeState = monitor_node_state(),
- ?line ok = net_kernel:monitor_nodes(true),
- ?line ok = net_kernel:monitor_nodes(true, [nodedown_reason]),
-
- ?line Names = get_numbered_nodenames(5, node),
- ?line [NN1, NN2, NN3, NN4, NN5] = Names,
-
- ?line {ok, N1} = start_node(NN1),
- ?line {ok, N2} = start_node(NN2),
- ?line {ok, N3} = start_node(NN3),
- ?line {ok, N4} = start_node(NN4, "-hidden"),
-
- ?line receive {nodeup, N1} -> ok end,
- ?line receive {nodeup, N2} -> ok end,
- ?line receive {nodeup, N3} -> ok end,
-
- ?line receive {nodeup, N1, []} -> ok end,
- ?line receive {nodeup, N2, []} -> ok end,
- ?line receive {nodeup, N3, []} -> ok end,
-
- ?line stop_node(N1),
- ?line stop_node(N4),
- ?line true = net_kernel:disconnect(N2),
- ?line TickTime = net_kernel:get_net_ticktime(),
- ?line SleepTime = TickTime + (TickTime div 4),
- ?line spawn(N3, fun () ->
- block_emu(SleepTime*1000),
- halt()
- end),
-
- ?line receive {nodedown, N1} -> ok end,
- ?line receive {nodedown, N2} -> ok end,
- ?line receive {nodedown, N3} -> ok end,
-
- ?line receive {nodedown, N1, [{nodedown_reason, R1}]} -> connection_closed = R1 end,
- ?line receive {nodedown, N2, [{nodedown_reason, R2}]} -> disconnect = R2 end,
- ?line receive {nodedown, N3, [{nodedown_reason, R3}]} -> net_tick_timeout = R3 end,
-
- ?line ok = net_kernel:monitor_nodes(false, [nodedown_reason]),
-
- ?line {ok, N5} = start_node(NN5),
- ?line stop_node(N5),
-
- ?line receive {nodeup, N5} -> ok end,
- ?line receive {nodedown, N5} -> ok end,
- ?line print_my_messages(),
- ?line ok = check_no_nodedown_nodeup(1000),
- ?line ok = net_kernel:monitor_nodes(false),
- ?line MonNodeState = monitor_node_state(),
- ?line ok.
-
-
-monitor_nodes_complex_nodedown_reason(doc) -> [];
-monitor_nodes_complex_nodedown_reason(suite) -> [];
+ MonNodeState = monitor_node_state(),
+ ok = net_kernel:monitor_nodes(true),
+ ok = net_kernel:monitor_nodes(true, [nodedown_reason]),
+
+ Names = get_numbered_nodenames(5, node),
+ [NN1, NN2, NN3, NN4, NN5] = Names,
+
+ {ok, N1} = start_node(NN1),
+ {ok, N2} = start_node(NN2),
+ {ok, N3} = start_node(NN3),
+ {ok, N4} = start_node(NN4, "-hidden"),
+
+ receive {nodeup, N1} -> ok end,
+ receive {nodeup, N2} -> ok end,
+ receive {nodeup, N3} -> ok end,
+
+ receive {nodeup, N1, []} -> ok end,
+ receive {nodeup, N2, []} -> ok end,
+ receive {nodeup, N3, []} -> ok end,
+
+ stop_node(N1),
+ stop_node(N4),
+ true = net_kernel:disconnect(N2),
+ TickTime = net_kernel:get_net_ticktime(),
+ SleepTime = TickTime + (TickTime div 2),
+ spawn(N3, fun () ->
+ block_emu(SleepTime*1000),
+ halt()
+ end),
+
+ receive {nodedown, N1} -> ok end,
+ receive {nodedown, N2} -> ok end,
+ receive {nodedown, N3} -> ok end,
+
+ receive {nodedown, N1, [{nodedown_reason, R1}]} -> connection_closed = R1 end,
+ receive {nodedown, N2, [{nodedown_reason, R2}]} -> disconnect = R2 end,
+ receive {nodedown, N3, [{nodedown_reason, R3}]} -> net_tick_timeout = R3 end,
+
+ ok = net_kernel:monitor_nodes(false, [nodedown_reason]),
+
+ {ok, N5} = start_node(NN5),
+ stop_node(N5),
+
+ receive {nodeup, N5} -> ok end,
+ receive {nodedown, N5} -> ok end,
+ print_my_messages(),
+ ok = check_no_nodedown_nodeup(1000),
+ ok = net_kernel:monitor_nodes(false),
+ MonNodeState = monitor_node_state(),
+ ok.
+
+
monitor_nodes_complex_nodedown_reason(Config) when is_list(Config) ->
- ?line MonNodeState = monitor_node_state(),
- ?line Me = self(),
- ?line ok = net_kernel:monitor_nodes(true, [nodedown_reason]),
- ?line [Name] = get_nodenames(1, monitor_nodes_complex_nodedown_reason),
- ?line {ok, Node} = start_node(Name, ""),
- ?line Pid = spawn(Node,
- fun() ->
- Me ! {stuff,
- self(),
- [make_ref(),
- {processes(), erlang:ports()}]}
- end),
- ?line receive {nodeup, Node, []} -> ok end,
- ?line {ok, NodeInfo} = net_kernel:node_info(Node),
- ?line {value,{owner, Owner}} = lists:keysearch(owner, 1, NodeInfo),
- ?line ComplexTerm = receive {stuff, Pid, _} = Msg ->
- {Msg, term_to_binary(Msg)}
- end,
- ?line exit(Owner, ComplexTerm),
- ?line receive
- {nodedown, Node, [{nodedown_reason, NodeDownReason}]} ->
- ?line ok
- end,
+ MonNodeState = monitor_node_state(),
+ Me = self(),
+ ok = net_kernel:monitor_nodes(true, [nodedown_reason]),
+ [Name] = get_nodenames(1, monitor_nodes_complex_nodedown_reason),
+ {ok, Node} = start_node(Name, ""),
+ Pid = spawn(Node,
+ fun() ->
+ Me ! {stuff,
+ self(),
+ [make_ref(),
+ {processes(), erlang:ports()}]}
+ end),
+ receive {nodeup, Node, []} -> ok end,
+ {ok, NodeInfo} = net_kernel:node_info(Node),
+ {value,{owner, Owner}} = lists:keysearch(owner, 1, NodeInfo),
+ ComplexTerm = receive {stuff, Pid, _} = Msg ->
+ {Msg, term_to_binary(Msg)}
+ end,
+ exit(Owner, ComplexTerm),
+ receive
+ {nodedown, Node, [{nodedown_reason, NodeDownReason}]} ->
+ ok
+ end,
%% If the complex nodedown_reason messed something up garbage collections
%% are likely to dump core
- ?line garbage_collect(),
- ?line garbage_collect(),
- ?line garbage_collect(),
- ?line ComplexTerm = NodeDownReason,
- ?line ok = net_kernel:monitor_nodes(false, [nodedown_reason]),
- ?line no_msgs(),
- ?line MonNodeState = monitor_node_state(),
- ?line ok.
-
-
-
+ garbage_collect(),
+ garbage_collect(),
+ garbage_collect(),
+ ComplexTerm = NodeDownReason,
+ ok = net_kernel:monitor_nodes(false, [nodedown_reason]),
+ no_msgs(),
+ MonNodeState = monitor_node_state(),
+ ok.
+
+
+
%%
%% Testcase:
%% monitor_nodes_node_type
%%
-monitor_nodes_node_type(doc) -> [];
-monitor_nodes_node_type(suite) -> [];
monitor_nodes_node_type(Config) when is_list(Config) ->
- ?line MonNodeState = monitor_node_state(),
- ?line ok = net_kernel:monitor_nodes(true),
- ?line ok = net_kernel:monitor_nodes(true, [{node_type, all}]),
- ?line Names = get_numbered_nodenames(9, node),
-% ?line ?t:format("Names: ~p~n", [Names]),
- ?line [NN1, NN2, NN3, NN4, NN5, NN6, NN7, NN8, NN9] = Names,
-
- ?line {ok, N1} = start_node(NN1),
- ?line {ok, N2} = start_node(NN2),
- ?line {ok, N3} = start_node(NN3, "-hidden"),
- ?line {ok, N4} = start_node(NN4, "-hidden"),
-
- ?line receive {nodeup, N1} -> ok end,
- ?line receive {nodeup, N2} -> ok end,
-
- ?line receive {nodeup, N1, [{node_type, visible}]} -> ok end,
- ?line receive {nodeup, N2, [{node_type, visible}]} -> ok end,
- ?line receive {nodeup, N3, [{node_type, hidden}]} -> ok end,
- ?line receive {nodeup, N4, [{node_type, hidden}]} -> ok end,
-
- ?line stop_node(N1),
- ?line stop_node(N2),
- ?line stop_node(N3),
- ?line stop_node(N4),
-
- ?line receive {nodedown, N1} -> ok end,
- ?line receive {nodedown, N2} -> ok end,
-
- ?line receive {nodedown, N1, [{node_type, visible}]} -> ok end,
- ?line receive {nodedown, N2, [{node_type, visible}]} -> ok end,
- ?line receive {nodedown, N3, [{node_type, hidden}]} -> ok end,
- ?line receive {nodedown, N4, [{node_type, hidden}]} -> ok end,
-
- ?line ok = net_kernel:monitor_nodes(false, [{node_type, all}]),
- ?line {ok, N5} = start_node(NN5),
-
- ?line receive {nodeup, N5} -> ok end,
- ?line stop_node(N5),
- ?line receive {nodedown, N5} -> ok end,
-
- ?line ok = net_kernel:monitor_nodes(true, [{node_type, hidden}]),
- ?line {ok, N6} = start_node(NN6),
- ?line {ok, N7} = start_node(NN7, "-hidden"),
-
-
- ?line receive {nodeup, N6} -> ok end,
- ?line receive {nodeup, N7, [{node_type, hidden}]} -> ok end,
- ?line stop_node(N6),
- ?line stop_node(N7),
-
- ?line receive {nodedown, N6} -> ok end,
- ?line receive {nodedown, N7, [{node_type, hidden}]} -> ok end,
-
- ?line ok = net_kernel:monitor_nodes(true, [{node_type, visible}]),
- ?line ok = net_kernel:monitor_nodes(false, [{node_type, hidden}]),
- ?line ok = net_kernel:monitor_nodes(false),
-
- ?line {ok, N8} = start_node(NN8),
- ?line {ok, N9} = start_node(NN9, "-hidden"),
-
- ?line receive {nodeup, N8, [{node_type, visible}]} -> ok end,
- ?line stop_node(N8),
- ?line stop_node(N9),
-
- ?line receive {nodedown, N8, [{node_type, visible}]} -> ok end,
- ?line print_my_messages(),
- ?line ok = check_no_nodedown_nodeup(1000),
- ?line ok = net_kernel:monitor_nodes(false, [{node_type, visible}]),
- ?line MonNodeState = monitor_node_state(),
- ?line ok.
+ MonNodeState = monitor_node_state(),
+ ok = net_kernel:monitor_nodes(true),
+ ok = net_kernel:monitor_nodes(true, [{node_type, all}]),
+ Names = get_numbered_nodenames(9, node),
+ [NN1, NN2, NN3, NN4, NN5, NN6, NN7, NN8, NN9] = Names,
+
+ {ok, N1} = start_node(NN1),
+ {ok, N2} = start_node(NN2),
+ {ok, N3} = start_node(NN3, "-hidden"),
+ {ok, N4} = start_node(NN4, "-hidden"),
+
+ receive {nodeup, N1} -> ok end,
+ receive {nodeup, N2} -> ok end,
+
+ receive {nodeup, N1, [{node_type, visible}]} -> ok end,
+ receive {nodeup, N2, [{node_type, visible}]} -> ok end,
+ receive {nodeup, N3, [{node_type, hidden}]} -> ok end,
+ receive {nodeup, N4, [{node_type, hidden}]} -> ok end,
+
+ stop_node(N1),
+ stop_node(N2),
+ stop_node(N3),
+ stop_node(N4),
+
+ receive {nodedown, N1} -> ok end,
+ receive {nodedown, N2} -> ok end,
+
+ receive {nodedown, N1, [{node_type, visible}]} -> ok end,
+ receive {nodedown, N2, [{node_type, visible}]} -> ok end,
+ receive {nodedown, N3, [{node_type, hidden}]} -> ok end,
+ receive {nodedown, N4, [{node_type, hidden}]} -> ok end,
+
+ ok = net_kernel:monitor_nodes(false, [{node_type, all}]),
+ {ok, N5} = start_node(NN5),
+
+ receive {nodeup, N5} -> ok end,
+ stop_node(N5),
+ receive {nodedown, N5} -> ok end,
+
+ ok = net_kernel:monitor_nodes(true, [{node_type, hidden}]),
+ {ok, N6} = start_node(NN6),
+ {ok, N7} = start_node(NN7, "-hidden"),
+
+
+ receive {nodeup, N6} -> ok end,
+ receive {nodeup, N7, [{node_type, hidden}]} -> ok end,
+ stop_node(N6),
+ stop_node(N7),
+
+ receive {nodedown, N6} -> ok end,
+ receive {nodedown, N7, [{node_type, hidden}]} -> ok end,
+
+ ok = net_kernel:monitor_nodes(true, [{node_type, visible}]),
+ ok = net_kernel:monitor_nodes(false, [{node_type, hidden}]),
+ ok = net_kernel:monitor_nodes(false),
+
+ {ok, N8} = start_node(NN8),
+ {ok, N9} = start_node(NN9, "-hidden"),
+
+ receive {nodeup, N8, [{node_type, visible}]} -> ok end,
+ stop_node(N8),
+ stop_node(N9),
+
+ receive {nodedown, N8, [{node_type, visible}]} -> ok end,
+ print_my_messages(),
+ ok = check_no_nodedown_nodeup(1000),
+ ok = net_kernel:monitor_nodes(false, [{node_type, visible}]),
+ MonNodeState = monitor_node_state(),
+ ok.
%%
@@ -810,95 +1057,89 @@ monitor_nodes_node_type(Config) when is_list(Config) ->
%% monitor_nodes
%%
-monitor_nodes_misc(doc) -> [];
-monitor_nodes_misc(suite) -> [];
monitor_nodes_misc(Config) when is_list(Config) ->
- ?line MonNodeState = monitor_node_state(),
- ?line ok = net_kernel:monitor_nodes(true),
- ?line ok = net_kernel:monitor_nodes(true, [{node_type, all}, nodedown_reason]),
- ?line ok = net_kernel:monitor_nodes(true, [nodedown_reason, {node_type, all}]),
- ?line Names = get_numbered_nodenames(3, node),
-% ?line ?t:format("Names: ~p~n", [Names]),
- ?line [NN1, NN2, NN3] = Names,
-
- ?line {ok, N1} = start_node(NN1),
- ?line {ok, N2} = start_node(NN2, "-hidden"),
-
- ?line receive {nodeup, N1} -> ok end,
-
- ?line receive {nodeup, N1, [{node_type, visible}]} -> ok end,
- ?line receive {nodeup, N1, [{node_type, visible}]} -> ok end,
- ?line receive {nodeup, N2, [{node_type, hidden}]} -> ok end,
- ?line receive {nodeup, N2, [{node_type, hidden}]} -> ok end,
-
- ?line stop_node(N1),
- ?line stop_node(N2),
-
- ?line VisbleDownInfo = lists:sort([{node_type, visible},
- {nodedown_reason, connection_closed}]),
- ?line HiddenDownInfo = lists:sort([{node_type, hidden},
- {nodedown_reason, connection_closed}]),
-
- ?line receive {nodedown, N1} -> ok end,
-
- ?line receive {nodedown, N1, Info1A} -> VisbleDownInfo = lists:sort(Info1A) end,
- ?line receive {nodedown, N1, Info1B} -> VisbleDownInfo = lists:sort(Info1B) end,
- ?line receive {nodedown, N2, Info2A} -> HiddenDownInfo = lists:sort(Info2A) end,
- ?line receive {nodedown, N2, Info2B} -> HiddenDownInfo = lists:sort(Info2B) end,
-
- ?line ok = net_kernel:monitor_nodes(false, [{node_type, all}, nodedown_reason]),
-
- ?line {ok, N3} = start_node(NN3),
- ?line receive {nodeup, N3} -> ok end,
- ?line stop_node(N3),
- ?line receive {nodedown, N3} -> ok end,
- ?line print_my_messages(),
- ?line ok = check_no_nodedown_nodeup(1000),
- ?line ok = net_kernel:monitor_nodes(false),
- ?line MonNodeState = monitor_node_state(),
- ?line ok.
-
-
-monitor_nodes_otp_6481(doc) ->
- ["Tests that {nodeup, Node} messages are received before "
- "messages from Node and that {nodedown, Node} messages are"
- "received after messages from Node"];
-monitor_nodes_otp_6481(suite) ->
- [];
+ MonNodeState = monitor_node_state(),
+ ok = net_kernel:monitor_nodes(true),
+ ok = net_kernel:monitor_nodes(true, [{node_type, all}, nodedown_reason]),
+ ok = net_kernel:monitor_nodes(true, [nodedown_reason, {node_type, all}]),
+ Names = get_numbered_nodenames(3, node),
+ [NN1, NN2, NN3] = Names,
+
+ {ok, N1} = start_node(NN1),
+ {ok, N2} = start_node(NN2, "-hidden"),
+
+ receive {nodeup, N1} -> ok end,
+
+ receive {nodeup, N1, [{node_type, visible}]} -> ok end,
+ receive {nodeup, N1, [{node_type, visible}]} -> ok end,
+ receive {nodeup, N2, [{node_type, hidden}]} -> ok end,
+ receive {nodeup, N2, [{node_type, hidden}]} -> ok end,
+
+ stop_node(N1),
+ stop_node(N2),
+
+ VisibleDownInfo = lists:sort([{node_type, visible},
+ {nodedown_reason, connection_closed}]),
+ HiddenDownInfo = lists:sort([{node_type, hidden},
+ {nodedown_reason, connection_closed}]),
+
+ receive {nodedown, N1} -> ok end,
+
+ receive {nodedown, N1, Info1A} -> VisibleDownInfo = lists:sort(Info1A) end,
+ receive {nodedown, N1, Info1B} -> VisibleDownInfo = lists:sort(Info1B) end,
+ receive {nodedown, N2, Info2A} -> HiddenDownInfo = lists:sort(Info2A) end,
+ receive {nodedown, N2, Info2B} -> HiddenDownInfo = lists:sort(Info2B) end,
+
+ ok = net_kernel:monitor_nodes(false, [{node_type, all}, nodedown_reason]),
+
+ {ok, N3} = start_node(NN3),
+ receive {nodeup, N3} -> ok end,
+ stop_node(N3),
+ receive {nodedown, N3} -> ok end,
+ print_my_messages(),
+ ok = check_no_nodedown_nodeup(1000),
+ ok = net_kernel:monitor_nodes(false),
+ MonNodeState = monitor_node_state(),
+ ok.
+
+
+%% Tests that {nodeup, Node} messages are received before
+%% messages from Node and that {nodedown, Node} messages are
+%% received after messages from Node.
monitor_nodes_otp_6481(Config) when is_list(Config) ->
- ?line ?t:format("Testing nodedown...~n"),
- ?line monitor_nodes_otp_6481_test(Config, nodedown),
- ?line ?t:format("ok~n"),
- ?line ?t:format("Testing nodeup...~n"),
- ?line monitor_nodes_otp_6481_test(Config, nodeup),
- ?line ?t:format("ok~n"),
- ?line ok.
+ io:format("Testing nodedown...~n"),
+ monitor_nodes_otp_6481_test(Config, nodedown),
+ io:format("ok~n"),
+ io:format("Testing nodeup...~n"),
+ monitor_nodes_otp_6481_test(Config, nodeup),
+ io:format("ok~n"),
+ ok.
monitor_nodes_otp_6481_test(Config, TestType) when is_list(Config) ->
- ?line MonNodeState = monitor_node_state(),
- ?line NodeMsg = make_ref(),
- ?line Me = self(),
- ?line [Name] = get_nodenames(1, monitor_nodes_otp_6481),
- ?line case TestType of
- nodedown -> ?line ok = net_kernel:monitor_nodes(true);
- nodeup -> ?line ok
- end,
- ?line Seq = lists:seq(1,10000),
- ?line MN = spawn_link(
- fun () ->
- ?line lists:foreach(
- fun (_) ->
- ?line ok = net_kernel:monitor_nodes(true)
- end,
- Seq),
- ?line Me ! {mon_set, self()},
- ?line receive after infinity -> ok end
- end),
- ?line receive {mon_set, MN} -> ok end,
- ?line case TestType of
- nodedown -> ?line ok;
- nodeup -> ?line ok = net_kernel:monitor_nodes(true)
- end,
+ MonNodeState = monitor_node_state(),
+ NodeMsg = make_ref(),
+ Me = self(),
+ [Name] = get_nodenames(1, monitor_nodes_otp_6481),
+ case TestType of
+ nodedown -> ok = net_kernel:monitor_nodes(true);
+ nodeup -> ok
+ end,
+ Seq = lists:seq(1,10000),
+ MN = spawn_link(
+ fun () ->
+ lists:foreach(
+ fun (_) ->
+ ok = net_kernel:monitor_nodes(true)
+ end,
+ Seq),
+ Me ! {mon_set, self()},
+ receive after infinity -> ok end
+ end),
+ receive {mon_set, MN} -> ok end,
+ case TestType of
+ nodedown -> ok;
+ nodeup -> ok = net_kernel:monitor_nodes(true)
+ end,
%% Whitebox:
%% nodedown test: Since this process was the first one monitoring
@@ -909,170 +1150,162 @@ monitor_nodes_otp_6481_test(Config, TestType) when is_list(Config) ->
%% on nodeup
%% Verify the monitor_nodes order expected
- ?line TestMonNodeState = monitor_node_state(),
- %?line ?t:format("~p~n", [TestMonNodeState]),
- ?line TestMonNodeState =
- MonNodeState
- ++ case TestType of
- nodedown -> [{self(), []}];
- nodeup -> []
- end
+ TestMonNodeState = monitor_node_state(),
+ %% io:format("~p~n", [TestMonNodeState]),
+ TestMonNodeState =
+ case TestType of
+ nodedown -> [];
+ nodeup -> [{self(), []}]
+ end
++ lists:map(fun (_) -> {MN, []} end, Seq)
++ case TestType of
- nodedown -> [];
- nodeup -> [{self(), []}]
- end,
-
-
- ?line {ok, Node} = start_node(Name, "", this),
- ?line receive {nodeup, Node} -> ok end,
-
- ?line RemotePid = spawn(Node,
- fun () ->
- receive after 1500 -> ok end,
- % infinit loop of msgs
- % we want an endless stream of messages and the kill
- % the node mercilessly.
- % We then want to ensure that the nodedown message arrives
- % last ... without garbage after it.
- _ = spawn(fun() -> node_loop_send(Me, NodeMsg, 1) end),
- receive {Me, kill_it} -> ok end,
- halt()
- end),
+ nodedown -> [{self(), []}];
+ nodeup -> []
+ end
+ ++ MonNodeState,
+
+ {ok, Node} = start_node(Name, "", this),
+ receive {nodeup, Node} -> ok end,
+
+ RemotePid = spawn(Node,
+ fun () ->
+ receive after 1500 -> ok end,
+ %% infinite loop of msgs
+ %% we want an endless stream of messages and then kill
+ %% the node mercilessly.
+ %% We then want to ensure that the nodedown message arrives
+ %% last ... without garbage after it.
+ _ = spawn(fun() -> node_loop_send(Me, NodeMsg, 1) end),
+ receive {Me, kill_it} -> ok end,
+ halt()
+ end),
- ?line net_kernel:disconnect(Node),
- ?line receive {nodedown, Node} -> ok end,
+ net_kernel:disconnect(Node),
+ receive {nodedown, Node} -> ok end,
%% Verify that '{nodeup, Node}' comes before '{NodeMsg, 1}' (the message
%% bringing up the connection).
- ?line no_msgs(500),
- ?line {nodeup, Node} = receive Msg1 -> Msg1 end,
- ?line {NodeMsg, 1} = receive Msg2 -> Msg2 end,
- % msg stream has begun, kill the node
- ?line RemotePid ! {self(), kill_it},
+ {nodeup, Node} = receive Msg1 -> Msg1 end,
+ {NodeMsg, N} = receive Msg2 -> Msg2 end,
+ %% msg stream has begun, kill the node
+ RemotePid ! {self(), kill_it},
%% Verify that '{nodedown, Node}' comes after the last '{NodeMsg, N}'
%% message.
- ?line {nodedown, Node} = flush_node_msgs(NodeMsg, 2),
- ?line no_msgs(500),
-
- ?line Mon = erlang:monitor(process, MN),
- ?line unlink(MN),
- ?line exit(MN, bang),
- ?line receive {'DOWN', Mon, process, MN, bang} -> ok end,
- ?line ok = net_kernel:monitor_nodes(false),
- ?line MonNodeState = monitor_node_state(),
- ?line ok.
+ {nodedown, Node} = flush_node_msgs(NodeMsg, N+1),
+ no_msgs(500),
+
+ Mon = erlang:monitor(process, MN),
+ unlink(MN),
+ exit(MN, bang),
+ receive {'DOWN', Mon, process, MN, bang} -> ok end,
+ ok = net_kernel:monitor_nodes(false),
+ MonNodeState = monitor_node_state(),
+ ok.
flush_node_msgs(NodeMsg, No) ->
case receive Msg -> Msg end of
- {NodeMsg, No} -> flush_node_msgs(NodeMsg, No+1);
- OtherMsg -> OtherMsg
+ {NodeMsg, N} when N >= No ->
+ flush_node_msgs(NodeMsg, N+1);
+ OtherMsg ->
+ OtherMsg
end.
node_loop_send(Pid, Msg, No) ->
Pid ! {Msg, No},
node_loop_send(Pid, Msg, No + 1).
-monitor_nodes_errors(doc) ->
- [];
-monitor_nodes_errors(suite) ->
- [];
monitor_nodes_errors(Config) when is_list(Config) ->
- ?line MonNodeState = monitor_node_state(),
- ?line error = net_kernel:monitor_nodes(asdf),
- ?line {error,
- {unknown_options,
- [gurka]}} = net_kernel:monitor_nodes(true,
- [gurka]),
- ?line {error,
- {options_not_a_list,
- gurka}} = net_kernel:monitor_nodes(true,
- gurka),
- ?line {error,
- {option_value_mismatch,
- [{node_type,visible},
- {node_type,hidden}]}}
+ MonNodeState = monitor_node_state(),
+ error = net_kernel:monitor_nodes(asdf),
+ {error,
+ {unknown_options,
+ [gurka]}} = net_kernel:monitor_nodes(true,
+ [gurka]),
+ {error,
+ {options_not_a_list,
+ gurka}} = net_kernel:monitor_nodes(true,
+ gurka),
+ {error,
+ {option_value_mismatch,
+ [{node_type,visible},
+ {node_type,hidden}]}}
= net_kernel:monitor_nodes(true,
[{node_type,hidden},
{node_type,visible}]),
- ?line {error,
- {option_value_mismatch,
- [{node_type,visible},
- {node_type,all}]}}
+ {error,
+ {option_value_mismatch,
+ [{node_type,visible},
+ {node_type,all}]}}
= net_kernel:monitor_nodes(true,
[{node_type,all},
{node_type,visible}]),
- ?line {error,
- {bad_option_value,
- {node_type,
- blaha}}}
+ {error,
+ {bad_option_value,
+ {node_type,
+ blaha}}}
= net_kernel:monitor_nodes(true, [{node_type, blaha}]),
- ?line MonNodeState = monitor_node_state(),
- ?line ok.
+ MonNodeState = monitor_node_state(),
+ ok.
-monitor_nodes_combinations(doc) ->
- [];
-monitor_nodes_combinations(suite) ->
- [];
monitor_nodes_combinations(Config) when is_list(Config) ->
- ?line MonNodeState = monitor_node_state(),
- ?line monitor_nodes_all_comb(true),
- ?line [VisibleName, HiddenName] = get_nodenames(2,
- monitor_nodes_combinations),
- ?line {ok, Visible} = start_node(VisibleName, ""),
- ?line receive_all_comb_nodeup_msgs(visible, Visible),
- ?line no_msgs(),
- ?line stop_node(Visible),
- ?line receive_all_comb_nodedown_msgs(visible, Visible, connection_closed),
- ?line no_msgs(),
- ?line {ok, Hidden} = start_node(HiddenName, "-hidden"),
- ?line receive_all_comb_nodeup_msgs(hidden, Hidden),
- ?line no_msgs(),
- ?line stop_node(Hidden),
- ?line receive_all_comb_nodedown_msgs(hidden, Hidden, connection_closed),
- ?line no_msgs(),
- ?line monitor_nodes_all_comb(false),
- ?line MonNodeState = monitor_node_state(),
- ?line no_msgs(),
- ?line ok.
+ MonNodeState = monitor_node_state(),
+ monitor_nodes_all_comb(true),
+ [VisibleName, HiddenName] = get_nodenames(2,
+ monitor_nodes_combinations),
+ {ok, Visible} = start_node(VisibleName, ""),
+ receive_all_comb_nodeup_msgs(visible, Visible),
+ no_msgs(),
+ stop_node(Visible),
+ receive_all_comb_nodedown_msgs(visible, Visible, connection_closed),
+ no_msgs(),
+ {ok, Hidden} = start_node(HiddenName, "-hidden"),
+ receive_all_comb_nodeup_msgs(hidden, Hidden),
+ no_msgs(),
+ stop_node(Hidden),
+ receive_all_comb_nodedown_msgs(hidden, Hidden, connection_closed),
+ no_msgs(),
+ monitor_nodes_all_comb(false),
+ MonNodeState = monitor_node_state(),
+ no_msgs(),
+ ok.
monitor_nodes_all_comb(Flag) ->
- ?line ok = net_kernel:monitor_nodes(Flag),
- ?line ok = net_kernel:monitor_nodes(Flag,
- [nodedown_reason]),
- ?line ok = net_kernel:monitor_nodes(Flag,
- [{node_type, hidden}]),
- ?line ok = net_kernel:monitor_nodes(Flag,
- [{node_type, visible}]),
- ?line ok = net_kernel:monitor_nodes(Flag,
- [{node_type, all}]),
- ?line ok = net_kernel:monitor_nodes(Flag,
- [nodedown_reason,
- {node_type, hidden}]),
- ?line ok = net_kernel:monitor_nodes(Flag,
- [nodedown_reason,
- {node_type, visible}]),
- ?line ok = net_kernel:monitor_nodes(Flag,
- [nodedown_reason,
- {node_type, all}]),
+ ok = net_kernel:monitor_nodes(Flag),
+ ok = net_kernel:monitor_nodes(Flag,
+ [nodedown_reason]),
+ ok = net_kernel:monitor_nodes(Flag,
+ [{node_type, hidden}]),
+ ok = net_kernel:monitor_nodes(Flag,
+ [{node_type, visible}]),
+ ok = net_kernel:monitor_nodes(Flag,
+ [{node_type, all}]),
+ ok = net_kernel:monitor_nodes(Flag,
+ [nodedown_reason,
+ {node_type, hidden}]),
+ ok = net_kernel:monitor_nodes(Flag,
+ [nodedown_reason,
+ {node_type, visible}]),
+ ok = net_kernel:monitor_nodes(Flag,
+ [nodedown_reason,
+ {node_type, all}]),
%% There currently are 8 different combinations
- ?line 8.
+ 8.
receive_all_comb_nodeup_msgs(visible, Node) ->
- ?t:format("Receive nodeup visible...~n"),
+ io:format("Receive nodeup visible...~n"),
Exp = [{nodeup, Node},
{nodeup, Node, []}]
++ mk_exp_mn_all_comb_nodeup_msgs_common(visible, Node),
receive_mn_msgs(Exp),
- ?t:format("ok~n"),
+ io:format("ok~n"),
ok;
receive_all_comb_nodeup_msgs(hidden, Node) ->
- ?t:format("Receive nodeup hidden...~n"),
+ io:format("Receive nodeup hidden...~n"),
Exp = mk_exp_mn_all_comb_nodeup_msgs_common(hidden, Node),
receive_mn_msgs(Exp),
- ?t:format("ok~n"),
+ io:format("ok~n"),
ok.
mk_exp_mn_all_comb_nodeup_msgs_common(Type, Node) ->
@@ -1083,20 +1316,20 @@ mk_exp_mn_all_comb_nodeup_msgs_common(Type, Node) ->
{nodeup, Node, InfoNt}].
receive_all_comb_nodedown_msgs(visible, Node, Reason) ->
- ?t:format("Receive nodedown visible...~n"),
+ io:format("Receive nodedown visible...~n"),
Exp = [{nodedown, Node},
{nodedown, Node, [{nodedown_reason, Reason}]}]
++ mk_exp_mn_all_comb_nodedown_msgs_common(visible,
Node,
Reason),
receive_mn_msgs(Exp),
- ?t:format("ok~n"),
+ io:format("ok~n"),
ok;
receive_all_comb_nodedown_msgs(hidden, Node, Reason) ->
- ?t:format("Receive nodedown hidden...~n"),
+ io:format("Receive nodedown hidden...~n"),
Exp = mk_exp_mn_all_comb_nodedown_msgs_common(hidden, Node, Reason),
receive_mn_msgs(Exp),
- ?t:format("ok~n"),
+ io:format("ok~n"),
ok.
mk_exp_mn_all_comb_nodedown_msgs_common(Type, Node, Reason) ->
@@ -1110,81 +1343,73 @@ mk_exp_mn_all_comb_nodedown_msgs_common(Type, Node, Reason) ->
receive_mn_msgs([]) ->
ok;
receive_mn_msgs(Msgs) ->
- ?t:format("Expecting msgs: ~p~n", [Msgs]),
+ io:format("Expecting msgs: ~p~n", [Msgs]),
receive
{_Dir, _Node} = Msg ->
- ?t:format("received ~p~n", [Msg]),
+ io:format("received ~p~n", [Msg]),
case lists:member(Msg, Msgs) of
true -> receive_mn_msgs(lists:delete(Msg, Msgs));
- false -> ?t:fail({unexpected_message, Msg,
+ false -> ct:fail({unexpected_message, Msg,
expected_messages, Msgs})
end;
{Dir, Node, Info} ->
Msg = {Dir, Node, lists:sort(Info)},
- ?t:format("received ~p~n", [Msg]),
+ io:format("received ~p~n", [Msg]),
case lists:member(Msg, Msgs) of
true -> receive_mn_msgs(lists:delete(Msg, Msgs));
- false -> ?t:fail({unexpected_message, Msg,
+ false -> ct:fail({unexpected_message, Msg,
expected_messages, Msgs})
end;
Msg ->
- ?t:format("received ~p~n", [Msg]),
- ?t:fail({unexpected_message, Msg,
+ io:format("received ~p~n", [Msg]),
+ ct:fail({unexpected_message, Msg,
expected_messages, Msgs})
end.
-monitor_nodes_cleanup(doc) ->
- [];
-monitor_nodes_cleanup(suite) ->
- [];
monitor_nodes_cleanup(Config) when is_list(Config) ->
- ?line MonNodeState = monitor_node_state(),
- ?line Me = self(),
- ?line No = monitor_nodes_all_comb(true),
- ?line Inf = spawn(fun () ->
- monitor_nodes_all_comb(true),
- Me ! {mons_set, self()},
- receive after infinity -> ok end
- end),
- ?line TO = spawn(fun () ->
- monitor_nodes_all_comb(true),
- Me ! {mons_set, self()},
- receive after 500 -> ok end
- end),
- ?line receive {mons_set, Inf} -> ok end,
- ?line receive {mons_set, TO} -> ok end,
- ?line MNLen = length(MonNodeState) + No*3,
- ?line MNLen = length(monitor_node_state()),
- ?line MonInf = erlang:monitor(process, Inf),
- ?line MonTO = erlang:monitor(process, TO),
- ?line exit(Inf, bang),
- ?line No = monitor_nodes_all_comb(false),
- ?line receive {'DOWN', MonInf, process, Inf, bang} -> ok end,
- ?line receive {'DOWN', MonTO, process, TO, normal} -> ok end,
- ?line MonNodeState = monitor_node_state(),
- ?line no_msgs(),
- ?line ok.
-
-monitor_nodes_many(doc) ->
- [];
-monitor_nodes_many(suite) ->
- [];
+ MonNodeState = monitor_node_state(),
+ Me = self(),
+ No = monitor_nodes_all_comb(true),
+ Inf = spawn(fun () ->
+ monitor_nodes_all_comb(true),
+ Me ! {mons_set, self()},
+ receive after infinity -> ok end
+ end),
+ TO = spawn(fun () ->
+ monitor_nodes_all_comb(true),
+ Me ! {mons_set, self()},
+ receive after 500 -> ok end
+ end),
+ receive {mons_set, Inf} -> ok end,
+ receive {mons_set, TO} -> ok end,
+ MNLen = length(MonNodeState) + No*3,
+ MNLen = length(monitor_node_state()),
+ MonInf = erlang:monitor(process, Inf),
+ MonTO = erlang:monitor(process, TO),
+ exit(Inf, bang),
+ No = monitor_nodes_all_comb(false),
+ receive {'DOWN', MonInf, process, Inf, bang} -> ok end,
+ receive {'DOWN', MonTO, process, TO, normal} -> ok end,
+ MonNodeState = monitor_node_state(),
+ no_msgs(),
+ ok.
+
monitor_nodes_many(Config) when is_list(Config) ->
- ?line MonNodeState = monitor_node_state(),
- ?line [Name] = get_nodenames(1, monitor_nodes_many),
+ MonNodeState = monitor_node_state(),
+ [Name] = get_nodenames(1, monitor_nodes_many),
%% We want to perform more than 2^16 net_kernel:monitor_nodes
%% since this will wrap an internal counter
- ?line No = (1 bsl 16) + 17,
- ?line repeat(fun () -> ok = net_kernel:monitor_nodes(true) end, No),
- ?line No = length(monitor_node_state()) - length(MonNodeState),
- ?line {ok, Node} = start_node(Name),
- ?line repeat(fun () -> receive {nodeup, Node} -> ok end end, No),
- ?line stop_node(Node),
- ?line repeat(fun () -> receive {nodedown, Node} -> ok end end, No),
- ?line ok = net_kernel:monitor_nodes(false),
- ?line no_msgs(10),
- ?line MonNodeState = monitor_node_state(),
- ?line ok.
+ No = (1 bsl 16) + 17,
+ repeat(fun () -> ok = net_kernel:monitor_nodes(true) end, No),
+ No = length(monitor_node_state()) - length(MonNodeState),
+ {ok, Node} = start_node(Name),
+ repeat(fun () -> receive {nodeup, Node} -> ok end end, No),
+ stop_node(Node),
+ repeat(fun () -> receive {nodedown, Node} -> ok end end, No),
+ ok = net_kernel:monitor_nodes(false),
+ no_msgs(10),
+ MonNodeState = monitor_node_state(),
+ ok.
%% Misc. functions
@@ -1196,59 +1421,45 @@ monitor_node_state() ->
check_no_nodedown_nodeup(TimeOut) ->
- ?line receive
- {nodeup, _, _} = Msg -> ?line ?t:fail({unexpected_nodeup, Msg});
- {nodeup, _} = Msg -> ?line ?t:fail({unexpected_nodeup, Msg});
- {nodedown, _, _} = Msg -> ?line ?t:fail({unexpected_nodedown, Msg});
- {nodedown, _} = Msg -> ?line ?t:fail({unexpected_nodedown, Msg})
- after TimeOut ->
- ok
- end.
+ receive
+ {nodeup, _, _} = Msg -> ct:fail({unexpected_nodeup, Msg});
+ {nodeup, _} = Msg -> ct:fail({unexpected_nodeup, Msg});
+ {nodedown, _, _} = Msg -> ct:fail({unexpected_nodedown, Msg});
+ {nodedown, _} = Msg -> ct:fail({unexpected_nodedown, Msg})
+ after TimeOut ->
+ ok
+ end.
print_my_messages() ->
- ?line {messages, Messages} = process_info(self(), messages),
- ?line ?t:format("Messages: ~p~n", [Messages]),
- ?line ok.
+ {messages, Messages} = process_info(self(), messages),
+ io:format("Messages: ~p~n", [Messages]),
+ ok.
sleep(T) -> receive after T * 1000 -> ok end.
start_node(Name, Param, this) ->
NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
- ?t:start_node(Name, peer, [{args, NewParam}, {erl, [this]}]);
+ test_server:start_node(Name, peer, [{args, NewParam}, {erl, [this]}]);
start_node(Name, Param, "this") ->
NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
- ?t:start_node(Name, peer, [{args, NewParam}, {erl, [this]}]);
+ test_server:start_node(Name, peer, [{args, NewParam}, {erl, [this]}]);
start_node(Name, Param, Rel) when is_atom(Rel) ->
NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
- ?t:start_node(Name, peer, [{args, NewParam}, {erl, [{release, atom_to_list(Rel)}]}]);
+ test_server:start_node(Name, peer, [{args, NewParam}, {erl, [{release, atom_to_list(Rel)}]}]);
start_node(Name, Param, Rel) when is_list(Rel) ->
NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
- ?t:start_node(Name, peer, [{args, NewParam}, {erl, [{release, Rel}]}]).
+ test_server:start_node(Name, peer, [{args, NewParam}, {erl, [{release, Rel}]}]).
start_node(Name, Param) ->
NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
- ?t:start_node(Name, slave, [{args, NewParam}]).
-% M = list_to_atom(from($@, atom_to_list(node()))),
-% slave:start_link(M, Name, Param).
+ test_server:start_node(Name, slave, [{args, NewParam}]).
start_node(Name) ->
start_node(Name, "").
stop_node(Node) ->
- ?t:stop_node(Node).
-% erlang:monitor_node(Node, true),
-% rpc:cast(Node, init, stop, []),
-% receive
-% {nodedown, Node} ->
-% ok
-% after 10000 ->
-% test_server:fail({stop_node, Node})
-% end.
-
-% from(H, [H | T]) -> T;
-% from(H, [_ | T]) -> from(H, T);
-% from(H, []) -> [].
+ test_server:stop_node(Node).
get_nodenames(N, T) ->
get_nodenames(N, T, []).
diff --git a/lib/kernel/test/erl_distribution_wb_SUITE.erl b/lib/kernel/test/erl_distribution_wb_SUITE.erl
index c107e92fae..8256444bdc 100644
--- a/lib/kernel/test/erl_distribution_wb_SUITE.erl
+++ b/lib/kernel/test/erl_distribution_wb_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%%
-module(erl_distribution_wb_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/inet.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -30,7 +30,7 @@
%% 1)
%%
-%% Connections are now always set up symetrically with respect to
+%% Connections are now always set up symmetrically with respect to
%% publication. If connecting node doesn't send DFLAG_PUBLISHED
%% the other node wont send DFLAG_PUBLISHED. If the connecting
%% node send DFLAG_PUBLISHED but the other node doesn't send
@@ -56,11 +56,18 @@
-define(DFLAG_HIDDEN_ATOM_CACHE,16#40).
-define(DFLAG_NEW_FUN_TAGS,16#80).
-define(DFLAG_EXTENDED_PIDS_PORTS,16#100).
+-define(DFLAG_UTF8_ATOMS, 16#10000).
%% From R9 and forward extended references is compulsory
%% From R10 and forward extended pids and ports are compulsory
--define(COMPULSORY_DFLAGS, (?DFLAG_EXTENDED_REFERENCES bor ?DFLAG_EXTENDED_PIDS_PORTS)).
+%% From R20 and forward UTF8 atoms are compulsory
+%% From R21 and forward NEW_FUN_TAGS is compulsory (no more tuple fallback {fun, ...})
+-define(COMPULSORY_DFLAGS, (?DFLAG_EXTENDED_REFERENCES bor
+ ?DFLAG_EXTENDED_PIDS_PORTS bor
+ ?DFLAG_UTF8_ATOMS bor
+ ?DFLAG_NEW_FUN_TAGS)).
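%% Numerically, and assuming the usual value 16#4 for DFLAG_EXTENDED_REFERENCES
%% defined earlier in this file, the compulsory flag word above works out to
%%
%%   16#4 bor 16#100 bor 16#10000 bor 16#80 = 16#10184
%%
%% i.e. four distinct bits; a peer that does not advertise all of them is
%% rejected, which is what the missing_compulsory_dflags case listed in all/0
%% exercises.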
+-define(PASS_THROUGH, $p).
-define(shutdown(X), exit(X)).
-define(int16(X), [((X) bsr 8) band 16#ff, (X) band 16#ff]).
@@ -71,7 +78,7 @@
-define(i16(X1,X0),
(?u16(X1,X0) -
- (if (X1) > 127 -> 16#10000; true -> 0 end))).
+ (if (X1) > 127 -> 16#10000; true -> 0 end))).
-define(u16(X1,X0),
(((X1) bsl 8) bor (X0))).
@@ -79,7 +86,9 @@
-define(u32(X3,X2,X1,X0),
(((X3) bsl 24) bor ((X2) bsl 16) bor ((X1) bsl 8) bor (X0))).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[whitebox, switch_options, missing_compulsory_dflags].
@@ -101,39 +110,33 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
- Dog=?t:timetrap(?t:minutes(1)),
- [{watchdog, Dog}|Config].
-
-end_per_testcase(_Func, Config) ->
- Dog=?config(watchdog, Config),
- ?t:timetrap_cancel(Dog).
-
-switch_options(doc) ->
- ["Tests switching of options for the tcp port, as this is done"
- " when the distribution port is to be shortcut into the emulator."
- " Maybe this should be in the inet test suite, but only the distribution"
- " does such horrible things..."];
+ Config.
+
+end_per_testcase(_Func, _Config) ->
+ ok.
+
+%% Tests switching of options for the tcp port, as this is done
+%% when the distribution port is to be shortcut into the emulator.
+%% Maybe this should be in the inet test suite, but only the distribution
+%% does such horrible things...
switch_options(Config) when is_list(Config) ->
ok = test_switch_active(),
ok = test_switch_active_partial() ,
ok = test_switch_active_and_packet(),
ok.
-
-whitebox(doc) ->
- ["Whitebox testing of distribution handshakes. Tests both BC with R5 and "
- "the md5 version. Note that after R6B, this should be revised to "
- "remove BC code."];
+
+%% Whitebox testing of distribution handshakes.
whitebox(Config) when is_list(Config) ->
- ?line {ok, Node} = start_node(?MODULE,""),
- ?line Cookie = erlang:get_cookie(),
- ?line {_,Host} = split(node()),
- ?line ok = pending_up_md5(Node, join(ccc,Host), Cookie),
- ?line ok = simultaneous_md5(Node, join('A',Host), Cookie),
- ?line ok = simultaneous_md5(Node, join(zzzzzzzzzzzzzz,Host), Cookie),
- ?line stop_node(Node),
+ {ok, Node} = start_node(?MODULE,""),
+ Cookie = erlang:get_cookie(),
+ {_,Host} = split(node()),
+ ok = pending_up_md5(Node, join(ccc,Host), Cookie),
+ ok = simultaneous_md5(Node, join('A',Host), Cookie),
+ ok = simultaneous_md5(Node, join(zzzzzzzzzzzzzz,Host), Cookie),
+ stop_node(Node),
ok.
-
+
%%
%% The actual tests
%%
@@ -143,56 +146,56 @@ whitebox(Config) when is_list(Config) ->
%%
test_switch_active() ->
- ?line {Client, Server} = socket_pair(0, 4),
- ?line ok = write_packets_32(Client, 1, 5),
+ {Client, Server} = socket_pair(0, 4),
+ ok = write_packets_32(Client, 1, 5),
receive after 2000 -> ok end,
- ?line ok = read_packets(Server, 1, 1),
+ ok = read_packets(Server, 1, 1),
receive after 2000 -> ok end,
- ?line ok = read_packets(Server, 2, 2),
- ?line inet:setopts(Server, [{active, true}]),
- ?line ok = receive_packets(Server, 3, 5),
+ ok = read_packets(Server, 2, 2),
+ inet:setopts(Server, [{active, true}]),
+ ok = receive_packets(Server, 3, 5),
close_pair({Client, Server}),
ok.
-
+
test_switch_active_partial() ->
- ?line {Client, Server} = socket_pair(0, 4),
- ?line ok = write_packets_32(Client, 1, 2),
- ?line ok = gen_tcp:send(Client,[?int32(4), [0,0,0]]),
+ {Client, Server} = socket_pair(0, 4),
+ ok = write_packets_32(Client, 1, 2),
+ ok = gen_tcp:send(Client,[?int32(4), [0,0,0]]),
receive after 2000 -> ok end,
- ?line ok = read_packets(Server, 1, 1),
+ ok = read_packets(Server, 1, 1),
receive after 2000 -> ok end,
- ?line ok = read_packets(Server, 2, 2),
- ?line inet:setopts(Server, [{active, true}]),
- ?line ok = gen_tcp:send(Client,[3]),
- ?line ok = write_packets_32(Client, 4, 5),
- ?line ok = receive_packets(Server, 3, 5),
+ ok = read_packets(Server, 2, 2),
+ inet:setopts(Server, [{active, true}]),
+ ok = gen_tcp:send(Client,[3]),
+ ok = write_packets_32(Client, 4, 5),
+ ok = receive_packets(Server, 3, 5),
close_pair({Client, Server}),
ok.
-
+
do_test_switch_active_and_packet(SendBefore, SendAfter) ->
- ?line {Client, Server} = socket_pair(0, 2),
- ?line ok = write_packets_16(Client, 1, 2),
- ?line ok = gen_tcp:send(Client,SendBefore),
+ {Client, Server} = socket_pair(0, 2),
+ ok = write_packets_16(Client, 1, 2),
+ ok = gen_tcp:send(Client,SendBefore),
receive after 2000 -> ok end,
- ?line ok = read_packets(Server, 1, 1),
+ ok = read_packets(Server, 1, 1),
receive after 2000 -> ok end,
- ?line ok = read_packets(Server, 2, 2),
- ?line inet:setopts(Server, [{packet,4}, {active, true}]),
- ?line ok = gen_tcp:send(Client,SendAfter),
- ?line ok = write_packets_32(Client, 4, 5),
- ?line ok = receive_packets(Server, 3, 5),
+ ok = read_packets(Server, 2, 2),
+ inet:setopts(Server, [{packet,4}, {active, true}]),
+ ok = gen_tcp:send(Client,SendAfter),
+ ok = write_packets_32(Client, 4, 5),
+ ok = receive_packets(Server, 3, 5),
close_pair({Client, Server}),
ok.
test_switch_active_and_packet() ->
- ?line ok = do_test_switch_active_and_packet([0],[0,0,4,0,0,0,3]),
- ?line ok = do_test_switch_active_and_packet([0,0],[0,4,0,0,0,3]),
- ?line ok = do_test_switch_active_and_packet([0,0,0],[4,0,0,0,3]),
- ?line ok = do_test_switch_active_and_packet([0,0,0,4],[0,0,0,3]),
- ?line ok = do_test_switch_active_and_packet([0,0,0,4,0],[0,0,3]),
- ?line ok = do_test_switch_active_and_packet([0,0,0,4,0,0],[0,3]),
- ?line ok = do_test_switch_active_and_packet([0,0,0,4,0,0,0],[3]),
- ?line ok = do_test_switch_active_and_packet([0,0,0,4,0,0,0,3],[]),
+ ok = do_test_switch_active_and_packet([0],[0,0,4,0,0,0,3]),
+ ok = do_test_switch_active_and_packet([0,0],[0,4,0,0,0,3]),
+ ok = do_test_switch_active_and_packet([0,0,0],[4,0,0,0,3]),
+ ok = do_test_switch_active_and_packet([0,0,0,4],[0,0,0,3]),
+ ok = do_test_switch_active_and_packet([0,0,0,4,0],[0,0,3]),
+ ok = do_test_switch_active_and_packet([0,0,0,4,0,0],[0,3]),
+ ok = do_test_switch_active_and_packet([0,0,0,4,0,0,0],[3]),
+ ok = do_test_switch_active_and_packet([0,0,0,4,0,0,0,3],[]),
ok.
@@ -200,181 +203,180 @@ test_switch_active_and_packet() ->
%% Handshake tests
%%
pending_up_md5(Node,OurName,Cookie) ->
- ?line {NA,NB} = split(Node),
- ?line {port,PortNo,_} = erl_epmd:port_please(NA,NB),
- ?line {ok, SocketA} = gen_tcp:connect(atom_to_list(NB),PortNo,
- [{active,false},
- {packet,2}]),
- ?line send_name(SocketA,OurName,5),
- ?line ok = recv_status(SocketA),
- ?line {hidden,Node,5,HisChallengeA} = recv_challenge(SocketA), % See 1)
- ?line OurChallengeA = gen_challenge(),
- ?line OurDigestA = gen_digest(HisChallengeA, Cookie),
- ?line send_challenge_reply(SocketA, OurChallengeA, OurDigestA),
- ?line ok = recv_challenge_ack(SocketA, OurChallengeA, Cookie),
- %%%
- %%% OK, one connection is up, now lets be nasty and try another up:
- %%%
- %%% But wait for a while, the other node might not have done setnode
- %%% just yet...
- ?line receive after 1000 -> ok end,
- ?line {ok, SocketB} = gen_tcp:connect(atom_to_list(NB),PortNo,
- [{active,false},
- {packet,2}]),
- ?line send_name(SocketB,OurName,5),
- ?line alive = recv_status(SocketB),
- ?line send_status(SocketB, true),
- ?line gen_tcp:close(SocketA),
- ?line {hidden,Node,5,HisChallengeB} = recv_challenge(SocketB), % See 1)
- ?line OurChallengeB = gen_challenge(),
- ?line OurDigestB = gen_digest(HisChallengeB, Cookie),
- ?line send_challenge_reply(SocketB, OurChallengeB, OurDigestB),
- ?line ok = recv_challenge_ack(SocketB, OurChallengeB, Cookie),
- %%%
- %%% Well, are we happy?
- %%%
-
- ?line inet:setopts(SocketB, [{active, false},
- {packet, 4}]),
- ?line gen_tcp:send(SocketB,build_rex_message('',OurName)),
- ?line {Header, Message} = recv_message(SocketB),
- ?line io:format("Received header ~p, data ~p.~n",
+ {NA,NB} = split(Node),
+ {port,PortNo,_} = erl_epmd:port_please(NA,NB),
+ {ok, SocketA} = gen_tcp:connect(atom_to_list(NB),PortNo,
+ [{active,false},
+ {packet,2}]),
+ send_name(SocketA,OurName,5),
+ ok = recv_status(SocketA),
+ {hidden,Node,5,HisChallengeA} = recv_challenge(SocketA), % See 1)
+ OurChallengeA = gen_challenge(),
+ OurDigestA = gen_digest(HisChallengeA, Cookie),
+ send_challenge_reply(SocketA, OurChallengeA, OurDigestA),
+ ok = recv_challenge_ack(SocketA, OurChallengeA, Cookie),
+%%%
+%%% OK, one connection is up, now lets be nasty and try another up:
+%%%
+%%% But wait for a while, the other node might not have done setnode
+%%% just yet...
+ receive after 1000 -> ok end,
+ {ok, SocketB} = gen_tcp:connect(atom_to_list(NB),PortNo,
+ [{active,false},
+ {packet,2}]),
+ send_name(SocketB,OurName,5),
+ alive = recv_status(SocketB),
+ send_status(SocketB, true),
+ gen_tcp:close(SocketA),
+ {hidden,Node,5,HisChallengeB} = recv_challenge(SocketB), % See 1)
+ OurChallengeB = gen_challenge(),
+ OurDigestB = gen_digest(HisChallengeB, Cookie),
+ send_challenge_reply(SocketB, OurChallengeB, OurDigestB),
+ ok = recv_challenge_ack(SocketB, OurChallengeB, Cookie),
+%%%
+%%% Well, are we happy?
+%%%
+
+ inet:setopts(SocketB, [{active, false},
+ {packet, 4}]),
+ gen_tcp:send(SocketB,build_rex_message('',OurName)),
+ {Header, Message} = recv_message(SocketB),
+ io:format("Received header ~p, data ~p.~n",
[Header, Message]),
- ?line gen_tcp:close(SocketB),
+ gen_tcp:close(SocketB),
ok.
simultaneous_md5(Node, OurName, Cookie) when OurName < Node ->
- ?line pong = net_adm:ping(Node),
- ?line LSocket = case gen_tcp:listen(0, [{active, false}, {packet,2}]) of
+ pong = net_adm:ping(Node),
+ LSocket = case gen_tcp:listen(0, [{active, false}, {packet,2}]) of
{ok, Socket} ->
Socket;
Else ->
exit(Else)
end,
- ?line EpmdSocket = register(OurName, LSocket, 1, 5),
- ?line {NA, NB} = split(Node),
- ?line rpc:cast(Node, net_adm, ping, [OurName]),
- ?line receive after 1000 -> ok end,
- ?line {port, PortNo, _} = erl_epmd:port_please(NA,NB),
- ?line {ok, SocketA} = gen_tcp:connect(atom_to_list(NB),PortNo,
- [{active,false},
- {packet,2}]),
- ?line send_name(SocketA,OurName,5),
+ EpmdSocket = register(OurName, LSocket, 1, 5),
+ {NA, NB} = split(Node),
+ rpc:cast(Node, net_adm, ping, [OurName]),
+ receive after 1000 -> ok end,
+ {port, PortNo, _} = erl_epmd:port_please(NA,NB),
+ {ok, SocketA} = gen_tcp:connect(atom_to_list(NB),PortNo,
+ [{active,false},
+ {packet,2}]),
+ send_name(SocketA,OurName,5),
%% We are still not marked up on the other side, as our first message
%% is not sent.
- ?line SocketB = case gen_tcp:accept(LSocket) of
+ SocketB = case gen_tcp:accept(LSocket) of
{ok, Socket1} ->
- ?line Socket1;
+ Socket1;
Else2 ->
- ?line exit(Else2)
+ exit(Else2)
end,
- ?line nok = recv_status(SocketA),
- % Now we are expected to close A
- ?line gen_tcp:close(SocketA),
- % But still Socket B will continue
- ?line {normal,Node,5} = recv_name(SocketB), % See 1)
- ?line send_status(SocketB, ok_simultaneous),
- ?line MyChallengeB = gen_challenge(),
- ?line send_challenge(SocketB, OurName, MyChallengeB,5),
- ?line HisChallengeB = recv_challenge_reply(SocketB, MyChallengeB, Cookie),
- ?line DigestB = gen_digest(HisChallengeB,Cookie),
- ?line send_challenge_ack(SocketB, DigestB),
- ?line inet:setopts(SocketB, [{active, false},
- {packet, 4}]),
- % This should be the ping message.
- ?line {Header, Message} = recv_message(SocketB),
- ?line io:format("Received header ~p, data ~p.~n",
+ nok = recv_status(SocketA),
+ %% Now we are expected to close A
+ gen_tcp:close(SocketA),
+ %% But still Socket B will continue
+ {normal,Node,5} = recv_name(SocketB), % See 1)
+ send_status(SocketB, ok_simultaneous),
+ MyChallengeB = gen_challenge(),
+ send_challenge(SocketB, OurName, MyChallengeB,5),
+ HisChallengeB = recv_challenge_reply(SocketB, MyChallengeB, Cookie),
+ DigestB = gen_digest(HisChallengeB,Cookie),
+ send_challenge_ack(SocketB, DigestB),
+ inet:setopts(SocketB, [{active, false},
+ {packet, 4}]),
+ %% This should be the ping message.
+ {Header, Message} = recv_message(SocketB),
+ io:format("Received header ~p, data ~p.~n",
[Header, Message]),
- ?line gen_tcp:close(SocketB),
- ?line gen_tcp:close(LSocket),
- ?line gen_tcp:close(EpmdSocket),
+ gen_tcp:close(SocketB),
+ gen_tcp:close(LSocket),
+ gen_tcp:close(EpmdSocket),
ok;
-
+
simultaneous_md5(Node, OurName, Cookie) when OurName > Node ->
- ?line pong = net_adm:ping(Node),
- ?line LSocket = case gen_tcp:listen(0, [{active, false}, {packet,2}]) of
- {ok, Socket} ->
- ?line Socket;
- Else ->
- ?line exit(Else)
- end,
- ?line EpmdSocket = register(OurName, LSocket, 1, 5),
- ?line {NA, NB} = split(Node),
- ?line rpc:cast(Node, net_adm, ping, [OurName]),
- ?line receive after 1000 -> ok end,
- ?line {port, PortNo, _} = erl_epmd:port_please(NA,NB),
- ?line {ok, SocketA} = gen_tcp:connect(atom_to_list(NB),PortNo,
- [{active,false},
- {packet,2}]),
- ?line SocketB = case gen_tcp:accept(LSocket) of
+ pong = net_adm:ping(Node),
+ LSocket = case gen_tcp:listen(0, [{active, false}, {packet,2}]) of
+ {ok, Socket} ->
+ Socket;
+ Else ->
+ exit(Else)
+ end,
+ EpmdSocket = register(OurName, LSocket, 1, 5),
+ {NA, NB} = split(Node),
+ rpc:cast(Node, net_adm, ping, [OurName]),
+ receive after 1000 -> ok end,
+ {port, PortNo, _} = erl_epmd:port_please(NA,NB),
+ {ok, SocketA} = gen_tcp:connect(atom_to_list(NB),PortNo,
+ [{active,false},
+ {packet,2}]),
+ SocketB = case gen_tcp:accept(LSocket) of
{ok, Socket1} ->
- ?line Socket1;
+ Socket1;
Else2 ->
- ?line exit(Else2)
+ exit(Else2)
end,
- ?line send_name(SocketA,OurName,5),
- ?line ok_simultaneous = recv_status(SocketA),
+ send_name(SocketA,OurName,5),
+ ok_simultaneous = recv_status(SocketA),
%% Socket B should die during this
- ?line case catch begin
- ?line {normal,Node,5} = recv_name(SocketB), % See 1)
- ?line send_status(SocketB, ok_simultaneous),
- ?line MyChallengeB = gen_challenge(),
- ?line send_challenge(SocketB, OurName, MyChallengeB,
- 5),
- ?line HisChallengeB = recv_challenge_reply(
- SocketB,
- MyChallengeB,
- Cookie),
- ?line DigestB = gen_digest(HisChallengeB,Cookie),
- ?line send_challenge_ack(SocketB, DigestB),
- ?line inet:setopts(SocketB, [{active, false},
- {packet, 4}]),
- ?line {HeaderB, MessageB} = recv_message(SocketB),
- ?line io:format("Received header ~p, data ~p.~n",
- [HeaderB, MessageB])
- end of
- {'EXIT', Exitcode} ->
- ?line io:format("Expected exitsignal caught: ~p.~n",
- [Exitcode]);
- Success ->
- ?line io:format("Unexpected success: ~p~n",
- [Success]),
- ?line exit(unexpected_success)
- end,
- ?line gen_tcp:close(SocketB),
+ case catch begin
+ {normal,Node,5} = recv_name(SocketB), % See 1)
+ send_status(SocketB, ok_simultaneous),
+ MyChallengeB = gen_challenge(),
+ send_challenge(SocketB, OurName, MyChallengeB,
+ 5),
+ HisChallengeB = recv_challenge_reply(
+ SocketB,
+ MyChallengeB,
+ Cookie),
+ DigestB = gen_digest(HisChallengeB,Cookie),
+ send_challenge_ack(SocketB, DigestB),
+ inet:setopts(SocketB, [{active, false},
+ {packet, 4}]),
+ {HeaderB, MessageB} = recv_message(SocketB),
+ io:format("Received header ~p, data ~p.~n",
+ [HeaderB, MessageB])
+ end of
+ {'EXIT', Exitcode} ->
+ io:format("Expected exitsignal caught: ~p.~n",
+ [Exitcode]);
+ Success ->
+ io:format("Unexpected success: ~p~n",
+ [Success]),
+ exit(unexpected_success)
+ end,
+ gen_tcp:close(SocketB),
%% But still Socket A will continue
- ?line {hidden,Node,5,HisChallengeA} = recv_challenge(SocketA), % See 1)
- ?line OurChallengeA = gen_challenge(),
- ?line OurDigestA = gen_digest(HisChallengeA, Cookie),
- ?line send_challenge_reply(SocketA, OurChallengeA, OurDigestA),
- ?line ok = recv_challenge_ack(SocketA, OurChallengeA, Cookie),
-
- ?line inet:setopts(SocketA, [{active, false},
- {packet, 4}]),
- ?line gen_tcp:send(SocketA,build_rex_message('',OurName)),
- ?line {Header, Message} = recv_message(SocketA),
- ?line io:format("Received header ~p, data ~p.~n",
+ {hidden,Node,5,HisChallengeA} = recv_challenge(SocketA), % See 1)
+ OurChallengeA = gen_challenge(),
+ OurDigestA = gen_digest(HisChallengeA, Cookie),
+ send_challenge_reply(SocketA, OurChallengeA, OurDigestA),
+ ok = recv_challenge_ack(SocketA, OurChallengeA, Cookie),
+
+ inet:setopts(SocketA, [{active, false},
+ {packet, 4}]),
+ gen_tcp:send(SocketA,build_rex_message('',OurName)),
+ {Header, Message} = recv_message(SocketA),
+ io:format("Received header ~p, data ~p.~n",
[Header, Message]),
- ?line gen_tcp:close(SocketA),
- ?line gen_tcp:close(LSocket),
- ?line gen_tcp:close(EpmdSocket),
+ gen_tcp:close(SocketA),
+ gen_tcp:close(LSocket),
+ gen_tcp:close(EpmdSocket),
ok.
-missing_compulsory_dflags(doc) -> [];
missing_compulsory_dflags(Config) when is_list(Config) ->
- ?line [Name1, Name2] = get_nodenames(2, missing_compulsory_dflags),
- ?line {ok, Node} = start_node(Name1,""),
- ?line {NA,NB} = split(Node),
- ?line {port,PortNo,_} = erl_epmd:port_please(NA,NB),
- ?line {ok, SocketA} = gen_tcp:connect(atom_to_list(NB),PortNo,
- [{active,false},
- {packet,2}]),
- ?line BadNode = list_to_atom(atom_to_list(Name2)++"@"++atom_to_list(NB)),
- ?line send_name(SocketA,BadNode,5,0),
- ?line not_allowed = recv_status(SocketA),
- ?line gen_tcp:close(SocketA),
- ?line stop_node(Node),
- ?line ok.
+ [Name1, Name2] = get_nodenames(2, missing_compulsory_dflags),
+ {ok, Node} = start_node(Name1,""),
+ {NA,NB} = split(Node),
+ {port,PortNo,_} = erl_epmd:port_please(NA,NB),
+ {ok, SocketA} = gen_tcp:connect(atom_to_list(NB),PortNo,
+ [{active,false},
+ {packet,2}]),
+ BadNode = list_to_atom(atom_to_list(Name2)++"@"++atom_to_list(NB)),
+ send_name(SocketA,BadNode,5,0),
+ not_allowed = recv_status(SocketA),
+ gen_tcp:close(SocketA),
+ stop_node(Node),
+ ok.
%%
%% Here comes the utilities
@@ -437,7 +439,7 @@ socket_pair(ClientPack, ServerPack) ->
{ok, Server} = gen_tcp:accept(Listen),
gen_tcp:close(Listen),
{Client, Server}.
-
+
close_pair({Client, Server}) ->
gen_tcp:close(Client),
gen_tcp:close(Server),
@@ -454,7 +456,7 @@ close_pair({Client, Server}) ->
gen_challenge() ->
rand:uniform(1000000).
-
+
%% Generate a message digest from Challenge number and Cookie
gen_digest(Challenge, Cookie) when is_integer(Challenge), is_atom(Cookie) ->
C0 = erlang:md5_init(),
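%% Illustrative sketch (not part of the patch): the challenge/digest step
%% the handshake tests above drive. The digest for a challenge is the MD5
%% of the cookie text followed by the challenge written in decimal, and
%% the receiving side recomputes it to verify that the peer knows the
%% cookie. expected_digest/2 and check_reply/3 are hypothetical helpers,
%% and the digest is assumed to be handled as a 16-byte binary here.
expected_digest(Challenge, Cookie) when is_integer(Challenge), is_atom(Cookie) ->
    erlang:md5([atom_to_list(Cookie), integer_to_list(Challenge)]).

check_reply(Digest, MyChallenge, Cookie) ->
    case expected_digest(MyChallenge, Cookie) of
        Digest -> ok;
        _ -> {error, bad_challenge_reply}
    end.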
@@ -595,15 +597,15 @@ do_register_node(NodeName, TcpPort, VLow, VHigh) ->
Elen = length(Extra),
Len = 1+2+1+1+2+2+2+length(Name)+2+Elen,
gen_tcp:send(Socket, [?int16(Len), $x,
- ?int16(TcpPort),
- $M,
- 0,
- ?int16(VHigh),
- ?int16(VLow),
- ?int16(length(Name)),
- Name,
- ?int16(Elen),
- Extra]),
+ ?int16(TcpPort),
+ $M,
+ 0,
+ ?int16(VHigh),
+ ?int16(VLow),
+ ?int16(length(Name)),
+ Name,
+ ?int16(Elen),
+ Extra]),
case wait_for_reg_reply(Socket, []) of
{error, epmd_close} ->
exit(epmd_broken);
@@ -666,24 +668,25 @@ split(Atom) ->
build_rex_message(Cookie,OurName) ->
    [$p,term_to_binary({6,self(),Cookie,rex}),
term_to_binary({'$gen_cast',
- {cast,
- rpc,
- cast,
- [OurName, hello, world, []],
- self()} })].
+ {cast,
+ rpc,
+ cast,
+ [OurName, hello, world, []],
+ self()} })].
%% Receive a distribution message
recv_message(Socket) ->
case gen_tcp:recv(Socket, 0) of
+ {ok,[]} ->
+ recv_message(Socket); %% a tick, ignore
{ok,Data} ->
B0 = list_to_binary(Data),
- {_,B1} = erlang:split_binary(B0,1),
- Header = binary_to_term(B1),
- Siz = byte_size(term_to_binary(Header)),
- {_,B2} = erlang:split_binary(B1,Siz),
+ <<?PASS_THROUGH, B1/binary>> = B0,
+ {Header,Siz} = binary_to_term(B1,[used]),
+ <<_:Siz/binary,B2/binary>> = B1,
Message = case (catch binary_to_term(B2)) of
{'EXIT', _} ->
- could_not_digest_message;
+ {could_not_digest_message,B2};
Other ->
Other
end,
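%% Illustrative sketch (not part of the patch): the framing the rewritten
%% recv_message/1 above relies on. A distribution packet that carries a
%% payload starts with the pass-through byte $p, followed by two
%% consecutive terms in external format: the control message and the
%% payload. binary_to_term/2 with the 'used' option reports how many bytes
%% the first term consumed, which is how the payload is located.
%% decode_packet_example/1 is a hypothetical helper.
decode_packet_example(<<$p, Rest/binary>>) ->
    {Control, Used} = binary_to_term(Rest, [used]),
    <<_:Used/binary, PayloadBin/binary>> = Rest,
    {Control, binary_to_term(PayloadBin)}.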
@@ -698,10 +701,10 @@ join(Name,Host) ->
%% start/stop slave.
start_node(Name, Param) ->
- ?t:start_node(Name, slave, [{args, Param}]).
+ test_server:start_node(Name, slave, [{args, Param}]).
stop_node(Node) ->
- ?t:stop_node(Node).
+ test_server:stop_node(Node).
get_nodenames(N, T) ->
diff --git a/lib/kernel/test/erl_prim_loader_SUITE.erl b/lib/kernel/test/erl_prim_loader_SUITE.erl
index 0803cf428f..16a127aa3e 100644
--- a/lib/kernel/test/erl_prim_loader_SUITE.erl
+++ b/lib/kernel/test/erl_prim_loader_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -20,31 +20,36 @@
-module(erl_prim_loader_SUITE).
-include_lib("kernel/include/file.hrl").
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
+ init_per_testcase/2,end_per_testcase/2,
init_per_group/2,end_per_group/2]).
-export([get_path/1, set_path/1, get_file/1, normalize_and_backslash/1,
inet_existing/1, inet_coming_up/1, inet_disconnects/1,
multiple_slaves/1, file_requests/1,
local_archive/1, remote_archive/1,
- primary_archive/1, virtual_dir_in_archive/1]).
+ primary_archive/1, virtual_dir_in_archive/1,
+ get_modules/1]).
--export([init_per_testcase/2, end_per_testcase/2]).
+-define(PRIM_FILE, prim_file).
%%-----------------------------------------------------------------
%% Test suite for erl_prim_loader. (Most code is run during system start/stop.)
%%-----------------------------------------------------------------
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,3}}].
all() ->
[get_path, set_path, get_file,
normalize_and_backslash, inet_existing,
inet_coming_up, inet_disconnects, multiple_slaves,
file_requests, local_archive, remote_archive,
- primary_archive, virtual_dir_in_archive].
+ primary_archive, virtual_dir_in_archive,
+ get_modules].
groups() ->
[].
@@ -62,53 +67,102 @@ end_per_group(_GroupName, Config) ->
Config.
-init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
- Dog=?t:timetrap(?t:minutes(3)),
- [{watchdog, Dog}|Config].
+init_per_testcase(_Func, Config) ->
+ Config.
-end_per_testcase(_Func, Config) ->
- Dog=?config(watchdog, Config),
- ?t:timetrap_cancel(Dog).
+end_per_testcase(_Func, _Config) ->
+ ok.
-get_path(doc) -> [];
get_path(Config) when is_list(Config) ->
- ?line case erl_prim_loader:get_path() of
- {ok, Path} when is_list(Path) ->
- ok;
- _ ->
- test_server:fail(get_path)
- end,
+ case erl_prim_loader:get_path() of
+ {ok, Path} when is_list(Path) ->
+ ok;
+ _ ->
+ ct:fail(get_path)
+ end,
ok.
-set_path(doc) -> [];
set_path(Config) when is_list(Config) ->
- ?line {ok, Path} = erl_prim_loader:get_path(),
- ?line ok = erl_prim_loader:set_path(Path),
- ?line {ok, Path} = erl_prim_loader:get_path(),
+ {ok, Path} = erl_prim_loader:get_path(),
+ ok = erl_prim_loader:set_path(Path),
+ {ok, Path} = erl_prim_loader:get_path(),
NewPath = Path ++ ["dummy_dir","/dummy_dir/dummy_dir"],
- ?line ok = erl_prim_loader:set_path(NewPath),
- ?line {ok, NewPath} = erl_prim_loader:get_path(),
+ ok = erl_prim_loader:set_path(NewPath),
+ {ok, NewPath} = erl_prim_loader:get_path(),
- ?line ok = erl_prim_loader:set_path(Path), % Reset path.
- ?line {ok, Path} = erl_prim_loader:get_path(),
+ ok = erl_prim_loader:set_path(Path), % Reset path.
+ {ok, Path} = erl_prim_loader:get_path(),
- ?line {'EXIT',_} = (catch erl_prim_loader:set_path(not_a_list)),
- ?line {ok, Path} = erl_prim_loader:get_path(),
+ {'EXIT',_} = (catch erl_prim_loader:set_path(not_a_list)),
+ {ok, Path} = erl_prim_loader:get_path(),
ok.
-get_file(doc) -> [];
get_file(Config) when is_list(Config) ->
- ?line case erl_prim_loader:get_file("lists" ++ code:objfile_extension()) of
- {ok,Bin,File} when is_binary(Bin), is_list(File) ->
- ok;
- _ ->
- test_server:fail(get_valid_file)
- end,
- ?line error = erl_prim_loader:get_file("duuuuuuummmy_file"),
- ?line error = erl_prim_loader:get_file(duuuuuuummmy_file),
- ?line error = erl_prim_loader:get_file({dummy}),
+ case erl_prim_loader:get_file("lists" ++ code:objfile_extension()) of
+ {ok,Bin,File} when is_binary(Bin), is_list(File) ->
+ ok;
+ _ ->
+ ct:fail(get_valid_file)
+ end,
+ error = erl_prim_loader:get_file("duuuuuuummmy_file"),
+ error = erl_prim_loader:get_file(duuuuuuummmy_file),
+ error = erl_prim_loader:get_file({dummy}),
+ ok.
+
+get_modules(Config) ->
+ case test_server:is_cover() of
+ false -> do_get_modules(Config);
+ true -> {skip,"Cover"}
+ end.
+
+do_get_modules(Config) ->
+ PrivDir = proplists:get_value(priv_dir, Config),
+ NotADir = atom_to_list(?FUNCTION_NAME) ++ "_not_a_dir",
+ ok = file:write_file(filename:join(PrivDir, NotADir), <<>>),
+ ok = file:set_cwd(PrivDir),
+
+ MsGood = lists:sort([lists,gen_server,gb_trees,code_server]),
+ Ms = [certainly_not_existing|MsGood],
+ SuccExp = [begin
+ F = code:which(M),
+ {ok,Code} = file:read_file(F),
+ {M,{F,erlang:md5(Code)}}
+ end || M <- MsGood],
+ FailExp = [{certainly_not_existing,enoent}],
+
+ io:format("SuccExp = ~p\n", [SuccExp]),
+ io:format("FailExp = ~p\n", [FailExp]),
+
+ Path = code:get_path(),
+ Process = fun(_, F, Code) -> {ok,{F,erlang:md5(Code)}} end,
+ {ok,{SuccExp,FailExp}} = get_modules_sorted(Ms, Process, Path),
+
+ %% Test that an 'enotdir' error can be handled.
+ {ok,{SuccExp,FailExp}} = get_modules_sorted(Ms, Process, [NotADir|Path]),
+
+ Name = inet_get_modules,
+ {ok, Node, BootPid} = complete_start_node(Name),
+ ThisDir = filename:dirname(code:which(?MODULE)),
+ true = rpc:call(Node, code, add_patha, [ThisDir]),
+ _ = rpc:call(Node, code, ensure_loaded, [?MODULE]),
+ {ok,{InetSucc,FailExp}} = rpc:call(Node, erl_prim_loader,
+ get_modules, [Ms,Process,Path]),
+ SuccExp = lists:sort(InetSucc),
+
+ stop_node(Node),
+ unlink(BootPid),
+ exit(BootPid, kill),
+
ok.
+get_modules_sorted(Ms, Process, Path) ->
+ case erl_prim_loader:get_modules(Ms, Process, Path) of
+ {ok,{Succ,FailExp}} ->
+ {ok,{lists:sort(Succ),lists:sort(FailExp)}};
+ Other ->
+ Other
+ end.
+
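%% Illustrative sketch (not part of the patch): the call shape exercised by
%% the new get_modules/1 test case above. erl_prim_loader:get_modules/3
%% fetches the object code for a list of modules in one pass; the supplied
%% fun decides what to keep per module, and the result separates successes
%% from failures as {ok,{Succeeded,Failed}}, the shape asserted above.
%% fetch_md5s/1 is a hypothetical helper.
fetch_md5s(Modules) ->
    Process = fun(_Mod, File, Code) -> {ok, {File, erlang:md5(Code)}} end,
    erl_prim_loader:get_modules(Modules, Process, code:get_path()).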
normalize_and_backslash(Config) ->
%% Test OTP-11170
case os:type() of
@@ -118,7 +172,7 @@ normalize_and_backslash(Config) ->
test_normalize_and_backslash(Config)
end.
test_normalize_and_backslash(Config) ->
- PrivDir = ?config(priv_dir,Config),
+ PrivDir = proplists:get_value(priv_dir,Config),
Dir = filename:join(PrivDir,"\\"),
File = filename:join(Dir,"file-OTP-11170"),
ok = file:make_dir(Dir),
@@ -129,41 +183,27 @@ test_normalize_and_backslash(Config) ->
ok = file:del_dir(Dir),
ok.
-inet_existing(doc) -> ["Start a node using the 'inet' loading method, ",
- "from an already started boot server."];
+%% Start a node using the 'inet' loading method,
+%% from an already started boot server.
inet_existing(Config) when is_list(Config) ->
Name = erl_prim_test_inet_existing,
- Host = host(),
- Cookie = atom_to_list(erlang:get_cookie()),
- IpStr = ip_str(Host),
- LFlag = get_loader_flag(os:type()),
- Args = LFlag ++ " -hosts " ++ IpStr ++
- " -setcookie " ++ Cookie,
- {ok, BootPid} = erl_boot_server:start_link([Host]),
- {ok, Node} = start_node(Name, Args),
+ BootPid = start_boot_server(),
+ Node = start_node_using_inet(Name),
{ok,[["inet"]]} = rpc:call(Node, init, get_argument, [loader]),
stop_node(Node),
unlink(BootPid),
exit(BootPid, kill),
ok.
-inet_coming_up(doc) -> ["Start a node using the 'inet' loading method, ",
- "but start the boot server afterwards."];
+%% Start a node using the 'inet' loading method,
+%% but start the boot server afterwards.
inet_coming_up(Config) when is_list(Config) ->
Name = erl_prim_test_inet_coming_up,
- Cookie = atom_to_list(erlang:get_cookie()),
- Host = host(),
- IpStr = ip_str(Host),
- LFlag = get_loader_flag(os:type()),
- Args = LFlag ++
- " -hosts " ++ IpStr ++
- " -setcookie " ++ Cookie,
- {ok, Node} = start_node(Name, Args, [{wait, false}]),
+ Node = start_node_using_inet(Name, [{wait,false}]),
%% Wait a while, then start boot server, and wait for node to start.
- test_server:sleep(test_server:seconds(6)),
- io:format("erl_boot_server:start_link([~p]).", [Host]),
- {ok, BootPid} = erl_boot_server:start_link([Host]),
+ ct:sleep({seconds,6}),
+ BootPid = start_boot_server(),
wait_really_started(Node, 25),
%% Check loader argument, then cleanup.
@@ -174,41 +214,36 @@ inet_coming_up(Config) when is_list(Config) ->
ok.
wait_really_started(Node, 0) ->
- test_server:fail({not_booted,Node});
+ ct:fail({not_booted,Node});
wait_really_started(Node, N) ->
case rpc:call(Node, init, get_status, []) of
{started, _} ->
ok;
_ ->
- test_server:sleep(1000),
+ ct:sleep(1000),
wait_really_started(Node, N - 1)
end.
-inet_disconnects(doc) -> ["Start a node using the 'inet' loading method, ",
- "then lose the connection."];
+%% Start a node using the 'inet' loading method,
+%% then lose the connection.
inet_disconnects(Config) when is_list(Config) ->
case test_server:is_native(erl_boot_server) of
true ->
{skip,"erl_boot_server is native"};
false ->
- ?line Name = erl_prim_test_inet_disconnects,
- ?line Host = host(),
- ?line Cookie = atom_to_list(erlang:get_cookie()),
- ?line IpStr = ip_str(Host),
- ?line LFlag = get_loader_flag(os:type()),
- ?line Args = LFlag ++ " -hosts " ++ IpStr ++
- " -setcookie " ++ Cookie,
-
- ?line {ok, BootPid} = erl_boot_server:start([Host]),
+ Name = erl_prim_test_inet_disconnects,
+
+ BootPid = start_boot_server(),
+ unlink(BootPid),
Self = self(),
%% This process shuts down the boot server during loading.
- ?line Stopper = spawn_link(fun() -> stop_boot(BootPid, Self) end),
- ?line receive
- {Stopper,ready} -> ok
- end,
+ Stopper = spawn_link(fun() -> stop_boot(BootPid, Self) end),
+ receive
+ {Stopper,ready} -> ok
+ end,
%% Let the loading begin...
- ?line {ok, Node} = start_node(Name, Args, [{wait, false}]),
+ Node = start_node_using_inet(Name, [{wait,false}]),
%% When the stopper is ready, the slave node should be
%% looking for a boot server again.
@@ -216,18 +251,18 @@ inet_disconnects(Config) when is_list(Config) ->
{Stopper,ok} ->
ok;
{Stopper,{error,Reason}} ->
- ?line ?t:fail(Reason)
+ ct:fail(Reason)
after 60000 ->
- ?line ?t:fail(stopper_died)
+ ct:fail(stopper_died)
end,
%% Start new boot server to see that loading is continued.
- ?line {ok, BootPid2} = erl_boot_server:start_link([Host]),
- ?line wait_really_started(Node, 25),
- ?line {ok,[["inet"]]} = rpc:call(Node, init, get_argument, [loader]),
- ?line stop_node(Node),
- ?line unlink(BootPid2),
- ?line exit(BootPid2, kill),
+ BootPid2 = start_boot_server(),
+ wait_really_started(Node, 25),
+ {ok,[["inet"]]} = rpc:call(Node, init, get_argument, [loader]),
+ stop_node(Node),
+ unlink(BootPid2),
+ exit(BootPid2, kill),
ok
end.
@@ -256,81 +291,72 @@ get_calls(Count, Pid) ->
{error,{trace_msg_timeout,Count}}
end.
-multiple_slaves(doc) ->
- ["Start nodes in parallell, all using the 'inet' loading method, ",
- "verify that the boot server manages"];
+%% Start nodes in parallel, all using the 'inet' loading method;
+%% verify that the boot server manages.
multiple_slaves(Config) when is_list(Config) ->
- case os:type() of
- {ose,_} ->
- {comment, "OSE: multiple nodes not supported"};
- _ ->
- ?line Name = erl_prim_test_multiple_slaves,
- ?line Host = host(),
- ?line Cookie = atom_to_list(erlang:get_cookie()),
- ?line IpStr = ip_str(Host),
- ?line LFlag = get_loader_flag(os:type()),
- ?line Args = LFlag ++ " -hosts " ++ IpStr ++
- " -setcookie " ++ Cookie,
-
- NoOfNodes = 10, % no of slave nodes to be started
-
- NamesAndNodes =
- lists:map(fun(N) ->
- NameN = atom_to_list(Name) ++
- integer_to_list(N),
- NodeN = NameN ++ "@" ++ Host,
- {list_to_atom(NameN),list_to_atom(NodeN)}
- end, lists:seq(1, NoOfNodes)),
-
- ?line Nodes = start_multiple_nodes(NamesAndNodes, Args, []),
-
- %% "queue up" the nodes to wait for the boot server to respond
- %% (note: test_server supervises each node start by accept()
- %% on a socket, the timeout value for the accept has to be quite
- %% long for this test to work).
- ?line test_server:sleep(test_server:seconds(5)),
- %% start the code loading circus!
- ?line {ok,BootPid} = erl_boot_server:start_link([Host]),
- %% give the nodes a chance to boot up before attempting to stop them
- ?line test_server:sleep(test_server:seconds(10)),
-
- ?line wait_and_shutdown(lists:reverse(Nodes), 30),
-
- ?line unlink(BootPid),
- ?line exit(BootPid, kill),
- ok
- end.
+ Name = erl_prim_test_multiple_slaves,
+ Host = host(),
+ IpStr = ip_str(Host),
+ Args = " -loader inet -hosts " ++ IpStr,
+
+ NoOfNodes = 10, % no of slave nodes to be started
+
+ NamesAndNodes =
+ lists:map(fun(N) ->
+ NameN = atom_to_list(Name) ++
+ integer_to_list(N),
+ NodeN = NameN ++ "@" ++ Host,
+ {list_to_atom(NameN),list_to_atom(NodeN)}
+ end, lists:seq(1, NoOfNodes)),
+
+ Nodes = start_multiple_nodes(NamesAndNodes, Args, []),
+
+ %% "queue up" the nodes to wait for the boot server to respond
+ %% (note: test_server supervises each node start by accept()
+ %% on a socket, the timeout value for the accept has to be quite
+ %% long for this test to work).
+ ct:sleep({seconds,5}),
+ %% start the code loading circus!
+ BootPid = start_boot_server(),
+ %% give the nodes a chance to boot up before attempting to stop them
+ ct:sleep({seconds,10}),
+
+ wait_and_shutdown(lists:reverse(Nodes), 30),
+
+ unlink(BootPid),
+ exit(BootPid, kill),
+ ok.
start_multiple_nodes([{Name,Node} | NNs], Args, Started) ->
- ?line {ok,Node} = start_node(Name, Args, [{wait, false}]),
+ {ok,Node} = start_node(Name, Args, [{wait, false}]),
start_multiple_nodes(NNs, Args, [Node | Started]);
start_multiple_nodes([], _, Nodes) ->
Nodes.
wait_and_shutdown([Node | Nodes], Tries) ->
- ?line wait_really_started(Node, Tries),
- ?line {ok,[["inet"]]} = rpc:call(Node, init, get_argument, [loader]),
- ?line stop_node(Node),
+ wait_really_started(Node, Tries),
+ {ok,[["inet"]]} = rpc:call(Node, init, get_argument, [loader]),
+ stop_node(Node),
wait_and_shutdown(Nodes, Tries);
wait_and_shutdown([], _) ->
ok.
-file_requests(doc) -> ["Start a node using the 'inet' loading method, ",
- "verify that the boot server responds to file requests."];
+%% Start a node using the 'inet' loading method,
+%% verify that the boot server responds to file requests.
file_requests(Config) when is_list(Config) ->
- ?line {ok, Node, BootPid} = complete_start_node(erl_prim_test_file_req),
+ {ok, Node, BootPid} = complete_start_node(erl_prim_test_file_req),
%% compare with results from file server calls (the
%% boot server uses the same file sys and cwd)
{ok,Files} = file:list_dir("."),
io:format("Files: ~p~n",[Files]),
- ?line {ok,Files} = rpc:call(Node, erl_prim_loader, list_dir, ["."]),
+ {ok,Files} = rpc:call(Node, erl_prim_loader, list_dir, ["."]),
{ok,Info} = file:read_file_info(code:which(test_server)),
- ?line {ok,Info} = rpc:call(Node, erl_prim_loader, read_file_info,
- [code:which(test_server)]),
+ {ok,Info} = rpc:call(Node, erl_prim_loader, read_file_info,
+ [code:which(test_server)]),
- PrivDir = ?config(priv_dir,Config),
+ PrivDir = proplists:get_value(priv_dir,Config),
Dir = filename:join(PrivDir,?MODULE_STRING++"_file_requests"),
ok = file:make_dir(Dir),
Alias = filename:join(Dir,"symlink"),
@@ -354,163 +380,140 @@ file_requests(Config) when is_list(Config) ->
end,
{ok,Cwd} = file:get_cwd(),
- ?line {ok,Cwd} = rpc:call(Node, erl_prim_loader, get_cwd, []),
+ {ok,Cwd} = rpc:call(Node, erl_prim_loader, get_cwd, []),
case file:get_cwd("C:") of
{error,enotsup} ->
ok;
{ok,DCwd} ->
- ?line {ok,DCwd} = rpc:call(Node, erl_prim_loader, get_cwd, ["C:"])
+ {ok,DCwd} = rpc:call(Node, erl_prim_loader, get_cwd, ["C:"])
end,
- ?line stop_node(Node),
- ?line unlink(BootPid),
- ?line exit(BootPid, kill),
+ stop_node(Node),
+ unlink(BootPid),
+ exit(BootPid, kill),
ok.
-complete_start_node(Name) ->
- ?line Host = host(),
- ?line Cookie = atom_to_list(erlang:get_cookie()),
- ?line IpStr = ip_str(Host),
- ?line LFlag = get_loader_flag(os:type()),
- ?line Args = LFlag ++ " -hosts " ++ IpStr ++
- " -setcookie " ++ Cookie,
-
- ?line {ok,BootPid} = erl_boot_server:start_link([Host]),
-
- ?line {ok,Node} = start_node(Name, Args),
- ?line wait_really_started(Node, 25),
- {ok, Node, BootPid}.
-
-local_archive(suite) ->
- [];
-local_archive(doc) ->
- ["Read files from local archive."];
+%% Read files from local archive.
local_archive(Config) when is_list(Config) ->
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
KernelDir = filename:basename(code:lib_dir(kernel)),
Archive = filename:join([PrivDir, KernelDir ++ init:archive_extension()]),
file:delete(Archive),
- ?line {ok, Archive} = create_archive(Archive, [KernelDir]),
+ {ok, Archive} = create_archive(Archive, [KernelDir]),
Node = node(),
BeamName = "inet.beam",
- ?line ok = test_archive(Node, Archive, KernelDir, BeamName),
+ ok = test_archive(Node, Archive, KernelDir, BeamName),
%% Cleanup
- ?line ok = rpc:call(Node, erl_prim_loader, release_archives, []),
- ?line ok = file:delete(Archive),
+ ok = rpc:call(Node, erl_prim_loader, purge_archive_cache, []),
+ ok = file:delete(Archive),
ok.
-remote_archive(suite) ->
- {req, [{local_slave_nodes, 1}, {time, 10}]};
-remote_archive(doc) ->
- ["Read files from remote archive."];
+%% Read files from remote archive.
remote_archive(Config) when is_list(Config) ->
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
KernelDir = filename:basename(code:lib_dir(kernel)),
Archive = filename:join([PrivDir, KernelDir ++ init:archive_extension()]),
file:delete(Archive),
- ?line {ok, Archive} = create_archive(Archive, [KernelDir]),
+ {ok, Archive} = create_archive(Archive, [KernelDir]),
- ?line {ok, Node, BootPid} = complete_start_node(remote_archive),
+ {ok, Node, BootPid} = complete_start_node(remote_archive),
BeamName = "inet.beam",
- ?line ok = test_archive(Node, Archive, KernelDir, BeamName),
+ ok = test_archive(Node, Archive, KernelDir, BeamName),
%% Cleanup
- ?line stop_node(Node),
- ?line unlink(BootPid),
- ?line exit(BootPid, kill),
+ stop_node(Node),
+ unlink(BootPid),
+ exit(BootPid, kill),
ok.
-primary_archive(suite) ->
- {req, [{local_slave_nodes, 1}, {time, 10}]};
-primary_archive(doc) ->
- ["Read files from primary archive."];
+%% Read files from primary archive.
primary_archive(Config) when is_list(Config) ->
%% Copy the orig files to priv_dir
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
Archive = filename:join([PrivDir, "primary_archive.zip"]),
file:delete(Archive),
- DataDir = ?config(data_dir, Config),
- ?line {ok, _} = zip:create(Archive, ["primary_archive"],
- [{compress, []}, {cwd, DataDir}]),
- ?line {ok, _} = zip:extract(Archive, [{cwd, PrivDir}]),
+ DataDir = proplists:get_value(data_dir, Config),
+ {ok, _} = zip:create(Archive, ["primary_archive"],
+ [{compress, []}, {cwd, DataDir}]),
+ {ok, _} = zip:extract(Archive, [{cwd, PrivDir}]),
TopDir = filename:join([PrivDir, "primary_archive"]),
%% Compile the code
DictDir = "primary_archive_dict-1.0",
DummyDir = "primary_archive_dummy",
- ?line ok = compile_app(TopDir, DictDir),
- ?line ok = compile_app(TopDir, DummyDir),
-
+ ok = compile_app(TopDir, DictDir),
+ ok = compile_app(TopDir, DummyDir),
+
%% Create the archive
{ok, TopFiles} = file:list_dir(TopDir),
- ?line {ok, {_, ArchiveBin}} = zip:create(Archive, TopFiles,
- [memory, {compress, []}, {cwd, TopDir}]),
-
+ {ok, {_, ArchiveBin}} = zip:create(Archive, TopFiles,
+ [memory, {compress, []}, {cwd, TopDir}]),
+
%% Use temporary node to simplify cleanup
- ?line Cookie = atom_to_list(erlang:get_cookie()),
- ?line Args = " -setcookie " ++ Cookie,
- ?line {ok,Node} = start_node(primary_archive, Args),
- ?line wait_really_started(Node, 25),
- ?line {_,_,_} = rpc:call(Node, erlang, date, []),
+ Cookie = atom_to_list(erlang:get_cookie()),
+ Args = " -setcookie " ++ Cookie,
+ {ok,Node} = start_node(primary_archive, Args),
+ wait_really_started(Node, 25),
+ {_,_,_} = rpc:call(Node, erlang, date, []),
%% Set primary archive
ExpectedEbins = [Archive, DictDir ++ "/ebin", DummyDir ++ "/ebin"],
io:format("ExpectedEbins: ~p\n", [ExpectedEbins]),
- ?line {ok, FileInfo} = prim_file:read_file_info(Archive),
- ?line {ok, Ebins} = rpc:call(Node, erl_prim_loader, set_primary_archive,
- [Archive, ArchiveBin, FileInfo,
- fun escript:parse_file/1]),
- ?line ExpectedEbins = lists:sort(Ebins), % assert
-
- ?line {ok, TopFiles2} = rpc:call(Node, erl_prim_loader, list_dir, [Archive]),
- ?line [DictDir, DummyDir] = lists:sort(TopFiles2),
+ {ok, FileInfo} = ?PRIM_FILE:read_file_info(Archive),
+ {ok, Ebins} = rpc:call(Node, erl_prim_loader, set_primary_archive,
+ [Archive, ArchiveBin, FileInfo,
+ fun escript:parse_file/1]),
+ ExpectedEbins = lists:sort(Ebins), % assert
+
+ {ok, TopFiles2} = rpc:call(Node, erl_prim_loader, list_dir, [Archive]),
+ [DictDir, DummyDir] = lists:sort(TopFiles2),
BeamName = "primary_archive_dict_app.beam",
- ?line ok = test_archive(Node, Archive, DictDir, BeamName),
-
+ ok = test_archive(Node, Archive, DictDir, BeamName),
+
%% Cleanup
- ?line {ok, []} = rpc:call(Node, erl_prim_loader, set_primary_archive,
- [undefined, undefined, undefined,
- fun escript:parse_file/1]),
- ?line stop_node(Node),
- ?line ok = file:delete(Archive),
+ {ok, []} = rpc:call(Node, erl_prim_loader, set_primary_archive,
+ [undefined, undefined, undefined,
+ fun escript:parse_file/1]),
+ stop_node(Node),
+ ok = file:delete(Archive),
ok.
test_archive(Node, TopDir, AppDir, BeamName) ->
%% List dir
io:format("test_archive: ~p\n", [rpc:call(Node, erl_prim_loader, list_dir, [TopDir])]),
- ?line {ok, TopFiles} = rpc:call(Node, erl_prim_loader, list_dir, [TopDir]),
- ?line true = lists:member(AppDir, TopFiles),
+ {ok, TopFiles} = rpc:call(Node, erl_prim_loader, list_dir, [TopDir]),
+ true = lists:member(AppDir, TopFiles),
AbsAppDir = TopDir ++ "/" ++ AppDir,
- ?line {ok, AppFiles} = rpc:call(Node, erl_prim_loader, list_dir, [AbsAppDir]),
- ?line true = lists:member("ebin", AppFiles),
+ {ok, AppFiles} = rpc:call(Node, erl_prim_loader, list_dir, [AbsAppDir]),
+ true = lists:member("ebin", AppFiles),
Ebin = AbsAppDir ++ "/ebin",
- ?line {ok, EbinFiles} = rpc:call(Node, erl_prim_loader, list_dir, [Ebin]),
+ {ok, EbinFiles} = rpc:call(Node, erl_prim_loader, list_dir, [Ebin]),
Beam = Ebin ++ "/" ++ BeamName,
- ?line true = lists:member(BeamName, EbinFiles),
- ?line error = rpc:call(Node, erl_prim_loader, list_dir, [TopDir ++ "/no_such_file"]),
- ?line error = rpc:call(Node, erl_prim_loader, list_dir, [TopDir ++ "/ebin/no_such_file"]),
-
+ true = lists:member(BeamName, EbinFiles),
+ error = rpc:call(Node, erl_prim_loader, list_dir, [TopDir ++ "/no_such_file"]),
+ error = rpc:call(Node, erl_prim_loader, list_dir, [TopDir ++ "/ebin/no_such_file"]),
+
%% File info
- ?line {ok, #file_info{type = directory}} =
+ {ok, #file_info{type = directory}} =
rpc:call(Node, erl_prim_loader, read_file_info, [TopDir]),
- ?line {ok, #file_info{type = directory}} =
+ {ok, #file_info{type = directory}} =
rpc:call(Node, erl_prim_loader, read_file_info, [Ebin]),
- ?line {ok, #file_info{type = regular} = FI} =
+ {ok, #file_info{type = regular} = FI} =
rpc:call(Node, erl_prim_loader, read_file_info, [Beam]),
- ?line error = rpc:call(Node, erl_prim_loader, read_file_info, [TopDir ++ "/no_such_file"]),
- ?line error = rpc:call(Node, erl_prim_loader, read_file_info, [TopDir ++ "/ebin/no_such_file"]),
-
+ error = rpc:call(Node, erl_prim_loader, read_file_info, [TopDir ++ "/no_such_file"]),
+ error = rpc:call(Node, erl_prim_loader, read_file_info, [TopDir ++ "/ebin/no_such_file"]),
+
%% Get file
- ?line {ok, Bin, Beam} = rpc:call(Node, erl_prim_loader, get_file, [Beam]),
- ?line if
- FI#file_info.size =:= byte_size(Bin) -> ok;
- true -> exit({FI#file_info.size, byte_size(Bin)})
- end,
- ?line error = rpc:call(Node, erl_prim_loader, get_file, ["/no_such_file"]),
- ?line error = rpc:call(Node, erl_prim_loader, get_file, ["/ebin/no_such_file"]),
+ {ok, Bin, Beam} = rpc:call(Node, erl_prim_loader, get_file, [Beam]),
+ if
+ FI#file_info.size =:= byte_size(Bin) -> ok;
+ true -> exit({FI#file_info.size, byte_size(Bin)})
+ end,
+ error = rpc:call(Node, erl_prim_loader, get_file, ["/no_such_file"]),
+ error = rpc:call(Node, erl_prim_loader, get_file, ["/ebin/no_such_file"]),
ok.
create_archive(Archive, AppDirs) ->
@@ -520,12 +523,9 @@ create_archive(Archive, AppDirs) ->
zip:create(Archive, AppDirs, Opts).
-virtual_dir_in_archive(suite) ->
- [];
-virtual_dir_in_archive(doc) ->
- ["Read virtual directories from archive."];
+%% Read virtual directories from archive.
virtual_dir_in_archive(Config) when is_list(Config) ->
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
Data = <<"A little piece of data.">>,
ArchiveBase = "archive_with_virtual_dirs",
Archive = filename:join([PrivDir, ArchiveBase ++ init:archive_extension()]),
@@ -534,32 +534,62 @@ virtual_dir_in_archive(Config) when is_list(Config) ->
FileInArchive = filename:join([ArchiveBase, EbinBase, FileBase]),
BinFiles = [{FileInArchive, Data}],
Opts = [{compress, []}],
- ?line file:delete(Archive),
+ file:delete(Archive),
io:format("zip:create(~p,\n\t~p,\n\t~p).\n", [Archive, BinFiles, Opts]),
- ?line {ok, Archive} = zip:create(Archive, BinFiles, Opts),
+ {ok, Archive} = zip:create(Archive, BinFiles, Opts),
%% Verify that there is no directories
- ?line {ok, BinFiles} = zip:unzip(Archive, [memory]),
+ {ok, BinFiles} = zip:unzip(Archive, [memory]),
FullPath = filename:join([Archive, FileInArchive]),
- ?line {ok, _} = erl_prim_loader:read_file_info(FullPath),
+ {ok, _} = erl_prim_loader:read_file_info(FullPath),
%% Read one virtual dir
EbinDir = filename:dirname(FullPath),
- ?line {ok, _} = erl_prim_loader:read_file_info(EbinDir),
- ?line {ok, [FileBase]} = erl_prim_loader:list_dir(EbinDir),
+ {ok, _} = erl_prim_loader:read_file_info(EbinDir),
+ {ok, [FileBase]} = erl_prim_loader:list_dir(EbinDir),
%% Read another virtual dir
AppDir = filename:dirname(EbinDir),
- ?line {ok, _} = erl_prim_loader:read_file_info(AppDir),
- ?line {ok, [EbinBase]} = erl_prim_loader:list_dir(AppDir),
-
+ {ok, _} = erl_prim_loader:read_file_info(AppDir),
+ {ok, [EbinBase]} = erl_prim_loader:list_dir(AppDir),
+
%% Cleanup
- ?line ok = erl_prim_loader:release_archives(),
- ?line ok = file:delete(Archive),
+ ok = erl_prim_loader:purge_archive_cache(),
+ ok = file:delete(Archive),
ok.
-%% Misc. functions
+%%%
+%%% Helper functions.
+%%%
+
+complete_start_node(Name) ->
+ BootPid = start_boot_server(),
+ Node = start_node_using_inet(Name),
+ wait_really_started(Node, 25),
+ {ok, Node, BootPid}.
+
+start_boot_server() ->
+ %% Many linux systems define:
+ %% 127.0.0.1 localhost
+ %% 127.0.1.1 somehostname
+ %% Therefore, to allow the tests to work on those kind of systems,
+ %% also include "localhost" in the list of allowed hosts.
+
+ Hosts = [host(),ip_str("localhost")],
+ {ok,BootPid} = erl_boot_server:start_link(Hosts),
+ BootPid.
+
+start_node_using_inet(Name) ->
+ start_node_using_inet(Name, []).
+
+start_node_using_inet(Name, Opts) ->
+ Host = host(),
+ IpStr = ip_str(Host),
+ Args = " -loader inet -hosts " ++ IpStr,
+ {ok,Node} = start_node(Name, Args, Opts),
+ Node.
+
ip_str({A, B, C, D}) ->
lists:concat([A, ".", B, ".", C, ".", D]);
@@ -585,14 +615,11 @@ host() ->
stop_node(Node) ->
test_server:stop_node(Node).
-get_loader_flag(_) ->
- " -loader inet ".
-
compile_app(TopDir, AppName) ->
AppDir = filename:join([TopDir, AppName]),
SrcDir = filename:join([AppDir, "src"]),
OutDir = filename:join([AppDir, "ebin"]),
- ?line {ok, Files} = file:list_dir(SrcDir),
+ {ok, Files} = file:list_dir(SrcDir),
compile_files(Files, SrcDir, OutDir).
compile_files([File | Files], SrcDir, OutDir) ->
diff --git a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict.erl b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict.erl
index 7d8167f575..57aeb7fd64 100644
--- a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict.erl
+++ b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict_app.erl b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict_app.erl
index bf66179e00..78e4f61579 100644
--- a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict_app.erl
+++ b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict_app.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict_sup.erl b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict_sup.erl
index e57d8c5a00..e63e958daf 100644
--- a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict_sup.erl
+++ b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dict-1.0/src/primary_archive_dict_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy.erl b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy.erl
index e1c7655331..4adbec3c81 100644
--- a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy.erl
+++ b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy_app.erl b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy_app.erl
index e9db18c47e..e90776ce73 100644
--- a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy_app.erl
+++ b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy_app.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy_sup.erl b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy_sup.erl
index 7bab86be68..145de8167f 100644
--- a/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy_sup.erl
+++ b/lib/kernel/test/erl_prim_loader_SUITE_data/primary_archive/primary_archive_dummy/src/primary_archive_dummy_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/error_handler_SUITE.erl b/lib/kernel/test/error_handler_SUITE.erl
index d93ec643ce..c6d457f314 100644
--- a/lib/kernel/test/error_handler_SUITE.erl
+++ b/lib/kernel/test/error_handler_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2013. All Rights Reserved.
+%% Copyright Ericsson AB 2013-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -26,7 +26,9 @@
%% Callback from error_handler.
-export(['$handle_undefined_function'/2]).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[undefined_function_handler].
diff --git a/lib/kernel/test/error_logger_SUITE.erl b/lib/kernel/test/error_logger_SUITE.erl
index f1988b68d9..eab72e58a7 100644
--- a/lib/kernel/test/error_logger_SUITE.erl
+++ b/lib/kernel/test/error_logger_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%%
-module(error_logger_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
%%-----------------------------------------------------------------
%% We don't have to test the normal behaviour here, i.e. the tty
@@ -30,8 +30,10 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
+ off_heap/1,
error_report/1, info_report/1, error/1, info/1,
- emulator/1, tty/1, logfile/1, add/1, delete/1]).
+ emulator/1, via_logger_process/1, other_node/1,
+ tty/1, logfile/1, add/1, delete/1, format_depth/1]).
-export([generate_error/2]).
@@ -40,19 +42,25 @@
terminate/2]).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
- [error_report, info_report, error, info, emulator, tty,
- logfile, add, delete].
+ [off_heap, error_report, info_report, error, info, emulator,
+ via_logger_process, other_node, tty, logfile, add, delete,
+ format_depth].
groups() ->
[].
init_per_suite(Config) ->
+ logger:add_handler(error_logger,error_logger,
+ #{level=>info,filter_default=>log}),
Config.
end_per_suite(_Config) ->
+ logger:remove_handler(error_logger),
ok.
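%% Illustrative sketch (not part of the patch): the handler setup done in
%% init_per_suite above. The suite installs error_logger as a logger
%% handler so that events reach the legacy report handler it registers
%% with error_logger:add_report_handler/2; end_per_suite removes it again.
%% add_legacy_error_logger/0 is a hypothetical wrapper around the same
%% call used above.
add_legacy_error_logger() ->
    ok = logger:add_handler(error_logger, error_logger,
                            #{level => info, filter_default => log}).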
init_per_group(_GroupName, Config) ->
@@ -64,215 +72,256 @@ end_per_group(_GroupName, Config) ->
%%-----------------------------------------------------------------
-error_report(suite) -> [];
-error_report(doc) -> [];
+off_heap(_Config) ->
+ %% The error_logger process may receive a huge amount of
+ %% messages. Make sure that they are stored off heap to
+    %% avoid excessive GCs.
+ MQD = message_queue_data,
+ {MQD,off_heap} = process_info(whereis(error_logger), MQD),
+ ok.
+
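%% Illustrative sketch (not part of the patch): how a process opts in to
%% the property the off_heap test above checks. message_queue_data can be
%% chosen at spawn time (or later via process_flag/2); off_heap keeps
%% incoming messages outside the process heap so large queues do not blow
%% up garbage-collection times. start_off_heap_worker/1 is a hypothetical
%% helper.
start_off_heap_worker(Fun) when is_function(Fun, 0) ->
    spawn_opt(Fun, [{message_queue_data, off_heap}]).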
+%%-----------------------------------------------------------------
+
error_report(Config) when is_list(Config) ->
- ?line error_logger:add_report_handler(?MODULE, self()),
+ error_logger:add_report_handler(?MODULE, self()),
Rep1 = [{tag1,"data1"},{tag2,data2},{tag3,3}],
Rep2 = [testing,"testing",{tag1,"tag1"}],
Rep3 = "This is a string !",
Rep4 = {this,is,a,tuple},
- ?line ok = error_logger:error_report(Rep1),
+ ok = error_logger:error_report(Rep1),
reported(error_report, std_error, Rep1),
- ?line ok = error_logger:error_report(Rep2),
+ ok = error_logger:error_report(Rep2),
reported(error_report, std_error, Rep2),
- ?line ok = error_logger:error_report(Rep3),
+ ok = error_logger:error_report(Rep3),
reported(error_report, std_error, Rep3),
- ?line ok = error_logger:error_report(Rep4),
+ ok = error_logger:error_report(Rep4),
reported(error_report, std_error, Rep4),
- ?line ok = error_logger:error_report(test_type, Rep1),
+ ok = error_logger:error_report(test_type, Rep1),
reported(error_report, test_type, Rep1),
- ?line ok = error_logger:error_report(test_type, Rep2),
+ ok = error_logger:error_report(test_type, Rep2),
reported(error_report, test_type, Rep2),
- ?line ok = error_logger:error_report(test_type, Rep3),
+ ok = error_logger:error_report(test_type, Rep3),
reported(error_report, test_type, Rep3),
- ?line ok = error_logger:error_report(test_type, Rep4),
+ ok = error_logger:error_report(test_type, Rep4),
reported(error_report, test_type, Rep4),
- ?line ok = error_logger:error_report("test_type", Rep1),
+ ok = error_logger:error_report("test_type", Rep1),
reported(error_report, "test_type", Rep1),
- ?line ok = error_logger:error_report({test,type}, Rep2),
+ ok = error_logger:error_report({test,type}, Rep2),
reported(error_report, {test,type}, Rep2),
- ?line ok = error_logger:error_report([test,type], Rep3),
+ ok = error_logger:error_report([test,type], Rep3),
reported(error_report, [test,type], Rep3),
- ?line ok = error_logger:error_report(1, Rep4),
+ ok = error_logger:error_report(1, Rep4),
reported(error_report, 1, Rep4),
- ?line my_yes = error_logger:delete_report_handler(?MODULE),
+ my_yes = error_logger:delete_report_handler(?MODULE),
ok.
%%-----------------------------------------------------------------
-info_report(suite) -> [];
-info_report(doc) -> [];
info_report(Config) when is_list(Config) ->
- ?line error_logger:add_report_handler(?MODULE, self()),
+ error_logger:add_report_handler(?MODULE, self()),
Rep1 = [{tag1,"data1"},{tag2,data2},{tag3,3}],
Rep2 = [testing,"testing",{tag1,"tag1"}],
Rep3 = "This is a string !",
Rep4 = {this,is,a,tuple},
- ?line ok = error_logger:info_report(Rep1),
+ ok = error_logger:info_report(Rep1),
reported(info_report, std_info, Rep1),
- ?line ok = error_logger:info_report(Rep2),
+ ok = error_logger:info_report(Rep2),
reported(info_report, std_info, Rep2),
- ?line ok = error_logger:info_report(Rep3),
+ ok = error_logger:info_report(Rep3),
reported(info_report, std_info, Rep3),
- ?line ok = error_logger:info_report(Rep4),
+ ok = error_logger:info_report(Rep4),
reported(info_report, std_info, Rep4),
- ?line ok = error_logger:info_report(test_type, Rep1),
+ ok = error_logger:info_report(test_type, Rep1),
reported(info_report, test_type, Rep1),
- ?line ok = error_logger:info_report(test_type, Rep2),
+ ok = error_logger:info_report(test_type, Rep2),
reported(info_report, test_type, Rep2),
- ?line ok = error_logger:info_report(test_type, Rep3),
+ ok = error_logger:info_report(test_type, Rep3),
reported(info_report, test_type, Rep3),
- ?line ok = error_logger:info_report(test_type, Rep4),
+ ok = error_logger:info_report(test_type, Rep4),
reported(info_report, test_type, Rep4),
- ?line ok = error_logger:info_report("test_type", Rep1),
+ ok = error_logger:info_report("test_type", Rep1),
reported(info_report, "test_type", Rep1),
- ?line ok = error_logger:info_report({test,type}, Rep2),
+ ok = error_logger:info_report({test,type}, Rep2),
reported(info_report, {test,type}, Rep2),
- ?line ok = error_logger:info_report([test,type], Rep3),
+ ok = error_logger:info_report([test,type], Rep3),
reported(info_report, [test,type], Rep3),
- ?line ok = error_logger:info_report(1, Rep4),
+ ok = error_logger:info_report(1, Rep4),
reported(info_report, 1, Rep4),
- ?line my_yes = error_logger:delete_report_handler(?MODULE),
+ my_yes = error_logger:delete_report_handler(?MODULE),
ok.
%%-----------------------------------------------------------------
-error(suite) -> [];
-error(doc) -> [];
error(Config) when is_list(Config) ->
- ?line error_logger:add_report_handler(?MODULE, self()),
+ error_logger:add_report_handler(?MODULE, self()),
Msg1 = "This is a plain text string~n",
Msg2 = "This is a text with arguments ~p~n",
Arg2 = "This is the argument",
Msg3 = {erroneous,msg},
- ?line ok = error_logger:error_msg(Msg1),
+ ok = error_logger:error_msg(Msg1),
reported(error, Msg1, []),
- ?line ok = error_logger:error_msg(Msg2, Arg2),
+ ok = error_logger:error_msg(Msg2, Arg2),
reported(error, Msg2, Arg2),
- ?line ok = error_logger:error_msg(Msg3),
+ ok = error_logger:error_msg(Msg3),
reported(error, Msg3, []),
- ?line ok = error_logger:error_msg(Msg1, []),
+ ok = error_logger:error_msg(Msg1, []),
reported(error, Msg1, []),
- ?line ok = error_logger:error_msg(Msg2, Arg2),
+ ok = error_logger:error_msg(Msg2, Arg2),
reported(error, Msg2, Arg2),
- ?line ok = error_logger:error_msg(Msg3, []),
+ ok = error_logger:error_msg(Msg3, []),
reported(error, Msg3, []),
- ?line ok = error_logger:format(Msg1, []),
+ ok = error_logger:format(Msg1, []),
reported(error, Msg1, []),
- ?line ok = error_logger:format(Msg2, Arg2),
+ ok = error_logger:format(Msg2, Arg2),
reported(error, Msg2, Arg2),
- ?line ok = error_logger:format(Msg3, []),
+ ok = error_logger:format(Msg3, []),
reported(error, Msg3, []),
- ?line my_yes = error_logger:delete_report_handler(?MODULE),
+ my_yes = error_logger:delete_report_handler(?MODULE),
ok.
%%-----------------------------------------------------------------
-info(suite) -> [];
-info(doc) -> [];
info(Config) when is_list(Config) ->
- ?line error_logger:add_report_handler(?MODULE, self()),
+ error_logger:add_report_handler(?MODULE, self()),
Msg1 = "This is a plain text string~n",
Msg2 = "This is a text with arguments ~p~n",
Arg2 = "This is the argument",
Msg3 = {erroneous,msg},
- ?line ok = error_logger:info_msg(Msg1),
+ ok = error_logger:info_msg(Msg1),
reported(info_msg, Msg1, []),
- ?line ok = error_logger:info_msg(Msg2, Arg2),
+ ok = error_logger:info_msg(Msg2, Arg2),
reported(info_msg, Msg2, Arg2),
- ?line ok = error_logger:info_msg(Msg3),
+ ok = error_logger:info_msg(Msg3),
reported(info_msg, Msg3, []),
- ?line ok = error_logger:info_msg(Msg1, []),
+ ok = error_logger:info_msg(Msg1, []),
reported(info_msg, Msg1, []),
- ?line ok = error_logger:info_msg(Msg2, Arg2),
+ ok = error_logger:info_msg(Msg2, Arg2),
reported(info_msg, Msg2, Arg2),
- ?line ok = error_logger:info_msg(Msg3, []),
+ ok = error_logger:info_msg(Msg3, []),
reported(info_msg, Msg3, []),
- ?line my_yes = error_logger:delete_report_handler(?MODULE),
+ my_yes = error_logger:delete_report_handler(?MODULE),
ok.
%%-----------------------------------------------------------------
-emulator(suite) -> [];
-emulator(doc) -> [];
emulator(Config) when is_list(Config) ->
- ?line error_logger:add_report_handler(?MODULE, self()),
+ error_logger:add_report_handler(?MODULE, self()),
Msg = "Error in process ~p on node ~p with exit value:~n~p~n",
Error = {badmatch,4},
Stack = [{module, function, 2, []}],
Pid = spawn(?MODULE, generate_error, [Error, Stack]),
reported(error, Msg, [Pid, node(), {Error, Stack}]),
- ?line my_yes = error_logger:delete_report_handler(?MODULE),
+ my_yes = error_logger:delete_report_handler(?MODULE),
ok.
generate_error(Error, Stack) ->
erlang:raise(error, Error, Stack).
%%-----------------------------------------------------------------
+
+via_logger_process(Config) ->
+ case os:type() of
+ {win32,_} ->
+ {skip,"Skip on Windows - can't change file mode"};
+ _ ->
+ error_logger:add_report_handler(?MODULE, self()),
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ Msg = "File operation error: eacces. Target: " ++
+ Dir ++ ". Function: list_dir. ",
+ ok = file:make_dir(Dir),
+ ok = file:change_mode(Dir,8#0222),
+ error = erl_prim_loader:list_dir(Dir),
+ ok = file:change_mode(Dir,8#0664),
+ _ = file:del_dir(Dir),
+ reported(error_report, std_error, Msg),
+ my_yes = error_logger:delete_report_handler(?MODULE),
+ ok
+ end.
+
+%%-----------------------------------------------------------------
+
+other_node(_Config) ->
+ error_logger:add_report_handler(?MODULE, self()),
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ ok = rpc:call(Node,logger,add_handler,[error_logger,error_logger,
+ #{level=>info,filter_default=>log}]),
+ rpc:call(Node,error_logger,error_report,[hi_from_remote]),
+ reported(error_report,std_error,hi_from_remote),
+ test_server:stop_node(Node),
+ ok.
+
+
+%%-----------------------------------------------------------------
%% We don't enable or disable tty error logging here. We do not
%% want to interact with the test run.
%%-----------------------------------------------------------------
-tty(suite) -> [];
-tty(doc) -> [];
tty(Config) when is_list(Config) ->
- ?line {'EXIT', _Reason} = (catch error_logger:tty(dummy)),
+ {'EXIT', _Reason} = (catch error_logger:tty(dummy)),
ok.
%%-----------------------------------------------------------------
%% If a logfile already exists we skip this test case !!
%%-----------------------------------------------------------------
-logfile(suite) -> [];
-logfile(doc) -> [];
logfile(Config) when is_list(Config) ->
- ?line case error_logger:logfile(filename) of
- {error, no_log_file} -> % Ok, we continues.
- do_logfile();
- _ ->
- ok
- end.
+ case error_logger:logfile(filename) of
+        {error, no_log_file} -> % Ok, we continue.
+ do_logfile();
+ _ ->
+ ok
+ end.
do_logfile() ->
- ?line {error, _} = error_logger:logfile(close),
- ?line {error, _} = error_logger:logfile({open,{error}}),
- ?line ok = error_logger:logfile({open, "dummy_logfile.log"}),
- ?line "dummy_logfile.log" = error_logger:logfile(filename),
- ?line ok = error_logger:logfile(close),
- ?line {'EXIT',_} = (catch error_logger:logfile(dummy)),
+ {error, _} = error_logger:logfile(close),
+ {error, _} = error_logger:logfile({open,{error}}),
+ ok = error_logger:logfile({open, "dummy_logfile.log"}),
+ "dummy_logfile.log" = error_logger:logfile(filename),
+ ok = error_logger:logfile(close),
+ {'EXIT',_} = (catch error_logger:logfile(dummy)),
ok.
%%-----------------------------------------------------------------
-add(suite) -> [];
-add(doc) -> [];
add(Config) when is_list(Config) ->
- ?line {'EXIT',_} = (catch error_logger:add_report_handler("dummy")),
- ?line {'EXIT',_} = error_logger:add_report_handler(non_existing),
- ?line my_error = error_logger:add_report_handler(?MODULE, [error]),
+ {'EXIT',_} = (catch error_logger:add_report_handler("dummy")),
+ {'EXIT',_} = error_logger:add_report_handler(non_existing),
+ my_error = error_logger:add_report_handler(?MODULE, [error]),
ok.
%%-----------------------------------------------------------------
-delete(suite) -> [];
-delete(doc) -> [];
delete(Config) when is_list(Config) ->
- ?line {'EXIT',_} = (catch error_logger:delete_report_handler("dummy")),
- ?line {error,_} = error_logger:delete_report_handler(non_existing),
+ {'EXIT',_} = (catch error_logger:delete_report_handler("dummy")),
+ {error,_} = error_logger:delete_report_handler(non_existing),
+ ok.
+
+%%-----------------------------------------------------------------
+
+format_depth(_Config) ->
+ ok = application:set_env(kernel,error_logger_format_depth,30),
+ 30 = error_logger:get_format_depth(),
+ ok = application:set_env(kernel,error_logger_format_depth,3),
+ 10 = error_logger:get_format_depth(),
+ ok = application:set_env(kernel,error_logger_format_depth,11),
+ 11 = error_logger:get_format_depth(),
+ ok = application:set_env(kernel,error_logger_format_depth,unlimited),
+ unlimited = error_logger:get_format_depth(),
+ ok = application:unset_env(kernel,error_logger_format_depth),
+ unlimited = error_logger:get_format_depth(),
ok.
%%-----------------------------------------------------------------
@@ -284,7 +333,7 @@ reported(Tag, Type, Report) ->
test_server:messages_get(),
ok
after 1000 ->
- test_server:fail(no_report_received)
+ ct:fail({no_report_received,test_server:messages_get()})
end.
%%-----------------------------------------------------------------
diff --git a/lib/kernel/test/error_logger_warn_SUITE.erl b/lib/kernel/test/error_logger_warn_SUITE.erl
index a3a3b2f8c6..8f1eb2ba0a 100644
--- a/lib/kernel/test/error_logger_warn_SUITE.erl
+++ b/lib/kernel/test/error_logger_warn_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@
%% Internal exports.
-export([init/1,handle_event/2,handle_info/2,handle_call/2]).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-define(EXPECT(Pattern),
(fun() ->
@@ -42,11 +42,10 @@
end
end)()).
-% Default timetrap timeout (set in init_per_testcase).
--define(default_timeout, ?t:minutes(1)).
-
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[basic, warnings_info, warnings_errors, rb_basic,
@@ -70,69 +69,58 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(_Case, Config) ->
- Dog = ?t:timetrap(?default_timeout),
- [{watchdog, Dog} | Config].
-end_per_testcase(_Case, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog),
+ Config.
+
+end_per_testcase(_Case, _Config) ->
ok.
-basic(doc) ->
- ["Tests basic error logger functionality"];
+%% Tests basic error logger functionality.
basic(Config) when is_list(Config) ->
put(elw_config,Config),
basic().
-warnings_info(doc) ->
- ["Tests mapping warnings to info functionality"];
+%% Tests mapping warnings to info functionality.
warnings_info(Config) when is_list(Config) ->
put(elw_config,Config),
warnings_info().
-warnings_errors(doc) ->
- ["Tests mapping warnings to errors functionality"];
+%% Tests mapping warnings to errors functionality.
warnings_errors(Config) when is_list(Config) ->
put(elw_config,Config),
warnings_errors().
-rb_basic(doc) ->
- ["Tests basic rb functionality"];
+%% Tests basic rb functionality.
rb_basic(Config) when is_list(Config) ->
put(elw_config,Config),
rb_basic().
-rb_warnings_info(doc) ->
- ["Tests warnings as info rb functionality"];
+%% Tests warnings as info rb functionality.
rb_warnings_info(Config) when is_list(Config) ->
put(elw_config,Config),
rb_warnings_info().
-rb_warnings_errors(doc) ->
- ["Tests warnings as errors rb functionality"];
+%% Tests warnings as errors rb functionality.
rb_warnings_errors(Config) when is_list(Config) ->
put(elw_config,Config),
rb_warnings_errors().
-rb_trunc(doc) ->
- ["Tests rb functionality on truncated data"];
+%% Tests rb functionality on truncated data.
rb_trunc(Config) when is_list(Config) ->
put(elw_config,Config),
rb_trunc().
-rb_utc(doc) ->
- ["Tests UTC mapping in rb (-sasl utc_log true)"];
+%% Tests UTC mapping in rb (-sasl utc_log true).
rb_utc(Config) when is_list(Config) ->
put(elw_config,Config),
rb_utc().
-file_utc(doc) ->
- ["Tests UTC mapping in file logger (-stdlib utc_log true)"];
+%% Tests UTC mapping in file logger (-stdlib utc_log true).
file_utc(Config) when is_list(Config) ->
put(elw_config,Config),
file_utc().
-% a small gen_event
+%% a small gen_event
init([Pid]) ->
{ok, Pid}.
@@ -236,7 +224,7 @@ warnings_errors() ->
stop_node(Node),
ok.
-% RB...
+%% RB...
quote(String) ->
case os:type() of
@@ -283,7 +271,7 @@ findstrc(String,File) ->
0
end.
-% Doesn't count empty lines
+%% Doesn't count empty lines
lines(File) ->
length(
string:tokens(
@@ -291,17 +279,17 @@ lines(File) ->
element(2,file:read_file(File))),
"\n")).
-%directories anf filenames
+%% Directories and filenames
ld() ->
Config = get(elw_config),
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
filename:absname(PrivDir).
lf() ->
filename:join([ld(),"logfile.txt"]).
rd() ->
Config = get(elw_config),
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
LogDir = filename:join(PrivDir,"log"),
file:make_dir(LogDir),
filename:absname(LogDir).
@@ -315,7 +303,7 @@ nice_stop_node(Name) ->
{nodedown,Name} -> ok
end.
-%clean out rd() before each report test in order to get only one file...
+%% Clean out rd() before each report test in order to get only one file...
clean_rd() ->
{ok,L} = file:list_dir(rd()),
lists:foreach(fun(F) ->
@@ -352,10 +340,10 @@ one_rb_findstr(Param,String) ->
rb:stop_log(),
findstr(String,lf()).
-% Tests
+%% Tests
rb_basic() ->
clean_rd(),
- % Behold, the magic parameters to activate rb logging...
+ %% Behold, the magic parameters to activate rb logging...
Node = start_node(nn(),"-boot start_sasl -sasl error_logger_mf_dir "++
quote(rd())++" error_logger_mf_maxbytes 5000 "
"error_logger_mf_maxfiles 5"),
@@ -378,7 +366,7 @@ rb_basic() ->
0 = one_rb_findstr([info_msg],pid_to_list(Self)),
0 = one_rb_findstr([info_report],pid_to_list(Self)),
2 = one_rb_findstr([],pid_to_list(Self)),
- true = (one_rb_findstr([progress],"===") > 4),
+ true = (one_rb_findstr([progress],"===") > 3),
rb:stop(),
application:stop(sasl),
stop_node(Node),
@@ -408,7 +396,7 @@ rb_warnings_info() ->
1 = one_rb_findstr([info_msg],pid_to_list(Self)),
1 = one_rb_findstr([info_report],pid_to_list(Self)),
2 = one_rb_findstr([],pid_to_list(Self)),
- true = (one_rb_findstr([progress],"===") > 4),
+ true = (one_rb_findstr([progress],"===") > 3),
rb:stop(),
application:stop(sasl),
stop_node(Node),
@@ -438,7 +426,7 @@ rb_warnings_errors() ->
0 = one_rb_findstr([info_msg],pid_to_list(Self)),
0 = one_rb_findstr([info_report],pid_to_list(Self)),
2 = one_rb_findstr([],pid_to_list(Self)),
- true = (one_rb_findstr([progress],"===") > 4),
+ true = (one_rb_findstr([progress],"===") > 3),
rb:stop(),
application:stop(sasl),
stop_node(Node),
@@ -471,7 +459,7 @@ rb_trunc() ->
0 = one_rb_findstr([info_msg],pid_to_list(Self)),
0 = one_rb_findstr([info_report],pid_to_list(Self)),
1 = one_rb_findstr([],pid_to_list(Self)),
- true = (one_rb_findstr([progress],"===") > 4),
+ true = (one_rb_findstr([progress],"===") > 3),
rb:stop(),
application:stop(sasl),
stop_node(Node),
@@ -492,9 +480,12 @@ rb_utc() ->
UtcLog=case application:get_env(sasl,utc_log) of
{ok,true} ->
true;
- _AllOthers ->
+ {ok,false} ->
application:set_env(sasl,utc_log,true),
- false
+ false;
+ undefined ->
+ application:set_env(sasl,utc_log,true),
+ undefined
end,
application:start(sasl),
rb:start([{report_dir, rd()}]),
@@ -506,16 +497,19 @@ rb_utc() ->
Sum=one_rb_findstr([],"UTC"),
rb:stop(),
application:stop(sasl),
- application:set_env(sasl,utc_log,UtcLog),
+ case UtcLog of
+ undefined ->
+ application:unset_env(sasl,utc_log);
+ _ ->
+ application:set_env(sasl,utc_log,UtcLog)
+ end,
stop_node(Node),
ok.
file_utc() ->
file:delete(lf()),
SS="-stdlib utc_log true -kernel error_logger "++ oquote("{file,"++iquote(lf())++"}"),
- %erlang:display(SS),
Node = start_node(nn(),SS),
- %erlang:display(rpc:call(Node,application,get_env,[kernel,error_logger])),
Self = self(),
GL = group_leader(),
fake_gl(Node,error_msg,"~p~n",[Self]),
diff --git a/lib/kernel/test/file_SUITE.erl b/lib/kernel/test/file_SUITE.erl
index 09d9a45197..a51025cba6 100644
--- a/lib/kernel/test/file_SUITE.erl
+++ b/lib/kernel/test/file_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2015. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@
%% %CopyrightEnd%
%%
-%% This is a developement feature when developing a new file module,
+%% This is a development feature when developing a new file module,
%% ugly but practical.
-ifndef(FILE_MODULE).
-define(FILE_MODULE, file).
@@ -39,6 +39,8 @@
-define(FILE_FIN_PER_TESTCASE(Config), Config).
-endif.
+-define(PRIM_FILE, prim_file).
+
-module(?FILE_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -51,20 +53,21 @@
pos1/1, pos2/1, pos3/1]).
-export([close/1, consult1/1, path_consult/1, delete/1]).
-export([ eval1/1, path_eval/1, script1/1, path_script/1,
- open1/1,
- old_modes/1, new_modes/1, path_open/1, open_errors/1]).
+ open1/1,
+ old_modes/1, new_modes/1, path_open/1, open_errors/1]).
-export([ file_info_basic_file/1, file_info_basic_directory/1,
- file_info_bad/1, file_info_times/1, file_write_file_info/1]).
+ file_info_bad/1, file_info_times/1, file_write_file_info/1,
+ file_wfi_helpers/1]).
-export([rename/1, access/1, truncate/1, datasync/1, sync/1,
read_write/1, pread_write/1, append/1, exclusive/1]).
-export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]).
-export([otp_5814/1, otp_10852/1]).
-export([ read_not_really_compressed/1,
- read_compressed_cooked/1, read_compressed_cooked_binary/1,
- read_cooked_tar_problem/1,
- write_compressed/1, compress_errors/1, catenated_gzips/1,
- compress_async_crash/1]).
+ read_compressed_cooked/1, read_compressed_cooked_binary/1,
+ read_cooked_tar_problem/1,
+ write_compressed/1, compress_errors/1, catenated_gzips/1,
+ compress_async_crash/1]).
-export([ make_link/1, read_link_info_for_non_link/1, symlinks/1]).
@@ -83,7 +86,7 @@
-export([unicode/1]).
-export([altname/1]).
--export([large_file/1, large_write/1]).
+-export([large_file/0, large_file/1, large_write/0, large_write/1]).
-export([read_line_1/1, read_line_2/1, read_line_3/1,read_line_4/1]).
@@ -97,6 +100,12 @@
-export([unicode_mode/1]).
+-export([volume_relative_paths/1,unc_paths/1]).
+
+-export([tiny_writes/1, tiny_writes_delayed/1,
+ large_writes/1, large_writes_delayed/1,
+ tiny_reads/1, tiny_reads_ahead/1]).
+
%% Debug exports
-export([create_file_slow/2, create_file/2, create_bin/2]).
-export([verify_file/2, verify_bin/3]).
@@ -106,23 +115,27 @@
%% System probe functions that might be handy to check from the shell
-export([disc_free/1, memsize/0]).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+
-include_lib("kernel/include/file.hrl").
-define(THROW_ERROR(RES), throw({fail, ?LINE, RES})).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[unicode, altname, read_write_file, {group, dirs},
- {group, files}, delete, rename, names, {group, errors},
- {group, compression}, {group, links}, copy,
+ {group, files}, delete, rename, names, volume_relative_paths, unc_paths,
+ {group, errors}, {group, compression}, {group, links}, copy,
delayed_write, read_ahead, segment_read, segment_write,
ipread, pid2name, interleaved_read_write, otp_5814, otp_10852,
large_file, large_write, read_line_1, read_line_2, read_line_3,
read_line_4, standard_io, old_io_protocol,
- unicode_mode
+ unicode_mode, {group, bench}
].
groups() ->
@@ -140,7 +153,8 @@ groups() ->
{pos, [], [pos1, pos2, pos3]},
{file_info, [],
[file_info_basic_file, file_info_basic_directory,
- file_info_bad, file_info_times, file_write_file_info]},
+ file_info_bad, file_info_times, file_write_file_info,
+ file_wfi_helpers]},
{consult, [], [consult1, path_consult]},
{eval, [], [eval1, path_eval]},
{script, [], [script1, path_script]},
@@ -152,11 +166,19 @@ groups() ->
write_compressed, compress_errors, catenated_gzips,
compress_async_crash]},
{links, [],
- [make_link, read_link_info_for_non_link, symlinks]}].
+ [make_link, read_link_info_for_non_link, symlinks]},
+ {bench, [],
+ [tiny_writes, tiny_writes_delayed,
+ large_writes, large_writes_delayed,
+ tiny_reads, tiny_reads_ahead]}].
init_per_group(_GroupName, Config) ->
Config.
+end_per_group(bench, Config) ->
+ ScratchDir = proplists:get_value(priv_dir, Config),
+ file:delete(filename:join(ScratchDir, "benchmark_scratch_file")),
+ Config;
end_per_group(_GroupName, Config) ->
Config.
@@ -168,16 +190,11 @@ init_per_suite(Config) when is_list(Config) ->
ok ->
[{sasl,started}]
end,
- ok = case os:type() of
- {ose,_} ->
- ok;
- _ ->
- application:start(os_mon)
- end,
+ application:start(os_mon),
case os:type() of
{win32, _} ->
- Priv = ?config(priv_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
HasAccessTime =
case ?FILE_MODULE:read_file_info(Priv) of
{ok, #file_info{atime={_, {0, 0, 0}}}} ->
@@ -199,12 +216,7 @@ end_per_suite(Config) when is_list(Config) ->
ok
end,
- case os:type() of
- {ose,_} ->
- ok;
- _ ->
- application:stop(os_mon)
- end,
+ application:stop(os_mon),
case proplists:get_value(sasl, Config) of
started ->
application:stop(sasl);
@@ -271,73 +283,66 @@ mini_server(Parent) ->
mini_server(Parent)
end.
-standard_io(suite) ->
- [];
-standard_io(doc) ->
- ["Test that standard i/o-servers work with file module"];
+%% Test that standard i/o-servers work with file module.
standard_io(Config) when is_list(Config) ->
%% Really just a smoke test
- ?line Pid = spawn(?MODULE,mini_server,[self()]),
- ?line register(mini_server,Pid),
- ?line ok = file:write(mini_server,<<"hej\n">>),
- ?line receive
- {io_request,_,_,{put_chars,<<"hej\n">>}} ->
- ok
- after 1000 ->
- exit(noreply)
- end,
- ?line {ok,"aaaaa"} = file:read(mini_server,5),
- ?line receive
- {io_request,_,_,{get_chars,'',5}} ->
- ok
- after 1000 ->
- exit(noreply)
- end,
- ?line {ok,"hej\n"} = file:read_line(mini_server),
- ?line receive
- {io_request,_,_,{get_line,''}} ->
- ok
- after 1000 ->
- exit(noreply)
- end,
- ?line OldGL = group_leader(),
- ?line group_leader(Pid,self()),
- ?line ok = file:write(standard_io,<<"hej\n">>),
- ?line group_leader(OldGL,self()),
- ?line receive
- {io_request,_,_,{put_chars,<<"hej\n">>}} ->
- ok
- after 1000 ->
- exit(noreply)
- end,
- ?line group_leader(Pid,self()),
- ?line {ok,"aaaaa"} = file:read(standard_io,5),
- ?line group_leader(OldGL,self()),
- ?line receive
- {io_request,_,_,{get_chars,'',5}} ->
- ok
- after 1000 ->
- exit(noreply)
- end,
- ?line group_leader(Pid,self()),
- ?line {ok,"hej\n"} = file:read_line(standard_io),
- ?line group_leader(OldGL,self()),
- ?line receive
- {io_request,_,_,{get_line,''}} ->
- ok
- after 1000 ->
- exit(noreply)
- end,
+ Pid = spawn(?MODULE,mini_server,[self()]),
+ register(mini_server,Pid),
+ ok = file:write(mini_server,<<"hej\n">>),
+ receive
+ {io_request,_,_,{put_chars,<<"hej\n">>}} ->
+ ok
+ after 1000 ->
+ exit(noreply)
+ end,
+ {ok,"aaaaa"} = file:read(mini_server,5),
+ receive
+ {io_request,_,_,{get_chars,'',5}} ->
+ ok
+ after 1000 ->
+ exit(noreply)
+ end,
+ {ok,"hej\n"} = file:read_line(mini_server),
+ receive
+ {io_request,_,_,{get_line,''}} ->
+ ok
+ after 1000 ->
+ exit(noreply)
+ end,
+ OldGL = group_leader(),
+ group_leader(Pid,self()),
+ ok = file:write(standard_io,<<"hej\n">>),
+ group_leader(OldGL,self()),
+ receive
+ {io_request,_,_,{put_chars,<<"hej\n">>}} ->
+ ok
+ after 1000 ->
+ exit(noreply)
+ end,
+ group_leader(Pid,self()),
+ {ok,"aaaaa"} = file:read(standard_io,5),
+ group_leader(OldGL,self()),
+ receive
+ {io_request,_,_,{get_chars,'',5}} ->
+ ok
+ after 1000 ->
+ exit(noreply)
+ end,
+ group_leader(Pid,self()),
+ {ok,"hej\n"} = file:read_line(standard_io),
+ group_leader(OldGL,self()),
+ receive
+ {io_request,_,_,{get_line,''}} ->
+ ok
+ after 1000 ->
+ exit(noreply)
+ end,
Pid ! die,
receive after 1000 -> ok end.
-old_io_protocol(suite) ->
- [];
-old_io_protocol(doc) ->
- ["Test that the old file IO protocol =< R16B still works"];
+%% Test that the old file IO protocol =< R16B still works.
old_io_protocol(Config) when is_list(Config) ->
- Dog = test_server:timetrap(test_server:seconds(5)),
- RootDir = ?config(priv_dir,Config),
+ RootDir = proplists:get_value(priv_dir,Config),
Name = filename:join(RootDir,
atom_to_list(?MODULE)
++"old_io_protocol.fil"),
@@ -350,14 +355,11 @@ old_io_protocol(Config) when is_list(Config) ->
end,
ok = ?FILE_MODULE:close(Fd),
{ok, <<>>} = ?FILE_MODULE:read_file(Name),
- test_server:timetrap_cancel(Dog),
[] = flush(),
ok.
-unicode_mode(suite) -> [];
-unicode_mode(doc) -> [""];
unicode_mode(Config) ->
- Dir = {dir, ?config(priv_dir,Config)},
+ Dir = {dir, proplists:get_value(priv_dir,Config)},
OptVariants = [[Dir],
[Dir, {encoding, utf8}],
[Dir, binary],
@@ -399,11 +401,11 @@ read_write_0(Str, {Func, ReadFun}, Options) ->
io:format("~p:~p: ~p ERROR: ~ts vs~n ~w~n - ~p~n",
[?MODULE, Line, Func, Str, ReadBytes, Options]),
exit({error, ?LINE});
- error:What ->
+ error:What:Stacktrace ->
io:format("~p:??: ~p ERROR: ~p from~n ~w~n ~p~n",
[?MODULE, Func, What, Str, Options]),
- io:format("\t~p~n", [erlang:get_stacktrace()]),
+ io:format("\t~p~n", [Stacktrace]),
exit({error, ?LINE})
end.
@@ -491,7 +493,7 @@ um_check_unicode(_Utf8Bin, {ok, _ListOrBin}, _, _UTF8_) ->
um_filename(Bin, Dir, Options) when is_binary(Bin) ->
um_filename(binary_to_list(Bin), Dir, Options);
um_filename(Str = [_|_], Dir, Options) ->
- Name = hd(string:tokens(Str, ":")),
+ Name = hd(string:lexemes(Str, ":")),
Enc = atom_to_list(proplists:get_value(encoding, Options, latin1)),
File = case lists:member(binary, Options) of
true ->
@@ -503,105 +505,106 @@ um_filename(Str = [_|_], Dir, Options) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-read_write_file(suite) -> [];
-read_write_file(doc) -> [];
read_write_file(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_read_write_file"),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_read_write_file"),
%% Try writing and reading back some term
- ?line SomeTerm = {"This term",{will,be},[written,$t,$o],1,file,[]},
- ?line ok = ?FILE_MODULE:write_file(Name,term_to_binary(SomeTerm)),
- ?line {ok,Bin1} = ?FILE_MODULE:read_file(Name),
- ?line SomeTerm = binary_to_term(Bin1),
-
+ SomeTerm = {"This term",{will,be},[written,$t,$o],1,file,[]},
+ Bin1 = term_to_binary(SomeTerm),
+ ok = do_read_write_file(Name, Bin1),
+
%% Try a "null" term
- ?line NullTerm = [],
- ?line ok = ?FILE_MODULE:write_file(Name,term_to_binary(NullTerm)),
- ?line {ok,Bin2} = ?FILE_MODULE:read_file(Name),
- ?line NullTerm = binary_to_term(Bin2),
-
- %% Try some "complicated" types
- ?line BigNum = 123456789012345678901234567890,
- ?line ComplTerm = {self(),make_ref(),BigNum,3.14159},
- ?line ok = ?FILE_MODULE:write_file(Name,term_to_binary(ComplTerm)),
- ?line {ok,Bin3} = ?FILE_MODULE:read_file(Name),
- ?line ComplTerm = binary_to_term(Bin3),
+ NullTerm = [],
+ Bin2 = term_to_binary(NullTerm),
+ ok = do_read_write_file(Name, Bin2),
%% Try reading a nonexistent file
- ?line Name2 = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_nonexistent_file"),
- ?line {error, enoent} = ?FILE_MODULE:read_file(Name2),
- ?line {error, enoent} = ?FILE_MODULE:read_file(""),
- ?line {error, enoent} = ?FILE_MODULE:read_file(''),
+ Name2 = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_nonexistent_file"),
+ {error, enoent} = ?FILE_MODULE:read_file(Name2),
+ {error, enoent} = ?FILE_MODULE:read_file(""),
+ {error, enoent} = ?FILE_MODULE:read_file(''),
- % Try writing to a bad filename
- ?line {error, enoent} =
- ?FILE_MODULE:write_file("",term_to_binary(NullTerm)),
+ %% Try writing to a bad filename
+ {error, enoent} = do_read_write_file("", Bin2),
- % Try writing something else than a binary
- ?line {error, badarg} = ?FILE_MODULE:write_file(Name,{1,2,3}),
- ?line {error, badarg} = ?FILE_MODULE:write_file(Name,self()),
+ %% Try writing something else than a binary
+ {error, badarg} = do_read_write_file(Name, {1,2,3}),
+ {error, badarg} = do_read_write_file(Name, self()),
%% Some non-term binaries
- ?line ok = ?FILE_MODULE:write_file(Name,[]),
- ?line {ok,Bin4} = ?FILE_MODULE:read_file(Name),
- ?line 0 = byte_size(Bin4),
+ ok = do_read_write_file(Name, []),
- ?line ok = ?FILE_MODULE:write_file(Name,[Bin1,[],[[Bin2]]]),
- ?line {ok,Bin5} = ?FILE_MODULE:read_file(Name),
- ?line {Bin1,Bin2} = split_binary(Bin5,byte_size(Bin1)),
+ %% Write some iolists
+ ok = do_read_write_file(Name, [Bin1,[],[[Bin2]]]),
+ ok = do_read_write_file(Name, ["string",<<"binary">>]),
+ ok = do_read_write_file(Name, "pure string"),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
+do_read_write_file(Name, Data) ->
+ case ?FILE_MODULE:write_file(Name, Data) of
+ ok ->
+ BinData = iolist_to_binary(Data),
+ {ok,BinData} = ?FILE_MODULE:read_file(Name),
+
+ ok = ?FILE_MODULE:write_file(Name, Data, []),
+ {ok,BinData} = ?FILE_MODULE:read_file(Name),
+
+ ok = ?FILE_MODULE:write_file(Name, Data, [raw]),
+ {ok,BinData} = ?FILE_MODULE:read_file(Name),
+
+ ok;
+ {error,_}=Res ->
+ Res = ?FILE_MODULE:write_file(Name, Data, []),
+ Res = ?FILE_MODULE:write_file(Name, Data, [raw]),
+ Res
+ end.
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-make_del_dir(suite) -> [];
-make_del_dir(doc) -> [];
make_del_dir(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_mk-dir"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
- ?line {error, eexist} = ?FILE_MODULE:make_dir(NewDir),
- ?line ok = ?FILE_MODULE:del_dir(NewDir),
- ?line {error, enoent} = ?FILE_MODULE:del_dir(NewDir),
- % Make sure we are not in a directory directly under test_server
- % as that would result in eacces errors when trying to delete '..',
- % because there are processes having that directory as current.
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
- ?line {ok,CurrentDir} = file:get_cwd(),
+ RootDir = proplists:get_value(priv_dir,Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_mk-dir"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
+ {error, eexist} = ?FILE_MODULE:make_dir(NewDir),
+ ok = ?FILE_MODULE:del_dir(NewDir),
+ {error, enoent} = ?FILE_MODULE:del_dir(NewDir),
+ %% Make sure we are not in a directory directly under test_server
+ %% as that would result in eacces errors when trying to delete '..',
+ %% because there are processes that have that directory as their cwd.
+ ok = ?FILE_MODULE:make_dir(NewDir),
+ {ok,CurrentDir} = file:get_cwd(),
case {os:type(), length(NewDir) >= 260 } of
{{win32,_}, true} ->
io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH)\n", []),
io:format("\nNewDir = ~p\n", [NewDir]);
_ ->
- ?line ok = ?FILE_MODULE:set_cwd(NewDir)
+ ok = ?FILE_MODULE:set_cwd(NewDir)
end,
try
%% Check that we get an error when trying to create...
%% a deep directory
- ?line NewDir2 = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_mk-dir-noexist/foo"),
- ?line {error, enoent} = ?FILE_MODULE:make_dir(NewDir2),
+ NewDir2 = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_mk-dir-noexist/foo"),
+ {error, enoent} = ?FILE_MODULE:make_dir(NewDir2),
%% a nameless directory
- ?line {error, enoent} = ?FILE_MODULE:make_dir(""),
+ {error, enoent} = ?FILE_MODULE:make_dir(""),
%% a directory with illegal name
- ?line {error, badarg} = ?FILE_MODULE:make_dir({1,2,3}),
-
+ {error, badarg} = ?FILE_MODULE:make_dir({1,2,3}),
+
%% a directory with illegal name, even if it's a (bad) list
- ?line {error, badarg} = ?FILE_MODULE:make_dir([1,2,3,{}]),
-
+ {error, badarg} = ?FILE_MODULE:make_dir([1,2,3,{}]),
+
%% Maybe this isn't an error, exactly, but worth mentioning anyway:
%% ok = ?FILE_MODULE:make_dir([$f,$o,$o,0,$b,$a,$r])),
%% The above line works, and created a directory "./foo"
@@ -609,40 +612,36 @@ make_del_dir(Config) when is_list(Config) ->
%% a directory, but with a name that incorporates the "bar" part of
%% the list, so that [$f,$o,$o,0,$f,$o,$o] wouldn't refer to the same
%% dir. But this would slow it down.
-
+
%% Try deleting some bad directories
%% Deleting the parent directory of the current one sounds dangerous, huh?
%% Don't worry ;-) the parent directory should never be empty, right?
- ?line case ?FILE_MODULE:del_dir('..') of
- {error, eexist} -> ok;
- {error, eacces} -> ok; %OpenBSD
- {error, einval} -> ok %FreeBSD
- end,
- ?line {error, enoent} = ?FILE_MODULE:del_dir(""),
- ?line {error, badarg} = ?FILE_MODULE:del_dir([3,2,1,{}]),
-
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog)
+ case ?FILE_MODULE:del_dir('..') of
+ {error, eexist} -> ok;
+ {error, eacces} -> ok; %OpenBSD
+ {error, einval} -> ok %FreeBSD
+ end,
+ {error, enoent} = ?FILE_MODULE:del_dir(""),
+ {error, badarg} = ?FILE_MODULE:del_dir([3,2,1,{}]),
+
+ [] = flush()
after
- ?FILE_MODULE:set_cwd(CurrentDir)
+ ?FILE_MODULE:set_cwd(CurrentDir)
end,
ok.
-cur_dir_0(suite) -> [];
-cur_dir_0(doc) -> [];
cur_dir_0(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
%% Find out the current dir, and cd to it ;-)
- ?line {ok,BaseDir} = ?FILE_MODULE:get_cwd(),
- ?line Dir1 = BaseDir ++ "", %% Check that it's a string
- ?line ok = ?FILE_MODULE:set_cwd(Dir1),
+ {ok,BaseDir} = ?FILE_MODULE:get_cwd(),
+ Dir1 = BaseDir ++ "", %% Check that it's a string
+ ok = ?FILE_MODULE:set_cwd(Dir1),
%% Make a new dir, and cd to that
- ?line RootDir = ?config(priv_dir,Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_curdir"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
+ RootDir = proplists:get_value(priv_dir,Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_curdir"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
case {os:type(), length(NewDir) >= 260} of
{{win32,_}, true} ->
io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH):\n"),
@@ -659,6 +658,10 @@ cur_dir_0(Config) when is_list(Config) ->
{ok,NewDirFiles} = ?FILE_MODULE:list_dir("."),
true = lists:member(UncommonName,NewDirFiles),
+ %% Ensure that we get the same result with a trailing slash; the
+ %% APIs used on Windows will choke on them if passed directly.
+ {ok,NewDirFiles} = ?FILE_MODULE:list_dir("./"),
+
%% Delete the directory and return to the old current directory
%% and check that the created file isn't there (too!)
expect({error, einval}, {error, eacces},
@@ -675,52 +678,51 @@ cur_dir_0(Config) when is_list(Config) ->
{ok,OldDirFiles} = ?FILE_MODULE:list_dir("."),
false = lists:member(UncommonName,OldDirFiles)
end,
-
+
%% Try doing some bad things
- ?line {error, badarg} = ?FILE_MODULE:set_cwd({foo,bar}),
- ?line {error, enoent} = ?FILE_MODULE:set_cwd(""),
- ?line {error, enoent} = ?FILE_MODULE:set_cwd(".......a......"),
- ?line {ok,BaseDir} = ?FILE_MODULE:get_cwd(), %% Still there?
+ {error, badarg} = ?FILE_MODULE:set_cwd({foo,bar}),
+ {error, enoent} = ?FILE_MODULE:set_cwd(""),
+ {error, enoent} = ?FILE_MODULE:set_cwd(".......a......"),
+ {ok,BaseDir} = ?FILE_MODULE:get_cwd(), %% Still there?
%% On Windows, there should only be slashes, no backslashes,
%% in the return value of get_cwd().
%% (The test is harmless on Unix, because filenames usually
%% don't contain backslashes.)
- ?line {ok, BaseDir} = ?FILE_MODULE:get_cwd(),
- ?line false = lists:member($\\, BaseDir),
+ {ok, BaseDir} = ?FILE_MODULE:get_cwd(),
+ false = lists:member($\\, BaseDir),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
%% Tests ?FILE_MODULE:get_cwd/1.
-cur_dir_1(suite) -> [];
-cur_dir_1(doc) -> [];
cur_dir_1(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
-
- ?line case os:type() of
- {win32, _} ->
- win_cur_dir_1(Config);
- _ ->
- ?line {error, enotsup} = ?FILE_MODULE:get_cwd("d:")
- end,
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ case os:type() of
+ {win32, _} ->
+ win_cur_dir_1(Config);
+ _ ->
+ {error, enotsup} = ?FILE_MODULE:get_cwd("d:")
+ end,
+ [] = flush(),
ok.
-
+
win_cur_dir_1(_Config) ->
- ?line {ok,BaseDir} = ?FILE_MODULE:get_cwd(),
+ {ok,BaseDir} = ?FILE_MODULE:get_cwd(),
%% Get the drive letter from the current directory,
%% and try to get current directory for that drive.
- ?line [Drive,$:|_] = BaseDir,
- ?line {ok,BaseDir} = ?FILE_MODULE:get_cwd([Drive,$:]),
+ [CurDrive,$:|_] = BaseDir,
+ {ok,BaseDir} = ?FILE_MODULE:get_cwd([CurDrive,$:]),
io:format("BaseDir = ~s\n", [BaseDir]),
+ %% We should error out on non-existent drives. Any reasonable system will
+ %% have at least one.
+ CurDirs = [?FILE_MODULE:get_cwd([Drive,$:]) || Drive <- lists:seq($A, $Z)],
+ true = lists:member({error,eacces}, CurDirs),
+
%% Unfortunately, there is no way to move away from the
%% current drive as we can't use the "subst" command from
%% a SSH connection. We can't test any more.
@@ -733,7 +735,7 @@ win_cur_dir_1(_Config) ->
%%%
list_dir_error(Config) ->
- Priv = ?config(priv_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
NonExisting = filename:join(Priv, "non-existing-dir"),
{error,enoent} = ?FILE_MODULE:list_dir(NonExisting),
ok.
@@ -743,7 +745,7 @@ list_dir_error(Config) ->
%%%
list_dir(Config) ->
- RootDir = ?config(priv_dir, Config),
+ RootDir = proplists:get_value(priv_dir, Config),
TestDir = filename:join(RootDir, ?MODULE_STRING++"_list_dir"),
?FILE_MODULE:make_dir(TestDir),
list_dir_1(TestDir, 42, []).
@@ -773,7 +775,7 @@ untranslatable_names(Config) ->
untranslatable_names_1(Config) ->
{ok,OldCwd} = file:get_cwd(),
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
Dir = filename:join(PrivDir, "untranslatable_names"),
ok = file:make_dir(Dir),
Node = start_node(untranslatable_names, "+fnu"),
@@ -814,7 +816,7 @@ untranslatable_names_error(Config) ->
untranslatable_names_error_1(Config) ->
{ok,OldCwd} = file:get_cwd(),
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
Dir = filename:join(PrivDir, "untranslatable_names_error"),
ok = file:make_dir(Dir),
Node = start_node(untranslatable_names, "+fnue"),
@@ -858,11 +860,11 @@ no_untranslatable_names() ->
end.
start_node(Name, Args) ->
- [_,Host] = string:tokens(atom_to_list(node()), "@"),
+ [_,Host] = string:lexemes(atom_to_list(node()), "@"),
ct:log("Trying to start ~w@~s~n", [Name,Host]),
case test_server:start_node(Name, peer, [{args,Args}]) of
{error,Reason} ->
- test_server:fail(Reason);
+ ct:fail(Reason);
{ok,Node} ->
ct:log("Node ~p started~n", [Node]),
Node
@@ -873,568 +875,525 @@ start_node(Name, Args) ->
-open1(suite) -> [];
-open1(doc) -> [];
open1(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_files"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
- ?line Name = filename:join(NewDir, "foo1.fil"),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,read_write),
- ?line {ok,Fd2} = ?FILE_MODULE:open(Name,read),
- ?line Str = "{a,tuple}.\n",
- ?line io:format(Fd1,Str,[]),
- ?line {ok,0} = ?FILE_MODULE:position(Fd1,bof),
- ?line Str = io:get_line(Fd1,''),
- ?line case io:get_line(Fd2,'') of
- Str -> Str;
- eof -> Str
- end,
- ?line ok = ?FILE_MODULE:close(Fd2),
- ?line {ok,0} = ?FILE_MODULE:position(Fd1,bof),
- ?line ok = ?FILE_MODULE:truncate(Fd1),
- ?line eof = io:get_line(Fd1,''),
- ?line ok = ?FILE_MODULE:close(Fd1),
- ?line {ok,Fd3} = ?FILE_MODULE:open(Name,read),
- ?line eof = io:get_line(Fd3,''),
- ?line ok = ?FILE_MODULE:close(Fd3),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ RootDir = proplists:get_value(priv_dir,Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_files"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
+ Name = filename:join(NewDir, "foo1.fil"),
+ {ok,Fd1} = ?FILE_MODULE:open(Name,read_write),
+ {ok,Fd2} = ?FILE_MODULE:open(Name,read),
+ Str = "{a,tuple}.\n",
+ io:format(Fd1,Str,[]),
+ {ok,0} = ?FILE_MODULE:position(Fd1,bof),
+ Str = io:get_line(Fd1,''),
+ Str = io:get_line(Fd2,''),
+ ok = ?FILE_MODULE:close(Fd2),
+ {ok,0} = ?FILE_MODULE:position(Fd1,bof),
+ ok = ?FILE_MODULE:truncate(Fd1),
+ eof = io:get_line(Fd1,''),
+ ok = ?FILE_MODULE:close(Fd1),
+ {ok,Fd3} = ?FILE_MODULE:open(Name,read),
+ eof = io:get_line(Fd3,''),
+ ok = ?FILE_MODULE:close(Fd3),
+ [] = flush(),
ok.
%% Tests all open modes.
-old_modes(suite) -> [];
-old_modes(doc) -> [];
old_modes(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_old_open_modes"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
- ?line Name1 = filename:join(NewDir, "foo1.fil"),
- ?line Marker = "hello, world",
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_old_open_modes"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
+ Name1 = filename:join(NewDir, "foo1.fil"),
+ Marker = "hello, world",
%% write
- ?line {ok, Fd1} = ?FILE_MODULE:open(Name1, write),
- ?line ok = io:write(Fd1, Marker),
- ?line ok = io:put_chars(Fd1, ".\n"),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ {ok, Fd1} = ?FILE_MODULE:open(Name1, write),
+ ok = io:write(Fd1, Marker),
+ ok = io:put_chars(Fd1, ".\n"),
+ ok = ?FILE_MODULE:close(Fd1),
%% read
- ?line {ok, Fd2} = ?FILE_MODULE:open(Name1, read),
- ?line {ok, Marker} = io:read(Fd2, prompt),
- ?line ok = ?FILE_MODULE:close(Fd2),
+ {ok, Fd2} = ?FILE_MODULE:open(Name1, read),
+ {ok, Marker} = io:read(Fd2, prompt),
+ ok = ?FILE_MODULE:close(Fd2),
%% read_write
- ?line {ok, Fd3} = ?FILE_MODULE:open(Name1, read_write),
- ?line {ok, Marker} = io:read(Fd3, prompt),
- ?line ok = io:write(Fd3, Marker),
- ?line ok = ?FILE_MODULE:close(Fd3),
+ {ok, Fd3} = ?FILE_MODULE:open(Name1, read_write),
+ {ok, Marker} = io:read(Fd3, prompt),
+ ok = io:write(Fd3, Marker),
+ ok = ?FILE_MODULE:close(Fd3),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
-new_modes(suite) -> [];
-new_modes(doc) -> [];
new_modes(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_new_open_modes"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
- ?line Name1 = filename:join(NewDir, "foo1.fil"),
- ?line Marker = "hello, world",
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_new_open_modes"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
+ Name1 = filename:join(NewDir, "foo1.fil"),
+ Marker = "hello, world",
%% write
- ?line {ok, Fd1} = ?FILE_MODULE:open(Name1, [write]),
- ?line ok = io:write(Fd1, Marker),
- ?line ok = io:put_chars(Fd1, ".\n"),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ {ok, Fd1} = ?FILE_MODULE:open(Name1, [write]),
+ ok = io:write(Fd1, Marker),
+ ok = io:put_chars(Fd1, ".\n"),
+ ok = ?FILE_MODULE:close(Fd1),
%% read
- ?line {ok, Fd2} = ?FILE_MODULE:open(Name1, [read]),
- ?line {ok, Marker} = io:read(Fd2, prompt),
- ?line ok = ?FILE_MODULE:close(Fd2),
+ {ok, Fd2} = ?FILE_MODULE:open(Name1, [read]),
+ {ok, Marker} = io:read(Fd2, prompt),
+ ok = ?FILE_MODULE:close(Fd2),
%% read and write
- ?line {ok, Fd3} = ?FILE_MODULE:open(Name1, [read, write]),
- ?line {ok, Marker} = io:read(Fd3, prompt),
- ?line ok = io:write(Fd3, Marker),
- ?line ok = ?FILE_MODULE:close(Fd3),
+ {ok, Fd3} = ?FILE_MODULE:open(Name1, [read, write]),
+ {ok, Marker} = io:read(Fd3, prompt),
+ ok = io:write(Fd3, Marker),
+ ok = ?FILE_MODULE:close(Fd3),
%% read by default
- ?line {ok, Fd4} = ?FILE_MODULE:open(Name1, []),
- ?line {ok, Marker} = io:read(Fd4, prompt),
- ?line ok = ?FILE_MODULE:close(Fd4),
+ {ok, Fd4} = ?FILE_MODULE:open(Name1, []),
+ {ok, Marker} = io:read(Fd4, prompt),
+ ok = ?FILE_MODULE:close(Fd4),
%% read and binary
- ?line {ok, Fd5} = ?FILE_MODULE:open(Name1, [read, binary]),
- ?line {ok, Marker} = io:read(Fd5, prompt),
- ?line ok = ?FILE_MODULE:close(Fd5),
+ {ok, Fd5} = ?FILE_MODULE:open(Name1, [read, binary]),
+ {ok, Marker} = io:read(Fd5, prompt),
+ ok = ?FILE_MODULE:close(Fd5),
%% read, raw
- ?line {ok, Fd6} = ?FILE_MODULE:open(Name1, [read, raw]),
- ?line {ok, [$\[]} = ?FILE_MODULE:read(Fd6, 1),
- ?line ok = ?FILE_MODULE:close(Fd6),
-
- %% write and sync
- case ?FILE_MODULE:open(Name1, [write, sync]) of
- {ok, Fd7} ->
- ok = io:write(Fd7, Marker),
- ok = io:put_chars(Fd7, ".\n"),
- ok = ?FILE_MODULE:close(Fd7),
- {ok, Fd8} = ?FILE_MODULE:open(Name1, [read]),
- {ok, Marker} = io:read(Fd8, prompt),
- ok = ?FILE_MODULE:close(Fd8);
- {error, enotsup} ->
- %% for platforms that don't support the sync option
- ok
- end,
+ {ok, Fd6} = ?FILE_MODULE:open(Name1, [read, raw]),
+ {ok, [$\[]} = ?FILE_MODULE:read(Fd6, 1),
+ ok = ?FILE_MODULE:close(Fd6),
+
+ %% write and sync
+ case ?FILE_MODULE:open(Name1, [write, sync]) of
+ {ok, Fd7} ->
+ ok = io:write(Fd7, Marker),
+ ok = io:put_chars(Fd7, ".\n"),
+ ok = ?FILE_MODULE:close(Fd7),
+ {ok, Fd8} = ?FILE_MODULE:open(Name1, [read]),
+ {ok, Marker} = io:read(Fd8, prompt),
+ ok = ?FILE_MODULE:close(Fd8);
+ {error, enotsup} ->
+ %% for platforms that don't support the sync option
+ ok
+ end,
+
+ [] = flush(),
+ ok.
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
- ok.
-
-path_open(suite) -> [];
-path_open(doc) -> [];
path_open(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_path_open"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
- ?line FileName = "path_open.fil",
- ?line Name = filename:join(RootDir, FileName),
- ?line {ok,Fd1,_FullName1} =
+ RootDir = proplists:get_value(priv_dir,Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_path_open"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
+ FileName = "path_open.fil",
+ Name = filename:join(RootDir, FileName),
+ {ok,Fd1,_FullName1} =
?FILE_MODULE:path_open(
- [RootDir,
- "nosuch1",
- NewDir],FileName,write),
- ?line io:format(Fd1,"ABCDEFGH",[]),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ [RootDir,
+ "nosuch1",
+ NewDir],FileName,write),
+ io:format(Fd1,"ABCDEFGH",[]),
+ ok = ?FILE_MODULE:close(Fd1),
%% locate it in the last dir
- ?line {ok,Fd2,_FullName2} =
+ {ok,Fd2,_FullName2} =
?FILE_MODULE:path_open(
- ["nosuch1",
- NewDir,
- RootDir],FileName,read),
- ?line {ok,2} =
+ ["nosuch1",
+ NewDir,
+ RootDir],FileName,read),
+ {ok,2} =
?FILE_MODULE:position(Fd2,2), "C" = io:get_chars(Fd2,'',1),
- ?line ok = ?FILE_MODULE:close(Fd2),
+ ok = ?FILE_MODULE:close(Fd2),
%% Try a failing path
- ?line {error, enoent} = ?FILE_MODULE:path_open(
- ["nosuch1",
- NewDir],FileName,read),
+ {error, enoent} = ?FILE_MODULE:path_open(
+ ["nosuch1",
+ NewDir],FileName,read),
%% Check that it's found regardless of path, if an absolute name given
- ?line {ok,Fd3,_FullPath3} =
+ {ok,Fd3,_FullPath3} =
?FILE_MODULE:path_open(
- ["nosuch1",
- NewDir],Name,read),
- ?line {ok,2} =
+ ["nosuch1",
+ NewDir],Name,read),
+ {ok,2} =
?FILE_MODULE:position(Fd3,2), "C" = io:get_chars(Fd3,'',1),
- ?line ok = ?FILE_MODULE:close(Fd3),
+ ok = ?FILE_MODULE:close(Fd3),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
-close(suite) -> [];
-close(doc) -> [];
close(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_close.fil"),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,read_write),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_close.fil"),
+ {ok,Fd1} = ?FILE_MODULE:open(Name,read_write),
%% Just closing it is no fun, we did that a million times already
%% This is a common error, for code written before Erlang 4.3
%% because then ?FILE_MODULE:open just returned a Pid, and not everyone
%% really checked what they got.
- ?line {'EXIT',_Msg} = (catch ok = ?FILE_MODULE:close({ok,Fd1})),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ {'EXIT',_Msg} = (catch ok = ?FILE_MODULE:close({ok,Fd1})),
+ ok = ?FILE_MODULE:close(Fd1),
%% Try closing one more time
- ?line Val = ?FILE_MODULE:close(Fd1),
- ?line io:format("Second close gave: ~p",[Val]),
+ Val = ?FILE_MODULE:close(Fd1),
+ io:format("Second close gave: ~p",[Val]),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ %% All operations on a closed raw file should fail with einval, even if
+ %% they're not supported on the current platform.
+ {ok,Fd2} = ?FILE_MODULE:open(Name, [read, write, raw]),
+ ok = ?FILE_MODULE:close(Fd2),
+
+ {error, einval} = ?FILE_MODULE:advise(Fd2, 5, 5, normal),
+ {error, einval} = ?FILE_MODULE:allocate(Fd2, 5, 5),
+ {error, einval} = ?FILE_MODULE:close(Fd2),
+ {error, einval} = ?FILE_MODULE:datasync(Fd2),
+ {error, einval} = ?FILE_MODULE:position(Fd2, 5),
+ {error, einval} = ?FILE_MODULE:pread(Fd2, 5, 1),
+ {error, einval} = ?FILE_MODULE:pwrite(Fd2, 5, "einval please"),
+ {error, einval} = ?FILE_MODULE:read(Fd2, 1),
+ {error, einval} = ?FILE_MODULE:sync(Fd2),
+ {error, einval} = ?FILE_MODULE:truncate(Fd2),
+ {error, einval} = ?FILE_MODULE:write(Fd2, "einval please"),
+
+ [] = flush(),
ok.
-access(suite) -> [];
-access(doc) -> [];
access(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_access.fil"),
- ?line Str = "ABCDEFGH",
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,write),
- ?line io:format(Fd1,Str,[]),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_access.fil"),
+ Str = "ABCDEFGH",
+ {ok,Fd1} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd1,Str,[]),
+ ok = ?FILE_MODULE:close(Fd1),
%% Check that we can't write when in read only mode
- ?line {ok,Fd2} = ?FILE_MODULE:open(Name,read),
- ?line case catch io:format(Fd2,"XXXX",[]) of
- ok ->
- test_server:fail({format,write});
- _ ->
- ok
- end,
- ?line ok = ?FILE_MODULE:close(Fd2),
- ?line {ok,Fd3} = ?FILE_MODULE:open(Name,read),
- ?line Str = io:get_line(Fd3,''),
- ?line ok = ?FILE_MODULE:close(Fd3),
+ {ok,Fd2} = ?FILE_MODULE:open(Name,read),
+ case catch io:format(Fd2,"XXXX",[]) of
+ ok ->
+ ct:fail({format,write});
+ _ ->
+ ok
+ end,
+ ok = ?FILE_MODULE:close(Fd2),
+ {ok,Fd3} = ?FILE_MODULE:open(Name,read),
+ Str = io:get_line(Fd3,''),
+ ok = ?FILE_MODULE:close(Fd3),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
%% Tests ?FILE_MODULE:read/2 and ?FILE_MODULE:write/2.
-read_write(suite) -> [];
-read_write(doc) -> [];
read_write(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_read_write"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
- ?line Marker = "hello, world",
- ?line MarkerB = list_to_binary(Marker),
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_read_write"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
+ Marker = "hello, world",
+ MarkerB = list_to_binary(Marker),
%% Plain file.
- ?line Name1 = filename:join(NewDir, "plain.fil"),
- ?line {ok, Fd1} = ?FILE_MODULE:open(Name1, [read, write]),
- ?line read_write_test(Fd1, Marker, []),
+ Name1 = filename:join(NewDir, "plain.fil"),
+ {ok, Fd1} = ?FILE_MODULE:open(Name1, [read, write]),
+ read_write_test(Fd1, Marker, []),
%% Raw file.
- ?line Name2 = filename:join(NewDir, "raw.fil"),
- ?line {ok, Fd2} = ?FILE_MODULE:open(Name2, [read, write, raw]),
- ?line read_write_test(Fd2, Marker, []),
+ Name2 = filename:join(NewDir, "raw.fil"),
+ {ok, Fd2} = ?FILE_MODULE:open(Name2, [read, write, raw]),
+ read_write_test(Fd2, Marker, []),
%% Plain binary file.
- ?line Name3 = filename:join(NewDir, "plain-b.fil"),
- ?line {ok, Fd3} = ?FILE_MODULE:open(Name3, [read, write, binary]),
- ?line read_write_test(Fd3, MarkerB, <<>>),
+ Name3 = filename:join(NewDir, "plain-b.fil"),
+ {ok, Fd3} = ?FILE_MODULE:open(Name3, [read, write, binary]),
+ read_write_test(Fd3, MarkerB, <<>>),
%% Raw binary file.
- ?line Name4 = filename:join(NewDir, "raw-b.fil"),
- ?line {ok, Fd4} = ?FILE_MODULE:open(Name4, [read, write, raw, binary]),
- ?line read_write_test(Fd4, MarkerB, <<>>),
+ Name4 = filename:join(NewDir, "raw-b.fil"),
+ {ok, Fd4} = ?FILE_MODULE:open(Name4, [read, write, raw, binary]),
+ read_write_test(Fd4, MarkerB, <<>>),
- ?line test_server:timetrap_cancel(Dog),
ok.
read_write_test(File, Marker, Empty) ->
- ?line ok = ?FILE_MODULE:write(File, Marker),
- ?line {ok, 0} = ?FILE_MODULE:position(File, 0),
- ?line {ok, Empty} = ?FILE_MODULE:read(File, 0),
- ?line {ok, Marker} = ?FILE_MODULE:read(File, 100),
- ?line eof = ?FILE_MODULE:read(File, 100),
- ?line {ok, Empty} = ?FILE_MODULE:read(File, 0),
- ?line ok = ?FILE_MODULE:close(File),
- ?line [] = flush(),
+ ok = ?FILE_MODULE:write(File, Marker),
+ {ok, 0} = ?FILE_MODULE:position(File, 0),
+ {ok, Empty} = ?FILE_MODULE:read(File, 0),
+ {ok, Marker} = ?FILE_MODULE:read(File, 100),
+ eof = ?FILE_MODULE:read(File, 100),
+ {ok, Empty} = ?FILE_MODULE:read(File, 0),
+ ok = ?FILE_MODULE:close(File),
+ [] = flush(),
ok.
%% Tests ?FILE_MODULE:pread/2 and ?FILE_MODULE:pwrite/2.
-pread_write(suite) -> [];
-pread_write(doc) -> [];
pread_write(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_pread_write"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
- ?line List = "hello, world",
- ?line Bin = list_to_binary(List),
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_pread_write"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
+ List = "hello, world",
+ Bin = list_to_binary(List),
%% Plain file.
- ?line Name1 = filename:join(NewDir, "plain.fil"),
- ?line {ok, Fd1} = ?FILE_MODULE:open(Name1, [read, write]),
- ?line pread_write_test(Fd1, List),
+ Name1 = filename:join(NewDir, "plain.fil"),
+ {ok, Fd1} = ?FILE_MODULE:open(Name1, [read, write]),
+ pread_write_test(Fd1, List),
%% Raw file.
- ?line Name2 = filename:join(NewDir, "raw.fil"),
- ?line {ok, Fd2} = ?FILE_MODULE:open(Name2, [read, write, raw]),
- ?line pread_write_test(Fd2, List),
+ Name2 = filename:join(NewDir, "raw.fil"),
+ {ok, Fd2} = ?FILE_MODULE:open(Name2, [read, write, raw]),
+ pread_write_test(Fd2, List),
%% Plain file. Binary mode.
- ?line Name3 = filename:join(NewDir, "plain-binary.fil"),
- ?line {ok, Fd3} = ?FILE_MODULE:open(Name3, [binary, read, write]),
- ?line pread_write_test(Fd3, Bin),
+ Name3 = filename:join(NewDir, "plain-binary.fil"),
+ {ok, Fd3} = ?FILE_MODULE:open(Name3, [binary, read, write]),
+ pread_write_test(Fd3, Bin),
%% Raw file. Binary mode.
- ?line Name4 = filename:join(NewDir, "raw-binary.fil"),
- ?line {ok, Fd4} = ?FILE_MODULE:open(Name4, [binary, read, write, raw]),
- ?line pread_write_test(Fd4, Bin),
+ Name4 = filename:join(NewDir, "raw-binary.fil"),
+ {ok, Fd4} = ?FILE_MODULE:open(Name4, [binary, read, write, raw]),
+ pread_write_test(Fd4, Bin),
- ?line test_server:timetrap_cancel(Dog),
ok.
pread_write_test(File, Data) ->
- ?line io:format("~p:pread_write_test(~p,~p)~n", [?MODULE, File, Data]),
- ?line Size = if is_binary(Data) -> byte_size(Data);
- is_list(Data) -> length(Data)
- end,
- ?line I = Size + 17,
- ?line ok = ?FILE_MODULE:pwrite(File, 0, Data),
- Res = ?FILE_MODULE:pread(File, 0, I),
- ?line {ok, Data} = Res,
- ?line eof = ?FILE_MODULE:pread(File, I, 1),
- ?line ok = ?FILE_MODULE:pwrite(File, [{0, Data}, {I, Data}]),
- ?line {ok, [Data, eof, Data]} =
+ io:format("~p:pread_write_test(~p,~p)~n", [?MODULE, File, Data]),
+ Size = if is_binary(Data) -> byte_size(Data);
+ is_list(Data) -> length(Data)
+ end,
+ I = Size + 17,
+ ok = ?FILE_MODULE:pwrite(File, 0, Data),
+ {ok, Data} = ?FILE_MODULE:pread(File, 0, I),
+ {ok, [Data]} = ?FILE_MODULE:pread(File, [{0, I}]),
+ eof = ?FILE_MODULE:pread(File, I, 1),
+ ok = ?FILE_MODULE:pwrite(File, [{0, Data}, {I, Data}]),
+ {ok, [Data, eof, Data]} =
?FILE_MODULE:pread(File, [{0, Size}, {2*I, 1}, {I, Size}]),
- ?line Plist = lists:seq(21*I, 0, -I),
- ?line Pwrite = lists:map(fun(P)->{P,Data}end, Plist),
- ?line Pread = [{22*I,Size} | lists:map(fun(P)->{P,Size}end, Plist)],
- ?line Presult = [eof | lists:map(fun(_)->Data end, Plist)],
- ?line ok = ?FILE_MODULE:pwrite(File, Pwrite),
- ?line {ok, Presult} = ?FILE_MODULE:pread(File, Pread),
- ?line ok = ?FILE_MODULE:close(File),
- ?line [] = flush(),
+ Plist = lists:seq(21*I, 0, -I),
+ Pwrite = lists:map(fun(P)->{P,Data}end, Plist),
+ Pread = [{22*I,Size} | lists:map(fun(P)->{P,Size}end, Plist)],
+ Presult = [eof | lists:map(fun(_)->Data end, Plist)],
+ ok = ?FILE_MODULE:pwrite(File, Pwrite),
+ {ok, Presult} = ?FILE_MODULE:pread(File, Pread),
+ ok = ?FILE_MODULE:close(File),
+ [] = flush(),
ok.
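
%% A minimal sketch (separate from the suite) of the scatter/gather forms
%% exercised above: file:pwrite/2 takes a list of {Offset, Bytes} and
%% file:pread/2 a list of {Offset, Size}, returning eof for ranges beyond
%% the end of the file. The plain file module calls, the helper name and
%% the file name are illustrative assumptions, not part of the suite.
pread_pwrite_sketch() ->
    {ok, Fd} = file:open("scatter_gather.tmp", [read, write, raw, binary]),
    ok = file:pwrite(Fd, [{0, <<"head">>}, {100, <<"tail">>}]),
    {ok, [<<"head">>, <<"tail">>, eof]} =
        file:pread(Fd, [{0, 4}, {100, 4}, {200, 1}]),
    ok = file:close(Fd).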
-append(doc) -> "Test appending to a file.";
-append(suite) -> [];
+%% Test appending to a file.
append(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_append"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_append"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
- ?line First = "First line\n",
- ?line Second = "Seond lines comes here\n",
- ?line Third = "And here is the third line\n",
+ First = "First line\n",
+ Second = "Seond lines comes here\n",
+ Third = "And here is the third line\n",
%% Write a small text file.
- ?line Name1 = filename:join(NewDir, "a_file.txt"),
- ?line {ok, Fd1} = ?FILE_MODULE:open(Name1, [write]),
- ?line ok = io:format(Fd1, First, []),
- ?line ok = io:format(Fd1, Second, []),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ Name1 = filename:join(NewDir, "a_file.txt"),
+ {ok, Fd1} = ?FILE_MODULE:open(Name1, [write]),
+ ok = io:format(Fd1, First, []),
+ ok = io:format(Fd1, Second, []),
+ ok = ?FILE_MODULE:close(Fd1),
%% Open it again and append a line to it.
- ?line {ok, Fd2} = ?FILE_MODULE:open(Name1, [append]),
- ?line ok = io:format(Fd2, Third, []),
- ?line ok = ?FILE_MODULE:close(Fd2),
+ {ok, Fd2} = ?FILE_MODULE:open(Name1, [append]),
+ ok = io:format(Fd2, Third, []),
+ ok = ?FILE_MODULE:close(Fd2),
%% Read it back and verify.
- ?line Expected = list_to_binary([First, Second, Third]),
- ?line {ok, Expected} = ?FILE_MODULE:read_file(Name1),
+ Expected = list_to_binary([First, Second, Third]),
+ {ok, Expected} = ?FILE_MODULE:read_file(Name1),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
-open_errors(suite) -> [];
-open_errors(doc) -> [];
open_errors(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line DataDir =
+ DataDir =
filename:dirname(
- filename:join(?config(data_dir, Config), "x")),
- ?line DataDirSlash = DataDir++"/",
- ?line {error, E1} = ?FILE_MODULE:open(DataDir, [read]),
- ?line {error, E2} = ?FILE_MODULE:open(DataDirSlash, [read]),
- ?line {error, E3} = ?FILE_MODULE:open(DataDir, [write]),
- ?line {error, E4} = ?FILE_MODULE:open(DataDirSlash, [write]),
- ?line {eisdir,eisdir,eisdir,eisdir} = {E1,E2,E3,E4},
-
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ filename:join(proplists:get_value(data_dir, Config), "x")),
+ DataDirSlash = DataDir++"/",
+ {error, E1} = ?FILE_MODULE:open(DataDir, [read]),
+ {error, E2} = ?FILE_MODULE:open(DataDirSlash, [read]),
+ {error, E3} = ?FILE_MODULE:open(DataDir, [write]),
+ {error, E4} = ?FILE_MODULE:open(DataDirSlash, [write]),
+ {eisdir,eisdir,eisdir,eisdir} = {E1,E2,E3,E4},
+
+ [] = flush(),
ok.
-exclusive(suite) -> [];
-exclusive(doc) -> "Test exclusive access to a file.";
+%% Test exclusive access to a file.
exclusive(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_exclusive"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
- ?line Name = filename:join(NewDir, "ex_file.txt"),
- ?line {ok, Fd} = ?FILE_MODULE:open(Name, [write, exclusive]),
- ?line {error, eexist} = ?FILE_MODULE:open(Name, [write, exclusive]),
- ?line ok = ?FILE_MODULE:close(Fd),
- ?line test_server:timetrap_cancel(Dog),
+ RootDir = proplists:get_value(priv_dir,Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_exclusive"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
+ Name = filename:join(NewDir, "ex_file.txt"),
+ {ok, Fd} = ?FILE_MODULE:open(Name, [write, exclusive]),
+ {error, eexist} = ?FILE_MODULE:open(Name, [write, exclusive]),
+ ok = ?FILE_MODULE:close(Fd),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-pos1(suite) -> [];
-pos1(doc) -> [];
pos1(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_pos1.fil"),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,write),
- ?line io:format(Fd1,"ABCDEFGH",[]),
- ?line ok = ?FILE_MODULE:close(Fd1),
- ?line {ok,Fd2} = ?FILE_MODULE:open(Name,read),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_pos1.fil"),
+ {ok,Fd1} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd1,"ABCDEFGH",[]),
+ ok = ?FILE_MODULE:close(Fd1),
+ {ok,Fd2} = ?FILE_MODULE:open(Name,read),
%% Start pos is first char
- ?line io:format("Relative positions"),
- ?line "A" = io:get_chars(Fd2,'',1),
- ?line {ok,2} = ?FILE_MODULE:position(Fd2,{cur,1}),
- ?line "C" = io:get_chars(Fd2,'',1),
- ?line {ok,0} = ?FILE_MODULE:position(Fd2,{cur,-3}),
- ?line "A" = io:get_chars(Fd2,'',1),
+ io:format("Relative positions"),
+ "A" = io:get_chars(Fd2,'',1),
+ {ok,2} = ?FILE_MODULE:position(Fd2,{cur,1}),
+ "C" = io:get_chars(Fd2,'',1),
+ {ok,0} = ?FILE_MODULE:position(Fd2,{cur,-3}),
+ "A" = io:get_chars(Fd2,'',1),
%% Backwards from first char should be an error
- ?line {ok,0} = ?FILE_MODULE:position(Fd2,{cur,-1}),
- ?line {error, einval} = ?FILE_MODULE:position(Fd2,{cur,-1}),
+ {ok,0} = ?FILE_MODULE:position(Fd2,{cur,-1}),
+ {error, einval} = ?FILE_MODULE:position(Fd2,{cur,-1}),
%% Reset position and move again
- ?line {ok,0} = ?FILE_MODULE:position(Fd2,0),
- ?line {ok,2} = ?FILE_MODULE:position(Fd2,{cur,2}),
- ?line "C" = io:get_chars(Fd2,'',1),
+ {ok,0} = ?FILE_MODULE:position(Fd2,0),
+ {ok,2} = ?FILE_MODULE:position(Fd2,{cur,2}),
+ "C" = io:get_chars(Fd2,'',1),
%% Go a lot forwards
- ?line {ok,13} = ?FILE_MODULE:position(Fd2,{cur,10}),
- ?line eof = io:get_chars(Fd2,'',1),
+ {ok,13} = ?FILE_MODULE:position(Fd2,{cur,10}),
+ eof = io:get_chars(Fd2,'',1),
%% Try some fixed positions
- ?line io:format("Fixed positions"),
- ?line {ok,8} =
+ io:format("Fixed positions"),
+ {ok,8} =
?FILE_MODULE:position(Fd2,8), eof = io:get_chars(Fd2,'',1),
- ?line {ok,8} =
+ {ok,8} =
?FILE_MODULE:position(Fd2,cur), eof = io:get_chars(Fd2,'',1),
- ?line {ok,7} =
+ {ok,7} =
?FILE_MODULE:position(Fd2,7), "H" = io:get_chars(Fd2,'',1),
- ?line {ok,0} =
+ {ok,0} =
?FILE_MODULE:position(Fd2,0), "A" = io:get_chars(Fd2,'',1),
- ?line {ok,3} =
+ {ok,3} =
?FILE_MODULE:position(Fd2,3), "D" = io:get_chars(Fd2,'',1),
- ?line {ok,12} =
+ {ok,12} =
?FILE_MODULE:position(Fd2,12), eof = io:get_chars(Fd2,'',1),
- ?line {ok,3} =
+ {ok,3} =
?FILE_MODULE:position(Fd2,3), "D" = io:get_chars(Fd2,'',1),
%% Try the {bof,X} notation
- ?line {ok,3} = ?FILE_MODULE:position(Fd2,{bof,3}),
- ?line "D" = io:get_chars(Fd2,'',1),
+ {ok,3} = ?FILE_MODULE:position(Fd2,{bof,3}),
+ "D" = io:get_chars(Fd2,'',1),
%% Try eof positions
- ?line io:format("EOF positions"),
- ?line {ok,8} =
+ io:format("EOF positions"),
+ {ok,8} =
?FILE_MODULE:position(Fd2,{eof,0}), eof=io:get_chars(Fd2,'',1),
- ?line {ok,7} =
+ {ok,7} =
?FILE_MODULE:position(Fd2,{eof,-1}),
- ?line "H" = io:get_chars(Fd2,'',1),
- ?line {ok,0} =
+ "H" = io:get_chars(Fd2,'',1),
+ {ok,0} =
?FILE_MODULE:position(Fd2,{eof,-8}), "A"=io:get_chars(Fd2,'',1),
- ?line {error, einval} = ?FILE_MODULE:position(Fd2,{eof,-9}),
+ {error, einval} = ?FILE_MODULE:position(Fd2,{eof,-9}),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
-pos2(suite) -> [];
-pos2(doc) -> [];
pos2(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_pos2.fil"),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,write),
- ?line io:format(Fd1,"ABCDEFGH",[]),
- ?line ok = ?FILE_MODULE:close(Fd1),
- ?line {ok,Fd2} = ?FILE_MODULE:open(Name,read),
- ?line {error, einval} = ?FILE_MODULE:position(Fd2,-1),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_pos2.fil"),
+ {ok,Fd1} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd1,"ABCDEFGH",[]),
+ ok = ?FILE_MODULE:close(Fd1),
+ {ok,Fd2} = ?FILE_MODULE:open(Name,read),
+ {error, einval} = ?FILE_MODULE:position(Fd2,-1),
%% Make sure that we still can search after an error.
- ?line {ok,0} = ?FILE_MODULE:position(Fd2, 0),
- ?line {ok,3} = ?FILE_MODULE:position(Fd2, {bof,3}),
- ?line "D" = io:get_chars(Fd2,'',1),
+ {ok,0} = ?FILE_MODULE:position(Fd2, 0),
+ {ok,3} = ?FILE_MODULE:position(Fd2, {bof,3}),
+ "D" = io:get_chars(Fd2,'',1),
- ?line [] = flush(),
- ?line io:format("DONE"),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
+ io:format("DONE"),
ok.
-pos3(suite) -> [];
-pos3(doc) -> ["When it does not use raw mode, file:position had a bug."];
+%% When it does not use raw mode, file:position had a bug.
pos3(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(data_dir, Config),
- ?line Name = filename:join(RootDir, "realmen.html.gz"),
+ RootDir = proplists:get_value(data_dir, Config),
+ Name = filename:join(RootDir, "realmen.html.gz"),
- ?line {ok, Fd} = ?FILE_MODULE:open(Name, [read, binary]),
- ?line {ok, _} = ?FILE_MODULE:read(Fd, 5),
- ?line {error, einval} = ?FILE_MODULE:position(Fd, {bof, -1}),
+ {ok, Fd} = ?FILE_MODULE:open(Name, [read, binary]),
+ {ok, _} = ?FILE_MODULE:read(Fd, 5),
+ {error, einval} = ?FILE_MODULE:position(Fd, {bof, -1}),
%% Here ok used to be returned =(
- ?line {error, einval} = ?FILE_MODULE:position(Fd, {cur, -10}),
+ {error, einval} = ?FILE_MODULE:position(Fd, {cur, -10}),
%% That test is actually questionable since file:position/2
%% is documented to leave the file position undefined after
%% it has returned an error. But on Posix systems the position
%% is guaranteed to be unchanged after an error return. On e.g.
%% Windows, nothing is stated about this in the documentation.
- ?line test_server:timetrap_cancel(Dog),
ok.
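
%% A minimal sketch (not part of the suite) of the portable pattern implied
%% by the note above: after file:position/2 returns an error, the current
%% position is formally undefined, so re-establish it with an absolute seek
%% instead of relying on it being unchanged. The helper name and the plain
%% file module calls are illustrative assumptions.
reposition_after_error_sketch(Fd) ->
    case file:position(Fd, {cur, -10}) of
        {ok, Pos} ->
            {ok, Pos};
        {error, einval} ->
            %% Position is undefined after the error; seek to a known offset.
            file:position(Fd, {bof, 0})
    end.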
-file_info_basic_file(suite) -> [];
-file_info_basic_file(doc) -> [];
file_info_basic_file(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir, Config),
+ RootDir = proplists:get_value(priv_dir, Config),
%% Create a short file.
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_basic_test.fil"),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name, write),
- ?line io:put_chars(Fd1, "foo bar"),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_basic_test.fil"),
+ {ok,Fd1} = ?FILE_MODULE:open(Name, write),
+ io:put_chars(Fd1, "foo bar"),
+ ok = ?FILE_MODULE:close(Fd1),
+
+ %% Don't crash the file server when passing incorrect arguments.
+ {error,badarg} = ?FILE_MODULE:read_file_info(Name, [{time, gurka}]),
+ {error,badarg} = ?FILE_MODULE:read_file_info([#{} | gaffel]),
%% Test that the file has the expected attributes.
%% The times are tricky, so we will save them to a separate test case.
{ok,FileInfo} = ?FILE_MODULE:read_file_info(Name),
{ok,FileInfo} = ?FILE_MODULE:read_file_info(Name, [raw]),
#file_info{size=Size,type=Type,access=Access,
- atime=AccessTime,mtime=ModifyTime} = FileInfo,
- ?line io:format("Access ~p, Modify ~p", [AccessTime, ModifyTime]),
- ?line Size = 7,
- ?line Type = regular,
- ?line read_write = Access,
- ?line true = abs(time_dist(filter_atime(AccessTime, Config),
- filter_atime(ModifyTime,
- Config))) < 2,
- ?line all_integers(tuple_to_list(AccessTime) ++ tuple_to_list(ModifyTime)),
-
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ atime=AccessTime,mtime=ModifyTime} = FileInfo,
+ io:format("Access ~p, Modify ~p", [AccessTime, ModifyTime]),
+ Size = 7,
+ Type = regular,
+ read_write = Access,
+ true = abs(time_dist(filter_atime(AccessTime, Config),
+ filter_atime(ModifyTime,
+ Config))) < 2,
+ all_integers(tuple_to_list(AccessTime) ++ tuple_to_list(ModifyTime)),
+
+ [] = flush(),
ok.
-file_info_basic_directory(suite) -> [];
-file_info_basic_directory(doc) -> [];
file_info_basic_directory(Config) when is_list(Config) ->
- Dog = test_server:timetrap(test_server:seconds(5)),
-
%% Note: filename:join/1 removes any trailing slash,
%% which is essential for ?FILE_MODULE:file_info/1 to work on
%% platforms such as Windows95.
- RootDir = filename:join([?config(priv_dir, Config)]),
+ RootDir = filename:join([proplists:get_value(priv_dir, Config)]),
%% Test that the RootDir directory has the expected attributes.
test_directory(RootDir, read_write),
@@ -1445,65 +1404,57 @@ file_info_basic_directory(Config) when is_list(Config) ->
%% directories.
case os:type() of
{win32, _} ->
- ?line test_directory("/", read_write),
- ?line test_directory("c:/", read_write),
- ?line test_directory("c:\\", read_write);
+ test_directory("/", read_write),
+ test_directory("c:/", read_write),
+ test_directory("c:\\", read_write);
_ ->
- ?line test_directory("/", read)
+ test_directory("/", read)
end,
- test_server:timetrap_cancel(Dog).
+ ok.
test_directory(Name, ExpectedAccess) ->
{ok,FileInfo} = ?FILE_MODULE:read_file_info(Name),
{ok,FileInfo} = ?FILE_MODULE:read_file_info(Name, [raw]),
#file_info{size=Size,type=Type,access=Access,
atime=AccessTime,mtime=ModifyTime} = FileInfo,
- ?line io:format("Testing directory ~s", [Name]),
- ?line io:format("Directory size is ~p", [Size]),
- ?line io:format("Access ~p", [Access]),
- ?line io:format("Access time ~p; Modify time~p",
- [AccessTime, ModifyTime]),
- ?line Type = directory,
- ?line Access = ExpectedAccess,
- ?line all_integers(tuple_to_list(AccessTime) ++ tuple_to_list(ModifyTime)),
- ?line [] = flush(),
+ io:format("Testing directory ~s", [Name]),
+ io:format("Directory size is ~p", [Size]),
+ io:format("Access ~p", [Access]),
+ io:format("Access time ~p; Modify time~p",
+ [AccessTime, ModifyTime]),
+ Type = directory,
+ Access = ExpectedAccess,
+ all_integers(tuple_to_list(AccessTime) ++ tuple_to_list(ModifyTime)),
+ [] = flush(),
ok.
all_integers([{A,B,C}|T]) ->
all_integers([A,B,C|T]);
all_integers([Int|Rest]) when is_integer(Int) ->
- ?line all_integers(Rest);
+ all_integers(Rest);
all_integers([]) -> ok.
%% Try something nonexistent.
-file_info_bad(suite) -> [];
-file_info_bad(doc) -> [];
file_info_bad(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = filename:join([?config(priv_dir, Config)]),
+ RootDir = filename:join([proplists:get_value(priv_dir, Config)]),
FileName = filename:join(RootDir, atom_to_list(?MODULE) ++ "_nonexistent"),
{error,enoent} = ?FILE_MODULE:read_file_info(FileName),
{error,enoent} = ?FILE_MODULE:read_file_info(FileName, [raw]),
- ?line {error, enoent} = ?FILE_MODULE:read_file_info(""),
+ {error, enoent} = ?FILE_MODULE:read_file_info(""),
{error, enoent} = ?FILE_MODULE:read_file_info("", [raw]),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
%% Test that the file times behave as they should.
-file_info_times(suite) -> [];
-file_info_times(doc) -> [];
file_info_times(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(60)),
%% We have to try this twice, since if the test runs across the change
%% of a month the time diff calculations will fail. But it won't happen
%% if you run it twice in succession.
- ?line test_server:m_out_of_n(
- 1,2,
- fun() -> ?line file_info_int(Config) end),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:m_out_of_n(
+ 1,2,
+ fun() -> file_info_int(Config) end),
ok.
file_info_int(Config) ->
@@ -1511,14 +1462,14 @@ file_info_int(Config) ->
%% which is essential for ?FILE_MODULE:file_info/1 to work on
%% platforms such as Windows95.
- ?line RootDir = filename:join([?config(priv_dir, Config)]),
- ?line test_server:format("RootDir = ~p", [RootDir]),
+ RootDir = filename:join([proplists:get_value(priv_dir, Config)]),
+ io:format("RootDir = ~p", [RootDir]),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_file_info.fil"),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,write),
- ?line io:put_chars(Fd1,"foo"),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_file_info.fil"),
+ {ok,Fd1} = ?FILE_MODULE:open(Name,write),
+ io:put_chars(Fd1,"foo"),
%% check that the file got a modify date max a few seconds away from now
{ok,FileInfo1} = ?FILE_MODULE:read_file_info(Name),
@@ -1531,31 +1482,31 @@ file_info_int(Config) ->
#file_info{type=regular,atime=AccTime1,mtime=ModTime1} = FileInfo1,
- ?line Now = erlang:localtime(), %???
- ?line io:format("Now ~p",[Now]),
- ?line io:format("Open file Acc ~p Mod ~p",[AccTime1,ModTime1]),
- ?line true = abs(time_dist(filter_atime(Now, Config),
- filter_atime(AccTime1,
- Config))) < 8,
- ?line true = abs(time_dist(Now,ModTime1)) < 8,
+ Now = erlang:localtime(), %???
+ io:format("Now ~p",[Now]),
+ io:format("Open file Acc ~p Mod ~p",[AccTime1,ModTime1]),
+ true = abs(time_dist(filter_atime(Now, Config),
+ filter_atime(AccTime1,
+ Config))) < 8,
+ true = abs(time_dist(Now,ModTime1)) < 8,
%% Sleep until we can be sure the seconds value has changed.
%% Note: FAT-based filesystem (like on Windows 95) have
%% a resolution of 2 seconds.
- ?line test_server:sleep(test_server:seconds(2.2)),
+ timer:sleep(2200),
%% close the file, and watch the modify date change
- ?line ok = ?FILE_MODULE:close(Fd1),
+ ok = ?FILE_MODULE:close(Fd1),
{ok,FileInfo2} = ?FILE_MODULE:read_file_info(Name),
{ok,FileInfo2} = ?FILE_MODULE:read_file_info(Name, [raw]),
#file_info{size=Size,type=regular,access=Access,
atime=AccTime2,mtime=ModTime2} = FileInfo2,
- ?line io:format("Closed file Acc ~p Mod ~p",[AccTime2,ModTime2]),
- ?line true = time_dist(ModTime1,ModTime2) >= 0,
+ io:format("Closed file Acc ~p Mod ~p",[AccTime2,ModTime2]),
+ true = time_dist(ModTime1,ModTime2) >= 0,
%% this file is supposed to be binary, so it'd better keep its size
- ?line Size = 3,
- ?line Access = read_write,
+ Size = 3,
+ Access = read_write,
%% Do some directory checking
{ok,FileInfo3} = ?FILE_MODULE:read_file_info(RootDir),
@@ -1563,12 +1514,12 @@ file_info_int(Config) ->
#file_info{size=DSize,type=directory,access=DAccess,
atime=AccTime3,mtime=ModTime3} = FileInfo3,
%% this dir was modified only a few secs ago
- ?line io:format("Dir Acc ~p; Mod ~p; Now ~p", [AccTime3, ModTime3, Now]),
- ?line true = abs(time_dist(Now,ModTime3)) < 5,
- ?line DAccess = read_write,
- ?line io:format("Dir size is ~p",[DSize]),
+ io:format("Dir Acc ~p; Mod ~p; Now ~p", [AccTime3, ModTime3, Now]),
+ true = abs(time_dist(Now,ModTime3)) < 5,
+ DAccess = read_write,
+ io:format("Dir size is ~p",[DSize]),
- ?line [] = flush(),
+ [] = flush(),
ok.
%% Filter access times, to cope with a deficiency of FAT file systems
@@ -1579,9 +1530,9 @@ filter_atime(Atime, Config) ->
true ->
case Atime of
{Date, _} ->
- {Date, {0, 0, 0}};
+ {Date, {0, 0, 0}};
{Y, M, D, _, _, _} ->
- {Y, M, D, 0, 0, 0}
+ {Y, M, D, 0, 0, 0}
end;
false ->
Atime
@@ -1589,50 +1540,47 @@ filter_atime(Atime, Config) ->
%% Test the write_file_info/2 function.
-file_write_file_info(suite) -> [];
-file_write_file_info(doc) -> [];
file_write_file_info(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = get_good_directory(Config),
- ?line test_server:format("RootDir = ~p", [RootDir]),
+ RootDir = get_good_directory(Config),
+ io:format("RootDir = ~p", [RootDir]),
%% Set the file to read only AND update the file times at the same time.
%% (This used to fail on Windows NT/95 for a local filesystem.)
%% Note: Seconds must be even; see note in file_info_times/1.
- ?line Name1 = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_write_file_info_ro"),
- ?line ok = ?FILE_MODULE:write_file(Name1, "hello"),
- ?line Time = {{1997, 01, 02}, {12, 35, 42}},
- ?line Info = #file_info{mode=8#400, atime=Time, mtime=Time, ctime=Time},
- ?line ok = ?FILE_MODULE:write_file_info(Name1, Info),
+ Name1 = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_write_file_info_ro"),
+ ok = ?FILE_MODULE:write_file(Name1, "hello"),
+ Time = {{1997, 01, 02}, {12, 35, 42}},
+ Info = #file_info{mode=8#400, atime=Time, mtime=Time, ctime=Time},
+ ok = ?FILE_MODULE:write_file_info(Name1, Info),
%% Read back the times.
- ?line {ok, ActualInfo} = ?FILE_MODULE:read_file_info(Name1),
- ?line #file_info{mode=_Mode, atime=ActAtime, mtime=Time,
- ctime=ActCtime} = ActualInfo,
- ?line FilteredAtime = filter_atime(Time, Config),
- ?line FilteredAtime = filter_atime(ActAtime, Config),
- ?line case os:type() of
- {win32, _} ->
- %% On Windows, "ctime" means creation time and it can
- %% be set.
- ActCtime = Time;
- _ ->
- ok
- end,
- ?line {error, eacces} = ?FILE_MODULE:write_file(Name1, "hello again"),
+ {ok, ActualInfo} = ?FILE_MODULE:read_file_info(Name1),
+ #file_info{mode=_Mode, atime=ActAtime, mtime=Time,
+ ctime=ActCtime} = ActualInfo,
+ FilteredAtime = filter_atime(Time, Config),
+ FilteredAtime = filter_atime(ActAtime, Config),
+ case os:type() of
+ {win32, _} ->
+ %% On Windows, "ctime" means creation time and it can
+ %% be set.
+ ActCtime = Time;
+ _ ->
+ ok
+ end,
+ {error, eacces} = ?FILE_MODULE:write_file(Name1, "hello again"),
%% Make the file writable again.
- ?line ?FILE_MODULE:write_file_info(Name1, #file_info{mode=8#600}),
- ?line ok = ?FILE_MODULE:write_file(Name1, "hello again"),
+ ?FILE_MODULE:write_file_info(Name1, #file_info{mode=8#600}),
+ ok = ?FILE_MODULE:write_file(Name1, "hello again"),
%% And unwritable.
- ?line ?FILE_MODULE:write_file_info(Name1, #file_info{mode=8#400}),
- ?line {error, eacces} = ?FILE_MODULE:write_file(Name1, "hello again"),
+ ?FILE_MODULE:write_file_info(Name1, #file_info{mode=8#400}),
+ {error, eacces} = ?FILE_MODULE:write_file(Name1, "hello again"),
%% Same with raw.
?FILE_MODULE:write_file_info(Name1, #file_info{mode=8#600}, [raw]),
@@ -1643,634 +1591,660 @@ file_write_file_info(Config) when is_list(Config) ->
%% Write the times again.
%% Note: Seconds must be even; see note in file_info_times/1.
- ?line NewTime = {{1997, 02, 15}, {13, 18, 20}},
- ?line NewInfo = #file_info{atime=NewTime, mtime=NewTime, ctime=NewTime},
- ?line ok = ?FILE_MODULE:write_file_info(Name1, NewInfo),
- ?line {ok, ActualInfo2} = ?FILE_MODULE:read_file_info(Name1),
- ?line #file_info{atime=NewActAtime, mtime=NewTime,
- ctime=NewActCtime} = ActualInfo2,
- ?line NewFilteredAtime = filter_atime(NewTime, Config),
- ?line NewFilteredAtime = filter_atime(NewActAtime, Config),
- ?line case os:type() of
- {win32, _} -> NewActCtime = NewTime;
- _ -> ok
- end,
+ NewTime = {{1997, 02, 15}, {13, 18, 20}},
+ NewInfo = #file_info{atime=NewTime, mtime=NewTime, ctime=NewTime},
+ ok = ?FILE_MODULE:write_file_info(Name1, NewInfo),
+ {ok, ActualInfo2} = ?FILE_MODULE:read_file_info(Name1),
+ #file_info{atime=NewActAtime, mtime=NewTime,
+ ctime=NewActCtime} = ActualInfo2,
+ NewFilteredAtime = filter_atime(NewTime, Config),
+ NewFilteredAtime = filter_atime(NewActAtime, Config),
+ case os:type() of
+ {win32, _} -> NewActCtime = NewTime;
+ _ -> ok
+ end,
%% The file should still be unwritable.
- ?line {error, eacces} = ?FILE_MODULE:write_file(Name1, "hello again"),
+ {error, eacces} = ?FILE_MODULE:write_file(Name1, "hello again"),
%% Make the file writeable again, so that we can remove the
%% test suites ... :-)
- ?line ?FILE_MODULE:write_file_info(Name1, #file_info{mode=8#600}),
+ ?FILE_MODULE:write_file_info(Name1, #file_info{mode=8#600}),
+
+ [] = flush(),
+ ok.
+
+file_wfi_helpers(Config) when is_list(Config) ->
+ RootDir = get_good_directory(Config),
+ io:format("RootDir = ~p", [RootDir]),
+
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE) ++ "_wfi_helpers"),
+
+ ok = ?FILE_MODULE:write_file(Name, "hello again"),
+ NewTime = {{1997, 02, 15}, {13, 18, 20}},
+ ok = ?FILE_MODULE:change_time(Name, NewTime, NewTime),
+
+ {ok, #file_info{atime=NewActAtime, mtime=NewTime}} =
+ ?FILE_MODULE:read_file_info(Name),
+
+ NewFilteredAtime = filter_atime(NewTime, Config),
+ NewFilteredAtime = filter_atime(NewActAtime, Config),
+
+ %% Make the file unwritable
+ ok = ?FILE_MODULE:change_mode(Name, 8#400),
+ {error, eacces} = ?FILE_MODULE:write_file(Name, "hello again"),
+
+ %% ... and writable again
+ ok = ?FILE_MODULE:change_mode(Name, 8#600),
+ ok = ?FILE_MODULE:write_file(Name, "hello again"),
+
+ %% We have no idea which users will work, so all we can do is to check
+ %% that it returns enoent instead of crashing.
+ {error, enoent} = ?FILE_MODULE:change_group("bogus file name", 0),
+ {error, enoent} = ?FILE_MODULE:change_owner("bogus file name", 0),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
%% Returns a directory on a file system that has correct file times.
get_good_directory(Config) ->
- ?line ?config(priv_dir, Config).
+ proplists:get_value(priv_dir, Config).
-consult1(suite) -> [];
-consult1(doc) -> [];
consult1(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_consult.fil"),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,write),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_consult.fil"),
+ {ok,Fd1} = ?FILE_MODULE:open(Name,write),
%% note that there is no final \n (only a space)
- ?line io:format(Fd1,
- "{this,[is,1.0],'journey'}.\n\"into\". (sound). ",
- []),
- ?line ok = ?FILE_MODULE:close(Fd1),
- ?line {ok,[{this,[is,1.0],journey},"into",sound]} =
+ io:format(Fd1,
+ "{this,[is,1.0],'journey'}.\n\"into\". (sound). ",
+ []),
+ ok = ?FILE_MODULE:close(Fd1),
+ {ok,[{this,[is,1.0],journey},"into",sound]} =
?FILE_MODULE:consult(Name),
- ?line {ok,Fd2} = ?FILE_MODULE:open(Name,write),
+ {ok,Fd2} = ?FILE_MODULE:open(Name,write),
%% note the missing double quote
- ?line io:format(
- Fd2,"{this,[is,1.0],'journey'}.\n \"into. (sound). ",[]),
- ?line ok = ?FILE_MODULE:close(Fd2),
- ?line {error, {_, _, _} = Msg} = ?FILE_MODULE:consult(Name),
- ?line io:format("Errmsg: ~p",[Msg]),
+ io:format(
+ Fd2,"{this,[is,1.0],'journey'}.\n \"into. (sound). ",[]),
+ ok = ?FILE_MODULE:close(Fd2),
+ {error, {_, _, _} = Msg} = ?FILE_MODULE:consult(Name),
+ io:format("Errmsg: ~p",[Msg]),
- ?line {error, enoent} = ?FILE_MODULE:consult(Name ++ ".nonexistent"),
+ {error, enoent} = ?FILE_MODULE:consult(Name ++ ".nonexistent"),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
-path_consult(suite) -> [];
-path_consult(doc) -> [];
path_consult(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line FileName = atom_to_list(?MODULE)++"_path_consult.fil",
- ?line Name = filename:join(RootDir, FileName),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,write),
- ?line io:format(Fd1,"{this,is,a,journey,into,sound}.\n",[]),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ RootDir = proplists:get_value(priv_dir,Config),
+ FileName = atom_to_list(?MODULE)++"_path_consult.fil",
+ Name = filename:join(RootDir, FileName),
+ {ok,Fd1} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd1,"{this,is,a,journey,into,sound}.\n",[]),
+ ok = ?FILE_MODULE:close(Fd1),
%% File last in path
- ?line {ok,[{this,is,a,journey,into,sound}],Dir} =
+ {ok,[{this,is,a,journey,into,sound}],Dir} =
?FILE_MODULE:path_consult(
- [filename:join(RootDir, "dir1"),
- filename:join(RootDir, ".."),
- filename:join(RootDir, "dir2"),
- RootDir], FileName),
- ?line true = lists:prefix(RootDir,Dir),
+ [filename:join(RootDir, "dir1"),
+ filename:join(RootDir, ".."),
+ filename:join(RootDir, "dir2"),
+ RootDir], FileName),
+ true = lists:prefix(RootDir,Dir),
%% While maybe not an error, it may be worth noting that
%% when the full path to a file is given, it's always found
%% regardless of the contents of the path
- ?line {ok,_,_} = ?FILE_MODULE:path_consult(["nosuch1","nosuch2"],Name),
+ {ok,_,_} = ?FILE_MODULE:path_consult(["nosuch1","nosuch2"],Name),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
-eval1(suite) -> [];
-eval1(doc) -> [];
eval1(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)++"_eval.fil"),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,write),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)++"_eval.fil"),
+ {ok,Fd1} = ?FILE_MODULE:open(Name,write),
%% note that there is no final \n (only a space)
- ?line io:format(Fd1,"put(evaluated_ok,\ntrue). ",[]),
- ?line ok = ?FILE_MODULE:close(Fd1),
- ?line ok = ?FILE_MODULE:eval(Name),
- ?line true = get(evaluated_ok),
-
- ?line {ok,Fd2} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd1,"put(evaluated_ok,\ntrue). ",[]),
+ ok = ?FILE_MODULE:close(Fd1),
+ ok = ?FILE_MODULE:eval(Name),
+ true = get(evaluated_ok),
+
+ {ok,Fd2} = ?FILE_MODULE:open(Name,write),
%% note that there is no final \n (only a space)
- ?line io:format(Fd2,"put(evaluated_ok,\nR). ",[]),
- ?line ok = ?FILE_MODULE:close(Fd2),
- ?line ok = ?FILE_MODULE:eval(
- Name,
- erl_eval:add_binding('R', true, erl_eval:new_bindings())),
- ?line true = get(evaluated_ok),
-
- ?line {ok,Fd3} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd2,"put(evaluated_ok,\nR). ",[]),
+ ok = ?FILE_MODULE:close(Fd2),
+ ok = ?FILE_MODULE:eval(
+ Name,
+ erl_eval:add_binding('R', true, erl_eval:new_bindings())),
+ true = get(evaluated_ok),
+
+ {ok,Fd3} = ?FILE_MODULE:open(Name,write),
%% garbled
- ?line io:format(Fd3,"puGARBLED-GARBLED\ntrue). ",[]),
- ?line ok = ?FILE_MODULE:close(Fd3),
- ?line {error, {_, _, _} = Msg} = ?FILE_MODULE:eval(Name),
- ?line io:format("Errmsg1: ~p",[Msg]),
-
- ?line {error, enoent} = ?FILE_MODULE:eval(Name ++ ".nonexistent"),
-
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ io:format(Fd3,"puGARBLED-GARBLED\ntrue). ",[]),
+ ok = ?FILE_MODULE:close(Fd3),
+ {error, {_, _, _} = Msg} = ?FILE_MODULE:eval(Name),
+ io:format("Errmsg1: ~p",[Msg]),
+
+ {error, enoent} = ?FILE_MODULE:eval(Name ++ ".nonexistent"),
+
+ [] = flush(),
ok.
-path_eval(suite) -> [];
-path_eval(doc) -> [];
path_eval(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line FileName = atom_to_list(?MODULE)++"_path_eval.fil",
- ?line Name = filename:join(RootDir, FileName),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,write),
- ?line io:format(Fd1,"put(evaluated_ok,true).\n",[]),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ RootDir = proplists:get_value(priv_dir,Config),
+ FileName = atom_to_list(?MODULE)++"_path_eval.fil",
+ Name = filename:join(RootDir, FileName),
+ {ok,Fd1} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd1,"put(evaluated_ok,true).\n",[]),
+ ok = ?FILE_MODULE:close(Fd1),
%% File last in path
- ?line {ok,Dir} =
+ {ok,Dir} =
?FILE_MODULE:path_eval(
- [filename:join(RootDir, "dir1"),
- filename:join(RootDir, ".."),
- filename:join(RootDir, "dir2"),
- RootDir],FileName),
- ?line true = get(evaluated_ok),
- ?line true = lists:prefix(RootDir,Dir),
-
+ [filename:join(RootDir, "dir1"),
+ filename:join(RootDir, ".."),
+ filename:join(RootDir, "dir2"),
+ RootDir],FileName),
+ true = get(evaluated_ok),
+ true = lists:prefix(RootDir,Dir),
+
%% While maybe not an error, it may be worth noting that
%% when the full path to a file is given, it's always found
%% regardless of the contents of the path
- ?line {ok,Fd2} = ?FILE_MODULE:open(Name,write),
- ?line io:format(Fd2,"put(evaluated_ok,R).\n",[]),
- ?line ok = ?FILE_MODULE:close(Fd2),
- ?line {ok,_} = ?FILE_MODULE:path_eval(
- ["nosuch1","nosuch2"],
- Name,
- erl_eval:add_binding('R', true, erl_eval:new_bindings())),
-
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ {ok,Fd2} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd2,"put(evaluated_ok,R).\n",[]),
+ ok = ?FILE_MODULE:close(Fd2),
+ {ok,_} = ?FILE_MODULE:path_eval(
+ ["nosuch1","nosuch2"],
+ Name,
+ erl_eval:add_binding('R', true, erl_eval:new_bindings())),
+
+ [] = flush(),
ok.
-script1(suite) -> [];
-script1(doc) -> "";
script1(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)++"_script.fil"),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,write),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)++"_script.fil"),
+ {ok,Fd1} = ?FILE_MODULE:open(Name,write),
%% note that there is no final \n (only a space)
- ?line io:format(Fd1,"A = 11,\nB = 6,\nA+B. ",[]),
- ?line ok = ?FILE_MODULE:close(Fd1),
- ?line {ok,17} = ?FILE_MODULE:script(Name),
-
- ?line {ok,Fd2} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd1,"A = 11,\nB = 6,\nA+B. ",[]),
+ ok = ?FILE_MODULE:close(Fd1),
+ {ok,17} = ?FILE_MODULE:script(Name),
+
+ {ok,Fd2} = ?FILE_MODULE:open(Name,write),
%% note that there is no final \n (only a space)
- ?line io:format(Fd2,"A = 11,\nA+B. ",[]),
- ?line ok = ?FILE_MODULE:close(Fd2),
- ?line {ok,17} = ?FILE_MODULE:script(
- Name,
- erl_eval:add_binding('B', 6, erl_eval:new_bindings())),
-
- ?line {ok,Fd3} = ?FILE_MODULE:open(Name,write),
- ?line io:format(Fd3,"A = 11,\nB = six,\nA+B. ",[]),
- ?line ok = ?FILE_MODULE:close(Fd3),
- ?line {error, {_, _, _} = Msg} = ?FILE_MODULE:script(Name),
- ?line io:format("Errmsg1: ~p",[Msg]),
-
- ?line {error, enoent} = ?FILE_MODULE:script(Name ++ ".nonexistent"),
-
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ io:format(Fd2,"A = 11,\nA+B. ",[]),
+ ok = ?FILE_MODULE:close(Fd2),
+ {ok,17} = ?FILE_MODULE:script(
+ Name,
+ erl_eval:add_binding('B', 6, erl_eval:new_bindings())),
+
+ {ok,Fd3} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd3,"A = 11,\nB = six,\nA+B. ",[]),
+ ok = ?FILE_MODULE:close(Fd3),
+ {error, {_, _, _} = Msg} = ?FILE_MODULE:script(Name),
+ io:format("Errmsg1: ~p",[Msg]),
+
+ {error, enoent} = ?FILE_MODULE:script(Name ++ ".nonexistent"),
+
+ [] = flush(),
ok.
-
-path_script(suite) -> [];
-path_script(doc) -> [];
+
path_script(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line FileName = atom_to_list(?MODULE)++"_path_script.fil",
- ?line Name = filename:join(RootDir, FileName),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name,write),
- ?line io:format(Fd1,"A = 11,\nB = 6,\nA+B.\n",[]),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ RootDir = proplists:get_value(priv_dir,Config),
+ FileName = atom_to_list(?MODULE)++"_path_script.fil",
+ Name = filename:join(RootDir, FileName),
+ {ok,Fd1} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd1,"A = 11,\nB = 6,\nA+B.\n",[]),
+ ok = ?FILE_MODULE:close(Fd1),
%% File last in path
- ?line {ok, 17, Dir} =
+ {ok, 17, Dir} =
?FILE_MODULE:path_script(
[filename:join(RootDir, "dir1"),
filename:join(RootDir, ".."),
filename:join(RootDir, "dir2"),
RootDir],FileName),
- ?line true = lists:prefix(RootDir,Dir),
-
+ true = lists:prefix(RootDir,Dir),
+
%% While maybe not an error, it may be worth noting that
%% when the full path to a file is given, it's always found
%% regardless of the contents of the path
- ?line {ok,Fd2} = ?FILE_MODULE:open(Name,write),
- ?line io:format(Fd2,"A = 11,\nA+B.",[]),
- ?line ok = ?FILE_MODULE:close(Fd2),
- ?line {ok, 17, Dir} =
+ {ok,Fd2} = ?FILE_MODULE:open(Name,write),
+ io:format(Fd2,"A = 11,\nA+B.",[]),
+ ok = ?FILE_MODULE:close(Fd2),
+ {ok, 17, Dir} =
?FILE_MODULE:path_script(
["nosuch1","nosuch2"],
Name,
erl_eval:add_binding('B', 6, erl_eval:new_bindings())),
-
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+
+ [] = flush(),
ok.
-
-truncate(suite) -> [];
-truncate(doc) -> [];
+
truncate(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_truncate.fil"),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_truncate.fil"),
%% Create a file with some data.
- ?line MyData = "0123456789abcdefghijklmnopqrstuvxyz",
- ?line ok = ?FILE_MODULE:write_file(Name, MyData),
+ MyData = "0123456789abcdefghijklmnopqrstuvxyz",
+ ok = ?FILE_MODULE:write_file(Name, MyData),
%% Truncate the file to 10 characters.
- ?line {ok, Fd} = ?FILE_MODULE:open(Name, read_write),
- ?line {ok, 10} = ?FILE_MODULE:position(Fd, 10),
- ?line ok = ?FILE_MODULE:truncate(Fd),
- ?line ok = ?FILE_MODULE:close(Fd),
+ {ok, Fd} = ?FILE_MODULE:open(Name, read_write),
+ {ok, 10} = ?FILE_MODULE:position(Fd, 10),
+ ok = ?FILE_MODULE:truncate(Fd),
+ ok = ?FILE_MODULE:close(Fd),
%% Read back the file and check that it has been truncated.
- ?line Expected = list_to_binary("0123456789"),
- ?line {ok, Expected} = ?FILE_MODULE:read_file(Name),
+ Expected = list_to_binary("0123456789"),
+ {ok, Expected} = ?FILE_MODULE:read_file(Name),
%% Open the file read only and verify that it is not possible to
%% truncate it, OTP-1960
- ?line {ok, Fd2} = ?FILE_MODULE:open(Name, read),
- ?line {ok, 5} = ?FILE_MODULE:position(Fd2, 5),
- ?line {error, _} = ?FILE_MODULE:truncate(Fd2),
+ {ok, Fd2} = ?FILE_MODULE:open(Name, read),
+ {ok, 5} = ?FILE_MODULE:position(Fd2, 5),
+ {error, _} = ?FILE_MODULE:truncate(Fd2),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
-datasync(suite) -> [];
-datasync(doc) -> "Tests that ?FILE_MODULE:datasync/1 at least doesn't crash.";
+%% Tests that ?FILE_MODULE:datasync/1 at least doesn't crash.
datasync(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line PrivDir = ?config(priv_dir, Config),
- ?line Sync = filename:join(PrivDir,
- atom_to_list(?MODULE)
- ++"_sync.fil"),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Sync = filename:join(PrivDir,
+ atom_to_list(?MODULE)
+ ++"_sync.fil"),
%% Raw open.
- ?line {ok, Fd} = ?FILE_MODULE:open(Sync, [write, raw]),
- ?line ok = ?FILE_MODULE:datasync(Fd),
- ?line ok = ?FILE_MODULE:close(Fd),
+ {ok, Fd} = ?FILE_MODULE:open(Sync, [write, raw]),
+ ok = ?FILE_MODULE:datasync(Fd),
+ ok = ?FILE_MODULE:close(Fd),
%% Ordinary open.
- ?line {ok, Fd2} = ?FILE_MODULE:open(Sync, [write]),
- ?line ok = ?FILE_MODULE:datasync(Fd2),
- ?line ok = ?FILE_MODULE:close(Fd2),
+ {ok, Fd2} = ?FILE_MODULE:open(Sync, [write]),
+ ok = ?FILE_MODULE:datasync(Fd2),
+ ok = ?FILE_MODULE:close(Fd2),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
-sync(suite) -> [];
-sync(doc) -> "Tests that ?FILE_MODULE:sync/1 at least doesn't crash.";
+%% Tests that ?FILE_MODULE:sync/1 at least doesn't crash.
sync(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line PrivDir = ?config(priv_dir, Config),
- ?line Sync = filename:join(PrivDir,
- atom_to_list(?MODULE)
- ++"_sync.fil"),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Sync = filename:join(PrivDir,
+ atom_to_list(?MODULE)
+ ++"_sync.fil"),
%% Raw open.
- ?line {ok, Fd} = ?FILE_MODULE:open(Sync, [write, raw]),
- ?line ok = ?FILE_MODULE:sync(Fd),
- ?line ok = ?FILE_MODULE:close(Fd),
+ {ok, Fd} = ?FILE_MODULE:open(Sync, [write, raw]),
+ ok = ?FILE_MODULE:sync(Fd),
+ ok = ?FILE_MODULE:close(Fd),
%% Ordinary open.
- ?line {ok, Fd2} = ?FILE_MODULE:open(Sync, [write]),
- ?line ok = ?FILE_MODULE:sync(Fd2),
- ?line ok = ?FILE_MODULE:close(Fd2),
+ {ok, Fd2} = ?FILE_MODULE:open(Sync, [write]),
+ ok = ?FILE_MODULE:sync(Fd2),
+ ok = ?FILE_MODULE:close(Fd2),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
-advise(suite) -> [];
-advise(doc) -> "Tests that ?FILE_MODULE:advise/4 at least doesn't crash.";
+%% Tests that ?FILE_MODULE:advise/4 at least doesn't crash.
advise(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line PrivDir = ?config(priv_dir, Config),
- ?line Advise = filename:join(PrivDir,
- atom_to_list(?MODULE)
- ++"_advise.fil"),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Advise = filename:join(PrivDir,
+ atom_to_list(?MODULE)
+ ++"_advise.fil"),
Line1 = "Hello\n",
Line2 = "World!\n",
- ?line {ok, Fd} = ?FILE_MODULE:open(Advise, [write]),
- ?line ok = ?FILE_MODULE:advise(Fd, 0, 0, normal),
- ?line ok = io:format(Fd, "~s", [Line1]),
- ?line ok = io:format(Fd, "~s", [Line2]),
- ?line ok = ?FILE_MODULE:close(Fd),
-
- ?line {ok, Fd2} = ?FILE_MODULE:open(Advise, [write]),
- ?line ok = ?FILE_MODULE:advise(Fd2, 0, 0, random),
- ?line ok = io:format(Fd2, "~s", [Line1]),
- ?line ok = io:format(Fd2, "~s", [Line2]),
- ?line ok = ?FILE_MODULE:close(Fd2),
-
- ?line {ok, Fd3} = ?FILE_MODULE:open(Advise, [write]),
- ?line ok = ?FILE_MODULE:advise(Fd3, 0, 0, sequential),
- ?line ok = io:format(Fd3, "~s", [Line1]),
- ?line ok = io:format(Fd3, "~s", [Line2]),
- ?line ok = ?FILE_MODULE:close(Fd3),
-
- ?line {ok, Fd4} = ?FILE_MODULE:open(Advise, [write]),
- ?line ok = ?FILE_MODULE:advise(Fd4, 0, 0, will_need),
- ?line ok = io:format(Fd4, "~s", [Line1]),
- ?line ok = io:format(Fd4, "~s", [Line2]),
- ?line ok = ?FILE_MODULE:close(Fd4),
-
- ?line {ok, Fd5} = ?FILE_MODULE:open(Advise, [write]),
- ?line ok = ?FILE_MODULE:advise(Fd5, 0, 0, dont_need),
- ?line ok = io:format(Fd5, "~s", [Line1]),
- ?line ok = io:format(Fd5, "~s", [Line2]),
- ?line ok = ?FILE_MODULE:close(Fd5),
-
- ?line {ok, Fd6} = ?FILE_MODULE:open(Advise, [write]),
- ?line ok = ?FILE_MODULE:advise(Fd6, 0, 0, no_reuse),
- ?line ok = io:format(Fd6, "~s", [Line1]),
- ?line ok = io:format(Fd6, "~s", [Line2]),
- ?line ok = ?FILE_MODULE:close(Fd6),
-
- ?line {ok, Fd7} = ?FILE_MODULE:open(Advise, [write]),
- ?line {error, einval} = ?FILE_MODULE:advise(Fd7, 0, 0, bad_advise),
- ?line ok = ?FILE_MODULE:close(Fd7),
+ {ok, Fd} = ?FILE_MODULE:open(Advise, [write]),
+ ok = ?FILE_MODULE:advise(Fd, 0, 0, normal),
+ ok = io:format(Fd, "~s", [Line1]),
+ ok = io:format(Fd, "~s", [Line2]),
+ ok = ?FILE_MODULE:close(Fd),
+
+ {ok, Fd2} = ?FILE_MODULE:open(Advise, [write]),
+ ok = ?FILE_MODULE:advise(Fd2, 0, 0, random),
+ ok = io:format(Fd2, "~s", [Line1]),
+ ok = io:format(Fd2, "~s", [Line2]),
+ ok = ?FILE_MODULE:close(Fd2),
+
+ {ok, Fd3} = ?FILE_MODULE:open(Advise, [write]),
+ ok = ?FILE_MODULE:advise(Fd3, 0, 0, sequential),
+ ok = io:format(Fd3, "~s", [Line1]),
+ ok = io:format(Fd3, "~s", [Line2]),
+ ok = ?FILE_MODULE:close(Fd3),
+
+ {ok, Fd4} = ?FILE_MODULE:open(Advise, [write]),
+ ok = ?FILE_MODULE:advise(Fd4, 0, 0, will_need),
+ ok = io:format(Fd4, "~s", [Line1]),
+ ok = io:format(Fd4, "~s", [Line2]),
+ ok = ?FILE_MODULE:close(Fd4),
+
+ {ok, Fd5} = ?FILE_MODULE:open(Advise, [write]),
+ ok = ?FILE_MODULE:advise(Fd5, 0, 0, dont_need),
+ ok = io:format(Fd5, "~s", [Line1]),
+ ok = io:format(Fd5, "~s", [Line2]),
+ ok = ?FILE_MODULE:close(Fd5),
+
+ {ok, Fd6} = ?FILE_MODULE:open(Advise, [write]),
+ ok = ?FILE_MODULE:advise(Fd6, 0, 0, no_reuse),
+ ok = io:format(Fd6, "~s", [Line1]),
+ ok = io:format(Fd6, "~s", [Line2]),
+ ok = ?FILE_MODULE:close(Fd6),
+
+ {ok, Fd7} = ?FILE_MODULE:open(Advise, [write]),
+ {error, einval} = ?FILE_MODULE:advise(Fd7, 0, 0, bad_advise),
+ ok = ?FILE_MODULE:close(Fd7),
%% test write without advise, then a read after an advise
- ?line {ok, Fd8} = ?FILE_MODULE:open(Advise, [write]),
- ?line ok = io:format(Fd8, "~s", [Line1]),
- ?line ok = io:format(Fd8, "~s", [Line2]),
- ?line ok = ?FILE_MODULE:close(Fd8),
- ?line {ok, Fd9} = ?FILE_MODULE:open(Advise, [read]),
+ {ok, Fd8} = ?FILE_MODULE:open(Advise, [write]),
+ ok = io:format(Fd8, "~s", [Line1]),
+ ok = io:format(Fd8, "~s", [Line2]),
+ ok = ?FILE_MODULE:close(Fd8),
+ {ok, Fd9} = ?FILE_MODULE:open(Advise, [read]),
Offset = 0,
%% same as a 0 length in some implementations
Length = length(Line1) + length(Line2),
- ?line ok = ?FILE_MODULE:advise(Fd9, Offset, Length, sequential),
- ?line {ok, Line1} = ?FILE_MODULE:read_line(Fd9),
- ?line {ok, Line2} = ?FILE_MODULE:read_line(Fd9),
- ?line eof = ?FILE_MODULE:read_line(Fd9),
- ?line ok = ?FILE_MODULE:close(Fd9),
-
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ ok = ?FILE_MODULE:advise(Fd9, Offset, Length, sequential),
+ {ok, Line1} = ?FILE_MODULE:read_line(Fd9),
+ {ok, Line2} = ?FILE_MODULE:read_line(Fd9),
+ eof = ?FILE_MODULE:read_line(Fd9),
+ ok = ?FILE_MODULE:close(Fd9),
+
+ [] = flush(),
ok.
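
%% A short sketch (outside the suite) of a typical use of file:advise/4:
%% hint the OS that a file will be read sequentially before scanning it.
%% The Name argument is hypothetical, and a Length of 0 is taken to mean
%% "to the end of the file", as noted above.
advise_sequential_sketch(Name) ->
    {ok, Fd} = file:open(Name, [read, raw, binary]),
    ok = file:advise(Fd, 0, 0, sequential),
    Data = case file:read(Fd, 1024) of
               {ok, Bytes} -> Bytes;
               eof -> <<>>
           end,
    ok = file:close(Fd),
    Data.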
-allocate(suite) -> [];
-allocate(doc) -> "Tests that ?FILE_MODULE:allocate/3 at least doesn't crash.";
+%% Tests that ?FILE_MODULE:allocate/3 at least doesn't crash.
allocate(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line PrivDir = ?config(priv_dir, Config),
- ?line Allocate = filename:join(PrivDir,
- atom_to_list(?MODULE)
- ++"_allocate.fil"),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Allocate = filename:join(PrivDir,
+ atom_to_list(?MODULE)
+ ++"_allocate.fil"),
Line1 = "Hello\n",
Line2 = "World!\n",
- ?line {ok, Fd} = ?FILE_MODULE:open(Allocate, [write, binary]),
+ {ok, Fd} = ?FILE_MODULE:open(Allocate, [write, binary]),
allocate_and_assert(Fd, 1, iolist_size([Line1, Line2])),
- ?line ok = io:format(Fd, "~s", [Line1]),
- ?line ok = io:format(Fd, "~s", [Line2]),
- ?line ok = ?FILE_MODULE:close(Fd),
+ ok = io:format(Fd, "~s", [Line1]),
+ ok = io:format(Fd, "~s", [Line2]),
+ ok = ?FILE_MODULE:close(Fd),
- ?line {ok, Fd2} = ?FILE_MODULE:open(Allocate, [write, binary]),
+ {ok, Fd2} = ?FILE_MODULE:open(Allocate, [write, binary]),
allocate_and_assert(Fd2, 1, iolist_size(Line1)),
- ?line ok = io:format(Fd2, "~s", [Line1]),
- ?line ok = io:format(Fd2, "~s", [Line2]),
- ?line ok = ?FILE_MODULE:close(Fd2),
+ ok = io:format(Fd2, "~s", [Line1]),
+ ok = io:format(Fd2, "~s", [Line2]),
+ ok = ?FILE_MODULE:close(Fd2),
- ?line {ok, Fd3} = ?FILE_MODULE:open(Allocate, [write, binary]),
+ {ok, Fd3} = ?FILE_MODULE:open(Allocate, [write, binary]),
allocate_and_assert(Fd3, 1, iolist_size(Line1) + 1),
- ?line ok = io:format(Fd3, "~s", [Line1]),
- ?line ok = io:format(Fd3, "~s", [Line2]),
- ?line ok = ?FILE_MODULE:close(Fd3),
+ ok = io:format(Fd3, "~s", [Line1]),
+ ok = io:format(Fd3, "~s", [Line2]),
+ ok = ?FILE_MODULE:close(Fd3),
- ?line {ok, Fd4} = ?FILE_MODULE:open(Allocate, [write, binary]),
+ {ok, Fd4} = ?FILE_MODULE:open(Allocate, [write, binary]),
allocate_and_assert(Fd4, 1, 4 * iolist_size([Line1, Line2])),
- ?line ok = io:format(Fd4, "~s", [Line1]),
- ?line ok = io:format(Fd4, "~s", [Line2]),
- ?line ok = ?FILE_MODULE:close(Fd4),
+ ok = io:format(Fd4, "~s", [Line1]),
+ ok = io:format(Fd4, "~s", [Line2]),
+ ok = ?FILE_MODULE:close(Fd4),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
allocate_and_assert(Fd, Offset, Length) ->
- % Just verify that calls to ?PRIM_FILE:allocate/3 don't crash or have
- % any other negative side effect. We can't really asssert against a
- % specific return value, because support for file space pre-allocation
- % depends on the OS, OS version and underlying filesystem.
- %
- % The Linux kernel added support for fallocate() in version 2.6.23,
- % which currently works only for the ext4, ocfs2, xfs and btrfs file
- % systems. posix_fallocate() is available in glibc as of version
- % 2.1.94, but it was buggy until glibc version 2.7.
- %
- % Mac OS X, as of version 10.3, supports the fcntl operation F_PREALLOCATE.
- %
- % Solaris supports posix_fallocate() but only for the UFS file system
- % apparently (not supported for ZFS).
- %
- % FreeBSD 9.0 is the first FreeBSD release supporting posix_fallocate().
- %
- % For Windows there's apparently no way to pre-allocate file space, at
- % least with same semantics as posix_fallocate(), fallocate() and
- % fcntl F_PREALLOCATE.
+ %% Just verify that calls to ?PRIM_FILE:allocate/3 don't crash or have
+ %% any other negative side effect. We can't really assert against a
+ %% specific return value, because support for file space pre-allocation
+ %% depends on the OS, OS version and underlying filesystem.
+ %%
+ %% The Linux kernel added support for fallocate() in version 2.6.23,
+ %% which currently works only for the ext4, ocfs2, xfs and btrfs file
+ %% systems. posix_fallocate() is available in glibc as of version
+ %% 2.1.94, but it was buggy until glibc version 2.7.
+ %%
+ %% Mac OS X, as of version 10.3, supports the fcntl operation F_PREALLOCATE.
+ %%
+ %% Solaris supports posix_fallocate() but only for the UFS file system
+ %% apparently (not supported for ZFS).
+ %%
+ %% FreeBSD 9.0 is the first FreeBSD release supporting posix_fallocate().
+ %%
+ %% For Windows there's apparently no way to pre-allocate file space, at
+ %% least with same semantics as posix_fallocate(), fallocate() and
+ %% fcntl F_PREALLOCATE.
Result = ?FILE_MODULE:allocate(Fd, Offset, Length),
case os:type() of
{win32, _} ->
- ?line {error, enotsup} = Result;
+ {error, enotsup} = Result;
_ ->
- ?line _ = Result
+ _ = Result
end.
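
%% A rough sketch (separate from the suite) of how a caller might use
%% file:allocate/3 given the portability caveats above: treat any error as
%% "no preallocation" and carry on, since support depends on the OS, the OS
%% version and the underlying filesystem. Fd and Size are assumed to come
%% from the caller; the helper name is illustrative.
maybe_preallocate(Fd, Size) ->
    case file:allocate(Fd, 0, Size) of
        ok ->
            preallocated;
        {error, _Reason} ->
            %% For example enotsup on Windows, or on filesystems without
            %% fallocate()/posix_fallocate() support.
            not_preallocated
    end.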
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-delete(suite) -> [];
-delete(doc) -> [];
delete(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_delete.fil"),
- ?line {ok, Fd1} = ?FILE_MODULE:open(Name, write),
- ?line io:format(Fd1,"ok.\n",[]),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_delete.fil"),
+ {ok, Fd1} = ?FILE_MODULE:open(Name, write),
+ io:format(Fd1,"ok.\n",[]),
+ ok = ?FILE_MODULE:close(Fd1),
%% Check that the file is readable
- ?line {ok, Fd2} = ?FILE_MODULE:open(Name, read),
- ?line ok = ?FILE_MODULE:close(Fd2),
- ?line ok = ?FILE_MODULE:delete(Name),
+ {ok, Fd2} = ?FILE_MODULE:open(Name, read),
+ ok = ?FILE_MODULE:close(Fd2),
+ ok = ?FILE_MODULE:delete(Name),
%% Check that the file is not readable anymore
- ?line {error, _} = ?FILE_MODULE:open(Name, read),
+ {error, _} = ?FILE_MODULE:open(Name, read),
%% Try deleting a nonexistent file
- ?line {error, enoent} = ?FILE_MODULE:delete(Name),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ {error, enoent} = ?FILE_MODULE:delete(Name),
+ [] = flush(),
ok.
-rename(suite) ->[];
-rename(doc) ->[];
rename(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line FileName1 = atom_to_list(?MODULE)++"_rename.fil",
- ?line FileName2 = atom_to_list(?MODULE)++"_rename.ful",
- ?line Name1 = filename:join(RootDir, FileName1),
- ?line Name2 = filename:join(RootDir, FileName2),
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name1,write),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ RootDir = proplists:get_value(priv_dir,Config),
+ FileName1 = atom_to_list(?MODULE)++"_rename.fil",
+ FileName2 = atom_to_list(?MODULE)++"_rename.ful",
+ Name1 = filename:join(RootDir, FileName1),
+ Name2 = filename:join(RootDir, FileName2),
+ {ok,Fd1} = ?FILE_MODULE:open(Name1,write),
+ ok = ?FILE_MODULE:close(Fd1),
%% Rename, and check that it really changed name
- ?line ok = ?FILE_MODULE:rename(Name1,Name2),
- ?line {error, _} = ?FILE_MODULE:open(Name1,read),
- ?line {ok,Fd2} = ?FILE_MODULE:open(Name2,read),
- ?line ok = ?FILE_MODULE:close(Fd2),
+ ok = ?FILE_MODULE:rename(Name1,Name2),
+ {error, _} = ?FILE_MODULE:open(Name1,read),
+ {ok,Fd2} = ?FILE_MODULE:open(Name2,read),
+ ok = ?FILE_MODULE:close(Fd2),
%% Try renaming something to itself
- ?line ok = ?FILE_MODULE:rename(Name2,Name2),
+ ok = ?FILE_MODULE:rename(Name2,Name2),
%% Try renaming something that doesn't exist
- ?line {error, enoent} = ?FILE_MODULE:rename(Name1,Name2),
+ {error, enoent} = ?FILE_MODULE:rename(Name1,Name2),
%% Try renaming to something else than a string
- ?line {error, badarg} = ?FILE_MODULE:rename(Name1,{foo,bar}),
-
+ {error, badarg} = ?FILE_MODULE:rename(Name1,{foo,bar}),
+
%% Move between directories
- ?line DirName1 = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_rename_dir"),
- ?line DirName2 = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_second_rename_dir"),
- ?line Name1foo = filename:join(DirName1, "foo.fil"),
- ?line Name2foo = filename:join(DirName2, "foo.fil"),
- ?line Name2bar = filename:join(DirName2, "bar.dir"),
- ?line ok = ?FILE_MODULE:make_dir(DirName1),
+ DirName1 = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_rename_dir"),
+ DirName2 = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_second_rename_dir"),
+ Name1foo = filename:join(DirName1, "foo.fil"),
+ Name2foo = filename:join(DirName2, "foo.fil"),
+ Name2bar = filename:join(DirName2, "bar.dir"),
+ ok = ?FILE_MODULE:make_dir(DirName1),
%% The name has to include the full file name, the path is not enough
- ?line expect({error, eisdir}, {error, eexist},
- ?FILE_MODULE:rename(Name2,DirName1)),
- ?line ok = ?FILE_MODULE:rename(Name2, Name1foo),
+ expect({error, eisdir}, {error, eexist},
+ ?FILE_MODULE:rename(Name2,DirName1)),
+ ok = ?FILE_MODULE:rename(Name2, Name1foo),
%% Now rename the directory
- ?line ok = ?FILE_MODULE:rename(DirName1,DirName2),
+ ok = ?FILE_MODULE:rename(DirName1,DirName2),
%% And check that the file is there now
- ?line {ok,Fd3} = ?FILE_MODULE:open(Name2foo, read),
- ?line ok = ?FILE_MODULE:close(Fd3),
+ {ok,Fd3} = ?FILE_MODULE:open(Name2foo, read),
+ ok = ?FILE_MODULE:close(Fd3),
%% Try some dirty things now: move the directory into itself
- ?line {error, Msg1} = ?FILE_MODULE:rename(DirName2, Name2bar),
- ?line io:format("Errmsg1: ~p",[Msg1]),
+ {error, Msg1} = ?FILE_MODULE:rename(DirName2, Name2bar),
+ io:format("Errmsg1: ~p",[Msg1]),
%% move dir into a file in itself
- ?line {error, Msg2} = ?FILE_MODULE:rename(DirName2, Name2foo),
- ?line io:format("Errmsg2: ~p",[Msg2]),
+ {error, Msg2} = ?FILE_MODULE:rename(DirName2, Name2foo),
+ io:format("Errmsg2: ~p",[Msg2]),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-names(suite) -> [];
-names(doc) -> [];
names(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(5)),
- ?line RootDir = ?config(priv_dir,Config),
- ?line FileName = "foo1.fil",
- ?line Name1 = filename:join(RootDir, FileName),
- ?line Name2 = [RootDir,"/","foo1",".","fil"],
- ?line Name3 = [RootDir,"/",foo,$1,[[[],[],'.']],"f",il],
- ?line {ok,Fd0} = ?FILE_MODULE:open(Name1,write),
- ?line ok = ?FILE_MODULE:close(Fd0),
+ RootDir = proplists:get_value(priv_dir,Config),
+ FileName = "foo1.fil",
+ Name1 = filename:join(RootDir, FileName),
+ Name2 = [RootDir,"/","foo1",".","fil"],
+ Name3 = [RootDir,"/",foo,$1,[[[],[],'.']],"f",il],
+ {ok,Fd0} = ?FILE_MODULE:open(Name1,write),
+ ok = ?FILE_MODULE:close(Fd0),
%% Try some file names
- ?line {ok,Fd1} = ?FILE_MODULE:open(Name1,read),
- ?line ok = ?FILE_MODULE:close(Fd1),
- ?line {ok,Fd2f} = ?FILE_MODULE:open(lists:flatten(Name2),read),
- ?line ok = ?FILE_MODULE:close(Fd2f),
- ?line {ok,Fd2} = ?FILE_MODULE:open(Name2,read),
- ?line ok = ?FILE_MODULE:close(Fd2),
- ?line {ok,Fd3} = ?FILE_MODULE:open(Name3,read),
- ?line ok = ?FILE_MODULE:close(Fd3),
- case length(Name1) > 255 of
- true ->
- io:format("Path too long for an atom:\n\n~p\n", [Name1]);
- false ->
- Name4 = list_to_atom(Name1),
- {ok,Fd4} = ?FILE_MODULE:open(Name4,read),
- ok = ?FILE_MODULE:close(Fd4)
- end,
+ {ok,Fd1} = ?FILE_MODULE:open(Name1,read),
+ ok = ?FILE_MODULE:close(Fd1),
+ {ok,Fd2f} = ?FILE_MODULE:open(lists:flatten(Name2),read),
+ ok = ?FILE_MODULE:close(Fd2f),
+ {ok,Fd2} = ?FILE_MODULE:open(Name2,read),
+ ok = ?FILE_MODULE:close(Fd2),
+ {ok,Fd3} = ?FILE_MODULE:open(Name3,read),
+ ok = ?FILE_MODULE:close(Fd3),
+
+ %% Now try the same on raw files.
+ {ok,Fd4} = ?FILE_MODULE:open(Name2, [read, raw]),
+ ok = ?FILE_MODULE:close(Fd4),
+ {ok,Fd4f} = ?FILE_MODULE:open(lists:flatten(Name2), [read, raw]),
+ ok = ?FILE_MODULE:close(Fd4f),
+ {ok,Fd5} = ?FILE_MODULE:open(Name3, [read, raw]),
+ ok = ?FILE_MODULE:close(Fd5),
+
+ case length(Name1) > 255 of
+ true ->
+ io:format("Path too long for an atom:\n\n~p\n", [Name1]);
+ false ->
+ Name4 = list_to_atom(Name1),
+ {ok,Fd6} = ?FILE_MODULE:open(Name4,read),
+ ok = ?FILE_MODULE:close(Fd6)
+ end,
%% Try some path names
- ?line Path1 = RootDir,
- ?line Path2 = [RootDir],
- ?line Path3 = ['',[],[RootDir,[[]]]],
- ?line {ok,Fd11,_} = ?FILE_MODULE:path_open([Path1],FileName,read),
- ?line ok = ?FILE_MODULE:close(Fd11),
- ?line {ok,Fd12,_} = ?FILE_MODULE:path_open([Path2],FileName,read),
- ?line ok = ?FILE_MODULE:close(Fd12),
- ?line {ok,Fd13,_} = ?FILE_MODULE:path_open([Path3],FileName,read),
- ?line ok = ?FILE_MODULE:close(Fd13),
- case length(Path1) > 255 of
- true->
- io:format("Path too long for an atom:\n\n~p\n", [Path1]);
- false ->
- Path4 = list_to_atom(Path1),
- {ok,Fd14,_} = ?FILE_MODULE:path_open([Path4],FileName,read),
- ok = ?FILE_MODULE:close(Fd14)
- end,
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ Path1 = RootDir,
+ Path2 = [RootDir],
+ Path3 = ['',[],[RootDir,[[]]]],
+ {ok,Fd11,_} = ?FILE_MODULE:path_open([Path1],FileName,read),
+ ok = ?FILE_MODULE:close(Fd11),
+ {ok,Fd12,_} = ?FILE_MODULE:path_open([Path2],FileName,read),
+ ok = ?FILE_MODULE:close(Fd12),
+ {ok,Fd13,_} = ?FILE_MODULE:path_open([Path3],FileName,read),
+ ok = ?FILE_MODULE:close(Fd13),
+ case length(Path1) > 255 of
+ true->
+ io:format("Path too long for an atom:\n\n~p\n", [Path1]);
+ false ->
+ Path4 = list_to_atom(Path1),
+ {ok,Fd14,_} = ?FILE_MODULE:path_open([Path4],FileName,read),
+ ok = ?FILE_MODULE:close(Fd14)
+ end,
+ [] = flush(),
ok.
+volume_relative_paths(Config) when is_list(Config) ->
+ case os:type() of
+ {win32, _} ->
+ {ok, [Drive, $: | _]} = file:get_cwd(),
+ %% Relative to current device root.
+ {ok, RootInfo} = file:read_file_info([Drive, $:, $/]),
+ {ok, RootInfo} = file:read_file_info("/"),
+ %% Relative to current device directory.
+ {ok, DirContents} = file:list_dir([Drive, $:]),
+ {ok, DirContents} = file:list_dir("."),
+ [] = flush(),
+ ok;
+ _ ->
+ {skip, "This test is Windows-specific."}
+ end.
+
+unc_paths(Config) when is_list(Config) ->
+ case os:type() of
+ {win32, _} ->
+ %% We assume administrative shares are set up and reachable, and we
+ %% settle for testing presence as some of the returned data is
+ %% different.
+ {ok, _} = file:read_file_info("C:\\Windows\\explorer.exe"),
+ {ok, _} = file:read_file_info("\\\\localhost\\c$\\Windows\\explorer.exe"),
+
+ {ok, Cwd} = file:get_cwd(),
+
+ try
+ ok = file:set_cwd("\\\\localhost\\c$\\Windows\\"),
+ {ok, _} = file:read_file_info("explorer.exe")
+ after
+ file:set_cwd(Cwd)
+ end,
+
+ [] = flush(),
+ ok;
+ _ ->
+ {skip, "This test is Windows-specific."}
+ end.
+
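%% Illustrative sketch (editor's note, not part of the patch): the two
%% cases above rely on Windows resolving "X:name" relative to the current
%% directory of drive X, while "\\host\share\..." (UNC) names are absolute.
%% Assumes a Windows node with administrative shares reachable; results
%% are only pattern matched, not inspected.
windows_path_sketch() ->
    {ok, [Drive, $: | _]} = file:get_cwd(),
    %% Drive-relative: "X:" is the current directory of drive X.
    {ok, _Names} = file:list_dir([Drive, $:]),
    %% UNC path to the root of the same drive via its administrative share.
    {ok, _Info} = file:read_file_info("\\\\localhost\\" ++ [Drive, $$, $\\]),
    ok.
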
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-e_delete(suite) -> [];
-e_delete(doc) -> [];
e_delete(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line Base = filename:join(RootDir,
- atom_to_list(?MODULE)++"_e_delete"),
- ?line ok = ?FILE_MODULE:make_dir(Base),
+ RootDir = proplists:get_value(priv_dir, Config),
+ Base = filename:join(RootDir,
+ atom_to_list(?MODULE)++"_e_delete"),
+ ok = ?FILE_MODULE:make_dir(Base),
%% Delete a non-existing file.
- ?line {error, enoent} =
+ {error, enoent} =
?FILE_MODULE:delete(filename:join(Base, "non_existing")),
%% Delete a directory.
- ?line {error, eperm} = ?FILE_MODULE:delete(Base),
+ {error, eperm} = ?FILE_MODULE:delete(Base),
%% Use a path-name with a non-directory component.
- ?line Afile = filename:join(Base, "a_file"),
- ?line ok = ?FILE_MODULE:write_file(Afile, "hello\n"),
- ?line {error, E} =
+ Afile = filename:join(Base, "a_file"),
+ ok = ?FILE_MODULE:write_file(Afile, "hello\n"),
+ {error, E} =
expect({error, enotdir}, {error, enoent},
?FILE_MODULE:delete(filename:join(Afile, "another_file"))),
- ?line io:format("Result: ~p~n", [E]),
+ io:format("Result: ~p~n", [E]),
%% No permission.
- ?line case os:type() of
- {win32, _} ->
- %% Remove a character device.
- ?line {error, eacces} = ?FILE_MODULE:delete("nul");
- _ ->
- ?line ?FILE_MODULE:write_file_info(
- Base, #file_info {mode=0}),
- ?line {error, eacces} = ?FILE_MODULE:delete(Afile),
- ?line ?FILE_MODULE:write_file_info(
- Base, #file_info {mode=8#600})
- end,
+ case os:type() of
+ {win32, _} ->
+ %% Remove a character device.
+ expect({error, eacces}, {error, einval},
+ ?FILE_MODULE:delete("nul"));
+ _ ->
+ ?FILE_MODULE:write_file_info(
+ Base, #file_info {mode=0}),
+ {error, eacces} = ?FILE_MODULE:delete(Afile),
+ ?FILE_MODULE:write_file_info(
+ Base, #file_info {mode=8#700})
+ end,
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
%%% FreeBSD gives EEXIST when renaming a file to an empty dir, although the
@@ -2278,13 +2252,10 @@ e_delete(Config) when is_list(Config) ->
%%% (What about FreeBSD? We store our nightly build results on a FreeBSD
%%% file system, that's what.)
-e_rename(suite) -> [];
-e_rename(doc) -> [];
e_rename(Config) when is_list(Config) ->
- Dog = test_server:timetrap(test_server:seconds(10)),
- RootDir = ?config(priv_dir, Config),
+ RootDir = proplists:get_value(priv_dir, Config),
Base = filename:join(RootDir,
- atom_to_list(?MODULE)++"_e_rename"),
+ atom_to_list(?MODULE)++"_e_rename"),
ok = ?FILE_MODULE:make_dir(Base),
%% Create an empty directory.
@@ -2295,15 +2266,15 @@ e_rename(Config) when is_list(Config) ->
NonEmptyDir = filename:join(Base, "non_empty_dir"),
ok = ?FILE_MODULE:make_dir(NonEmptyDir),
ok = ?FILE_MODULE:write_file(
- filename:join(NonEmptyDir, "a_file"),
- "hello\n"),
+ filename:join(NonEmptyDir, "a_file"),
+ "hello\n"),
%% Create another non-empty directory.
ADirectory = filename:join(Base, "a_directory"),
ok = ?FILE_MODULE:make_dir(ADirectory),
ok = ?FILE_MODULE:write_file(
- filename:join(ADirectory, "a_file"),
- "howdy\n\n"),
+ filename:join(ADirectory, "a_file"),
+ "howdy\n\n"),
%% Create a data file.
File = filename:join(Base, "just_a_file"),
@@ -2317,15 +2288,15 @@ e_rename(Config) when is_list(Config) ->
%% Move Base into Base/new_name.
{error, einval} =
- ?FILE_MODULE:rename(Base, filename:join(Base, "new_name")),
+ ?FILE_MODULE:rename(Base, filename:join(Base, "new_name")),
%% Overwrite a directory with a file.
expect({error, eexist}, %FreeBSD (?)
- {error, eisdir},
- ?FILE_MODULE:rename(File, EmptyDir)),
+ {error, eisdir},
+ ?FILE_MODULE:rename(File, EmptyDir)),
expect({error, eexist}, %FreeBSD (?)
- {error, eisdir},
- ?FILE_MODULE:rename(File, NonEmptyDir)),
+ {error, eisdir},
+ ?FILE_MODULE:rename(File, NonEmptyDir)),
%% Move a non-existing file.
NonExistingFile = filename:join(Base, "non_existing_file"),
@@ -2333,8 +2304,8 @@ e_rename(Config) when is_list(Config) ->
%% Overwrite a file with a directory.
expect({error, eexist}, %FreeBSD (?)
- {error, enotdir},
- ?FILE_MODULE:rename(ADirectory, File)),
+ {error, enotdir},
+ ?FILE_MODULE:rename(ADirectory, File)),
%% Move a file to another filesystem.
%% XXX - This test case is bogus. We cannot be guaranteed that
@@ -2343,49 +2314,42 @@ e_rename(Config) when is_list(Config) ->
%%
%% XXX - Gross hack!
Comment = case os:type() of
- {unix, _} ->
- OtherFs = "/tmp",
- NameOnOtherFs = filename:join(OtherFs, filename:basename(File)),
- {ok, Com} = case ?FILE_MODULE:rename(File, NameOnOtherFs) of
- {error, exdev} ->
- %% The file could be in
- %% the same filesystem!
- {ok, ok};
- ok ->
- {ok, {comment,
- "Moving between filesystems "
- "suceeded, files are probably "
- "in the same filesystem!"}};
- {error, eperm} ->
- {ok, {comment, "SBS! You don't "
- "have the permission to do "
- "this test!"}};
- Else ->
- Else
- end,
- Com;
- {win32, _} ->
- %% At least Windows NT can
- %% successfully move a file to
- %% another drive.
- ok;
- {ose, _} ->
- %% disabled for now
- ok
- end,
+ {unix, _} ->
+ OtherFs = "/tmp",
+ NameOnOtherFs = filename:join(OtherFs, filename:basename(File)),
+ {ok, Com} = case ?FILE_MODULE:rename(File, NameOnOtherFs) of
+ {error, exdev} ->
+ %% The file could be in
+ %% the same filesystem!
+ {ok, ok};
+ ok ->
+ {ok, {comment,
+ "Moving between filesystems "
+                                    "succeeded, files are probably "
+ "in the same filesystem!"}};
+ {error, eperm} ->
+ {ok, {comment, "SBS! You don't "
+ "have the permission to do "
+ "this test!"}};
+ Else ->
+ Else
+ end,
+ Com;
+ {win32, _} ->
+ %% At least Windows NT can
+ %% successfully move a file to
+ %% another drive.
+ ok
+ end,
[] = flush(),
- test_server:timetrap_cancel(Dog),
Comment.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-e_make_dir(suite) -> [];
-e_make_dir(doc) -> [];
e_make_dir(Config) when is_list(Config) ->
- Dog = test_server:timetrap(test_server:seconds(10)),
- RootDir = ?config(priv_dir, Config),
+ RootDir = proplists:get_value(priv_dir, Config),
Base = filename:join(RootDir,
- atom_to_list(?MODULE)++"_e_make_dir"),
+ atom_to_list(?MODULE)++"_e_make_dir"),
ok = ?FILE_MODULE:make_dir(Base),
%% A component of the path does not exist.
@@ -2408,18 +2372,14 @@ e_make_dir(Config) when is_list(Config) ->
?FILE_MODULE:write_file_info(Base, #file_info {mode=0}),
{error, eacces} = ?FILE_MODULE:make_dir(filename:join(Base, "xxxx")),
?FILE_MODULE:write_file_info(
- Base, #file_info {mode=8#600})
+ Base, #file_info {mode=8#700})
end,
- test_server:timetrap_cancel(Dog),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-e_del_dir(suite) -> [];
-e_del_dir(doc) -> [];
e_del_dir(Config) when is_list(Config) ->
- Dog = test_server:timetrap(test_server:seconds(10)),
- RootDir = ?config(priv_dir, Config),
+ RootDir = proplists:get_value(priv_dir, Config),
Base = test_server:temp_name(filename:join(RootDir, "e_del_dir")),
io:format("Base: ~p", [Base]),
ok = ?FILE_MODULE:make_dir(Base),
@@ -2432,21 +2392,21 @@ e_del_dir(Config) when is_list(Config) ->
Afile = filename:join(Base, "a_directory"),
ok = ?FILE_MODULE:write_file(Afile, "hello\n"),
{error, E1} = expect({error, enotdir}, {error, enoent},
- ?FILE_MODULE:del_dir(
- filename:join(Afile, "another_directory"))),
+ ?FILE_MODULE:del_dir(
+ filename:join(Afile, "another_directory"))),
io:format("Result: ~p", [E1]),
%% Delete a non-empty directory.
{error, E2} = expect({error, enotempty}, {error, eexist}, {error, eacces},
- ?FILE_MODULE:del_dir(Base)),
+ ?FILE_MODULE:del_dir(Base)),
io:format("Result: ~p", [E2]),
%% Remove the current directory.
{error, E3} = expect({error, einval},
- {error, eperm}, % Linux and DUX
- {error, eacces},
- {error, ebusy},
- ?FILE_MODULE:del_dir(".")),
+ {error, eperm}, % Linux and DUX
+ {error, eacces},
+ {error, ebusy},
+ ?FILE_MODULE:del_dir(".")),
io:format("Result: ~p", [E3]),
%% No permission.
@@ -2458,10 +2418,9 @@ e_del_dir(Config) when is_list(Config) ->
ok = ?FILE_MODULE:make_dir(ADirectory),
?FILE_MODULE:write_file_info( Base, #file_info {mode=0}),
{error, eacces} = ?FILE_MODULE:del_dir(ADirectory),
- ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#600})
+ ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#700})
end,
[] = flush(),
- test_server:timetrap_cancel(Dog),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2470,35 +2429,35 @@ e_del_dir(Config) when is_list(Config) ->
%% Trying reading and positioning from a compressed file.
read_compressed_cooked(Config) when is_list(Config) ->
- ?line Data = ?config(data_dir, Config),
- ?line Real = filename:join(Data, "realmen.html.gz"),
- ?line {ok, Fd} = ?FILE_MODULE:open(Real, [read,compressed]),
- ?line try_read_file_list(Fd).
+ Data = proplists:get_value(data_dir, Config),
+ Real = filename:join(Data, "realmen.html.gz"),
+ {ok, Fd} = ?FILE_MODULE:open(Real, [read,compressed]),
+ try_read_file_list(Fd).
read_compressed_cooked_binary(Config) when is_list(Config) ->
- ?line Data = ?config(data_dir, Config),
- ?line Real = filename:join(Data, "realmen.html.gz"),
- ?line {ok, Fd} = ?FILE_MODULE:open(Real, [read,compressed,binary]),
- ?line try_read_file_binary(Fd).
+ Data = proplists:get_value(data_dir, Config),
+ Real = filename:join(Data, "realmen.html.gz"),
+ {ok, Fd} = ?FILE_MODULE:open(Real, [read,compressed,binary]),
+ try_read_file_binary(Fd).
%% Trying reading and positioning from an uncompressed file,
%% but with the compressed flag given.
read_not_really_compressed(Config) when is_list(Config) ->
- ?line Data = ?config(data_dir, Config),
- ?line Priv = ?config(priv_dir, Config),
+ Data = proplists:get_value(data_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
%% The file realmen.html might have got CRs added (by WinZip).
%% Remove them, or the file positions will not be correct.
- ?line Real = filename:join(Data, "realmen.html"),
- ?line RealPriv = filename:join(Priv,
- atom_to_list(?MODULE)++"_realmen.html"),
- ?line {ok, RealDataBin} = ?FILE_MODULE:read_file(Real),
- ?line RealData = remove_crs(binary_to_list(RealDataBin), []),
- ?line ok = ?FILE_MODULE:write_file(RealPriv, RealData),
- ?line {ok, Fd} = ?FILE_MODULE:open(RealPriv, [read, compressed]),
- ?line try_read_file_list(Fd).
+ Real = filename:join(Data, "realmen.html"),
+ RealPriv = filename:join(Priv,
+ atom_to_list(?MODULE)++"_realmen.html"),
+ {ok, RealDataBin} = ?FILE_MODULE:read_file(Real),
+ RealData = remove_crs(binary_to_list(RealDataBin), []),
+ ok = ?FILE_MODULE:write_file(RealPriv, RealData),
+ {ok, Fd} = ?FILE_MODULE:open(RealPriv, [read, compressed]),
+ try_read_file_list(Fd).
remove_crs([$\r|Rest], Result) ->
remove_crs(Rest, Result);
@@ -2508,146 +2467,134 @@ remove_crs([], Result) ->
lists:reverse(Result).
try_read_file_list(Fd) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
-
%% Seek to the current position (nothing should happen).
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, 0),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, {cur, 0}),
+ {ok, 0} = ?FILE_MODULE:position(Fd, 0),
+ {ok, 0} = ?FILE_MODULE:position(Fd, {cur, 0}),
%% Read a few lines from a compressed file.
- ?line ShouldBe = "<TITLE>Real Programmers Don't Use PASCAL</TITLE>\n",
- ?line ShouldBe = io:get_line(Fd, ''),
+ ShouldBe = "<TITLE>Real Programmers Don't Use PASCAL</TITLE>\n",
+ ShouldBe = io:get_line(Fd, ''),
%% Now seek forward.
- ?line {ok, 381} = ?FILE_MODULE:position(Fd, 381),
- ?line Back = "Back in the good old days -- the \"Golden Era\" " ++
+ {ok, 381} = ?FILE_MODULE:position(Fd, 381),
+ Back = "Back in the good old days -- the \"Golden Era\" " ++
"of computers, it was\n",
- ?line Back = io:get_line(Fd, ''),
+ Back = io:get_line(Fd, ''),
%% Try to search forward relative to the current position.
- ?line {ok, CurPos} = ?FILE_MODULE:position(Fd, {cur, 0}),
- ?line RealPos = 4273,
- ?line {ok, RealPos} = ?FILE_MODULE:position(Fd, {cur, RealPos-CurPos}),
- ?line RealProg = "<LI> Real Programmers aren't afraid to use GOTOs.\n",
- ?line RealProg = io:get_line(Fd, ''),
+ {ok, CurPos} = ?FILE_MODULE:position(Fd, {cur, 0}),
+ RealPos = 4273,
+ {ok, RealPos} = ?FILE_MODULE:position(Fd, {cur, RealPos-CurPos}),
+ RealProg = "<LI> Real Programmers aren't afraid to use GOTOs.\n",
+ RealProg = io:get_line(Fd, ''),
%% Seek backward.
- ?line AfterTitle = length("<TITLE>"),
- ?line {ok, AfterTitle} = ?FILE_MODULE:position(Fd, AfterTitle),
- ?line Title = "Real Programmers Don't Use PASCAL</TITLE>\n",
- ?line Title = io:get_line(Fd, ''),
+ AfterTitle = length("<TITLE>"),
+ {ok, AfterTitle} = ?FILE_MODULE:position(Fd, AfterTitle),
+ Title = "Real Programmers Don't Use PASCAL</TITLE>\n",
+ Title = io:get_line(Fd, ''),
%% Seek past the end of the file.
- ?line {ok, _} = ?FILE_MODULE:position(Fd, 25000),
+ {ok, _} = ?FILE_MODULE:position(Fd, 25000),
%% Done.
- ?line ?FILE_MODULE:close(Fd),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ ?FILE_MODULE:close(Fd),
+ [] = flush(),
ok.
try_read_file_binary(Fd) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
-
%% Seek to the current position (nothing should happen).
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, 0),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, {cur, 0}),
+ {ok, 0} = ?FILE_MODULE:position(Fd, 0),
+ {ok, 0} = ?FILE_MODULE:position(Fd, {cur, 0}),
%% Read a few lines from a compressed file.
- ?line ShouldBe = <<"<TITLE>Real Programmers Don't Use PASCAL</TITLE>\n">>,
- ?line ShouldBe = io:get_line(Fd, ''),
+ ShouldBe = <<"<TITLE>Real Programmers Don't Use PASCAL</TITLE>\n">>,
+ ShouldBe = io:get_line(Fd, ''),
%% Now seek forward.
- ?line {ok, 381} = ?FILE_MODULE:position(Fd, 381),
- ?line Back = <<"Back in the good old days -- the \"Golden Era\" "
- "of computers, it was\n">>,
- ?line Back = io:get_line(Fd, ''),
+ {ok, 381} = ?FILE_MODULE:position(Fd, 381),
+ Back = <<"Back in the good old days -- the \"Golden Era\" "
+ "of computers, it was\n">>,
+ Back = io:get_line(Fd, ''),
%% Try to search forward relative to the current position.
- ?line {ok, CurPos} = ?FILE_MODULE:position(Fd, {cur, 0}),
- ?line RealPos = 4273,
- ?line {ok, RealPos} = ?FILE_MODULE:position(Fd, {cur, RealPos-CurPos}),
- ?line RealProg = <<"<LI> Real Programmers aren't afraid to use GOTOs.\n">>,
- ?line RealProg = io:get_line(Fd, ''),
+ {ok, CurPos} = ?FILE_MODULE:position(Fd, {cur, 0}),
+ RealPos = 4273,
+ {ok, RealPos} = ?FILE_MODULE:position(Fd, {cur, RealPos-CurPos}),
+ RealProg = <<"<LI> Real Programmers aren't afraid to use GOTOs.\n">>,
+ RealProg = io:get_line(Fd, ''),
%% Seek backward.
- ?line AfterTitle = length("<TITLE>"),
- ?line {ok, AfterTitle} = ?FILE_MODULE:position(Fd, AfterTitle),
- ?line Title = <<"Real Programmers Don't Use PASCAL</TITLE>\n">>,
- ?line Title = io:get_line(Fd, ''),
+ AfterTitle = length("<TITLE>"),
+ {ok, AfterTitle} = ?FILE_MODULE:position(Fd, AfterTitle),
+ Title = <<"Real Programmers Don't Use PASCAL</TITLE>\n">>,
+ Title = io:get_line(Fd, ''),
%% Done.
- ?line ?FILE_MODULE:close(Fd),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ ?FILE_MODULE:close(Fd),
+ [] = flush(),
ok.
read_cooked_tar_problem(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
+ Data = proplists:get_value(data_dir, Config),
+ ProblemFile = filename:join(Data, "cooked_tar_problem.tar.gz"),
+ {ok,Fd} = ?FILE_MODULE:open(ProblemFile, [read,compressed,binary]),
+
+ {ok,34304} = file:position(Fd, 34304),
+ {ok,Bin} = file:read(Fd, 512),
+ 512 = byte_size(Bin),
- ?line Data = ?config(data_dir, Config),
- ?line ProblemFile = filename:join(Data, "cooked_tar_problem.tar.gz"),
- ?line {ok,Fd} = ?FILE_MODULE:open(ProblemFile, [read,compressed,binary]),
+ {ok,34304+512+1024} = file:position(Fd, {cur,1024}),
- ?line {ok,34304} = file:position(Fd, 34304),
- ?line {ok,Bin} = file:read(Fd, 512),
- ?line 512 = byte_size(Bin),
-
- ?line {ok,34304+512+1024} = file:position(Fd, {cur,1024}),
-
- ?line ok = file:close(Fd),
+ ok = file:close(Fd),
- ?line test_server:timetrap_cancel(Dog),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-write_compressed(suite) -> [];
-write_compressed(doc) -> [];
write_compressed(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line Priv = ?config(priv_dir, Config),
- ?line MyFile = filename:join(Priv,
- atom_to_list(?MODULE)++"_test.gz"),
+ Priv = proplists:get_value(priv_dir, Config),
+ MyFile = filename:join(Priv,
+ atom_to_list(?MODULE)++"_test.gz"),
%% Write a file.
- ?line {ok, Fd} = ?FILE_MODULE:open(MyFile, [write, compressed]),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, 0),
- ?line Prefix = "hello\n",
- ?line End = "end\n",
- ?line ok = io:put_chars(Fd, Prefix),
- ?line {ok, 143} = ?FILE_MODULE:position(Fd, 143),
- ?line ok = io:put_chars(Fd, End),
- ?line ok = ?FILE_MODULE:close(Fd),
+ {ok, Fd} = ?FILE_MODULE:open(MyFile, [write, compressed]),
+ {ok, 0} = ?FILE_MODULE:position(Fd, 0),
+ Prefix = "hello\n",
+ End = "end\n",
+ ok = io:put_chars(Fd, Prefix),
+ {ok, 143} = ?FILE_MODULE:position(Fd, 143),
+ ok = io:put_chars(Fd, End),
+ ok = ?FILE_MODULE:close(Fd),
%% Read the file and verify the contents.
- ?line {ok, Fd1} = ?FILE_MODULE:open(MyFile, [read, compressed]),
- ?line Prefix = io:get_line(Fd1, ''),
- ?line Second = lists:duplicate(143-length(Prefix), 0) ++ End,
- ?line Second = io:get_line(Fd1, ''),
- ?line ok = ?FILE_MODULE:close(Fd1),
+ {ok, Fd1} = ?FILE_MODULE:open(MyFile, [read, compressed]),
+ Prefix = io:get_line(Fd1, ''),
+ Second = lists:duplicate(143-length(Prefix), 0) ++ End,
+ Second = io:get_line(Fd1, ''),
+ ok = ?FILE_MODULE:close(Fd1),
%% Verify successful compression by uncompressing the file
%% using zlib:gunzip/1.
- ?line {ok,Contents} = file:read_file(MyFile),
- ?line <<"hello\n",0:137/unit:8,"end\n">> = zlib:gunzip(Contents),
+ {ok,Contents} = file:read_file(MyFile),
+ <<"hello\n",0:137/unit:8,"end\n">> = zlib:gunzip(Contents),
%% Ensure that the file is compressed.
@@ -2656,99 +2603,92 @@ write_compressed(Config) when is_list(Config) ->
{ok, #file_info{size=Size}} when Size < TotalSize ->
ok;
{ok, #file_info{size=Size}} when Size == TotalSize ->
- test_server:fail(file_not_compressed)
+ ct:fail(file_not_compressed)
end,
%% Write again to ensure that the file is truncated.
- ?line {ok, Fd2} = ?FILE_MODULE:open(MyFile, [write, compressed]),
- ?line NewString = "aaaaaaaaaaa",
- ?line ok = io:put_chars(Fd2, NewString),
- ?line ok = ?FILE_MODULE:close(Fd2),
- ?line {ok, Fd3} = ?FILE_MODULE:open(MyFile, [read, compressed]),
- ?line {ok, NewString} = ?FILE_MODULE:read(Fd3, 1024),
- ?line ok = ?FILE_MODULE:close(Fd3),
+ {ok, Fd2} = ?FILE_MODULE:open(MyFile, [write, compressed]),
+ NewString = "aaaaaaaaaaa",
+ ok = io:put_chars(Fd2, NewString),
+ ok = ?FILE_MODULE:close(Fd2),
+ {ok, Fd3} = ?FILE_MODULE:open(MyFile, [read, compressed]),
+ {ok, NewString} = ?FILE_MODULE:read(Fd3, 1024),
+ ok = ?FILE_MODULE:close(Fd3),
%% Done.
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
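
%% Illustrative sketch (editor's note, not part of the patch): opening a
%% file with [write, compressed] produces gzip data, which is why the test
%% above can verify the result with zlib:gunzip/1. The path is hypothetical.
compressed_write_sketch() ->
    {ok, Fd} = file:open("/tmp/sketch.gz", [write, compressed]),
    ok = file:write(Fd, "hello\n"),
    ok = file:close(Fd),
    {ok, Gz} = file:read_file("/tmp/sketch.gz"),
    <<"hello\n">> = zlib:gunzip(Gz),
    ok.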
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
catenated_gzips(Config) when is_list(Config) ->
- ?line Priv = ?config(priv_dir, Config),
- ?line MyFile = filename:join(Priv, ?MODULE_STRING++"_test.gz"),
+ Priv = proplists:get_value(priv_dir, Config),
+ MyFile = filename:join(Priv, ?MODULE_STRING++"_test.gz"),
First = "Hello, all good men going to search parties. ",
Second = "Now I really need your help.",
All = iolist_to_binary([First|Second]),
- ?line Cat = [zlib:gzip(First),zlib:gzip(Second)],
-
- ?line ok = file:write_file(MyFile, Cat),
+ Cat = [zlib:gzip(First),zlib:gzip(Second)],
- ?line {ok,Fd} = file:open(MyFile, [read,compressed,binary]),
- ?line {ok,All} = file:read(Fd, 100000),
- ?line ok = file:close(Fd),
+ ok = file:write_file(MyFile, Cat),
+
+ {ok,Fd} = file:open(MyFile, [read,compressed,binary]),
+ {ok,All} = file:read(Fd, 100000),
+ ok = file:close(Fd),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-compress_errors(suite) -> [];
-compress_errors(doc) -> [];
compress_errors(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line DataDir =
+ DataDir =
filename:dirname(
- filename:join(?config(data_dir, Config), "x")),
- ?line DataDirSlash = DataDir++"/",
- ?line {error, enoent} = ?FILE_MODULE:open("non_existing__",
- [compressed, read]),
- ?line {error, einval} = ?FILE_MODULE:open("non_existing__",
- [compressed, read, write]),
- ?line {error, einval} = ?FILE_MODULE:open("non_existing__",
- [compressed, read, append]),
- ?line {error, einval} = ?FILE_MODULE:open("non_existing__",
- [compressed, write, append]),
- ?line {error, E1} = ?FILE_MODULE:open(DataDir, [compressed, read]),
- ?line {error, E2} = ?FILE_MODULE:open(DataDirSlash, [compressed, read]),
- ?line {error, E3} = ?FILE_MODULE:open(DataDir, [compressed, write]),
- ?line {error, E4} = ?FILE_MODULE:open(DataDirSlash, [compressed, write]),
- ?line {eisdir,eisdir,eisdir,eisdir} = {E1,E2,E3,E4},
+ filename:join(proplists:get_value(data_dir, Config), "x")),
+ DataDirSlash = DataDir++"/",
+ {error, enoent} = ?FILE_MODULE:open("non_existing__",
+ [compressed, read]),
+ {error, einval} = ?FILE_MODULE:open("non_existing__",
+ [compressed, read, write]),
+ {error, einval} = ?FILE_MODULE:open("non_existing__",
+ [compressed, read, append]),
+ {error, einval} = ?FILE_MODULE:open("non_existing__",
+ [compressed, write, append]),
+ {error, E1} = ?FILE_MODULE:open(DataDir, [compressed, read]),
+ {error, E2} = ?FILE_MODULE:open(DataDirSlash, [compressed, read]),
+ {error, E3} = ?FILE_MODULE:open(DataDir, [compressed, write]),
+ {error, E4} = ?FILE_MODULE:open(DataDirSlash, [compressed, write]),
+ {eisdir,eisdir,eisdir,eisdir} = {E1,E2,E3,E4},
%% Read a corrupted .gz file.
- ?line Corrupted = filename:join(DataDir, "corrupted.gz"),
- ?line {ok, Fd} = ?FILE_MODULE:open(Corrupted, [read, compressed]),
- ?line {error, eio} = ?FILE_MODULE:read(Fd, 100),
- ?line ?FILE_MODULE:close(Fd),
+ Corrupted = filename:join(DataDir, "corrupted.gz"),
+ {ok, Fd} = ?FILE_MODULE:open(Corrupted, [read, compressed]),
+ {error, eio} = ?FILE_MODULE:read(Fd, 100),
+ ?FILE_MODULE:close(Fd),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-compress_async_crash(suite) -> [];
-compress_async_crash(doc) -> [];
compress_async_crash(Config) when is_list(Config) ->
- ?line DataDir = ?config(data_dir, Config),
- ?line Path = filename:join(DataDir, "test.gz"),
+ DataDir = proplists:get_value(data_dir, Config),
+ Path = filename:join(DataDir, "test.gz"),
ExpectedData = <<"qwerty">>,
- ?line _ = ?FILE_MODULE:delete(Path),
- ?line {ok, Fd} = ?FILE_MODULE:open(Path, [write, binary, compressed]),
- ?line ok = ?FILE_MODULE:write(Fd, ExpectedData),
- ?line ok = ?FILE_MODULE:close(Fd),
+ _ = ?FILE_MODULE:delete(Path),
+ {ok, Fd} = ?FILE_MODULE:open(Path, [write, binary, compressed]),
+ ok = ?FILE_MODULE:write(Fd, ExpectedData),
+ ok = ?FILE_MODULE:close(Fd),
- % Test that when using async thread pool, the emulator doesn't crash
- % when the efile port driver is stopped while a compressed file operation
- % is in progress (being carried by an async thread).
- ?line ok = compress_async_crash_loop(10000, Path, ExpectedData),
- ?line ok = ?FILE_MODULE:delete(Path),
+ %% Test that when using async thread pool, the emulator doesn't crash
+ %% when the efile port driver is stopped while a compressed file operation
+ %% is in progress (being carried by an async thread).
+ ok = compress_async_crash_loop(10000, Path, ExpectedData),
+ ok = ?FILE_MODULE:delete(Path),
ok.
compress_async_crash_loop(0, _Path, _ExpectedData) ->
@@ -2756,35 +2696,35 @@ compress_async_crash_loop(0, _Path, _ExpectedData) ->
compress_async_crash_loop(N, Path, ExpectedData) ->
Parent = self(),
{Pid, Ref} = spawn_monitor(
- fun() ->
- ?line {ok, Fd} = ?FILE_MODULE:open(
- Path, [read, compressed, raw, binary]),
- Len = byte_size(ExpectedData),
- Parent ! {self(), continue},
- ?line {ok, ExpectedData} = ?FILE_MODULE:read(Fd, Len),
- ?line ok = ?FILE_MODULE:close(Fd),
- receive foobar -> ok end
- end),
+ fun() ->
+ {ok, Fd} = ?FILE_MODULE:open(
+ Path, [read, compressed, raw, binary]),
+ Len = byte_size(ExpectedData),
+ Parent ! {self(), continue},
+ {ok, ExpectedData} = ?FILE_MODULE:read(Fd, Len),
+ ok = ?FILE_MODULE:close(Fd),
+ receive foobar -> ok end
+ end),
receive
{Pid, continue} ->
exit(Pid, shutdown),
receive
{'DOWN', Ref, _, _, Reason} ->
- ?line shutdown = Reason
+ shutdown = Reason
end;
{'DOWN', Ref, _, _, Reason2} ->
- test_server:fail({worker_exited, Reason2})
+ ct:fail({worker_exited, Reason2})
after 60000 ->
exit(Pid, shutdown),
erlang:demonitor(Ref, [flush]),
- test_server:fail(worker_timeout)
+ ct:fail(worker_timeout)
end,
compress_async_crash_loop(N - 1, Path, ExpectedData).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
unicode(Config) when is_list(Config) ->
- Dir = ?config(priv_dir, Config),
+ Dir = proplists:get_value(priv_dir, Config),
Name = filename:join(Dir, "data-utf8.txt"),
Txt = lists:seq(128, 255),
D = unicode:characters_to_binary(Txt, latin1, latin1),
@@ -2817,53 +2757,46 @@ unicode(Config) when is_list(Config) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-altname(doc) ->
- "Test the file:altname/1 function";
-altname(suite) ->
- [];
+%% Test the file:altname/1 function.
altname(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- "long alternative path name with spaces"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
- ?line Name = filename:join(NewDir, "a_file_with_long_name"),
- ?line ShortName = filename:join(NewDir, "short"),
- ?line NonexName = filename:join(NewDir, "nonexistent"),
- ?line ok = ?FILE_MODULE:write_file(Name, "some contents\n"),
- ?line ok = ?FILE_MODULE:write_file(ShortName, "some contents\n"),
- ?line Result =
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ "long alternative path name with spaces"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
+ Name = filename:join(NewDir, "a_file_with_long_name"),
+ ShortName = filename:join(NewDir, "short"),
+ NonexName = filename:join(NewDir, "nonexistent"),
+ ok = ?FILE_MODULE:write_file(Name, "some contents\n"),
+ ok = ?FILE_MODULE:write_file(ShortName, "some contents\n"),
+ Result =
case ?FILE_MODULE:altname(NewDir) of
{error, enotsup} ->
{skipped, "Altname not supported on this platform"};
{ok, "LONGAL~1"} ->
- ?line {ok, "A_FILE~1"} = ?FILE_MODULE:altname(Name),
- ?line {ok, "C:/"} = ?FILE_MODULE:altname("C:/"),
- ?line {ok, "C:\\"} = ?FILE_MODULE:altname("C:\\"),
- ?line {error,enoent} = ?FILE_MODULE:altname(NonexName),
- ?line {ok, "short"} = ?FILE_MODULE:altname(ShortName),
+ {ok, "A_FILE~1"} = ?FILE_MODULE:altname(Name),
+ {ok, "c:/"} = ?FILE_MODULE:altname("C:/"),
+ {ok, "c:/"} = ?FILE_MODULE:altname("C:\\"),
+ {error,enoent} = ?FILE_MODULE:altname(NonexName),
+ {ok, "short"} = ?FILE_MODULE:altname(ShortName),
ok
end,
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ [] = flush(),
Result.
-make_link(doc) -> "Test creating a hard link.";
-make_link(suite) -> [];
+%% Test creating a hard link.
make_link(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_make_link"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
-
- ?line Name = filename:join(NewDir, "a_file"),
- ?line ok = ?FILE_MODULE:write_file(Name, "some contents\n"),
-
- ?line Alias = filename:join(NewDir, "an_alias"),
- ?line Result =
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_make_link"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
+
+ Name = filename:join(NewDir, "a_file"),
+ ok = ?FILE_MODULE:write_file(Name, "some contents\n"),
+
+ Alias = filename:join(NewDir, "an_alias"),
+ Result =
case ?FILE_MODULE:make_link(Name, Alias) of
{error, enotsup} ->
{skipped, "Links not supported on this platform"};
@@ -2873,53 +2806,45 @@ make_link(Config) when is_list(Config) ->
%% which should behave exactly as
%% ?FILE_MODULE:read_file_info/1
%% since they are not used on symbolic links.
-
- ?line {ok, Info} = ?FILE_MODULE:read_link_info(Name),
+
+ {ok, Info} = ?FILE_MODULE:read_link_info(Name),
{ok,Info} = ?FILE_MODULE:read_link_info(Name, [raw]),
- ?line {ok, Info} = ?FILE_MODULE:read_link_info(Alias),
+ {ok, Info} = ?FILE_MODULE:read_link_info(Alias),
{ok,Info} = ?FILE_MODULE:read_link_info(Alias, [raw]),
- ?line #file_info{links = 2, type = regular} = Info,
- ?line {error, eexist} =
+ #file_info{links = 2, type = regular} = Info,
+ {error, eexist} =
?FILE_MODULE:make_link(Name, Alias),
ok
end,
-
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+
+ [] = flush(),
Result.
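
%% Illustrative sketch (editor's note, not part of the patch): a successful
%% file:make_link/2 leaves both names referring to the same file, so
%% read_link_info/1 returns identical info (with a link count of 2) for
%% either name. Paths are hypothetical; enotsup is returned on file systems
%% without hard links.
make_link_sketch() ->
    ok = file:write_file("/tmp/sketch_orig", "x"),
    case file:make_link("/tmp/sketch_orig", "/tmp/sketch_alias") of
        ok ->
            {ok, Info} = file:read_link_info("/tmp/sketch_orig"),
            {ok, Info} = file:read_link_info("/tmp/sketch_alias"),
            ok;
        {error, enotsup} ->
            ok
    end.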
-read_link_info_for_non_link(doc) ->
- "Test that reading link info for an ordinary file or directory works "
- "(on all platforms).";
-read_link_info_for_non_link(suite) -> [];
+%% Test that reading link info for an ordinary file or directory works
+%% (on all platforms).
read_link_info_for_non_link(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
-
- ?line {ok, #file_info{type=directory}} =
+ {ok, #file_info{type=directory}} =
?FILE_MODULE:read_link_info("."),
{ok, #file_info{type=directory}} = ?FILE_MODULE:read_link_info(".", [raw]),
-
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+
+ [] = flush(),
ok.
-symlinks(doc) -> "Test operations on symbolic links (for Unix).";
-symlinks(suite) -> [];
+%% Test operations on symbolic links (for Unix).
symlinks(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line {error, _} = ?FILE_MODULE:read_link(lists:duplicate(10000,$a)),
+ {error, _} = ?FILE_MODULE:read_link(lists:duplicate(10000,$a)),
{error, _} = ?FILE_MODULE:read_link_all(lists:duplicate(10000,$a)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_symlinks"),
- ?line ok = ?FILE_MODULE:make_dir(NewDir),
-
- ?line Name = filename:join(NewDir, "a_plain_file"),
- ?line ok = ?FILE_MODULE:write_file(Name, "some stupid content\n"),
-
- ?line Alias = filename:join(NewDir, "a_symlink_alias"),
- ?line Result =
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_symlinks"),
+ ok = ?FILE_MODULE:make_dir(NewDir),
+
+ Name = filename:join(NewDir, "a_plain_file"),
+ ok = ?FILE_MODULE:write_file(Name, "some stupid content\n"),
+
+ Alias = filename:join(NewDir, "a_symlink_alias"),
+ Result =
case ?FILE_MODULE:make_symlink(Name, Alias) of
{error, enotsup} ->
{skipped, "Links not supported on this platform"};
@@ -2927,41 +2852,37 @@ symlinks(Config) when is_list(Config) ->
{win32,_} = os:type(),
{skipped, "Windows user not privileged to create symlinks"};
ok ->
- ?line {ok, Info1} = ?FILE_MODULE:read_file_info(Name),
+ {ok, Info1} = ?FILE_MODULE:read_file_info(Name),
{ok,Info1} = ?FILE_MODULE:read_file_info(Name, [raw]),
- ?line {ok, Info1} = ?FILE_MODULE:read_file_info(Alias),
+ {ok, Info1} = ?FILE_MODULE:read_file_info(Alias),
{ok,Info1} = ?FILE_MODULE:read_file_info(Alias, [raw]),
- ?line {ok, Info1} = ?FILE_MODULE:read_link_info(Name),
+ {ok, Info1} = ?FILE_MODULE:read_link_info(Name),
{ok,Info1} = ?FILE_MODULE:read_link_info(Name, [raw]),
- ?line #file_info{links = 1, type = regular} = Info1,
-
- ?line {ok, Info2} = ?FILE_MODULE:read_link_info(Alias),
+ #file_info{links = 1, type = regular} = Info1,
+
+ {ok, Info2} = ?FILE_MODULE:read_link_info(Alias),
{ok,Info2} = ?FILE_MODULE:read_link_info(Alias, [raw]),
- ?line #file_info{links=1, type=symlink} = Info2,
- ?line {ok, Name} = ?FILE_MODULE:read_link(Alias),
+ #file_info{links=1, type=symlink} = Info2,
+ {ok, Name} = ?FILE_MODULE:read_link(Alias),
{ok, Name} = ?FILE_MODULE:read_link_all(Alias),
%% If all is good, delete dir again (avoid hanging dir on windows)
rm_rf(?FILE_MODULE,NewDir),
ok
- end,
-
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ end,
+
+ [] = flush(),
Result.
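
%% Illustrative sketch (editor's note, not part of the patch): for a
%% symbolic link, read_file_info/1 follows the link while read_link_info/1
%% describes the link itself, and read_link/1 returns the target name.
%% Paths are hypothetical; enotsup/eperm mean symlinks are unavailable or
%% the user lacks the privilege, as handled above.
symlink_sketch() ->
    ok = file:write_file("/tmp/sketch_target", "x"),
    case file:make_symlink("/tmp/sketch_target", "/tmp/sketch_link") of
        ok ->
            {ok, Info} = file:read_file_info("/tmp/sketch_target"),
            {ok, Info} = file:read_file_info("/tmp/sketch_link"),
            {ok, "/tmp/sketch_target"} = file:read_link("/tmp/sketch_link"),
            ok;
        {error, Reason} when Reason =:= enotsup; Reason =:= eperm ->
            ok
    end.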
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-copy(doc) -> [];
-copy(suite) -> [];
copy(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
+ RootDir = proplists:get_value(priv_dir, Config),
%% Create a text file.
- ?line Name1 = filename:join(RootDir, atom_to_list(?MODULE)++"_copy_1.txt"),
- ?line Line = "The quick brown fox jumps over a lazy dog. 0123456789\n",
- ?line Len = length(Line),
- ?line {ok, Handle1} = ?FILE_MODULE:open(Name1, [write]),
- ?line {_, Size1} =
+ Name1 = filename:join(RootDir, atom_to_list(?MODULE)++"_copy_1.txt"),
+ Line = "The quick brown fox jumps over a lazy dog. 0123456789\n",
+ Len = length(Line),
+ {ok, Handle1} = ?FILE_MODULE:open(Name1, [write]),
+ {_, Size1} =
iterate({0, 0},
done,
fun({_, S}) when S >= 128*1024 ->
@@ -2971,45 +2892,44 @@ copy(Config) when is_list(Config) ->
ok = ?FILE_MODULE:write(Handle1, [H, " ", Line]),
{N + 1, S + length(H) + 1 + Len}
end),
- ?line ?FILE_MODULE:close(Handle1),
+ ?FILE_MODULE:close(Handle1),
%% Make a copy
- ?line Name2 = filename:join(RootDir, atom_to_list(?MODULE)++"_copy_2.txt"),
- ?line {ok, Size1} = ?FILE_MODULE:copy(Name1, Name2),
+ Name2 = filename:join(RootDir, atom_to_list(?MODULE)++"_copy_2.txt"),
+ {ok, Size1} = ?FILE_MODULE:copy(Name1, Name2),
%% Concatenate 1
- ?line Name3 = filename:join(RootDir, atom_to_list(?MODULE)++"_copy_3.txt"),
- ?line {ok, Handle3} = ?FILE_MODULE:open(Name3, [raw, write, binary]),
- ?line {ok, Size1} = ?FILE_MODULE:copy(Name1, Handle3),
- ?line {ok, Handle2} = ?FILE_MODULE:open(Name2, [read, binary]),
- ?line {ok, Size1} = ?FILE_MODULE:copy(Handle2, Handle3),
- ?line ok = ?FILE_MODULE:close(Handle3),
- ?line ok = ?FILE_MODULE:close(Handle2),
+ Name3 = filename:join(RootDir, atom_to_list(?MODULE)++"_copy_3.txt"),
+ {ok, Handle3} = ?FILE_MODULE:open(Name3, [raw, write, binary]),
+ {ok, Size1} = ?FILE_MODULE:copy(Name1, Handle3),
+ {ok, Handle2} = ?FILE_MODULE:open(Name2, [read, binary]),
+ {ok, Size1} = ?FILE_MODULE:copy(Handle2, Handle3),
+ ok = ?FILE_MODULE:close(Handle3),
+ ok = ?FILE_MODULE:close(Handle2),
%% Concatenate 2
- ?line Name4 = filename:join(RootDir, atom_to_list(?MODULE)++"_copy_4.txt"),
- ?line {ok, Handle4} = ?FILE_MODULE:open(Name4, [write, binary]),
- ?line {ok, Size1} = ?FILE_MODULE:copy(Name1, Handle4),
- ?line {ok, Handle5} = ?FILE_MODULE:open(Name2, [raw, read, binary]),
- ?line {ok, Size1} = ?FILE_MODULE:copy(Handle5, Handle4),
- ?line ok = ?FILE_MODULE:close(Handle5),
- ?line ok = ?FILE_MODULE:close(Handle4),
+ Name4 = filename:join(RootDir, atom_to_list(?MODULE)++"_copy_4.txt"),
+ {ok, Handle4} = ?FILE_MODULE:open(Name4, [write, binary]),
+ {ok, Size1} = ?FILE_MODULE:copy(Name1, Handle4),
+ {ok, Handle5} = ?FILE_MODULE:open(Name2, [raw, read, binary]),
+ {ok, Size1} = ?FILE_MODULE:copy(Handle5, Handle4),
+ ok = ?FILE_MODULE:close(Handle5),
+ ok = ?FILE_MODULE:close(Handle4),
%% %% Just for test of the test
- %% ?line {ok, Handle2q} = ?FILE_MODULE:open(Name2, [write, append]),
- %% ?line ok = ?FILE_MODULE:write(Handle2q, "q"),
- %% ?line ok = ?FILE_MODULE:close(Handle2q),
+ %% {ok, Handle2q} = ?FILE_MODULE:open(Name2, [write, append]),
+ %% ok = ?FILE_MODULE:write(Handle2q, "q"),
+ %% ok = ?FILE_MODULE:close(Handle2q),
%% Compare the files
- ?line {ok, Handle1a} = ?FILE_MODULE:open(Name1, [raw, read]),
- ?line {ok, Handle2a} = ?FILE_MODULE:open(Name2, [raw, read]),
- ?line true = stream_cmp(fd_stream_factory([Handle1a]),
- fd_stream_factory([Handle2a])),
- ?line {ok, 0} = ?FILE_MODULE:position(Handle1a, 0),
- ?line {ok, 0} = ?FILE_MODULE:position(Handle2a, 0),
- ?line {ok, Handle3a} = ?FILE_MODULE:open(Name3, [raw, read]),
- ?line true = stream_cmp(fd_stream_factory([Handle1a, Handle2a]),
- fd_stream_factory([Handle2a])),
- ?line ok = ?FILE_MODULE:close(Handle1a),
- ?line ok = ?FILE_MODULE:close(Handle2a),
- ?line ok = ?FILE_MODULE:close(Handle3a),
- ?line [] = flush(),
- ?line test_server:timetrap_cancel(Dog),
+ {ok, Handle1a} = ?FILE_MODULE:open(Name1, [raw, read]),
+ {ok, Handle2a} = ?FILE_MODULE:open(Name2, [raw, read]),
+ true = stream_cmp(fd_stream_factory([Handle1a]),
+ fd_stream_factory([Handle2a])),
+ {ok, 0} = ?FILE_MODULE:position(Handle1a, 0),
+ {ok, 0} = ?FILE_MODULE:position(Handle2a, 0),
+ {ok, Handle3a} = ?FILE_MODULE:open(Name3, [raw, read]),
+ true = stream_cmp(fd_stream_factory([Handle1a, Handle2a]),
+ fd_stream_factory([Handle2a])),
+ ok = ?FILE_MODULE:close(Handle1a),
+ ok = ?FILE_MODULE:close(Handle2a),
+ ok = ?FILE_MODULE:close(Handle3a),
+ [] = flush(),
ok.
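
%% Illustrative sketch (editor's note, not part of the patch): file:copy/2
%% accepts either file names or opened handles (mixed freely, as above) and
%% returns the number of bytes copied. Paths are hypothetical.
copy_sketch() ->
    ok = file:write_file("/tmp/sketch_src", <<"0123456789">>),
    {ok, 10} = file:copy("/tmp/sketch_src", "/tmp/sketch_dst"),
    {ok, Src} = file:open("/tmp/sketch_src", [read, binary]),
    {ok, Dst} = file:open("/tmp/sketch_cat", [write, binary]),
    {ok, 10} = file:copy(Src, Dst),     % handle-to-handle copy
    ok = file:close(Src),
    ok = file:close(Dst).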
@@ -3030,7 +2950,7 @@ fd_stream_factory([Fd | T] = L) ->
end
end.
-
+
stream_cmp(F1, F2) when is_function(F1), is_function(F2) ->
stream_cmp(F1(), F2());
@@ -3055,80 +2975,75 @@ stream_cmp([H | T1], [H | T2]) ->
%% Test the get_cwd(), open(), and copy() file server calls.
new_slave(_RootDir, Cwd) ->
- ?line L = "qwertyuiopasdfghjklzxcvbnm",
- ?line N = length(L),
- ?line {ok, Cwd} = ?FILE_MODULE:get_cwd(),
- ?line {error, enotsup} = ?FILE_MODULE:get_cwd("C:"), % Unix only testcase
- ?line {ok, FD1} = ?FILE_MODULE:open("file1.txt", write),
- ?line ok = ?FILE_MODULE:close(FD1),
- ?line {ok, FD2} = ?FILE_MODULE:open("file1.txt",
- [write, append,
- binary, compressed,
- delayed_write,
- {delayed_write, 0, 0},
- read_ahead,
- {read_ahead, 0}]),
- ?line ok = ?FILE_MODULE:write(FD2, L),
- ?line ok = ?FILE_MODULE:close(FD2),
- ?line {ok, N2} = ?FILE_MODULE:copy("file1.txt", "file2.txt"),
- ?line io:format("Size ~p, compressed ~p.~n", [N, N2]),
- ?line {ok, FD3} = ?FILE_MODULE:open("file2.txt",
- [binary, compressed]),
+ L = "qwertyuiopasdfghjklzxcvbnm",
+ N = length(L),
+ {ok, Cwd} = ?FILE_MODULE:get_cwd(),
+ {error, enotsup} = ?FILE_MODULE:get_cwd("C:"), % Unix only testcase
+ {ok, FD1} = ?FILE_MODULE:open("file1.txt", write),
+ ok = ?FILE_MODULE:close(FD1),
+ {ok, FD2} = ?FILE_MODULE:open("file1.txt",
+ [write, append,
+ binary, compressed,
+ delayed_write,
+ {delayed_write, 0, 0},
+ read_ahead,
+ {read_ahead, 0}]),
+ ok = ?FILE_MODULE:write(FD2, L),
+ ok = ?FILE_MODULE:close(FD2),
+ {ok, N2} = ?FILE_MODULE:copy("file1.txt", "file2.txt"),
+ io:format("Size ~p, compressed ~p.~n", [N, N2]),
+ {ok, FD3} = ?FILE_MODULE:open("file2.txt",
+ [binary, compressed]),
%% The file_io_server will translate the binary into a list
- ?line {ok, L} = ?FILE_MODULE:read(FD3, N+1),
- ?line ok = ?FILE_MODULE:close(FD3),
+ {ok, L} = ?FILE_MODULE:read(FD3, N+1),
+ ok = ?FILE_MODULE:close(FD3),
%%
- ?line ok = ?FILE_MODULE:delete("file1.txt"),
- ?line ok = ?FILE_MODULE:delete("file2.txt"),
- ?line [] = flush(),
+ ok = ?FILE_MODULE:delete("file1.txt"),
+ ok = ?FILE_MODULE:delete("file2.txt"),
+ [] = flush(),
ok.
%% Test the get_cwd() and open() file server calls.
old_slave(_RootDir, Cwd) ->
- ?line L = "qwertyuiopasdfghjklzxcvbnm",
- ?line N = length(L),
- ?line {ok, Cwd} = ?FILE_MODULE:get_cwd(),
- ?line {error, enotsup} = ?FILE_MODULE:get_cwd("C:"), % Unix only testcase
- ?line {ok, FD1} = ?FILE_MODULE:open("file1.txt", write),
- ?line ok = ?FILE_MODULE:close(FD1),
- ?line {ok, FD2} = ?FILE_MODULE:open("file1.txt",
- [write, binary, compressed]),
- ?line ok = ?FILE_MODULE:write(FD2, L),
- ?line ok = ?FILE_MODULE:close(FD2),
- ?line {ok, FD3} = ?FILE_MODULE:open("file1.txt", [write, append]),
- ?line ok = ?FILE_MODULE:close(FD3),
- ?line {ok, FD4} = ?FILE_MODULE:open("file1.txt",
- [binary, compressed]),
+ L = "qwertyuiopasdfghjklzxcvbnm",
+ N = length(L),
+ {ok, Cwd} = ?FILE_MODULE:get_cwd(),
+ {error, enotsup} = ?FILE_MODULE:get_cwd("C:"), % Unix only testcase
+ {ok, FD1} = ?FILE_MODULE:open("file1.txt", write),
+ ok = ?FILE_MODULE:close(FD1),
+ {ok, FD2} = ?FILE_MODULE:open("file1.txt",
+ [write, binary, compressed]),
+ ok = ?FILE_MODULE:write(FD2, L),
+ ok = ?FILE_MODULE:close(FD2),
+ {ok, FD3} = ?FILE_MODULE:open("file1.txt", [write, append]),
+ ok = ?FILE_MODULE:close(FD3),
+ {ok, FD4} = ?FILE_MODULE:open("file1.txt",
+ [binary, compressed]),
%% The file_io_server will translate the binary into a list
- ?line {ok, L} = ?FILE_MODULE:read(FD4, N+1),
- ?line ok = ?FILE_MODULE:close(FD4),
+ {ok, L} = ?FILE_MODULE:read(FD4, N+1),
+ ok = ?FILE_MODULE:close(FD4),
%%
- ?line ok = ?FILE_MODULE:delete("file1.txt"),
- ?line [] = flush(),
+ ok = ?FILE_MODULE:delete("file1.txt"),
+ [] = flush(),
ok.
run_test(Test, Args) ->
- ?line case (catch apply(?MODULE, Test, Args)) of
- {'EXIT', _} = Exit ->
- {done, Exit, get(test_server_loc)};
- Result ->
- {done, Result}
- end.
+ case (catch apply(?MODULE, Test, Args)) of
+ {'EXIT', _} = Exit ->
+ {done, Exit, get(test_server_loc)};
+ Result ->
+ {done, Result}
+ end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-delayed_write(suite) ->
- [];
-delayed_write(doc) ->
- ["Tests the file open option {delayed_write, Size, Delay}"];
+%% Tests the file open option {delayed_write, Size, Delay}.
delayed_write(Config) when is_list(Config) ->
- Dog = ?t:timetrap(?t:seconds(20)),
-
- RootDir = ?config(priv_dir, Config),
+ RootDir = proplists:get_value(priv_dir, Config),
File = filename:join(RootDir,
- atom_to_list(?MODULE)++"_delayed_write.txt"),
+ atom_to_list(?MODULE)++"_delayed_write.txt"),
Data1 = "asdfghjkl",
Data2 = "qwertyuio",
Data3 = "zxcvbnm,.",
@@ -3141,20 +3056,22 @@ delayed_write(Config) when is_list(Config) ->
%%
%% Test caching and normal close of non-raw file
{ok, Fd1} =
- ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 2000}]),
+ ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 400}]),
ok = ?FILE_MODULE:write(Fd1, Data1),
- ?t:sleep(1000), % Just in case the file system is slow
+ %% Wait for a reasonable amount of time to check whether the write was
+ %% practically instantaneous or actually delayed.
+ timer:sleep(100),
{ok, Fd2} = ?FILE_MODULE:open(File, [read]),
eof = ?FILE_MODULE:read(Fd2, 1),
ok = ?FILE_MODULE:write(Fd1, Data1), % Data flush on size
- ?t:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100),
{ok, Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 2*Size+1),
ok = ?FILE_MODULE:write(Fd1, Data1),
- ?t:sleep(3000), % Wait until data flush on timeout
+ timer:sleep(500), % Wait until data flush on timeout
{ok, Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 3*Size+1),
ok = ?FILE_MODULE:write(Fd1, Data1),
ok = ?FILE_MODULE:close(Fd1), % Data flush on close
- ?t:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100),
{ok, Data1Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 4*Size+1),
ok = ?FILE_MODULE:close(Fd2),
%%
@@ -3162,33 +3079,33 @@ delayed_write(Config) when is_list(Config) ->
%% raw file, default parameters.
Parent = self(),
Fun = fun() ->
- Child = self(),
- Test =
- fun () ->
- {ok, Fd} = ?FILE_MODULE:open(File,
- [raw, write, delayed_write]),
- ok = ?FILE_MODULE:write(Fd, Data1),
- Parent ! {Child, wrote},
- receive
- {Parent, continue, Reason} ->
- {ok, Reason}
- end
- end,
- case (catch Test()) of
- {ok, Reason} -> exit(Reason);
- Unknown ->
- exit({Unknown, get(test_server_loc)})
- end
- end,
+ Child = self(),
+ Test =
+ fun () ->
+ {ok, Fd} = ?FILE_MODULE:open(File,
+ [raw, write, delayed_write]),
+ ok = ?FILE_MODULE:write(Fd, Data1),
+ Parent ! {Child, wrote},
+ receive
+ {Parent, continue, Reason} ->
+ {ok, Reason}
+ end
+ end,
+ case (catch Test()) of
+ {ok, Reason} -> exit(Reason);
+ Unknown ->
+ exit({Unknown, get(test_server_loc)})
+ end
+ end,
Child1 = spawn(Fun),
Mref1 = erlang:monitor(process, Child1),
receive
{Child1, wrote} ->
ok;
{'DOWN', Mref1, _, _, _} = Down1a ->
- ?t:fail(Down1a)
+ ct:fail(Down1a)
end,
- ?t:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
{ok, Fd3} = ?FILE_MODULE:open(File, [read]),
eof = ?FILE_MODULE:read(Fd3, 1),
Child1 ! {Parent, continue, normal},
@@ -3196,9 +3113,9 @@ delayed_write(Config) when is_list(Config) ->
{'DOWN', Mref1, process, Child1, normal} ->
ok;
{'DOWN', Mref1, _, _, _} = Down1b ->
- ?t:fail(Down1b)
+ ct:fail(Down1b)
end,
- ?t:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
{ok, Data1} = ?FILE_MODULE:pread(Fd3, bof, Size+1),
ok = ?FILE_MODULE:close(Fd3),
%%
@@ -3209,9 +3126,9 @@ delayed_write(Config) when is_list(Config) ->
{Child2, wrote} ->
ok;
{'DOWN', Mref2, _, _, _} = Down2a ->
- ?t:fail(Down2a)
+ ct:fail(Down2a)
end,
- ?t:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
{ok, Fd4} = ?FILE_MODULE:open(File, [read]),
eof = ?FILE_MODULE:read(Fd4, 1),
Child2 ! {Parent, continue, kill},
@@ -3219,15 +3136,15 @@ delayed_write(Config) when is_list(Config) ->
{'DOWN', Mref2, process, Child2, kill} ->
ok;
{'DOWN', Mref2, _, _, _} = Down2b ->
- ?t:fail(Down2b)
+ ct:fail(Down2b)
end,
- ?t:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
eof = ?FILE_MODULE:pread(Fd4, bof, 1),
ok = ?FILE_MODULE:close(Fd4),
%%
%% Test if file position works with delayed_write
{ok, Fd5} = ?FILE_MODULE:open(File, [raw, read, write,
- delayed_write]),
+ delayed_write]),
ok = ?FILE_MODULE:truncate(Fd5),
ok = ?FILE_MODULE:write(Fd5, [Data1|Data2]),
{ok, 0} = ?FILE_MODULE:position(Fd5, bof),
@@ -3239,93 +3156,92 @@ delayed_write(Config) when is_list(Config) ->
ok = ?FILE_MODULE:close(Fd5),
%%
[] = flush(),
- ?t:timetrap_cancel(Dog),
ok.
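
%% Illustrative sketch (editor's note, not part of the patch): with
%% {delayed_write, Size, Delay}, writes are buffered until Size bytes have
%% accumulated, Delay milliseconds have passed, or the file is closed,
%% which is what the timing assertions above depend on. The path and
%% parameters are hypothetical.
delayed_write_sketch() ->
    {ok, Fd} = file:open("/tmp/sketch_delayed.txt",
                         [raw, write, {delayed_write, 64*1024, 2000}]),
    ok = file:write(Fd, <<"buffered until size, timeout or close">>),
    ok = file:close(Fd),                % close flushes the buffer
    ok.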
-pid2name(doc) -> "Tests file:pid2name/1.";
-pid2name(suite) -> [];
+%% Tests file:pid2name/1.
pid2name(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line Base = test_server:temp_name(
- filename:join(RootDir, "pid2name_")),
- ?line Name1 = [Base, '.txt'],
- ?line Name2 = Base ++ ".txt",
+ RootDir = proplists:get_value(priv_dir, Config),
+ Base = test_server:temp_name(
+ filename:join(RootDir, "pid2name_")),
+ Name1 = [Base, '.txt'],
+ Name2 = Base ++ ".txt",
%%
- ?line {ok, Pid} = file:open(Name1, [write]),
- ?line {ok, Name2} = file:pid2name(Pid),
- ?line undefined = file:pid2name(self()),
- ?line ok = file:close(Pid),
- ?line test_server:sleep(1000),
- ?line false = is_process_alive(Pid),
- ?line undefined = file:pid2name(Pid),
- %%
- ?line test_server:timetrap_cancel(Dog),
+ {ok, Pid} = file:open(Name1, [write]),
+ {ok, Name2} = file:pid2name(Pid),
+ undefined = file:pid2name(self()),
+ ok = file:close(Pid),
+ ct:sleep(1000),
+ false = is_process_alive(Pid),
+ undefined = file:pid2name(Pid),
ok.
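
%% Illustrative sketch (editor's note, not part of the patch):
%% file:pid2name/1 maps an io-server pid returned by file:open/2 (without
%% the raw option) back to the flattened file name, and returns undefined
%% for any other pid. The path is hypothetical.
pid2name_sketch() ->
    {ok, Pid} = file:open("/tmp/sketch_p2n.txt", [write]),
    {ok, "/tmp/sketch_p2n.txt"} = file:pid2name(Pid),
    undefined = file:pid2name(self()),
    ok = file:close(Pid).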
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-read_ahead(suite) ->
- [];
-read_ahead(doc) ->
- ["Tests the file open option {read_ahead, Size}"];
+%% Tests the file open option {read_ahead, Size}.
read_ahead(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(20)),
- %%
- ?line RootDir = ?config(priv_dir, Config),
- ?line File = filename:join(RootDir,
- atom_to_list(?MODULE)++"_read_ahead.txt"),
- ?line Data1 = "asdfghjkl", % Must be
- ?line Data2 = "qwertyuio", % same
- ?line Data3 = "zxcvbnm,.", % length
- ?line Size = length(Data1),
- ?line Size = length(Data2),
- ?line Size = length(Data3),
+ RootDir = proplists:get_value(priv_dir, Config),
+ File = filename:join(RootDir,
+ atom_to_list(?MODULE)++"_read_ahead.txt"),
+ Data1 = "asdfghjkl", % Must be
+ Data2 = "qwertyuio", % same
+ Data3 = "zxcvbnm,.", % length
+ Size = length(Data1),
+ Size = length(Data2),
+ Size = length(Data3),
%%
%% Test caching of normal non-raw file
- ?line {ok, Fd1} = ?FILE_MODULE:open(File, [write]),
- ?line ok = ?FILE_MODULE:write(Fd1, [Data1|Data1]),
- ?line ?t:sleep(1000), % Just in case the file system is slow
- ?line {ok, Fd2} = ?FILE_MODULE:open(File, [read, {read_ahead, 2*Size}]),
- ?line {ok, Data1} = ?FILE_MODULE:read(Fd2, Size),
- ?line ok = ?FILE_MODULE:pwrite(Fd1, Size, Data2),
- ?line ?t:sleep(1000), % Just in case the file system is slow
- ?line {ok, Data1} = ?FILE_MODULE:read(Fd2, Size), % Will read cached data
- ?line Data2Data2Data2 = Data2++Data2++Data2,
- ?line ok = ?FILE_MODULE:pwrite(Fd1, eof, Data2Data2Data2),
- ?line ?t:sleep(1000), % Just in case the file system is slow
- ?line {ok, Data2Data2Data2} =
+ {ok, Fd1} = ?FILE_MODULE:open(File, [write]),
+ ok = ?FILE_MODULE:write(Fd1, [Data1|Data1]),
+ timer:sleep(1000), % Just in case the file system is slow
+ {ok, Fd2} = ?FILE_MODULE:open(File, [read, {read_ahead, 2*Size}]),
+ {ok, Data1} = ?FILE_MODULE:read(Fd2, Size),
+ ok = ?FILE_MODULE:pwrite(Fd1, Size, Data2),
+ timer:sleep(1000), % Just in case the file system is slow
+ {ok, Data1} = ?FILE_MODULE:read(Fd2, Size), % Will read cached data
+ Data2Data2Data2 = Data2++Data2++Data2,
+ ok = ?FILE_MODULE:pwrite(Fd1, eof, Data2Data2Data2),
+ timer:sleep(1000), % Just in case the file system is slow
+ {ok, Data2Data2Data2} =
?FILE_MODULE:read(Fd2, 3*Size), % Read more than cache buffer
- ?line ok = ?FILE_MODULE:close(Fd1),
- ?line ok = ?FILE_MODULE:close(Fd2),
+ ok = ?FILE_MODULE:close(Fd1),
+ ok = ?FILE_MODULE:close(Fd2),
%% Test caching of raw file and default parameters
- ?line {ok, Fd3} = ?FILE_MODULE:open(File, [raw, write]),
- ?line ok = ?FILE_MODULE:write(Fd3, [Data1|Data1]),
- ?line ?t:sleep(1000), % Just in case the file system is slow
- ?line {ok, Fd4} = ?FILE_MODULE:open(File, [raw, read, read_ahead]),
- ?line {ok, Data1} = ?FILE_MODULE:read(Fd4, Size),
- ?line ok = ?FILE_MODULE:pwrite(Fd3, Size, Data2),
- ?line ?t:sleep(1000), % Just in case the file system is slow
- ?line {ok, Data1} = ?FILE_MODULE:read(Fd4, Size), % Will read cached data
- ?line ok = ?FILE_MODULE:close(Fd3),
- ?line ok = ?FILE_MODULE:close(Fd4),
+ {ok, Fd3} = ?FILE_MODULE:open(File, [raw, write]),
+ ok = ?FILE_MODULE:write(Fd3, [Data1|Data1]),
+ timer:sleep(1000), % Just in case the file system is slow
+ {ok, Fd4} = ?FILE_MODULE:open(File, [raw, read, read_ahead]),
+ {ok, Data1} = ?FILE_MODULE:read(Fd4, Size),
+ ok = ?FILE_MODULE:pwrite(Fd3, Size, Data2),
+ timer:sleep(1000), % Just in case the file system is slow
+ {ok, Data1} = ?FILE_MODULE:read(Fd4, Size), % Will read cached data
+ ok = ?FILE_MODULE:close(Fd3),
+ ok = ?FILE_MODULE:close(Fd4),
%% Test if the file position works in combination with read_ahead
- ?line {ok, Fd5} = ?FILE_MODULE:open(File, [raw, read, write, read_ahead]),
- ?line ok = ?FILE_MODULE:truncate(Fd5),
- ?line ok = ?FILE_MODULE:write(Fd5, [Data1,Data1|Data3]),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd5, bof),
- ?line {ok, Data1} = ?FILE_MODULE:read(Fd5, Size),
- ?line ok = ?FILE_MODULE:write(Fd5, Data2),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd5, bof),
- ?line Data1Data2Data3 = Data1++Data2++Data3,
- ?line {ok, Data1Data2Data3} = ?FILE_MODULE:read(Fd5, 3*Size+1),
- ?line ok = ?FILE_MODULE:close(Fd5),
+ {ok, Fd5} = ?FILE_MODULE:open(File, [raw, read, write, read_ahead]),
+ ok = ?FILE_MODULE:truncate(Fd5),
+ ok = ?FILE_MODULE:write(Fd5, [Data1,Data1|Data3]),
+ {ok, 0} = ?FILE_MODULE:position(Fd5, bof),
+ {ok, Data1} = ?FILE_MODULE:read(Fd5, Size),
+ ok = ?FILE_MODULE:write(Fd5, Data2),
+ {ok, 0} = ?FILE_MODULE:position(Fd5, bof),
+ Data1Data2Data3 = Data1++Data2++Data3,
+ {ok, Data1Data2Data3} = ?FILE_MODULE:read(Fd5, 3*Size+1),
+ ok = ?FILE_MODULE:close(Fd5),
+
+ %% Ensure that a read that draws from both the buffer and the file won't
+ %% return anything wonky.
+ SplitData = << <<(I rem 256)>> || I <- lists:seq(1, 1024) >>,
+ file:write_file(File, SplitData),
+ {ok, Fd6} = ?FILE_MODULE:open(File, [raw, read, binary, {read_ahead, 256}]),
+ {ok, <<1>>} = file:read(Fd6, 1),
+ <<1, Shifted:512/binary, _Rest/binary>> = SplitData,
+ {ok, Shifted} = file:read(Fd6, 512),
+
%%
- ?line [] = flush(),
- ?line ?t:timetrap_cancel(Dog),
+ [] = flush(),
ok.
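
%% Illustrative sketch (editor's note, not part of the patch): with
%% {read_ahead, Size}, reads are served from a Size-byte buffer that is
%% refilled from the file as needed, so a single read may combine buffered
%% and freshly read data, as the new test above checks. The path is
%% hypothetical.
read_ahead_sketch() ->
    ok = file:write_file("/tmp/sketch_ra.bin", binary:copy(<<0>>, 1024)),
    {ok, Fd} = file:open("/tmp/sketch_ra.bin",
                         [raw, read, binary, {read_ahead, 256}]),
    {ok, <<0>>} = file:read(Fd, 1),     % fills the 256-byte buffer
    {ok, Chunk} = file:read(Fd, 512),   % part from buffer, part from file
    512 = byte_size(Chunk),
    ok = file:close(Fd).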
@@ -3334,137 +3250,131 @@ read_ahead(Config) when is_list(Config) ->
-segment_read(suite) ->
- [];
-segment_read(doc) ->
- ["Tests the segmenting of large reads"];
+%% Tests the segmenting of large reads.
segment_read(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(60)),
- %%
- ?line Name = filename:join(?config(priv_dir, Config),
- ?MODULE_STRING ++ "_segment_read"),
- ?line SegSize = 256*1024,
- ?line SegCnt = SegSize div 4,
- ?line Cnt = 4 * SegCnt,
- ?line ok = create_file(Name, Cnt),
+ Name = filename:join(proplists:get_value(priv_dir, Config),
+ ?MODULE_STRING ++ "_segment_read"),
+ SegSize = 256*1024,
+ SegCnt = SegSize div 4,
+ Cnt = 4 * SegCnt,
+ ok = create_file(Name, Cnt),
%%
%% read_file/1
%%
- ?line {ok, Bin} = ?FILE_MODULE:read_file(Name),
- ?line true = verify_bin(Bin, 0, Cnt),
+ {ok, Bin} = ?FILE_MODULE:read_file(Name),
+ true = verify_bin(Bin, 0, Cnt),
%%
%% read/2
%%
%% Not segmented
- ?line {ok, FD1} = ?FILE_MODULE:open(Name, [read, raw, binary]),
- ?line {ok, B1a} = ?FILE_MODULE:read(FD1, SegSize),
- ?line {ok, B1b} = ?FILE_MODULE:read(FD1, SegSize),
- ?line {ok, B1c} = ?FILE_MODULE:read(FD1, SegSize),
- ?line {ok, B1d} = ?FILE_MODULE:read(FD1, SegSize),
- ?line ok = ?FILE_MODULE:close(FD1),
- ?line true = verify_bin(B1a, 0*SegCnt, SegCnt),
- ?line true = verify_bin(B1b, 1*SegCnt, SegCnt),
- ?line true = verify_bin(B1c, 2*SegCnt, SegCnt),
- ?line true = verify_bin(B1d, 3*SegCnt, SegCnt),
+ {ok, FD1} = ?FILE_MODULE:open(Name, [read, raw, binary]),
+ {ok, B1a} = ?FILE_MODULE:read(FD1, SegSize),
+ {ok, B1b} = ?FILE_MODULE:read(FD1, SegSize),
+ {ok, B1c} = ?FILE_MODULE:read(FD1, SegSize),
+ {ok, B1d} = ?FILE_MODULE:read(FD1, SegSize),
+ ok = ?FILE_MODULE:close(FD1),
+ true = verify_bin(B1a, 0*SegCnt, SegCnt),
+ true = verify_bin(B1b, 1*SegCnt, SegCnt),
+ true = verify_bin(B1c, 2*SegCnt, SegCnt),
+ true = verify_bin(B1d, 3*SegCnt, SegCnt),
%%
%% Segmented
- ?line {ok, FD2} = ?FILE_MODULE:open(Name, [read, raw, binary]),
- ?line {ok, B2a} = ?FILE_MODULE:read(FD2, 1*SegSize),
- ?line {ok, B2b} = ?FILE_MODULE:read(FD2, 2*SegSize),
- ?line {ok, B2c} = ?FILE_MODULE:read(FD2, 2*SegSize),
- ?line ok = ?FILE_MODULE:close(FD2),
- ?line true = verify_bin(B2a, 0*SegCnt, 1*SegCnt),
- ?line true = verify_bin(B2b, 1*SegCnt, 2*SegCnt),
- ?line true = verify_bin(B2c, 3*SegCnt, 1*SegCnt),
+ {ok, FD2} = ?FILE_MODULE:open(Name, [read, raw, binary]),
+ {ok, B2a} = ?FILE_MODULE:read(FD2, 1*SegSize),
+ {ok, B2b} = ?FILE_MODULE:read(FD2, 2*SegSize),
+ {ok, B2c} = ?FILE_MODULE:read(FD2, 2*SegSize),
+ ok = ?FILE_MODULE:close(FD2),
+ true = verify_bin(B2a, 0*SegCnt, 1*SegCnt),
+ true = verify_bin(B2b, 1*SegCnt, 2*SegCnt),
+ true = verify_bin(B2c, 3*SegCnt, 1*SegCnt),
%%
%% pread/3
%%
- ?line {ok, FD3} = ?FILE_MODULE:open(Name, [read, raw, binary]),
+ {ok, FD3} = ?FILE_MODULE:open(Name, [read, raw, binary]),
%%
%% Not segmented
- ?line {ok, B3d} = ?FILE_MODULE:pread(FD3, 3*SegSize, SegSize),
- ?line {ok, B3c} = ?FILE_MODULE:pread(FD3, 2*SegSize, SegSize),
- ?line {ok, B3b} = ?FILE_MODULE:pread(FD3, 1*SegSize, SegSize),
- ?line {ok, B3a} = ?FILE_MODULE:pread(FD3, 0*SegSize, SegSize),
- ?line true = verify_bin(B3a, 0*SegCnt, SegCnt),
- ?line true = verify_bin(B3b, 1*SegCnt, SegCnt),
- ?line true = verify_bin(B3c, 2*SegCnt, SegCnt),
- ?line true = verify_bin(B3d, 3*SegCnt, SegCnt),
+ {ok, B3d} = ?FILE_MODULE:pread(FD3, 3*SegSize, SegSize),
+ {ok, B3c} = ?FILE_MODULE:pread(FD3, 2*SegSize, SegSize),
+ {ok, B3b} = ?FILE_MODULE:pread(FD3, 1*SegSize, SegSize),
+ {ok, B3a} = ?FILE_MODULE:pread(FD3, 0*SegSize, SegSize),
+ true = verify_bin(B3a, 0*SegCnt, SegCnt),
+ true = verify_bin(B3b, 1*SegCnt, SegCnt),
+ true = verify_bin(B3c, 2*SegCnt, SegCnt),
+ true = verify_bin(B3d, 3*SegCnt, SegCnt),
%%
%% Segmented
- ?line {ok, B3g} = ?FILE_MODULE:pread(FD3, 3*SegSize, 2*SegSize),
- ?line {ok, B3f} = ?FILE_MODULE:pread(FD3, 1*SegSize, 2*SegSize),
- ?line {ok, B3e} = ?FILE_MODULE:pread(FD3, 0*SegSize, 1*SegSize),
- ?line true = verify_bin(B3e, 0*SegCnt, 1*SegCnt),
- ?line true = verify_bin(B3f, 1*SegCnt, 2*SegCnt),
- ?line true = verify_bin(B3g, 3*SegCnt, 1*SegCnt),
+ {ok, B3g} = ?FILE_MODULE:pread(FD3, 3*SegSize, 2*SegSize),
+ {ok, B3f} = ?FILE_MODULE:pread(FD3, 1*SegSize, 2*SegSize),
+ {ok, B3e} = ?FILE_MODULE:pread(FD3, 0*SegSize, 1*SegSize),
+ true = verify_bin(B3e, 0*SegCnt, 1*SegCnt),
+ true = verify_bin(B3f, 1*SegCnt, 2*SegCnt),
+ true = verify_bin(B3g, 3*SegCnt, 1*SegCnt),
%%
- ?line ok = ?FILE_MODULE:close(FD3),
+ ok = ?FILE_MODULE:close(FD3),
%%
%% pread/2
%%
- ?line {ok, FD5} = ?FILE_MODULE:open(Name, [read, raw, binary]),
+ {ok, FD5} = ?FILE_MODULE:open(Name, [read, raw, binary]),
%%
%% +---+---+---+---+
%% | 4 | 3 | 2 | 1 |
%% +---+---+---+---+
%% < ^ >
- ?line {ok, [B5d, B5c, B5b, B5a]} =
+ {ok, [B5d, B5c, B5b, B5a]} =
?FILE_MODULE:pread(FD5, [{3*SegSize, SegSize},
{2*SegSize, SegSize},
{1*SegSize, SegSize},
{0*SegSize, SegSize}]),
- ?line true = verify_bin(B5a, 0*SegCnt, SegCnt),
- ?line true = verify_bin(B5b, 1*SegCnt, SegCnt),
- ?line true = verify_bin(B5c, 2*SegCnt, SegCnt),
- ?line true = verify_bin(B5d, 3*SegCnt, SegCnt),
+ true = verify_bin(B5a, 0*SegCnt, SegCnt),
+ true = verify_bin(B5b, 1*SegCnt, SegCnt),
+ true = verify_bin(B5c, 2*SegCnt, SegCnt),
+ true = verify_bin(B5d, 3*SegCnt, SegCnt),
%%
%% +---+-------+-------+
%% | 3 | 2 | 1 |
%% +---+-------+-------+
%% < ^ ^ >
- ?line {ok, [B5g, B5f, B5e]} =
+ {ok, [B5g, B5f, B5e]} =
?FILE_MODULE:pread(FD5, [{3*SegSize, 2*SegSize},
{1*SegSize, 2*SegSize},
{0*SegSize, 1*SegSize}]),
- ?line true = verify_bin(B5e, 0*SegCnt, 1*SegCnt),
- ?line true = verify_bin(B5f, 1*SegCnt, 2*SegCnt),
- ?line true = verify_bin(B5g, 3*SegCnt, 1*SegCnt),
+ true = verify_bin(B5e, 0*SegCnt, 1*SegCnt),
+ true = verify_bin(B5f, 1*SegCnt, 2*SegCnt),
+ true = verify_bin(B5g, 3*SegCnt, 1*SegCnt),
%%
%%
%% +-------+-----------+
%% | 2 | 1 |
%% +-------+-----------+
%% < ^ ^ >
- ?line {ok, [B5i, B5h]} =
+ {ok, [B5i, B5h]} =
?FILE_MODULE:pread(FD5, [{2*SegSize, 3*SegSize},
{0*SegSize, 2*SegSize}]),
- ?line true = verify_bin(B5h, 0*SegCnt, 2*SegCnt),
- ?line true = verify_bin(B5i, 2*SegCnt, 2*SegCnt),
+ true = verify_bin(B5h, 0*SegCnt, 2*SegCnt),
+ true = verify_bin(B5i, 2*SegCnt, 2*SegCnt),
%%
%% +-------+---+---+
%% | 3 | 2 | 1 |
%% +-------+---+---+
%% < ^ ^ >
- ?line {ok, [B5l, B5k, B5j]} =
+ {ok, [B5l, B5k, B5j]} =
?FILE_MODULE:pread(FD5, [{3*SegSize, 1*SegSize},
{2*SegSize, 1*SegSize},
{0*SegSize, 2*SegSize}]),
- ?line true = verify_bin(B5j, 0*SegCnt, 2*SegCnt),
- ?line true = verify_bin(B5k, 2*SegCnt, 1*SegCnt),
- ?line true = verify_bin(B5l, 3*SegCnt, 1*SegCnt),
+ true = verify_bin(B5j, 0*SegCnt, 2*SegCnt),
+ true = verify_bin(B5k, 2*SegCnt, 1*SegCnt),
+ true = verify_bin(B5l, 3*SegCnt, 1*SegCnt),
%%
%% Real time response time test.
%%
Req = lists:flatten(lists:duplicate(17,
[{2*SegSize, 2*SegSize},
{0*SegSize, 2*SegSize}])),
- ?line {{ok, _}, Comment} =
+ {{ok, _}, Comment} =
response_analysis(?FILE_MODULE, pread, [FD5, Req]),
- ?line ok = ?FILE_MODULE:close(FD5),
+ ok = ?FILE_MODULE:close(FD5),
%%
- ?line [] = flush(),
- ?line ?t:timetrap_cancel(Dog),
+ [] = flush(),
{comment, Comment}.
@@ -3473,100 +3383,95 @@ segment_read(Config) when is_list(Config) ->
-segment_write(suite) ->
- [];
-segment_write(doc) ->
- ["Tests the segmenting of large writes"];
+%% Tests the segmenting of large writes.
segment_write(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(60)),
- %%
- ?line Name = filename:join(?config(priv_dir, Config),
- ?MODULE_STRING ++ "_segment_write"),
- ?line SegSize = 256*1024,
- ?line SegCnt = SegSize div 4,
- ?line Cnt = 4 * SegCnt,
- ?line Bin = create_bin(0, Cnt),
+ Name = filename:join(proplists:get_value(priv_dir, Config),
+ ?MODULE_STRING ++ "_segment_write"),
+ SegSize = 256*1024,
+ SegCnt = SegSize div 4,
+ Cnt = 4 * SegCnt,
+ Bin = create_bin(0, Cnt),
%%
%% write/2
%%
%% Not segmented
- ?line {ok, FD1} = ?FILE_MODULE:open(Name, [write, raw, binary]),
- ?line ok = ?FILE_MODULE:write(FD1, subbin(Bin, 0*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:write(FD1, subbin(Bin, 1*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:write(FD1, subbin(Bin, 2*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:write(FD1, subbin(Bin, 3*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:close(FD1),
- ?line true = verify_file(Name, Cnt),
+ {ok, FD1} = ?FILE_MODULE:open(Name, [write, raw, binary]),
+ ok = ?FILE_MODULE:write(FD1, subbin(Bin, 0*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:write(FD1, subbin(Bin, 1*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:write(FD1, subbin(Bin, 2*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:write(FD1, subbin(Bin, 3*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:close(FD1),
+ true = verify_file(Name, Cnt),
%%
%% Segmented
- ?line {ok, FD2} = ?FILE_MODULE:open(Name, [write, raw, binary]),
- ?line ok = ?FILE_MODULE:write(FD2, subbin(Bin, 0*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:write(FD2, subbin(Bin, 1*SegSize, 2*SegSize)),
- ?line ok = ?FILE_MODULE:write(FD2, subbin(Bin, 3*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:close(FD2),
- ?line true = verify_file(Name, Cnt),
+ {ok, FD2} = ?FILE_MODULE:open(Name, [write, raw, binary]),
+ ok = ?FILE_MODULE:write(FD2, subbin(Bin, 0*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:write(FD2, subbin(Bin, 1*SegSize, 2*SegSize)),
+ ok = ?FILE_MODULE:write(FD2, subbin(Bin, 3*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:close(FD2),
+ true = verify_file(Name, Cnt),
%%
%% +---+---+---+---+
%% | | | | |
%% +---+---+---+---+
%% < ^ >
- ?line ok = write_file(Name, [subbin(Bin, 0*SegSize, 1*SegSize),
- subbin(Bin, 1*SegSize, 1*SegSize),
- subbin(Bin, 2*SegSize, 1*SegSize),
- subbin(Bin, 3*SegSize, 1*SegSize)]),
- ?line true = verify_file(Name, Cnt),
+ ok = write_file(Name, [subbin(Bin, 0*SegSize, 1*SegSize),
+ subbin(Bin, 1*SegSize, 1*SegSize),
+ subbin(Bin, 2*SegSize, 1*SegSize),
+ subbin(Bin, 3*SegSize, 1*SegSize)]),
+ true = verify_file(Name, Cnt),
%%
%% +---+-------+---+
%% | | | |
%% +---+-------+---+
%% < ^ ^ >
- ?line ok = write_file(Name, [subbin(Bin, 0*SegSize, 1*SegSize),
- subbin(Bin, 1*SegSize, 2*SegSize),
- subbin(Bin, 3*SegSize, 1*SegSize)]),
- ?line true = verify_file(Name, Cnt),
+ ok = write_file(Name, [subbin(Bin, 0*SegSize, 1*SegSize),
+ subbin(Bin, 1*SegSize, 2*SegSize),
+ subbin(Bin, 3*SegSize, 1*SegSize)]),
+ true = verify_file(Name, Cnt),
%%
%% +-------+-------+
%% | | |
%% +-------+-------+
%% < ^ ^ >
- ?line ok = write_file(Name, [subbin(Bin, 0*SegSize, 2*SegSize),
- subbin(Bin, 2*SegSize, 2*SegSize)]),
- ?line true = verify_file(Name, Cnt),
+ ok = write_file(Name, [subbin(Bin, 0*SegSize, 2*SegSize),
+ subbin(Bin, 2*SegSize, 2*SegSize)]),
+ true = verify_file(Name, Cnt),
%%
%% +-------+---+---+
%% | | | |
%% +-------+---+---+
%% < ^ ^ >
- ?line ok = write_file(Name, [subbin(Bin, 0*SegSize, 2*SegSize),
- subbin(Bin, 2*SegSize, 1*SegSize),
- subbin(Bin, 3*SegSize, 1*SegSize)]),
- ?line true = verify_file(Name, Cnt),
+ ok = write_file(Name, [subbin(Bin, 0*SegSize, 2*SegSize),
+ subbin(Bin, 2*SegSize, 1*SegSize),
+ subbin(Bin, 3*SegSize, 1*SegSize)]),
+ true = verify_file(Name, Cnt),
%%
%% pwrite/3
%%
%% Not segmented
- ?line {ok, FD3} = ?FILE_MODULE:open(Name, [write, raw, binary]),
- ?line ok = ?FILE_MODULE:pwrite(FD3, 3*SegSize,
- subbin(Bin, 3*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:pwrite(FD3, 2*SegSize,
- subbin(Bin, 2*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:pwrite(FD3, 1*SegSize,
- subbin(Bin, 1*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:pwrite(FD3, 0*SegSize,
- subbin(Bin, 0*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:close(FD3),
- ?line true = verify_file(Name, Cnt),
+ {ok, FD3} = ?FILE_MODULE:open(Name, [write, raw, binary]),
+ ok = ?FILE_MODULE:pwrite(FD3, 3*SegSize,
+ subbin(Bin, 3*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:pwrite(FD3, 2*SegSize,
+ subbin(Bin, 2*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:pwrite(FD3, 1*SegSize,
+ subbin(Bin, 1*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:pwrite(FD3, 0*SegSize,
+ subbin(Bin, 0*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:close(FD3),
+ true = verify_file(Name, Cnt),
%%
%% Segmented
- ?line {ok, FD4} = ?FILE_MODULE:open(Name, [write, raw, binary]),
- ?line ok = ?FILE_MODULE:pwrite(FD4, 3*SegSize,
- subbin(Bin, 3*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:pwrite(FD4, 1*SegSize,
- subbin(Bin, 1*SegSize, 2*SegSize)),
- ?line ok = ?FILE_MODULE:pwrite(FD4, 0*SegSize,
- subbin(Bin, 0*SegSize, 1*SegSize)),
- ?line ok = ?FILE_MODULE:close(FD4),
- ?line true = verify_file(Name, Cnt),
+ {ok, FD4} = ?FILE_MODULE:open(Name, [write, raw, binary]),
+ ok = ?FILE_MODULE:pwrite(FD4, 3*SegSize,
+ subbin(Bin, 3*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:pwrite(FD4, 1*SegSize,
+ subbin(Bin, 1*SegSize, 2*SegSize)),
+ ok = ?FILE_MODULE:pwrite(FD4, 0*SegSize,
+ subbin(Bin, 0*SegSize, 1*SegSize)),
+ ok = ?FILE_MODULE:close(FD4),
+ true = verify_file(Name, Cnt),
@@ -3574,125 +3479,118 @@ segment_write(Config) when is_list(Config) ->
%% pwrite/2
%%
%% Not segmented
- ?line {ok, FD5} = ?FILE_MODULE:open(Name, [write, raw, binary]),
- ?line ok = ?FILE_MODULE:pwrite(FD5, [{3*SegSize,
- subbin(Bin, 3*SegSize, 1*SegSize)}]),
- ?line ok = ?FILE_MODULE:pwrite(FD5, [{2*SegSize,
- subbin(Bin, 2*SegSize, 1*SegSize)}]),
- ?line ok = ?FILE_MODULE:pwrite(FD5, [{1*SegSize,
- subbin(Bin, 1*SegSize, 1*SegSize)}]),
- ?line ok = ?FILE_MODULE:pwrite(FD5, [{0*SegSize,
- subbin(Bin, 0*SegSize, 1*SegSize)}]),
- ?line ok = ?FILE_MODULE:close(FD5),
- ?line true = verify_file(Name, Cnt),
+ {ok, FD5} = ?FILE_MODULE:open(Name, [write, raw, binary]),
+ ok = ?FILE_MODULE:pwrite(FD5, [{3*SegSize,
+ subbin(Bin, 3*SegSize, 1*SegSize)}]),
+ ok = ?FILE_MODULE:pwrite(FD5, [{2*SegSize,
+ subbin(Bin, 2*SegSize, 1*SegSize)}]),
+ ok = ?FILE_MODULE:pwrite(FD5, [{1*SegSize,
+ subbin(Bin, 1*SegSize, 1*SegSize)}]),
+ ok = ?FILE_MODULE:pwrite(FD5, [{0*SegSize,
+ subbin(Bin, 0*SegSize, 1*SegSize)}]),
+ ok = ?FILE_MODULE:close(FD5),
+ true = verify_file(Name, Cnt),
%%
%% Segmented
- ?line {ok, FD6} = ?FILE_MODULE:open(Name, [write, raw, binary]),
- ?line ok = ?FILE_MODULE:pwrite(FD6, [{3*SegSize,
- subbin(Bin, 3*SegSize, 1*SegSize)}]),
- ?line ok = ?FILE_MODULE:pwrite(FD6, [{1*SegSize,
- subbin(Bin, 1*SegSize, 2*SegSize)}]),
- ?line ok = ?FILE_MODULE:pwrite(FD6, [{0*SegSize,
- subbin(Bin, 0*SegSize, 1*SegSize)}]),
- ?line ok = ?FILE_MODULE:close(FD6),
- ?line true = verify_file(Name, Cnt),
+ {ok, FD6} = ?FILE_MODULE:open(Name, [write, raw, binary]),
+ ok = ?FILE_MODULE:pwrite(FD6, [{3*SegSize,
+ subbin(Bin, 3*SegSize, 1*SegSize)}]),
+ ok = ?FILE_MODULE:pwrite(FD6, [{1*SegSize,
+ subbin(Bin, 1*SegSize, 2*SegSize)}]),
+ ok = ?FILE_MODULE:pwrite(FD6, [{0*SegSize,
+ subbin(Bin, 0*SegSize, 1*SegSize)}]),
+ ok = ?FILE_MODULE:close(FD6),
+ true = verify_file(Name, Cnt),
%%
%% +---+---+---+---+
%% | 4 | 3 | 2 | 1 |
%% +---+---+---+---+
%% < ^ >
- ?line ok = pwrite_file(Name, [{3*SegSize,
- subbin(Bin, 3*SegSize, 1*SegSize)},
- {2*SegSize,
- subbin(Bin, 2*SegSize, 1*SegSize)},
- {1*SegSize,
- subbin(Bin, 1*SegSize, 1*SegSize)},
- {0*SegSize,
- subbin(Bin, 0*SegSize, 1*SegSize)}]),
- ?line true = verify_file(Name, Cnt),
+ ok = pwrite_file(Name, [{3*SegSize,
+ subbin(Bin, 3*SegSize, 1*SegSize)},
+ {2*SegSize,
+ subbin(Bin, 2*SegSize, 1*SegSize)},
+ {1*SegSize,
+ subbin(Bin, 1*SegSize, 1*SegSize)},
+ {0*SegSize,
+ subbin(Bin, 0*SegSize, 1*SegSize)}]),
+ true = verify_file(Name, Cnt),
%%
%% +---+-------+---+
%% | 3 | 2 | 1 |
%% +---+-------+---+
%% < ^ ^ >
- ?line ok = pwrite_file(Name, [{3*SegSize,
- subbin(Bin, 3*SegSize, 1*SegSize)},
- {1*SegSize,
- subbin(Bin, 1*SegSize, 2*SegSize)},
- {0*SegSize,
- subbin(Bin, 0*SegSize, 1*SegSize)}]),
- ?line true = verify_file(Name, Cnt),
+ ok = pwrite_file(Name, [{3*SegSize,
+ subbin(Bin, 3*SegSize, 1*SegSize)},
+ {1*SegSize,
+ subbin(Bin, 1*SegSize, 2*SegSize)},
+ {0*SegSize,
+ subbin(Bin, 0*SegSize, 1*SegSize)}]),
+ true = verify_file(Name, Cnt),
%%
%% +-------+-------+
%% | 2 | 1 |
%% +-------+-------+
%% < ^ ^ >
- ?line ok = pwrite_file(Name, [{2*SegSize,
- subbin(Bin, 2*SegSize, 2*SegSize)},
- {0*SegSize,
- subbin(Bin, 0*SegSize, 2*SegSize)}]),
- ?line true = verify_file(Name, Cnt),
+ ok = pwrite_file(Name, [{2*SegSize,
+ subbin(Bin, 2*SegSize, 2*SegSize)},
+ {0*SegSize,
+ subbin(Bin, 0*SegSize, 2*SegSize)}]),
+ true = verify_file(Name, Cnt),
%%
%% +-------+---+---+
%% | 3 | 2 | 1 |
%% +-------+---+---+
%% < ^ ^ >
- ?line ok = pwrite_file(Name, [{3*SegSize,
- subbin(Bin, 3*SegSize, 1*SegSize)},
- {2*SegSize,
- subbin(Bin, 2*SegSize, 1*SegSize)},
- {0*SegSize,
- subbin(Bin, 0*SegSize, 2*SegSize)}]),
- ?line true = verify_file(Name, Cnt),
+ ok = pwrite_file(Name, [{3*SegSize,
+ subbin(Bin, 3*SegSize, 1*SegSize)},
+ {2*SegSize,
+ subbin(Bin, 2*SegSize, 1*SegSize)},
+ {0*SegSize,
+ subbin(Bin, 0*SegSize, 2*SegSize)}]),
+ true = verify_file(Name, Cnt),
%%
%% Real time response time test.
%%
- ?line {ok, FD7} = ?FILE_MODULE:open(Name, [write, raw, binary]),
+ {ok, FD7} = ?FILE_MODULE:open(Name, [write, raw, binary]),
Req = lists:flatten(lists:duplicate(17,
[{2*SegSize,
subbin(Bin, 2*SegSize, 2*SegSize)},
- {0*SegSize,
- subbin(Bin, 0*SegSize, 2*SegSize)}])),
- ?line {ok, Comment} =
+ {0*SegSize,
+ subbin(Bin, 0*SegSize, 2*SegSize)}])),
+ {ok, Comment} =
response_analysis(?FILE_MODULE, pwrite, [FD7, Req]),
- ?line ok = ?FILE_MODULE:close(FD7),
+ ok = ?FILE_MODULE:close(FD7),
%%
- ?line [] = flush(),
- ?line ?t:timetrap_cancel(Dog),
+ [] = flush(),
{comment, Comment}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-ipread(suite) ->
- [];
-ipread(doc) ->
- ["Test Dets special indirect pread"];
+%% Test Dets special indirect pread.
ipread(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(30)),
- %%
- ?line Dir = ?config(priv_dir, Config),
- ?line ok = ipread_int(Dir, [raw, binary]),
- ?line ok = ipread_int(Dir, [raw]),
- ?line ok = ipread_int(Dir, [binary]),
- ?line ok = ipread_int(Dir, []),
- ?line ok = ipread_int(Dir, [ram, binary]),
- ?line ok = ipread_int(Dir, [ram]),
+ Dir = proplists:get_value(priv_dir, Config),
+ ok = ipread_int(Dir, [raw, binary]),
+ ok = ipread_int(Dir, [raw]),
+ ok = ipread_int(Dir, [binary]),
+ ok = ipread_int(Dir, []),
+ ok = ipread_int(Dir, [ram, binary]),
+ ok = ipread_int(Dir, [ram]),
%%
- ?line [] = flush(),
- ?line ?t:timetrap_cancel(Dog),
+ [] = flush(),
ok.
ipread_int(Dir, ModeList) ->
- ?line Name =
+ Name =
filename:join(Dir,
lists:flatten([?MODULE_STRING, "_ipread",
- lists:map(fun (X) ->
- ["_", atom_to_list(X)]
- end,
- ModeList)])),
- ?line io:format("ipread_int<~p, ~p>~n", [Name, ModeList]),
- ?line {Conv, Sizeof} =
+ lists:map(fun (X) ->
+ ["_", atom_to_list(X)]
+ end,
+ ModeList)])),
+ io:format("ipread_int<~p, ~p>~n", [Name, ModeList]),
+ {Conv, Sizeof} =
case lists:member(binary, ModeList) of
true ->
{fun (Bin) when is_binary(Bin) -> Bin;
@@ -3705,144 +3603,130 @@ ipread_int(Dir, ModeList) ->
end,
fun erlang:length/1}
end,
- ?line Pos = 4711,
- ?line Data = Conv("THE QUICK BROWN FOX JUMPS OVER A LAZY DOG"),
- ?line Size = Sizeof(Data),
- ?line Init = Conv(" "),
- ?line SizeInit = Sizeof(Init),
- ?line Head = Conv(<<Size:32/big-unsigned, Pos:32/big-unsigned>>),
- ?line Filler = Conv(bytes($ , Pos-SizeInit-Sizeof(Head))),
- ?line Size1 = Size+1,
- ?line SizePos = Size+Pos,
+ Pos = 4711,
+ Data = Conv("THE QUICK BROWN FOX JUMPS OVER A LAZY DOG"),
+ Size = Sizeof(Data),
+ Init = Conv(" "),
+ SizeInit = Sizeof(Init),
+ Head = Conv(<<Size:32/big-unsigned, Pos:32/big-unsigned>>),
+ Filler = Conv(bytes($ , Pos-SizeInit-Sizeof(Head))),
+ Size1 = Size+1,
+ SizePos = Size+Pos,
%%
- ?line {ok, FD} = ?FILE_MODULE:open(Name, [write, read | ModeList]),
- ?line ok = ?FILE_MODULE:truncate(FD),
- ?line ok = ?FILE_MODULE:write(FD, Init),
- ?line ok = ?FILE_MODULE:write(FD, Head),
- ?line ok = ?FILE_MODULE:write(FD, Filler),
- ?line ok = ?FILE_MODULE:write(FD, Data),
+ {ok, FD} = ?FILE_MODULE:open(Name, [write, read | ModeList]),
+ ok = ?FILE_MODULE:truncate(FD),
+ ok = ?FILE_MODULE:write(FD, Init),
+ ok = ?FILE_MODULE:write(FD, Head),
+ ok = ?FILE_MODULE:write(FD, Filler),
+ ok = ?FILE_MODULE:write(FD, Data),
%% Correct read
- ?line {ok, {Size, Pos, Data}} =
+ {ok, {Size, Pos, Data}} =
?FILE_MODULE:ipread_s32bu_p32bu(FD, SizeInit, infinity),
%% Invalid header - size > max
- ?line eof =
+ eof =
?FILE_MODULE:ipread_s32bu_p32bu(FD, SizeInit, Size-1),
%% Data block protrudes over eof
- ?line ok =
+ ok =
?FILE_MODULE:pwrite(FD, SizeInit,
<<Size1:32/big-unsigned,
- Pos:32/big-unsigned>>),
- ?line {ok, {Size1, Pos, Data}} =
+ Pos:32/big-unsigned>>),
+ {ok, {Size1, Pos, Data}} =
?FILE_MODULE:ipread_s32bu_p32bu(FD, SizeInit, Size1),
%% Data block outside file
- ?line ok =
+ ok =
?FILE_MODULE:pwrite(FD, SizeInit,
<<Size:32/big-unsigned,
- SizePos:32/big-unsigned>>),
- ?line {ok, {Size, SizePos, eof}} =
+ SizePos:32/big-unsigned>>),
+ {ok, {Size, SizePos, eof}} =
?FILE_MODULE:ipread_s32bu_p32bu(FD, SizeInit, Size),
%% Zero size
- ?line ok =
+ ok =
?FILE_MODULE:pwrite(FD, SizeInit,
<<0:32/big-unsigned,
- Pos:32/big-unsigned>>),
- ?line {ok, {0, Pos, eof}} =
+ Pos:32/big-unsigned>>),
+ {ok, {0, Pos, eof}} =
?FILE_MODULE:ipread_s32bu_p32bu(FD, SizeInit, Size),
%% Invalid header - protrudes over eof
- ?line eof =
+ eof =
?FILE_MODULE:ipread_s32bu_p32bu(FD,
Pos+Size-(Sizeof(Head)-1),
infinity),
%% Header not even in file
- ?line eof =
+ eof =
?FILE_MODULE:ipread_s32bu_p32bu(FD, Pos+Size, infinity),
%%
- ?line ok = ?FILE_MODULE:close(FD),
+ ok = ?FILE_MODULE:close(FD),
ok.
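
For orientation, ipread_s32bu_p32bu/3 reads an eight-byte header at the given location, a 32-bit big-endian unsigned size followed by a 32-bit big-endian unsigned position, and then returns the block found at that position, which is exactly the layout the test writes out above. A minimal sketch of the call (not part of the patch; the scratch path is an assumption):

    ipread_sketch() ->
        Path = "/tmp/ipread_sketch",                 % assumed scratch path
        Blob = <<"THE QUICK BROWN FOX">>,
        Size = byte_size(Blob),
        Pos = 100,
        {ok, Fd} = file:open(Path, [raw, read, write, binary]),
        %% Header at offset 0: Size bytes of payload live at offset Pos.
        ok = file:pwrite(Fd, 0, <<Size:32/big-unsigned, Pos:32/big-unsigned>>),
        ok = file:pwrite(Fd, Pos, Blob),
        {ok, {Size, Pos, Blob}} = file:ipread_s32bu_p32bu(Fd, 0, infinity),
        ok = file:close(Fd).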
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-interleaved_read_write(suite) ->
- [];
-interleaved_read_write(doc) ->
- ["Tests interleaved read and writes"];
+%% Tests interleaved read and writes.
interleaved_read_write(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(30)),
- %%
- ?line Dir = ?config(priv_dir, Config),
- ?line File =
+ Dir = proplists:get_value(priv_dir, Config),
+ File =
filename:join(Dir, ?MODULE_STRING++"interleaved_read_write.txt"),
- ?line {ok,F1} = ?FILE_MODULE:open(File, [write]),
- ?line ok = ?FILE_MODULE:write(F1, "data---r1."), % 10 chars each
- ?line ok = ?FILE_MODULE:write(F1, "data---r2."),
- ?line ok = ?FILE_MODULE:write(F1, "data---r3."),
- ?line ok = ?FILE_MODULE:close(F1),
- ?line {ok,F2} = ?FILE_MODULE:open(File, [read, write]),
- ?line {ok, "data---r1."} = ?FILE_MODULE:read(F2, 10),
- ?line ok = ?FILE_MODULE:write(F2, "data---w2."),
- ?line ok = ?FILE_MODULE:close(F2),
- ?line {ok,F3} = ?FILE_MODULE:open(File, [read]),
- ?line {ok, "data---r1."} = ?FILE_MODULE:read(F3, 10),
- ?line {ok, "data---w2."} = ?FILE_MODULE:read(F3, 10),
- ?line {ok, "data---r3."} = ?FILE_MODULE:read(F3, 10),
- ?line eof = ?FILE_MODULE:read(F3, 1),
- ?line ok = ?FILE_MODULE:close(F2),
+ {ok,F1} = ?FILE_MODULE:open(File, [write]),
+ ok = ?FILE_MODULE:write(F1, "data---r1."), % 10 chars each
+ ok = ?FILE_MODULE:write(F1, "data---r2."),
+ ok = ?FILE_MODULE:write(F1, "data---r3."),
+ ok = ?FILE_MODULE:close(F1),
+ {ok,F2} = ?FILE_MODULE:open(File, [read, write]),
+ {ok, "data---r1."} = ?FILE_MODULE:read(F2, 10),
+ ok = ?FILE_MODULE:write(F2, "data---w2."),
+ ok = ?FILE_MODULE:close(F2),
+ {ok,F3} = ?FILE_MODULE:open(File, [read]),
+ {ok, "data---r1."} = ?FILE_MODULE:read(F3, 10),
+ {ok, "data---w2."} = ?FILE_MODULE:read(F3, 10),
+ {ok, "data---r3."} = ?FILE_MODULE:read(F3, 10),
+ eof = ?FILE_MODULE:read(F3, 1),
+ ok = ?FILE_MODULE:close(F2),
%%
- ?line [] = flush(),
- ?line ?t:timetrap_cancel(Dog),
+ [] = flush(),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-otp_5814(suite) ->
- [];
-otp_5814(doc) ->
- ["OTP-5814. eval/consult/script return correct line numbers"];
+%% OTP-5814. eval/consult/script return correct line numbers.
otp_5814(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(10)),
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
File = filename:join(PrivDir, "otp_5814"),
Path = [PrivDir],
- ?line ok = file:write_file(File, <<"{a,b,c}.
+ ok = file:write_file(File, <<"{a,b,c}.
a.
- b.
- c.
- {d,e,
- [}.">>),
- ?line {error, {6,erl_parse,_}} = file:eval(File),
- ?line {error, {6,erl_parse,_}} = file:consult(File),
- ?line {error, {6,erl_parse,_}} = file:path_consult(Path, File),
- ?line {error, {6,erl_parse,_}} = file:path_eval(Path, File),
- ?line {error, {6,erl_parse,_}} = file:script(File),
- ?line {error, {6,erl_parse,_}} = file:path_script(Path, File),
-
- ?line ok = file:write_file(File, <<>>),
- ?line {error, {1,file,undefined_script}} = file:path_script(Path, File),
+b.
+c.
+{d,e,
+ [}.">>),
+ {error, {6,erl_parse,_}} = file:eval(File),
+ {error, {6,erl_parse,_}} = file:consult(File),
+ {error, {6,erl_parse,_}} = file:path_consult(Path, File),
+ {error, {6,erl_parse,_}} = file:path_eval(Path, File),
+ {error, {6,erl_parse,_}} = file:script(File),
+ {error, {6,erl_parse,_}} = file:path_script(Path, File),
+
+ ok = file:write_file(File, <<>>),
+ {error, {1,file,undefined_script}} = file:path_script(Path, File),
%% The error is not propagated...
- ?line ok = file:write_file(File, <<"a.
+ ok = file:write_file(File, <<"a.
b.
- 1/0.">>),
- ?line {error, {3, file, {error, badarith, _}}} = file:eval(File),
-
- ?line ok = file:write_file(File, <<"erlang:raise(throw, apa, []).">>),
- ?line {error, {1, file, {throw, apa, _}}} = file:eval(File),
+1/0.">>),
+ {error, {3, file, {error, badarith, _}}} = file:eval(File),
- file:delete(File),
- ?line ?t:timetrap_cancel(Dog),
- ok.
+    ok = file:write_file(File, <<"erlang:raise(throw, apa, []).">>),
+    {error, {1, file, {throw, apa, _}}} = file:eval(File),
+
+    file:delete(File),
+    ok.
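
As the assertions rely on it, note that file:eval/1, file:consult/1 and their path variants report errors as {error, {Line, Mod, Reason}}, where Line is the line in the consulted file; that is what the {6,erl_parse,_} matches above check. A minimal sketch (not part of the patch; the scratch path is an assumption):

    consult_error_sketch() ->
        Path = "/tmp/consult_sketch",                % assumed scratch path
        ok = file:write_file(Path, <<"{a,b}.\n[}.\n">>),
        %% The bad term sits on line 2, so erl_parse reports line 2.
        {error, {2, erl_parse, _Reason}} = file:consult(Path),
        ok = file:delete(Path).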
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-otp_10852(suite) ->
- [];
-otp_10852(doc) ->
- ["OTP-10852. +fnu and latin1 filenames"];
+%% OTP-10852. +fnu and latin1 filenames.
otp_10852(Config) when is_list(Config) ->
Node = start_node(erl_pp_helper, "+fnu"),
- Dir = ?config(priv_dir, Config),
+ Dir = proplists:get_value(priv_dir, Config),
B = filename:join(Dir, <<"\xE4">>),
ok = rpc_call(Node, get_cwd, [B]),
{error, no_translation} = rpc_call(Node, set_cwd, [B]),
@@ -3857,10 +3741,10 @@ otp_10852(Config) when is_list(Config) ->
ok = rpc_call(Node, read_file, [B]),
ok = rpc_call(Node, make_link, [B,B]),
case rpc_call(Node, make_symlink, [B,B]) of
- ok -> ok;
- {error, E} when (E =:= enotsup) or (E =:= eperm) ->
- {win32,_} = os:type()
- end,
+ ok -> ok;
+ {error, E} when (E =:= enotsup) or (E =:= eperm) ->
+ {win32,_} = os:type()
+ end,
ok = rpc_call(Node, delete, [B]),
ok = rpc_call(Node, make_dir, [B]),
ok = rpc_call(Node, del_dir, [B]),
@@ -3883,58 +3767,58 @@ rpc_call(N, F, As) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-large_file(suite) ->
- [];
-large_file(doc) ->
- ["Tests positioning in large files (> 4G)"];
+large_file() ->
+ [{timetrap,{minutes,20}}].
+
+%% Tests positioning in large files (> 4G).
large_file(Config) when is_list(Config) ->
run_large_file_test(Config,
fun(Name) -> do_large_file(Name) end,
"_large_file").
do_large_file(Name) ->
- ?line Watchdog = ?t:timetrap(?t:minutes(20)),
-
- ?line S = "1234567890",
+ S = "1234567890",
L = length(S),
R = lists:reverse(S),
P = 1 bsl 32,
Ss = lists:sort(S),
Rs = lists:reverse(Ss),
- ?line {ok,F} = ?FILE_MODULE:open(Name, [raw,read,write]),
- ?line ok = ?FILE_MODULE:write(F, S),
- ?line {ok,P} = ?FILE_MODULE:position(F, P),
- ?line ok = ?FILE_MODULE:write(F, R),
- ?line {ok,0} = ?FILE_MODULE:position(F, bof),
- ?line {ok,S} = ?FILE_MODULE:read(F, L),
- ?line {ok,P} = ?FILE_MODULE:position(F, {eof,-L}),
- ?line {ok,R} = ?FILE_MODULE:read(F, L+1),
- ?line {ok,S} = ?FILE_MODULE:pread(F, 0, L),
- ?line {ok,R} = ?FILE_MODULE:pread(F, P, L+1),
- ?line ok = ?FILE_MODULE:pwrite(F, 0, Ss),
- ?line ok = ?FILE_MODULE:pwrite(F, P, Rs),
- ?line {ok,0} = ?FILE_MODULE:position(F, bof),
- ?line {ok,Ss} = ?FILE_MODULE:read(F, L),
- ?line {ok,P} = ?FILE_MODULE:position(F, {eof,-L}),
- ?line {ok,Rs} = ?FILE_MODULE:read(F, L+1),
- ?line ok = ?FILE_MODULE:close(F),
+ {ok,F} = ?FILE_MODULE:open(Name, [raw,read,write]),
+ ok = ?FILE_MODULE:write(F, S),
+ {ok,P} = ?FILE_MODULE:position(F, P),
+ ok = ?FILE_MODULE:write(F, R),
+ {ok,0} = ?FILE_MODULE:position(F, bof),
+ {ok,S} = ?FILE_MODULE:read(F, L),
+ {ok,P} = ?FILE_MODULE:position(F, {eof,-L}),
+ {ok,R} = ?FILE_MODULE:read(F, L+1),
+ {ok,S} = ?FILE_MODULE:pread(F, 0, L),
+ {ok,R} = ?FILE_MODULE:pread(F, P, L+1),
+ ok = ?FILE_MODULE:pwrite(F, 0, Ss),
+ ok = ?FILE_MODULE:pwrite(F, P, Rs),
+ {ok,0} = ?FILE_MODULE:position(F, bof),
+ {ok,Ss} = ?FILE_MODULE:read(F, L),
+ {ok,P} = ?FILE_MODULE:position(F, {eof,-L}),
+ {ok,Rs} = ?FILE_MODULE:read(F, L+1),
+ ok = ?FILE_MODULE:close(F),
%% Reopen the file with 'append'; used to fail on Windows causing
%% writes to go to the beginning of the file for files > 4GB.
- ?line PL = P + L,
- ?line PLL = PL + L,
- ?line {ok,F1} = ?FILE_MODULE:open(Name, [raw,read,write,append]),
- ?line ok = ?FILE_MODULE:write(F1, R),
- ?line {ok,PLL} = ?FILE_MODULE:position(F1, {cur,0}),
- ?line {ok,Rs} = ?FILE_MODULE:pread(F1, P, L),
- ?line {ok,PL} = ?FILE_MODULE:position(F1, {eof,-L}),
- ?line {ok,R} = ?FILE_MODULE:read(F1, L+1),
- ?line ok = ?FILE_MODULE:close(F1),
- %%
- ?line ?t:timetrap_cancel(Watchdog),
+ PL = P + L,
+ PLL = PL + L,
+ {ok,F1} = ?FILE_MODULE:open(Name, [raw,read,write,append]),
+ ok = ?FILE_MODULE:write(F1, R),
+ {ok,PLL} = ?FILE_MODULE:position(F1, {cur,0}),
+ {ok,Rs} = ?FILE_MODULE:pread(F1, P, L),
+ {ok,PL} = ?FILE_MODULE:position(F1, {eof,-L}),
+ {ok,R} = ?FILE_MODULE:read(F1, L+1),
+ ok = ?FILE_MODULE:close(F1),
+
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+large_write() ->
+ [{timetrap,{minutes,20}}].
+
large_write(Config) when is_list(Config) ->
run_large_file_test(Config,
fun(Name) -> do_large_write(Name) end,
@@ -3960,12 +3844,89 @@ do_large_write(Name) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Benchmarks
+%%
+%% Note that we only measure the time it takes to run the isolated file
+%% operations and that the actual test runtime can differ significantly,
+%% especially on the write side as the files need to be truncated before
+%% writing.
+
+large_writes(Config) when is_list(Config) ->
+ Modes = [raw, binary],
+ OpCount = 4096,
+ Data = <<0:(64 bsl 10)/unit:8>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+large_writes_delayed(Config) when is_list(Config) ->
+ %% Each write is exactly as large as the delay buffer, causing the writes
+ %% to pass through each time, giving us a decent idea of how much overhead
+ %% delayed_write adds.
+ Modes = [raw, binary, {delayed_write, 64 bsl 10, 2000}],
+ OpCount = 4096,
+ Data = <<0:(64 bsl 10)/unit:8>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+tiny_writes(Config) when is_list(Config) ->
+ Modes = [raw, binary],
+ OpCount = 512 bsl 10,
+ Data = <<0>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+tiny_writes_delayed(Config) when is_list(Config) ->
+ Modes = [raw, binary, {delayed_write, 512 bsl 10, 2000}],
+ OpCount = 512 bsl 10,
+ Data = <<0>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+%% The read benchmarks assume that "benchmark_scratch_file" has been filled by
+%% the write benchmarks.
+
+tiny_reads(Config) when is_list(Config) ->
+ Modes = [raw, binary],
+ OpCount = 512 bsl 10,
+ run_read_benchmark(Config, Modes, OpCount, 1).
+
+tiny_reads_ahead(Config) when is_list(Config) ->
+ Modes = [raw, binary, {read_ahead, 512 bsl 10}],
+ OpCount = 512 bsl 10,
+ run_read_benchmark(Config, Modes, OpCount, 1).
+
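
The buffered variants above differ from the plain ones only in the file:open/2 modes they pass. For reference, a minimal sketch of those two options (not part of the patch; the paths and sizes are illustrative):

    buffered_open_sketch() ->
        ok = file:write_file("scratch_r", <<0:1024/unit:8>>),       % assumed scratch files
        %% Writes are buffered until 512 KiB accumulate or 2000 ms pass, then flushed.
        {ok, WFd} = file:open("scratch_w", [raw, binary, write,
                                            {delayed_write, 512 bsl 10, 2000}]),
        %% Sequential reads are served from a buffer refilled in 512 KiB chunks.
        {ok, RFd} = file:open("scratch_r", [raw, binary, read,
                                            {read_ahead, 512 bsl 10}]),
        ok = file:close(WFd),
        ok = file:close(RFd).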
+run_write_benchmark(Config, Modes, OpCount, Data) ->
+ run_benchmark(Config, [write | Modes], OpCount, fun file:write/2, Data).
+
+run_read_benchmark(Config, Modes, OpCount, OpSize) ->
+ run_benchmark(Config, [read | Modes], OpCount, fun file:read/2, OpSize).
+
+run_benchmark(Config, Modes, OpCount, Fun, Arg) ->
+ ScratchDir = proplists:get_value(priv_dir, Config),
+ Path = filename:join(ScratchDir, "benchmark_scratch_file"),
+ {ok, Fd} = file:open(Path, Modes),
+ submit_throughput_results(Fun, [Fd, Arg], OpCount).
+
+submit_throughput_results(Fun, Args, Times) ->
+ MSecs = measure_repeated_file_op(Fun, Args, Times, millisecond),
+ IOPS = trunc(Times * (1000 / MSecs)),
+ ct_event:notify(#event{ name = benchmark_data, data = [{value,IOPS}] }),
+ {comment, io_lib:format("~p IOPS, ~p ms", [IOPS, trunc(MSecs)])}.
+
+measure_repeated_file_op(Fun, Args, Times, Unit) ->
+ Start = os:perf_counter(Unit),
+ repeated_apply(Fun, Args, Times),
+ os:perf_counter(Unit) - Start.
+
+repeated_apply(_F, _Args, Times) when Times =< 0 ->
+ ok;
+repeated_apply(F, Args, Times) ->
+ erlang:apply(F, Args),
+ repeated_apply(F, Args, Times - 1).
+
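
The helpers above time the repeated operation with os:perf_counter/1 and convert the elapsed milliseconds to operations per second. The same measurement written out by hand, as a standalone sketch (not part of the patch; the path and sizes are assumptions):

    write_iops_sketch() ->
        Path = "/tmp/bench_sketch",                  % assumed scratch path
        Data = <<0:(64 bsl 10)/unit:8>>,             % one 64 KiB write per operation
        Ops = 4096,
        {ok, Fd} = file:open(Path, [raw, binary, write]),
        Start = os:perf_counter(millisecond),
        [ok = file:write(Fd, Data) || _ <- lists:seq(1, Ops)],
        MSecs = max(1, os:perf_counter(millisecond) - Start),   % avoid division by zero
        ok = file:close(Fd),
        trunc(Ops * (1000 / MSecs)).                 % operations per second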
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
response_analysis(Module, Function, Arguments) ->
Parent = self(),
- ?line erlang:yield(), % Schedule out before test
- ?line Child =
+ erlang:yield(), % Schedule out before test
+ Child =
spawn_link(
fun () ->
receive {Parent, start, Ts} -> ok end,
@@ -3985,21 +3946,21 @@ response_analysis(Module, Function, Arguments) ->
Parent ! {self(), stopped, response_stat(Stat, micro_ts())}
end),
Child ! {Parent, start, micro_ts()},
- ?line Result = apply(Module, Function, Arguments),
- ?line Child ! {Parent, stop},
- ?line {N, Sum, _, M, Max} = receive {Child, stopped, X} -> X end,
- ?line Mean_ms = (0.001*Sum) / (N-1),
- ?line Max_ms = 0.001 * Max,
- ?line Comment =
+ Result = apply(Module, Function, Arguments),
+ Child ! {Parent, stop},
+ {N, Sum, _, M, Max} = receive {Child, stopped, X} -> X end,
+ Mean_ms = (0.001*Sum) / (N-1),
+ Max_ms = 0.001 * Max,
+ Comment =
lists:flatten(
io_lib:format(
"Scheduling interval: Mean = ~.3f ms, "
++"Max = ~.3f ms for no ~p of ~p.~n",
[Mean_ms, Max_ms, M, (N-1)])),
- ?line {Result, Comment}.
-
+ {Result, Comment}.
+
micro_ts() ->
- erlang:monotonic_time(micro_seconds).
+ erlang:monotonic_time(microsecond).
response_stat(init, Ts) ->
{0, 0, Ts, 0, 0};
@@ -4021,10 +3982,10 @@ response_stat({N, Sum, Ts0, M, Max}, Ts) ->
%% create_file/2 below is some 44 times faster.
create_file_slow(Name, N) when is_integer(N), N >= 0 ->
- ?line {ok, FD} =
+ {ok, FD} =
?FILE_MODULE:open(Name, [raw, write, delayed_write, binary]),
- ?line ok = create_file_slow(FD, 0, N),
- ?line ok = ?FILE_MODULE:close(FD),
+ ok = create_file_slow(FD, 0, N),
+ ok = ?FILE_MODULE:close(FD),
ok.
create_file_slow(_FD, M, M) ->
@@ -4039,10 +4000,10 @@ create_file_slow(FD, M, N) ->
%% from 0 to N-1.
create_file(Name, N) when is_integer(N), N >= 0 ->
- ?line {ok, FD} =
+ {ok, FD} =
?FILE_MODULE:open(Name, [raw, write, delayed_write, binary]),
- ?line ok = create_file(FD, 0, N),
- ?line ok = ?FILE_MODULE:close(FD),
+ ok = create_file(FD, 0, N),
+ ok = ?FILE_MODULE:close(FD),
ok.
create_file(_FD, M, M) ->
@@ -4059,10 +4020,10 @@ create_file(FD, M, N0, R) when M + 8 =< N0 ->
N1 = N0-1, N2 = N0-2, N3 = N0-3, N4 = N0-4,
N5 = N0-5, N6 = N0-6, N7 = N0-7, N8 = N0-8,
create_file(FD, M, N8,
- [<<N8:32/unsigned, N7:32/unsigned,
- N6:32/unsigned, N5:32/unsigned,
- N4:32/unsigned, N3:32/unsigned,
- N2:32/unsigned, N1:32/unsigned>> | R]);
+ [<<N8:32/unsigned, N7:32/unsigned,
+ N6:32/unsigned, N5:32/unsigned,
+ N4:32/unsigned, N3:32/unsigned,
+ N2:32/unsigned, N1:32/unsigned>> | R]);
create_file(FD, M, N0, R) ->
N1 = N0-1,
create_file(FD, M, N1, [<<N1:32/unsigned>> | R]).
@@ -4079,14 +4040,14 @@ create_bin(M, N0, R) when M+8 =< N0 ->
N5 = N0-5, N6 = N0-6, N7 = N0-7, N8 = N0-8,
create_bin(M, N8,
[<<N8:32/unsigned, N7:32/unsigned,
- N6:32/unsigned, N5:32/unsigned,
- N4:32/unsigned, N3:32/unsigned,
- N2:32/unsigned, N1:32/unsigned>> | R]);
+ N6:32/unsigned, N5:32/unsigned,
+ N4:32/unsigned, N3:32/unsigned,
+ N2:32/unsigned, N1:32/unsigned>> | R]);
create_bin(M, N0, R) ->
N1 = N0-1,
create_bin(M, N1, [<<N1:32/unsigned>> | R]).
-
-
+
+
verify_bin(<<>>, _, 0) ->
@@ -4098,8 +4059,8 @@ verify_bin(Bin, N, Cnt) ->
N4 = N + 4, N5 = N + 5, N6 = N + 6, N7 = N + 7,
case Bin of
<<N0:32/unsigned, N1:32/unsigned, N2:32/unsigned, N3:32/unsigned,
- N4:32/unsigned, N5:32/unsigned, N6:32/unsigned, N7:32/unsigned,
- B/binary>> ->
+ N4:32/unsigned, N5:32/unsigned, N6:32/unsigned, N7:32/unsigned,
+ B/binary>> ->
verify_bin(B, N+8, Cnt-8);
<<N:32/unsigned, B/binary>> ->
verify_bin(B, N+1, Cnt-1);
@@ -4180,13 +4141,13 @@ pwrite_file(Name, Data) ->
read_line_testdata(PrivDir) ->
All0 = [{fun read_line_create0/1,"Testdata1.txt",5,10},
- {fun read_line_create1/1,"Testdata2.txt",401,802},
- {fun read_line_create2/1,"Testdata3.txt",1,2},
- {fun read_line_create3/1,"Testdata4.txt",601,fail},
- {fun read_line_create4/1,"Testdata5.txt",601,1002},
- {fun read_line_create5/1,"Testdata6.txt",601,1202},
- {fun read_line_create6/1,"Testdata7.txt",601,1202},
- {fun read_line_create7/1,"Testdata8.txt",4001,8002}],
+ {fun read_line_create1/1,"Testdata2.txt",401,802},
+ {fun read_line_create2/1,"Testdata3.txt",1,2},
+ {fun read_line_create3/1,"Testdata4.txt",601,fail},
+ {fun read_line_create4/1,"Testdata5.txt",601,1002},
+ {fun read_line_create5/1,"Testdata6.txt",601,1202},
+ {fun read_line_create6/1,"Testdata7.txt",601,1202},
+ {fun read_line_create7/1,"Testdata8.txt",4001,8002}],
[ {A,filename:join([PrivDir,B]),C,D} || {A,B,C,D} <- All0 ].
read_line_create_files(TestData) ->
@@ -4195,105 +4156,93 @@ read_line_create_files(TestData) ->
read_line_remove_files(TestData) ->
[ file:delete(File) || {_Function,File,_,_} <- TestData ].
-read_line_1(suite) ->
- [];
-read_line_1(doc) ->
- ["read_line with prim_file"];
+%% read_line with ?PRIM_FILE.
read_line_1(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line All = read_line_testdata(PrivDir),
- ?line read_line_create_files(All),
- ?line [ begin
- io:format("read_line_all: ~s~n",[File]),
- {X,_} = read_line_all(File),
- true
- end || {_,File,X,_} <- All ],
- ?line [ begin
- io:format("read_line_all_alternating: ~s~n",[File]),
- {Y,_} = read_line_all_alternating(File),
- true
- end || {_,File,_,Y} <- All , Y =/= fail],
- ?line [ begin
- io:format("read_line_all_alternating (failing as should): ~s~n",[File]),
- {'EXIT',_} = (catch read_line_all_alternating(File)),
- true
- end || {_,File,_,Y} <- All , Y =:= fail],
- ?line read_line_remove_files(All),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ All = read_line_testdata(PrivDir),
+ read_line_create_files(All),
+ [ begin
+ io:format("read_line_all: ~s~n",[File]),
+ {X,_} = read_line_all(File),
+ true
+ end || {_,File,X,_} <- All ],
+ [ begin
+ io:format("read_line_all_alternating: ~s~n",[File]),
+ {Y,_} = read_line_all_alternating(File),
+ true
+ end || {_,File,_,Y} <- All , Y =/= fail],
+ [ begin
+ io:format("read_line_all_alternating (failing as should): ~s~n",[File]),
+ {'EXIT',_} = (catch read_line_all_alternating(File)),
+ true
+ end || {_,File,_,Y} <- All , Y =:= fail],
+ read_line_remove_files(All),
ok.
-read_line_2(suite) ->
- [];
-read_line_2(doc) ->
- ["read_line with file"];
+%% read_line with file.
read_line_2(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line All = read_line_testdata(PrivDir),
- ?line read_line_create_files(All),
- ?line [ begin
- io:format("read_line_all: ~s~n",[File]),
- {X,_} = read_line_all2(File),
- true
- end || {_,File,X,_} <- All ],
- ?line [ begin
- io:format("read_line_all_alternating: ~s~n",[File]),
- {Y,_} = read_line_all_alternating2(File),
- true
- end || {_,File,_,Y} <- All , Y =/= fail],
- ?line [ begin
- io:format("read_line_all_alternating (failing as should): ~s~n",[File]),
- {'EXIT',_} = (catch read_line_all_alternating2(File)),
- true
- end || {_,File,_,Y} <- All , Y =:= fail],
- ?line read_line_remove_files(All),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ All = read_line_testdata(PrivDir),
+ read_line_create_files(All),
+ [ begin
+ io:format("read_line_all: ~s~n",[File]),
+ {X,_} = read_line_all2(File),
+ true
+ end || {_,File,X,_} <- All ],
+ [ begin
+ io:format("read_line_all_alternating: ~s~n",[File]),
+ {Y,_} = read_line_all_alternating2(File),
+ true
+ end || {_,File,_,Y} <- All , Y =/= fail],
+ [ begin
+ io:format("read_line_all_alternating (failing as should): ~s~n",[File]),
+ {'EXIT',_} = (catch read_line_all_alternating2(File)),
+ true
+ end || {_,File,_,Y} <- All , Y =:= fail],
+ read_line_remove_files(All),
ok.
-read_line_3(suite) ->
- [];
-read_line_3(doc) ->
- ["read_line with raw file"];
+%% read_line with raw file.
read_line_3(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line All = read_line_testdata(PrivDir),
- ?line read_line_create_files(All),
- ?line [ begin
- io:format("read_line_all: ~s~n",[File]),
- {X,_} = read_line_all3(File),
- true
- end || {_,File,X,_} <- All ],
- ?line [ begin
- io:format("read_line_all_alternating: ~s~n",[File]),
- {Y,_} = read_line_all_alternating3(File),
- true
- end || {_,File,_,Y} <- All , Y =/= fail],
- ?line [ begin
- io:format("read_line_all_alternating (failing as should): ~s~n",[File]),
- {'EXIT',_} = (catch read_line_all_alternating3(File)),
- true
- end || {_,File,_,Y} <- All , Y =:= fail],
- ?line read_line_remove_files(All),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ All = read_line_testdata(PrivDir),
+ read_line_create_files(All),
+ [ begin
+ io:format("read_line_all: ~s~n",[File]),
+ {X,_} = read_line_all3(File),
+ true
+ end || {_,File,X,_} <- All ],
+ [ begin
+ io:format("read_line_all_alternating: ~s~n",[File]),
+ {Y,_} = read_line_all_alternating3(File),
+ true
+ end || {_,File,_,Y} <- All , Y =/= fail],
+ [ begin
+ io:format("read_line_all_alternating (failing as should): ~s~n",[File]),
+ {'EXIT',_} = (catch read_line_all_alternating3(File)),
+ true
+ end || {_,File,_,Y} <- All , Y =:= fail],
+ read_line_remove_files(All),
ok.
-read_line_4(suite) ->
- [];
-read_line_4(doc) ->
- ["read_line with raw buffered file"];
+%% read_line with raw buffered file.
read_line_4(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line All = read_line_testdata(PrivDir),
- ?line read_line_create_files(All),
- ?line [ begin
- io:format("read_line_all: ~s~n",[File]),
- {X,_} = read_line_all4(File),
- true
- end || {_,File,X,_} <- All ],
- ?line [ begin
- io:format("read_line_all_alternating: ~s~n",[File]),
- {Y,_} = read_line_all_alternating4(File),
- true
- end || {_,File,_,Y} <- All , Y =/= fail],
- ?line [ begin
- io:format("read_line_all_alternating (failing as should): ~s~n",[File]),
- {'EXIT',_} = (catch read_line_all_alternating4(File)),
- true
- end || {_,File,_,Y} <- All , Y =:= fail],
- ?line read_line_remove_files(All),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ All = read_line_testdata(PrivDir),
+ read_line_create_files(All),
+ [ begin
+ io:format("read_line_all: ~s~n",[File]),
+ {X,_} = read_line_all4(File),
+ true
+ end || {_,File,X,_} <- All ],
+ [ begin
+ io:format("read_line_all_alternating: ~s~n",[File]),
+ {Y,_} = read_line_all_alternating4(File),
+ true
+ end || {_,File,_,Y} <- All , Y =/= fail],
+ [ begin
+ io:format("read_line_all_alternating (failing as should): ~s~n",[File]),
+ {'EXIT',_} = (catch read_line_all_alternating4(File)),
+ true
+ end || {_,File,_,Y} <- All , Y =:= fail],
+ read_line_remove_files(All),
ok.
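
All four cases drive read_line/1, which returns each line including its trailing newline and eof at the end of the file. A minimal sketch of that contract (not part of the patch; the scratch path is an assumption):

    read_line_sketch() ->
        Path = "/tmp/read_line_sketch",              % assumed scratch path
        ok = file:write_file(Path, <<"first\nsecond\n">>),
        {ok, Fd} = file:open(Path, [raw, read, binary, {read_ahead, 1024}]),
        {ok, <<"first\n">>} = file:read_line(Fd),
        {ok, <<"second\n">>} = file:read_line(Fd),
        eof = file:read_line(Fd),
        ok = file:close(Fd).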
rl_lines() ->
@@ -4376,9 +4325,9 @@ read_line_create7(Filename) ->
file:close(F).
read_line_all(Filename) ->
- {ok,F} = prim_file:open(Filename,[read,binary]),
+ {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]),
X=read_rl_lines(F),
- prim_file:close(F),
+ ?PRIM_FILE:close(F),
Bin = list_to_binary([B || {ok,B} <- X]),
Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]),
"\r\n","\n",[global,{return,binary}]),
@@ -4411,7 +4360,7 @@ read_line_all4(Filename) ->
{length(X),Bin}.
read_rl_lines(F) ->
- case prim_file:read_line(F) of
+ case ?PRIM_FILE:read_line(F) of
eof ->
[];
{error,X} ->
@@ -4431,9 +4380,9 @@ read_rl_lines2(F) ->
end.
read_line_all_alternating(Filename) ->
- {ok,F} = prim_file:open(Filename,[read,binary]),
+ {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]),
X=read_rl_lines(F,true),
- prim_file:close(F),
+ ?PRIM_FILE:close(F),
Bin = list_to_binary([B || {ok,B} <- X]),
Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]),
"\r\n","\n",[global,{return,binary}]),
@@ -4467,8 +4416,8 @@ read_line_all_alternating4(Filename) ->
read_rl_lines(F,Alternate) ->
case begin
case Alternate of
- true -> prim_file:read(F,1);
- false -> prim_file:read_line(F)
+ true -> ?PRIM_FILE:read(F,1);
+ false -> ?PRIM_FILE:read_line(F)
end
end of
eof ->
@@ -4548,7 +4497,7 @@ run_large_file_test(Config, Run, Name) ->
{{unix,sunos},OsVersion} when OsVersion < {5,5,1} ->
{skip,"Only supported on Win32, Unix or SunOS >= 5.5.1"};
{{unix,_},_} ->
- N = disc_free(?config(priv_dir, Config)),
+ N = disc_free(proplists:get_value(priv_dir, Config)),
io:format("Free disk: ~w KByte~n", [N]),
if N < 5 * (1 bsl 20) ->
%% Less than 5 GByte free
@@ -4562,9 +4511,9 @@ run_large_file_test(Config, Run, Name) ->
do_run_large_file_test(Config, Run, Name0) ->
- Name = filename:join(?config(priv_dir, Config),
+ Name = filename:join(proplists:get_value(priv_dir, Config),
?MODULE_STRING ++ Name0),
-
+
%% Set up a process that will delete this file.
Tester = self(),
Deleter =
@@ -4577,7 +4526,7 @@ do_run_large_file_test(Config, Run, Name0) ->
end,
?FILE_MODULE:delete(Name)
end),
-
+
%% Run the test case.
Res = Run(Name),
diff --git a/lib/kernel/test/file_SUITE_data/realmen.html b/lib/kernel/test/file_SUITE_data/realmen.html
index c810a5d088..92e13f23b8 100644
--- a/lib/kernel/test/file_SUITE_data/realmen.html
+++ b/lib/kernel/test/file_SUITE_data/realmen.html
@@ -237,7 +237,7 @@ destroy most of the interesting uses for EQUIVALENCE, and make it
impossible to modify the operating system code with negative
subscripts. Worst of all, bounds checking is inefficient.
-<LI> Source code maintainance systems. A Real Programmer keeps his
+<LI> Source code maintenance systems. A Real Programmer keeps his
code locked up in a card file, because it implies that its owner
cannot leave his important programs unguarded [5].
@@ -396,7 +396,7 @@ double stuff Oreos for special occasions.
<LI> Underneath the Oreos is a flow-charting template, left there by
the previous occupant of the office. (Real Programmers write programs,
-not documentation. Leave that to the maintainence people.)
+not documentation. Leave that to the maintenance people.)
</UL> <P>
diff --git a/lib/kernel/test/file_name_SUITE.erl b/lib/kernel/test/file_name_SUITE.erl
index 32006d893e..3afc647081 100644
--- a/lib/kernel/test/file_name_SUITE.erl
+++ b/lib/kernel/test/file_name_SUITE.erl
@@ -2,7 +2,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%% %CopyrightEnd%
%%
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/file.hrl").
%%
@@ -77,16 +77,17 @@
init_per_testcase/2, end_per_testcase/2]).
-export([normal/1,icky/1,very_icky/1,normalize/1,home_dir/1]).
+-define(PRIM_FILE, prim_file).
init_per_testcase(_Func, Config) ->
- Dog = test_server:timetrap(test_server:seconds(60)),
- [{watchdog,Dog}|Config].
+ Config.
-end_per_testcase(_Func, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog).
+end_per_testcase(_Func, _Config) ->
+ ok.
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[normal, icky, very_icky, normalize, home_dir].
@@ -101,19 +102,16 @@ end_per_suite(_Config) ->
ok.
init_per_group(_GroupName, Config) ->
- Config.
+ Config.
end_per_group(_GroupName, Config) ->
- Config.
+ Config.
-home_dir(suite) ->
- [];
-home_dir(doc) ->
- ["Check that Erlang can be started with unicode named home directory"];
+%% Check that Erlang can be started with unicode named home directory.
home_dir(Config) when is_list(Config) ->
try
Name=[960,945,964,961,953,954],
- Priv = ?config(priv_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
UniMode = file:native_name_encoding() =/= latin1,
if
not UniMode ->
@@ -134,7 +132,7 @@ home_dir(Config) when is_list(Config) ->
os:putenv("HOME",NewHome),
{"HOME",Save};
_ ->
- rm_rf(prim_file,NewHome),
+ rm_rf(?PRIM_FILE,NewHome),
throw(unsupported_os)
end,
try
@@ -142,8 +140,13 @@ home_dir(Config) when is_list(Config) ->
test_server:stop_node(Node),
ok
after
- os:putenv(SaveOldName,SaveOldValue),
- rm_rf(prim_file,NewHome)
+ case SaveOldValue of
+ false ->
+ os:unsetenv(SaveOldName);
+ _ ->
+ os:putenv(SaveOldName,SaveOldValue)
+ end,
+ rm_rf(?PRIM_FILE,NewHome)
end
catch
throw:need_unicode_mode ->
@@ -154,49 +157,41 @@ home_dir(Config) when is_list(Config) ->
{skipped,"Runs only on Unix/Windows"}
end.
-normalize(suite) ->
- [];
-normalize(doc) ->
- ["Check that filename normalization works"];
+%% Check that filename normalization works.
normalize(Config) when is_list(Config) ->
- random:seed({1290,431421,830412}),
+ rand:seed(exsplus, {1290,431421,830412}),
try
- ?line UniMode = file:native_name_encoding() =/= latin1,
+ UniMode = file:native_name_encoding() =/= latin1,
if
not UniMode ->
throw(need_unicode_mode);
true ->
ok
end,
- ?line Pairs = [rand_comp_decomp(200) || _ <- lists:seq(1,1000)],
+ Pairs = [rand_comp_decomp(200) || _ <- lists:seq(1,1000)],
case os:type() of
{unix,darwin} ->
- ?line [ true = (A =:= prim_file:internal_native2name(B)) ||
+ [ true = (A =:= prim_file:internal_native2name(B)) ||
{A,B} <- Pairs ];
_ ->
ok
end,
- ?line [ true = (A =:= prim_file:internal_normalize_utf8(B)) ||
- {A,B} <- Pairs ]
-
+ [ true = (A =:= prim_file:internal_normalize_utf8(B)) ||
+ {A,B} <- Pairs ]
+
catch
throw:need_unicode_mode ->
io:format("Sorry, can only run in unicode mode.~n"),
{skipped,"VM needs to be started in Unicode filename mode"}
end.
-
-normal(suite) ->
- [];
-normal(doc) ->
- "Check file operations on normal file names regardless of unicode mode";
+
+%% Check file operations on normal file names regardless of unicode mode.
normal(Config) when is_list(Config) ->
{ok,Dir} = file:get_cwd(),
try
- Priv = ?config(priv_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
file:set_cwd(Priv),
- put(file_module,prim_file),
- ok = check_normal(prim_file),
- put(file_module,file),
+ ok = check_normal(?PRIM_FILE),
ok = check_normal(file),
%% If all is good, delete dir again (avoid hanging dir on windows)
rm_rf(file,"normal_dir"),
@@ -204,12 +199,9 @@ normal(Config) when is_list(Config) ->
after
file:set_cwd(Dir)
end.
-
-icky(suite) ->
- [];
-icky(doc) ->
- "Check file operations on normal file names regardless of unicode mode";
+
+%% Check file operations on normal file names regardless of unicode mode.
icky(Config) when is_list(Config) ->
case hopeless_darwin() of
true ->
@@ -217,11 +209,9 @@ icky(Config) when is_list(Config) ->
false ->
{ok,Dir} = file:get_cwd(),
try
- Priv = ?config(priv_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
file:set_cwd(Priv),
- put(file_module,prim_file),
- ok = check_icky(prim_file),
- put(file_module,file),
+ ok = check_icky(?PRIM_FILE),
ok = check_icky(file),
%% If all is good, delete dir again (avoid hanging dir on windows)
rm_rf(file,"icky_dir"),
@@ -230,10 +220,7 @@ icky(Config) when is_list(Config) ->
file:set_cwd(Dir)
end
end.
-very_icky(suite) ->
- [];
-very_icky(doc) ->
- "Check file operations on normal file names regardless of unicode mode";
+%% Check file operations on normal file names regardless of unicode mode.
very_icky(Config) when is_list(Config) ->
case hopeless_darwin() of
true ->
@@ -241,14 +228,12 @@ very_icky(Config) when is_list(Config) ->
false ->
{ok,Dir} = file:get_cwd(),
try
- Priv = ?config(priv_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
file:set_cwd(Priv),
- put(file_module,prim_file),
- case check_very_icky(prim_file) of
+ case check_very_icky(?PRIM_FILE) of
need_unicode_mode ->
{skipped,"VM needs to be started in Unicode filename mode"};
ok ->
- put(file_module,file),
ok = check_very_icky(file),
%% If all is good, delete dir again
%% (avoid hanging dir on windows)
@@ -259,78 +244,76 @@ very_icky(Config) when is_list(Config) ->
file:set_cwd(Dir)
end
end.
-
+
check_normal(Mod) ->
{ok,Dir} = Mod:get_cwd(),
try
- ?line make_normal_dir(Mod),
- ?line {ok, L0} = Mod:list_dir("."),
- ?line L1 = lists:sort(L0),
- %erlang:display(L1),
- ?line L1 = lists:sort(list(normal_dir())),
- ?line {ok,D2} = Mod:get_cwd(),
- ?line true = is_list(D2),
- ?line case Mod:altname("fil1") of
+ NormalDir = make_normal_dir(Mod, "normal_dir"),
+ io:format("Normaldir = ~p\n", [NormalDir]),
+ L1 = lists:sort(list(NormalDir)),
+ {ok, L0} = Mod:list_dir("."),
+ io:format("L0 = ~p\n", [L0]),
+ L1 = lists:sort(L0),
+ {ok,D2} = Mod:get_cwd(),
+ true = is_list(D2),
+ case Mod:altname("fil1") of
{error,enotsup} ->
ok;
{ok,LLL} when is_list(LLL) ->
ok
end,
- ?line [ true = is_list(El) || El <- L1],
- ?line Syms = [ {S,Targ,list_to_binary(get_data(Targ,normal_dir()))}
- || {T,S,Targ} <- normal_dir(), T =:= symlink ],
- ?line [ {ok, Cont} = Mod:read_file(SymL) || {SymL,_,Cont} <- Syms ],
- ?line [ {ok, Targ} = fixlink(Mod:read_link(SymL)) || {SymL,Targ,_} <- Syms ],
- ?line chk_cre_dir(Mod,[{directory,"temp_dir",normal_dir()}]),
- ?line {ok,BeginAt} = Mod:get_cwd(),
- ?line true = is_list(BeginAt),
- ?line {error,enoent} = Mod:set_cwd("tmp_dir"),
- ?line ok = Mod:set_cwd("temp_dir"),
- ?line {ok, NowAt} = Mod:get_cwd(),
- ?line true = BeginAt =/= NowAt,
- ?line ok = Mod:set_cwd(".."),
- ?line {ok,BeginAt} = Mod:get_cwd(),
- ?line rm_r(Mod,"temp_dir"),
- ?line true = is_list(Dir),
- ?line [ true = is_list(FN) || FN <- L0 ],
- case has_links() of
- true ->
- ?line ok = Mod:make_link("fil1","nisse"),
- ?line {ok, <<"fil1">>} = Mod:read_file("nisse"),
- ?line {ok, #file_info{type = regular}} = Mod:read_link_info("nisse"),
- ?line ok = Mod:delete("nisse"),
- ?line {ok, <<"fil1">>} = Mod:read_file("fil1"),
- ?line {error,enoent} = Mod:read_file("nisse"),
- ?line {error,enoent} = Mod:read_link_info("nisse");
- false ->
+ [ true = is_list(El) || El <- L1],
+ Syms = [ {S,Targ,list_to_binary(get_data(Targ, NormalDir))}
+ || {T,S,Targ} <- NormalDir, T =:= symlink ],
+ [ {ok, Cont} = Mod:read_file(SymL) || {SymL,_,Cont} <- Syms ],
+ [ {ok, Targ} = fixlink(Mod:read_link(SymL)) || {SymL,Targ,_} <- Syms ],
+
+ {ok,BeginAt} = Mod:get_cwd(),
+ true = is_list(BeginAt),
+ TempDir = "temp_dir",
+ make_normal_dir(Mod, TempDir),
+ {error,enoent} = Mod:set_cwd("tmp_dir"),
+ {ok, NowAt} = Mod:get_cwd(),
+ true = BeginAt =/= NowAt,
+ ok = Mod:set_cwd(".."),
+ {ok,BeginAt} = Mod:get_cwd(),
+ rm_r(Mod, TempDir),
+ true = is_list(Dir),
+ [ true = is_list(FN) || FN <- L0 ],
+ case Mod:make_link("fil1","nisse") of
+ ok ->
+ {ok, <<"fil1">>} = Mod:read_file("nisse"),
+ {ok, #file_info{type = regular}} = Mod:read_link_info("nisse"),
+ ok = Mod:delete("nisse"),
+ {ok, <<"fil1">>} = Mod:read_file("fil1"),
+ {error,enoent} = Mod:read_file("nisse"),
+ {error,enoent} = Mod:read_link_info("nisse");
+ {error,enotsup} ->
ok
end,
- ?line [ begin
- ?line {ok, FD} = Mod:open(Name,[read]),
- ?line {ok, Content} = Mod:read(FD,1024),
- ?line ok = file:close(FD)
- end || {regular,Name,Content} <- normal_dir() ],
- ?line [ begin
- ?line {ok, FD} = Mod:open(Name,[read,binary]),
- ?line BC = list_to_binary(Content),
- ?line {ok, BC} = Mod:read(FD,1024),
- ?line ok = file:close(FD)
- end || {regular,Name,Content} <- normal_dir() ],
- ?line Mod:rename("fil1","tmp_fil1"),
- ?line {ok, <<"fil1">>} = Mod:read_file("tmp_fil1"),
- ?line {error,enoent} = Mod:read_file("fil1"),
- ?line Mod:rename("tmp_fil1","fil1"),
- ?line {ok, <<"fil1">>} = Mod:read_file("fil1"),
- ?line {error,enoent} = Mod:read_file("tmp_fil1"),
- ?line {ok,FI} = Mod:read_file_info("fil1"),
- ?line NewMode = FI#file_info.mode band (bnot 8#333),
- ?line NewMode2 = NewMode bor 8#222,
- ?line true = NewMode2 =/= NewMode,
- ?line ok = Mod:write_file_info("fil1",FI#file_info{mode = NewMode}),
- ?line {ok,#file_info{mode = NewMode}} = Mod:read_file_info("fil1"),
- ?line ok = Mod:write_file_info("fil1",FI#file_info{mode = NewMode2}),
- ?line {ok,#file_info{mode = NewMode2}} = Mod:read_file_info("fil1"),
+ [ begin
+ {ok, FD} = Mod:open(Name,[read,binary]),
+ BC = list_to_binary(Content),
+ {ok, BC} = Mod:read(FD,1024),
+ ok = file:close(FD)
+ end || {regular,Name,Content} <- NormalDir ],
+ {error, badarg} = Mod:rename("fil1\0tmp_fil2","tmp_fil1"),
+ Mod:rename("fil1","tmp_fil1"),
+ {error, badarg} = Mod:read_file("tmp_fil1\0.txt"),
+ {ok, <<"fil1">>} = Mod:read_file("tmp_fil1"),
+ {error,enoent} = Mod:read_file("fil1"),
+ Mod:rename("tmp_fil1","fil1"),
+ {ok, <<"fil1">>} = Mod:read_file("fil1"),
+ {error,enoent} = Mod:read_file("tmp_fil1"),
+ {ok,FI} = Mod:read_file_info("fil1"),
+ NewMode = FI#file_info.mode band (bnot 8#333),
+ NewMode2 = NewMode bor 8#222,
+ true = NewMode2 =/= NewMode,
+ ok = Mod:write_file_info("fil1",FI#file_info{mode = NewMode}),
+ {ok,#file_info{mode = NewMode}} = Mod:read_file_info("fil1"),
+ ok = Mod:write_file_info("fil1",FI#file_info{mode = NewMode2}),
+ {ok,#file_info{mode = NewMode2}} = Mod:read_file_info("fil1"),
ok
after
case Mod:read_file_info("fil1") of
@@ -347,129 +330,125 @@ check_normal(Mod) ->
check_icky(Mod) ->
{ok,Dir} = Mod:get_cwd(),
try
- ?line true=(length("åäö") =:= 3),
- ?line UniMode = file:native_name_encoding() =/= latin1,
- ?line make_icky_dir(Mod),
+ true=(length("åäö") =:= 3),
+ UniMode = file:native_name_encoding() =/= latin1,
+ IckyDir = make_icky_dir(Mod, "icky_dir"),
{ok, L0} = Mod:list_dir_all("."),
- ?line L1 = lists:sort(L0),
- io:format("~p~n~p~n~n",[L1,lists:sort(list(icky_dir()))]),
- ?line L1 = lists:sort(convlist(list(icky_dir()))),
- ?line {ok,D2} = Mod:get_cwd(),
- ?line true = is_list(D2),
-%% Altname only on windows, and there are no non native filenames there
-%% ?line case Mod:altname("fil1") of
-%% {error,enotsup} ->
-%% ok;
-%% {ok,LLL} when is_list(LLL) ->
-%% ok
-%% end,
- ?line [ true = ((is_list(El) or (UniMode and is_binary(El)))) || El <- L1],
- ?line Syms = [ {S,conv(Targ),list_to_binary(get_data(Targ,icky_dir()))}
- || {T,S,Targ} <- icky_dir(), T =:= symlink ],
- ?line [ {ok, Cont} = Mod:read_file(SymL) || {SymL,_,Cont} <- Syms ],
+ L1 = lists:sort(L0),
+ io:format("~p~n~p~n~n",[L1,lists:sort(list(IckyDir))]),
+ L1 = lists:sort(convlist(list(IckyDir))),
+ {ok,D2} = Mod:get_cwd(),
+ true = is_list(D2),
+ %% Altname only on windows, and there are no non native filenames there
+ %% case Mod:altname("fil1") of
+ %% {error,enotsup} ->
+ %% ok;
+ %% {ok,LLL} when is_list(LLL) ->
+ %% ok
+ %% end,
+ [ true = ((is_list(El) or (UniMode and is_binary(El)))) || El <- L1],
+ Syms = [ {S,conv(Targ),list_to_binary(get_data(Targ,IckyDir))}
+ || {T,S,Targ} <- IckyDir, T =:= symlink ],
+ [ {ok, Cont} = Mod:read_file(SymL) || {SymL,_,Cont} <- Syms ],
[ {ok, Targ} = fixlink(Mod:read_link_all(SymL)) ||
{SymL,Targ,_} <- Syms ],
- ?line chk_cre_dir(Mod,[{directory,"åäö_dir",icky_dir()}]),
- ?line {ok,BeginAt} = Mod:get_cwd(),
- ?line true = is_list(BeginAt),
- ?line {error,enoent} = Mod:set_cwd("åä_dir"),
- ?line ok = Mod:set_cwd("åäö_dir"),
- ?line {ok, NowAt} = Mod:get_cwd(),
- ?line true = is_list(NowAt),
- ?line true = BeginAt =/= NowAt,
- ?line ok = Mod:set_cwd(".."),
- ?line {ok,BeginAt} = Mod:get_cwd(),
- ?line rm_r2(Mod,"åäö_dir"),
+
+ {ok,BeginAt} = Mod:get_cwd(),
+ true = is_list(BeginAt),
+ _ = make_icky_dir(Mod, "åäö_dir"),
+ {error,enoent} = Mod:set_cwd("åä_dir"),
+ {ok, NowAt} = Mod:get_cwd(),
+ true = is_list(NowAt),
+ true = BeginAt =/= NowAt,
+ ok = Mod:set_cwd(".."),
+ {ok,BeginAt} = Mod:get_cwd(),
+ rm_r2(Mod,"åäö_dir"),
{OS,_} = os:type(),
- % Check that treat_icky really converts to the same as the OS
+
+	%% Check that treat_icky really converts names the same way the OS does
case UniMode of
true ->
- ?line chk_cre_dir(Mod,[{directory,"åäö_dir",[]}]),
- ?line ok = Mod:set_cwd("åäö_dir"),
- ?line ok = Mod:write_file(<<"ååå">>,<<"hello">>),
- ?line Treated = treat_icky(<<"ååå">>),
+ ok = Mod:make_dir("åäö_dir"),
+ ok = Mod:set_cwd("åäö_dir"),
+ ok = Mod:write_file(<<"ååå">>,<<"hello">>),
+ Treated = treat_icky(<<"ååå">>),
{ok,[Treated]} = Mod:list_dir_all("."),
- ?line ok = Mod:delete(<<"ååå">>),
- ?line {ok,[]} = Mod:list_dir("."),
- ?line ok = Mod:set_cwd(".."),
- ?line rm_r2(Mod,"åäö_dir");
+ ok = Mod:delete(<<"ååå">>),
+ {ok,[]} = Mod:list_dir("."),
+ ok = Mod:set_cwd(".."),
+ rm_r2(Mod,"åäö_dir");
false ->
ok
end,
- ?line chk_cre_dir(Mod,[{directory,treat_icky(<<"åäö_dir">>),icky_dir()}]),
+ _ = make_icky_dir(Mod, treat_icky(<<"åäö_dir"/utf8>>)),
if
UniMode and (OS =/= win32) ->
- ?line {error,enoent} = Mod:set_cwd("åäö_dir");
+ {error,enoent} = Mod:set_cwd("åäö_dir");
true ->
ok
end,
- ?line {ok,BeginAt} = Mod:get_cwd(),
- case has_links() of
- true ->
- ?line ok = Mod:make_link("fil1","nisseö"),
- ?line {ok, <<"fil1">>} = Mod:read_file("nisseö"),
- ?line {ok, #file_info{type = regular}} = Mod:read_link_info("nisseö"),
- ?line ok = Mod:delete("nisseö"),
- ?line ok = Mod:make_link("fil1",treat_icky(<<"nisseö">>)),
- ?line {ok, <<"fil1">>} = Mod:read_file(treat_icky(<<"nisseö">>)),
- ?line {ok, #file_info{type = regular}} = Mod:read_link_info(treat_icky(<<"nisseö">>)),
- ?line ok = Mod:delete(treat_icky(<<"nisseö">>)),
- ?line {ok, <<"fil1">>} = Mod:read_file("fil1"),
- ?line {error,enoent} = Mod:read_file("nisseö"),
- ?line {error,enoent} = Mod:read_link_info("nisseö"),
- ?line {error,enoent} = Mod:read_file(treat_icky(<<"nisseö">>)),
- ?line {error,enoent} = Mod:read_link_info(treat_icky(<<"nisseö">>));
- false ->
+ ok = Mod:set_cwd(".."),
+ {ok,BeginAt} = Mod:get_cwd(),
+ case Mod:make_link("fil1", "nisseö") of
+ ok ->
+ {ok, <<"fil1">>} = Mod:read_file("nisseö"),
+ {ok, #file_info{type = regular}} = Mod:read_link_info("nisseö"),
+ ok = Mod:delete("nisseö"),
+ ok = Mod:make_link("fil1",treat_icky(<<"nisseö">>)),
+ {ok, <<"fil1">>} = Mod:read_file(treat_icky(<<"nisseö">>)),
+ {ok, #file_info{type = regular}} = Mod:read_link_info(treat_icky(<<"nisseö">>)),
+ ok = Mod:delete(treat_icky(<<"nisseö">>)),
+ {ok, <<"fil1">>} = Mod:read_file("fil1"),
+ {error,enoent} = Mod:read_file("nisseö"),
+ {error,enoent} = Mod:read_link_info("nisseö"),
+ {error,enoent} = Mod:read_file(treat_icky(<<"nisseö">>)),
+ {error,enoent} = Mod:read_link_info(treat_icky(<<"nisseö">>));
+ {error,enotsup} ->
ok
end,
- ?line [ begin
- ?line {ok, FD} = Mod:open(Name,[read]),
- ?line {ok, Content} = Mod:read(FD,1024),
- ?line ok = file:close(FD)
- end || {regular,Name,Content} <- icky_dir() ],
- ?line [ begin
- ?line {ok, FD} = Mod:open(Name,[read,binary]),
- ?line BC = list_to_binary([Content]),
- ?line {ok, BC} = Mod:read(FD,1024),
- ?line ok = file:close(FD)
- end || {regular,Name,Content} <- icky_dir() ],
- ?line Mod:rename("åäö2","åäö_fil1"),
- ?line {ok, <<"åäö2">>} = Mod:read_file("åäö_fil1"),
- ?line {error,enoent} = Mod:read_file("åäö2"),
- ?line Mod:rename("åäö_fil1","åäö2"),
- ?line {ok, <<"åäö2">>} = Mod:read_file("åäö2"),
- ?line {error,enoent} = Mod:read_file("åäö_fil1"),
+ [ begin
+ {ok, FD} = Mod:open(Name,[read,binary]),
+ BC = list_to_binary([Content]),
+ {ok, BC} = Mod:read(FD,1024),
+ ok = file:close(FD)
+ end || {regular,Name,Content} <- IckyDir ],
+ Mod:rename("åäö2","åäö_fil1"),
+ {ok, <<"åäö2">>} = Mod:read_file("åäö_fil1"),
+ {error,enoent} = Mod:read_file("åäö2"),
+ Mod:rename("åäö_fil1","åäö2"),
+ {ok, <<"åäö2">>} = Mod:read_file("åäö2"),
+ {error,enoent} = Mod:read_file("åäö_fil1"),
- ?line Mod:rename("åäö2",treat_icky(<<"åäö_fil1">>)),
- ?line {ok, <<"åäö2">>} = Mod:read_file(treat_icky(<<"åäö_fil1">>)),
+ Mod:rename("åäö2",treat_icky(<<"åäö_fil1">>)),
+ {ok, <<"åäö2">>} = Mod:read_file(treat_icky(<<"åäö_fil1">>)),
if
UniMode and (OS =/= win32) ->
{error,enoent} = Mod:read_file("åäö_fil1");
true ->
ok
end,
- ?line {error,enoent} = Mod:read_file("åäö2"),
- ?line Mod:rename(treat_icky(<<"åäö_fil1">>),"åäö2"),
- ?line {ok, <<"åäö2">>} = Mod:read_file("åäö2"),
- ?line {error,enoent} = Mod:read_file("åäö_fil1"),
- ?line {error,enoent} = Mod:read_file(treat_icky(<<"åäö_fil1">>)),
+ {error,enoent} = Mod:read_file("åäö2"),
+ Mod:rename(treat_icky(<<"åäö_fil1">>),"åäö2"),
+ {ok, <<"åäö2">>} = Mod:read_file("åäö2"),
+ {error,enoent} = Mod:read_file("åäö_fil1"),
+ {error,enoent} = Mod:read_file(treat_icky(<<"åäö_fil1">>)),
- ?line {ok,FI} = Mod:read_file_info("åäö2"),
- ?line NewMode = FI#file_info.mode band (bnot 8#333),
- ?line NewMode2 = NewMode bor 8#222,
- ?line true = NewMode2 =/= NewMode,
- ?line ok = Mod:write_file_info("åäö2",FI#file_info{mode = NewMode}),
- ?line {ok,#file_info{mode = NewMode}} = Mod:read_file_info("åäö2"),
- ?line ok = Mod:write_file_info("åäö2",FI#file_info{mode = NewMode2}),
- ?line {ok,#file_info{mode = NewMode2}} = Mod:read_file_info("åäö2"),
+ {ok,FI} = Mod:read_file_info("åäö2"),
+ NewMode = FI#file_info.mode band (bnot 8#333),
+ NewMode2 = NewMode bor 8#222,
+ true = NewMode2 =/= NewMode,
+ ok = Mod:write_file_info("åäö2",FI#file_info{mode = NewMode}),
+ {ok,#file_info{mode = NewMode}} = Mod:read_file_info("åäö2"),
+ ok = Mod:write_file_info("åäö2",FI#file_info{mode = NewMode2}),
+ {ok,#file_info{mode = NewMode2}} = Mod:read_file_info("åäö2"),
- ?line {ok,FII} = Mod:read_file_info(treat_icky(<<"åäö5">>)),
- ?line true = NewMode2 =/= NewMode,
- ?line ok = Mod:write_file_info(treat_icky(<<"åäö5">>),FII#file_info{mode = NewMode}),
- ?line {ok,#file_info{mode = NewMode}} = Mod:read_file_info(treat_icky(<<"åäö5">>)),
- ?line ok = Mod:write_file_info(<<"åäö5">>,FII#file_info{mode = NewMode2}),
- ?line {ok,#file_info{mode = NewMode2}} = Mod:read_file_info(treat_icky(<<"åäö5">>)),
+ {ok,FII} = Mod:read_file_info(treat_icky(<<"åäö5">>)),
+ true = NewMode2 =/= NewMode,
+ ok = Mod:write_file_info(treat_icky(<<"åäö5">>),FII#file_info{mode = NewMode}),
+ {ok,#file_info{mode = NewMode}} = Mod:read_file_info(treat_icky(<<"åäö5">>)),
+ ok = Mod:write_file_info(<<"åäö5">>,FII#file_info{mode = NewMode2}),
+ {ok,#file_info{mode = NewMode2}} = Mod:read_file_info(treat_icky(<<"åäö5">>)),
ok
after
Mod:set_cwd(Dir),
@@ -479,90 +458,85 @@ check_icky(Mod) ->
check_very_icky(Mod) ->
{ok,Dir} = Mod:get_cwd(),
try
- ?line true=(length("åäö") =:= 3),
- ?line UniMode = file:native_name_encoding() =/= latin1,
+ true=(length("åäö") =:= 3),
+ UniMode = file:native_name_encoding() =/= latin1,
if
not UniMode ->
throw(need_unicode_mode);
true ->
ok
end,
- ?line make_very_icky_dir(Mod),
- {ok, L0} = Mod:list_dir_all("."),
- ?line L1 = lists:sort(L0),
- ?line L1 = lists:sort(convlist(list(very_icky_dir()))),
- ?line {ok,D2} = Mod:get_cwd(),
- ?line true = is_list(D2),
- ?line [ true = ((is_list(El) or is_binary(El))) || El <- L1],
- ?line Syms = [ {S,conv(Targ),list_to_binary(get_data(Targ,very_icky_dir()))}
- || {T,S,Targ} <- very_icky_dir(), T =:= symlink ],
- ?line [ {ok, Cont} = Mod:read_file(SymL) || {SymL,_,Cont} <- Syms ],
- ?line [ {ok, Targ} = fixlink(Mod:read_link_all(SymL)) ||
- {SymL,Targ,_} <- Syms ],
- ?line chk_cre_dir(Mod,[{directory,[1088,1079,1091]++"_dir",very_icky_dir()}]),
- ?line {ok,BeginAt} = Mod:get_cwd(),
- ?line true = is_list(BeginAt),
- ?line {error,enoent} = Mod:set_cwd("åä_dir"),
- ?line ok = Mod:set_cwd([1088,1079,1091]++"_dir"),
- ?line {ok, NowAt} = Mod:get_cwd(),
- ?line true = is_list(NowAt),
- ?line true = BeginAt =/= NowAt,
- ?line ok = Mod:set_cwd(".."),
- ?line {ok,BeginAt} = Mod:get_cwd(),
- ?line rm_r2(Mod,[1088,1079,1091]++"_dir"),
+ VeryIckyDir = make_very_icky_dir(Mod, "very_icky_dir"),
+ Expected = lists:sort(convlist(list(VeryIckyDir))),
+ {ok, Actual} = Mod:list_dir_all("."),
+ Expected = lists:sort(Actual),
+ {ok,D2} = Mod:get_cwd(),
+ true = is_list(D2),
+ [ true = ((is_list(El) or is_binary(El))) || El <- Expected],
+ Syms = [{S,conv(Targ),list_to_binary(get_data(Targ, VeryIckyDir))}
+ || {symlink,S,Targ} <- VeryIckyDir],
+ [ {ok, Cont} = Mod:read_file(SymL) || {SymL,_,Cont} <- Syms ],
+ [ {ok, Targ} = fixlink(Mod:read_link_all(SymL)) ||
+ {SymL,Targ,_} <- Syms ],
- case has_links() of
- true ->
- ?line ok = Mod:make_link("fil1","nisse"++[1088,1079,1091]),
- ?line {ok, <<"fil1">>} =
+ {ok,BeginAt} = Mod:get_cwd(),
+ OtherDir = [1088,1079,1091] ++ "_dir",
+ true = is_list(BeginAt),
+ make_very_icky_dir(Mod, OtherDir),
+ {error,enoent} = Mod:set_cwd("åä_dir"),
+ {ok, NowAt} = Mod:get_cwd(),
+ true = is_list(NowAt),
+ true = BeginAt =/= NowAt,
+ ok = Mod:set_cwd(".."),
+ {ok,BeginAt} = Mod:get_cwd(),
+ rm_r2(Mod, OtherDir),
+
+ case Mod:make_link("fil1","nisse"++[1088,1079,1091]) of
+ ok ->
+ {ok, <<"fil1">>} =
Mod:read_file("nisse"++[1088,1079,1091]),
- ?line {ok, #file_info{type = regular}} =
+ {ok, #file_info{type = regular}} =
Mod:read_link_info("nisse"++[1088,1079,1091]),
- ?line ok = Mod:delete("nisse"++[1088,1079,1091]),
- ?line ok = Mod:make_link("fil1",<<"nisseö">>),
- ?line {ok, <<"fil1">>} = Mod:read_file(<<"nisseö">>),
- ?line {ok, #file_info{type = regular}} =
+ ok = Mod:delete("nisse"++[1088,1079,1091]),
+ ok = Mod:make_link("fil1",<<"nisseö">>),
+ {ok, <<"fil1">>} = Mod:read_file(<<"nisseö">>),
+ {ok, #file_info{type = regular}} =
Mod:read_link_info(<<"nisseö">>),
- ?line ok = Mod:delete(<<"nisseö">>),
- ?line {ok, <<"fil1">>} = Mod:read_file("fil1"),
- ?line {error,enoent} = Mod:read_file("nisse"++[1088,1079,1091]),
- ?line {error,enoent} = Mod:read_link_info("nisse"++[1088,1079,1091]),
- ?line {error,enoent} = Mod:read_file(<<"nisseö">>),
- ?line {error,enoent} = Mod:read_link_info(<<"nisseö">>);
- false ->
+ ok = Mod:delete(<<"nisseö">>),
+ {ok, <<"fil1">>} = Mod:read_file("fil1"),
+ {error,enoent} = Mod:read_file("nisse"++[1088,1079,1091]),
+ {error,enoent} = Mod:read_link_info("nisse"++[1088,1079,1091]),
+ {error,enoent} = Mod:read_file(<<"nisseö">>),
+ {error,enoent} = Mod:read_link_info(<<"nisseö">>);
+ {error,enotsup} ->
ok
end,
- ?line [ begin
- ?line {ok, FD} = Mod:open(Name,[read]),
- ?line {ok, Content} = Mod:read(FD,1024),
- ?line ok = file:close(FD)
- end || {regular,Name,Content} <- very_icky_dir() ],
- ?line [ begin
- ?line {ok, FD} = Mod:open(Name,[read,binary]),
- ?line BC = list_to_binary([Content]),
- ?line {ok, BC} = Mod:read(FD,1024),
- ?line ok = file:close(FD)
- end || {regular,Name,Content} <- very_icky_dir() ],
- ?line Mod:rename([956,965,963,954,959,49],
- [956,965,963,954,959]++"_fil1"),
- ?line {ok, <<"åäö2">>} = Mod:read_file([956,965,963,954,959]++"_fil1"),
- ?line {error,enoent} = Mod:read_file([956,965,963,954,959,49]),
- ?line Mod:rename([956,965,963,954,959]++"_fil1",[956,965,963,954,959,49]),
- ?line {ok, <<"åäö2">>} = Mod:read_file([956,965,963,954,959,49]),
- ?line {error,enoent} = Mod:read_file([956,965,963,954,959]++"_fil1"),
+ [ begin
+ {ok, FD} = Mod:open(Name,[read,binary]),
+ BC = list_to_binary([Content]),
+ {ok, BC} = Mod:read(FD,1024),
+ ok = file:close(FD)
+ end || {regular,Name,Content} <- VeryIckyDir ],
+ Mod:rename([956,965,963,954,959,49],
+ [956,965,963,954,959]++"_fil1"),
+ {ok, <<"åäö2">>} = Mod:read_file([956,965,963,954,959]++"_fil1"),
+ {error,enoent} = Mod:read_file([956,965,963,954,959,49]),
+ Mod:rename([956,965,963,954,959]++"_fil1",[956,965,963,954,959,49]),
+ {ok, <<"åäö2">>} = Mod:read_file([956,965,963,954,959,49]),
+ {error,enoent} = Mod:read_file([956,965,963,954,959]++"_fil1"),
- ?line {ok,FI} = Mod:read_file_info([956,965,963,954,959,49]),
- ?line NewMode = FI#file_info.mode band (bnot 8#333),
- ?line NewMode2 = NewMode bor 8#222,
- ?line true = NewMode2 =/= NewMode,
- ?line ok = Mod:write_file_info([956,965,963,954,959,49],
- FI#file_info{mode = NewMode}),
- ?line {ok,#file_info{mode = NewMode}} =
- Mod:read_file_info([956,965,963,954,959,49]),
- ?line ok = Mod:write_file_info([956,965,963,954,959,49],
- FI#file_info{mode = NewMode2}),
- ?line {ok,#file_info{mode = NewMode2}} =
- Mod:read_file_info([956,965,963,954,959,49]),
+ {ok,FI} = Mod:read_file_info([956,965,963,954,959,49]),
+ NewMode = FI#file_info.mode band (bnot 8#333),
+ NewMode2 = NewMode bor 8#222,
+ true = NewMode2 =/= NewMode,
+ ok = Mod:write_file_info([956,965,963,954,959,49],
+ FI#file_info{mode = NewMode}),
+ {ok,#file_info{mode = NewMode}} =
+ Mod:read_file_info([956,965,963,954,959,49]),
+ ok = Mod:write_file_info([956,965,963,954,959,49],
+ FI#file_info{mode = NewMode2}),
+ {ok,#file_info{mode = NewMode2}} =
+ Mod:read_file_info([956,965,963,954,959,49]),
ok
catch
throw:need_unicode_mode ->
@@ -592,7 +566,6 @@ rm_rf(Mod,Dir) ->
end.
rm_r(Mod,Dir) ->
- %erlang:display({rm_r,Dir}),
case Mod:read_link_info(Dir) of
{ok, #file_info{type = directory}} ->
{ok,#file_info{type = directory}} = Mod:read_file_info(Dir),
@@ -610,7 +583,7 @@ rm_r(Mod,Dir) ->
end.
%% For icky test, allow binaries sometimes
rm_r2(Mod,Dir) ->
- %erlang:display({rm_r2,Dir}),
+ %% erlang:display({rm_r2,Dir}),
case Mod:read_link_info(Dir) of
{ok, #file_info{type = directory}} ->
{ok,#file_info{type = directory}} = Mod:read_file_info(Dir),
@@ -627,90 +600,35 @@ rm_r2(Mod,Dir) ->
{ok, #file_info{type = symlink}} ->
ok = Mod:delete(Dir)
end.
-chk_cre_dir(_,[]) ->
- ok;
-chk_cre_dir(Mod,[{regular,Name,Content}|T]) ->
- %io:format("~p~n",[Name]),
- ok = Mod:write_file(Name,Content),
- chk_cre_dir(Mod,T);
-chk_cre_dir(Mod,[{link,Name,Target}|T]) ->
- ok = Mod:make_link(Target,Name),
- chk_cre_dir(Mod,T);
-chk_cre_dir(Mod,[{symlink,Name,Target}|T]) ->
- ok = Mod:make_symlink(Target,Name),
- chk_cre_dir(Mod,T);
-chk_cre_dir(Mod,[{directory,Name,Content}|T]) ->
- ok = Mod:make_dir(Name),
- %io:format("Content = ~p~n",[Content]),
- Content2 = [{Ty,filename:join(Name,N),case Ty of link -> filename:join(Name,C); _ -> C end} || {Ty,N,C} <- Content ],
- %io:format("Content2 = ~p~n",[Content2]),
- chk_cre_dir(Mod,Content2),
- chk_cre_dir(Mod,T).
-
-has_links() ->
- case os:type() of
- {win32,_} ->
- case os:version() of
- {N,NN,_} when (N > 5) andalso (NN >= 1) ->
- true;
- _ ->
- false
- end;
- _ ->
- true
- end.
-
-make_normal_dir(Mod) ->
- rm_rf(Mod,"normal_dir"),
- Mod:make_dir("normal_dir"),
- Mod:set_cwd("normal_dir"),
- Mod:write_file("fil1","fil1"),
- Mod:write_file("fil2","fil2"),
- case has_links() of
- true ->
- Mod:make_link("fil2","fil3"),
- Mod:make_symlink("fil2","fil4");
- _ ->
- ok
- end,
- Mod:make_dir("subdir"),
- Mod:write_file(filename:join("subdir","subfil1"),"subfil1"),
- ok.
-
-normal_dir() ->
- [{regular,"fil1","fil1"},
- {regular,"fil2","fil2"}] ++
- case has_links() of
- true ->
- [{regular,"fil3","fil2"},
- {symlink,"fil4","fil2"}];
- false ->
- []
- end ++
- [{directory,"subdir",
- [{regular,"subfil1","subfil1"}]}].
-make_icky_dir(Mod) ->
- rm_rf(Mod,"icky_dir"),
- Icky=icky_dir(),
- chk_cre_dir(Mod,[{directory,"icky_dir",linkify([],Icky)}]),
- Mod:set_cwd("icky_dir"),
- ok.
+make_normal_dir(Mod, DirName) ->
+ Dir = [{regular,"fil1","fil1"},
+ {regular,"fil2","fil2"},
+ {hardlink,"fil3","fil2"},
+ {symlink,"fil4","fil2"},
+ {directory,"subdir",
+ [{regular,"subfil1","subfil1"}]}],
+ rm_rf(Mod, DirName),
+ Mod:make_dir(DirName),
+ Mod:set_cwd(DirName),
+ make_dir_contents(Dir, Mod).
-linkify(_Passed,[]) ->
- [];
-linkify(Passed,[{regular,Name,Content}|T]) ->
- Regulars = [ {N,C} || {regular,N,C} <- Passed, N =/= Name ],
- case lists:keysearch(Content,2,Regulars) of
- {value, {Linkto, Content}} ->
- [{link,Name,Linkto} | linkify(Passed,T)];
- _ ->
- [{regular,Name,Content} | linkify([{regular,Name,Content}|Passed],T)]
- end;
-linkify(Passed,[{directory, Name, Content}|T]) ->
- [{directory,Name, linkify(Content,Content)}|linkify(Passed,T)];
-linkify(Passed,[H|T]) ->
- [H|linkify([H|Passed],T)].
+make_icky_dir(Mod, IckyDirName) ->
+ Icky = [{regular,"fil1","fil1"},
+ {regular,"åäö2","åäö2"},
+ {hardlink,"åäö3","åäö2"},
+ {symlink,"åäö4","åäö2"},
+ {regular,treat_icky(<<"åäö5">>),"åäö5"},
+ {symlink,treat_icky(<<"åäö6">>),treat_icky(<<"åäö5">>)},
+ {directory,treat_icky(<<"åäösubdir2">>),
+ [{regular,treat_icky(<<"åäösubfil2">>),"åäösubfil12"},
+ {regular,"åäösubfil3","åäösubfil13"}]},
+ {directory,"åäösubdir",
+ [{regular,"åäösubfil1","åäösubfil1"}]}],
+ rm_rf(Mod, IckyDirName),
+ ok = Mod:make_dir(IckyDirName),
+ ok = Mod:set_cwd(IckyDirName),
+ make_dir_contents(Icky, Mod).
hopeless_darwin() ->
case {os:type(),os:version()} of
@@ -720,58 +638,24 @@ hopeless_darwin() ->
false
end.
-icky_dir() ->
- [{regular,"fil1","fil1"},
- {regular,"åäö2","åäö2"}] ++
- case has_links() of
- true ->
- [{regular,"åäö3","åäö2"},
- {symlink,"åäö4","åäö2"}];
- false ->
- []
- end ++
- [{regular,treat_icky(<<"åäö5">>),"åäö5"}] ++
- case has_links() of
- true ->
- [{symlink,treat_icky(<<"åäö6">>),treat_icky(<<"åäö5">>)}];
- false ->
- []
- end ++
- [{directory,treat_icky(<<"åäösubdir2">>),
- [{regular,treat_icky(<<"åäösubfil2">>),"åäösubfil12"},
- {regular,"åäösubfil3","åäösubfil13"}]},
- {directory,"åäösubdir",
- [{regular,"åäösubfil1","åäösubfil1"}]}].
-
-make_very_icky_dir(Mod) ->
- rm_rf(Mod,"very_icky_dir"),
- Icky=very_icky_dir(),
- chk_cre_dir(Mod,[{directory,"very_icky_dir",linkify([],Icky)}]),
- Mod:set_cwd("very_icky_dir"),
- ok.
-
-very_icky_dir() ->
- [{regular,"fil1","fil1"},
- {regular,[956,965,963,954,959,49],"åäö2"}] ++
- case has_links() of
- true ->
- [{regular,[956,965,963,954,959,50],"åäö2"},
- {symlink,[956,965,963,954,959,51],[956,965,963,954,959,49]}];
- false ->
- []
- end ++
- [{regular,treat_icky(<<"åäö5">>),"åäö5"}] ++
- case has_links() of
- true ->
- [{symlink,treat_icky(<<"åäö6">>),treat_icky(<<"åäö5">>)}];
- false ->
- []
- end ++
- [{directory,treat_icky(<<"åäösubdir2">>),
- [{regular,treat_icky(<<"åäösubfil2">>),"åäösubfil12"},
- {regular,"åäösubfil3","åäösubfil13"}]},
- {directory,[956,965,963,954,959]++"subdir1",
- [{regular,[956,965,963,954,959]++"subfil1","åäösubfil1"}]}].
+make_very_icky_dir(Mod, DirName) ->
+ Desc = [{regular,"fil1","fil1"},
+ {regular,[956,965,963,954,959,49],"åäö2"},
+ {hardlink,[956,965,963,954,959,50],
+ [956,965,963,954,959,49],
+ "åäö2"},
+ {symlink,[956,965,963,954,959,51],[956,965,963,954,959,49]},
+ {regular,treat_icky(<<"åäö5">>),"åäö5"},
+ {symlink,treat_icky(<<"åäö6">>),treat_icky(<<"åäö5">>)},
+ {directory,treat_icky(<<"åäösubdir2">>),
+ [{regular,treat_icky(<<"åäösubfil2">>),"åäösubfil12"},
+ {regular,"åäösubfil3","åäösubfil13"}]},
+ {directory,[956,965,963,954,959]++"subdir1",
+ [{regular,[956,965,963,954,959]++"subfil1","åäösubfil1"}]}],
+ rm_rf(Mod, DirName),
+ ok = Mod:make_dir(DirName),
+ ok = Mod:set_cwd(DirName),
+ make_dir_contents(Desc, Mod).
%% Some OS'es simply do not allow non UTF8 filenames
treat_icky(Bin) ->
@@ -784,7 +668,7 @@ treat_icky(Bin) ->
Bin
end.
-% Handle windows having absolute soft link targets.
+%% Handle windows having absolute soft link targets.
fixlink({ok,Link}) ->
case os:type() of
{win32,_} ->
@@ -811,7 +695,7 @@ list([]) ->
[];
list([{_,Name,_} | T]) ->
[Name | list(T)].
-
+
get_data(FN,List) ->
case lists:keysearch(FN,2,List) of
@@ -827,7 +711,7 @@ get_data(FN,List) ->
convlist(L) ->
convlist(file:native_name_encoding(),L).
convlist(latin1,[Bin|T]) when is_binary(Bin) ->
- %erlang:display('Convert...'),
+ %% erlang:display('Convert...'),
[binary_to_list(Bin)| convlist(latin1,T)];
convlist(Any,[H|T]) ->
[H|convlist(Any,T)];
@@ -844,18 +728,60 @@ conv(L) ->
end.
+make_dir_contents([{regular,Name,Contents}=H|T], Mod) ->
+ ok = Mod:write_file(Name, Contents),
+ [H|make_dir_contents(T, Mod)];
+make_dir_contents([{hardlink,Target,Name}|T], Mod) ->
+ case Mod:make_link(Name, Target) of
+ ok ->
+ [{regular,Target,Name}|make_dir_contents(T, Mod)];
+ {error,enotsup} ->
+ make_dir_contents(T, Mod)
+ end;
+make_dir_contents([{hardlink,Target,Name,Contents}|T], Mod) ->
+ case Mod:make_link(Name, Target) of
+ ok ->
+ [{regular,Target,Contents}|make_dir_contents(T, Mod)];
+ {error,enotsup} ->
+ make_dir_contents(T, Mod)
+ end;
+make_dir_contents([{symlink,Target,Name}=H|T], Mod) ->
+ case Mod:make_symlink(Name, Target) of
+ ok ->
+ [H|make_dir_contents(T, Mod)];
+ {error,enotsup} ->
+ make_dir_contents(T, Mod);
+ {error,eperm} ->
+ make_dir_contents(T, Mod)
+ end;
+make_dir_contents([{directory,Dir,C0}|T], Mod) ->
+ ok = Mod:make_dir(Dir),
+ C1 = [case Op of
+ Link when Link =:= hardlink; Link =:= symlink ->
+ {Op,filename:join(Dir, Name0),filename:join(Dir, Extra)};
+ _ ->
+ {Op,filename:join(Dir, Name0),Extra}
+ end || {Op,Name0,Extra} <- C0],
+ C2 = make_dir_contents(C1, Mod),
+ C = [{Op,filename:basename(Name0),Extra} ||
+ {Op,Name0,Extra} <- C2],
+ [{directory,Dir,C}|make_dir_contents(T, Mod)];
+make_dir_contents([], _Mod) ->
+ [].
+
+
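
A sketch of the descriptor format consumed by the new make_dir_contents/2 helper above, which replaces the old chk_cre_dir/2 and linkify/2 pair. The file names in this sketch are made up; only the tuple shapes and the enotsup/eperm handling come from the suite. Each entry is created with Mod:write_file/2, Mod:make_link/2, Mod:make_symlink/2 or Mod:make_dir/1, and the helper returns the listing that was actually realized, which is what the check_* functions compare against Mod:list_dir_all/1.

%% Hypothetical example tree in the descriptor format used by
%% make_normal_dir/2, make_icky_dir/2 and make_very_icky_dir/2 above.
example_tree() ->
    [{regular,   "a.txt", "contents of a"},
     {hardlink,  "b.txt", "a.txt"},      % new link name, existing file
     {symlink,   "c.txt", "a.txt"},      % new link name, link target
     {directory, "sub",
      [{regular, "d.txt", "contents of d"}]}].

make_example(Mod) ->
    %% Entries the platform cannot create (make_link/make_symlink
    %% returning {error,enotsup}, or eperm for symlinks) are dropped
    %% from the returned listing, and created hard links come back as
    %% {regular,...} entries.
    make_dir_contents(example_tree(), Mod).
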
rand_comp_decomp(Max) ->
- N = random:uniform(Max),
+ N = rand:uniform(Max),
L = [ rand_decomp() || _ <- lists:seq(1,N) ],
LC = [ A || {A,_} <- L],
LD = lists:flatten([B || {_,B} <- L]),
LB = unicode:characters_to_binary(LD,unicode,utf8),
{LC,LB}.
-
+
rand_decomp() ->
BT = bigtup(),
SZ = tuple_size(BT),
- element(random:uniform(SZ),BT).
+ element(rand:uniform(SZ),BT).
bigtup() ->
{{192,[65,768]},
{200,[69,768]},
diff --git a/lib/kernel/test/gen_sctp_SUITE.erl b/lib/kernel/test/gen_sctp_SUITE.erl
index 91a57d3290..a0ae792ba9 100644
--- a/lib/kernel/test/gen_sctp_SUITE.erl
+++ b/lib/kernel/test/gen_sctp_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%%
-module(gen_sctp_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/inet_sctp.hrl").
%%-compile(export_all).
@@ -29,7 +29,8 @@
init_per_group/2,end_per_group/2,
init_per_testcase/2, end_per_testcase/2]).
-export(
- [basic/1,
+ [skip_old_solaris/1,
+ basic/1,
api_open_close/1,api_listen/1,api_connect_init/1,api_opts/1,
xfer_min/1,xfer_active/1,def_sndrcvinfo/1,implicit_inet6/1,
open_multihoming_ipv4_socket/1,
@@ -42,22 +43,32 @@
names_unihoming_ipv4/1, names_unihoming_ipv6/1,
names_multihoming_ipv4/1, names_multihoming_ipv6/1]).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
-all() ->
- [basic, api_open_close, api_listen, api_connect_init,
- api_opts, xfer_min, xfer_active, def_sndrcvinfo, implicit_inet6,
- open_multihoming_ipv4_socket,
- open_unihoming_ipv6_socket,
- open_multihoming_ipv6_socket,
- open_multihoming_ipv4_and_ipv6_socket, active_n,
- basic_stream, xfer_stream_min, peeloff_active_once,
- peeloff_active_true, peeloff_active_n, buffers,
- names_unihoming_ipv4, names_unihoming_ipv6,
- names_multihoming_ipv4, names_multihoming_ipv6].
+all() ->
+ G = case is_old_solaris() of
+ true -> old_solaris;
+ false -> extensive
+ end,
+ [{group,smoke},
+ {group,G}].
groups() ->
- [].
+ [{smoke,[],[basic,basic_stream]},
+ {old_solaris,[],[skip_old_solaris]},
+ {extensive,[],
+ [api_open_close, api_listen, api_connect_init,
+ api_opts, xfer_min, xfer_active, def_sndrcvinfo, implicit_inet6,
+ open_multihoming_ipv4_socket,
+ open_unihoming_ipv6_socket,
+ open_multihoming_ipv6_socket,
+ open_multihoming_ipv4_and_ipv6_socket, active_n,
+ xfer_stream_min, peeloff_active_once,
+ peeloff_active_true, peeloff_active_n, buffers,
+ names_unihoming_ipv4, names_unihoming_ipv6,
+ names_multihoming_ipv4, names_multihoming_ipv6]}].
init_per_suite(_Config) ->
case gen_sctp:open() of
@@ -81,48 +92,48 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(_Func, Config) ->
- Dog = test_server:timetrap(test_server:seconds(15)),
- [{watchdog, Dog}|Config].
-end_per_testcase(_Func, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog).
+ Config.
+end_per_testcase(_Func, _Config) ->
+ ok.
-define(LOGVAR(Var), begin io:format(??Var" = ~p~n", [Var]) end).
+is_old_solaris() ->
+ os:type() =:= {unix,sunos} andalso os:version() < {5,12,0}.
+skip_old_solaris(_Config) ->
+ {skip,"Unreliable test cases and/or implementation on old Solaris"}.
-basic(doc) ->
- "Hello world";
-basic(suite) ->
- [];
+%% Hello world.
basic(Config) when is_list(Config) ->
- ?line {ok,S} = gen_sctp:open(),
- ?line ok = gen_sctp:close(S),
+ {ok,S} = gen_sctp:open(),
+ ok = gen_sctp:close(S),
ok.
-xfer_min(doc) ->
- "Minimal data transfer";
-xfer_min(suite) ->
- [];
+%% Minimal data transfer.
xfer_min(Config) when is_list(Config) ->
- ?line Stream = 0,
- ?line Data = <<"The quick brown fox jumps over a lazy dog 0123456789">>,
- ?line Loopback = {127,0,0,1},
- ?line {ok,Sb} = gen_sctp:open([{type,seqpacket}]),
- ?line {ok,Pb} = inet:port(Sb),
- ?line ok = gen_sctp:listen(Sb, true),
-
- ?line {ok,Sa} = gen_sctp:open(),
- ?line {ok,Pa} = inet:port(Sa),
- ?line {ok,#sctp_assoc_change{state=comm_up,
- error=0,
- outbound_streams=SaOutboundStreams,
- inbound_streams=SaInboundStreams,
- assoc_id=SaAssocId}=SaAssocChange} =
+ Stream = 0,
+ Data = <<"The quick brown fox jumps over a lazy dog 0123456789">>,
+ Loopback = {127,0,0,1},
+ StatOpts =
+ [recv_avg,recv_cnt,recv_max,recv_oct,
+ send_avg,send_cnt,send_max,send_oct],
+ {ok,Sb} = gen_sctp:open([{type,seqpacket}]),
+ {ok,SbStat1} = inet:getstat(Sb, StatOpts),
+ {ok,Pb} = inet:port(Sb),
+ ok = gen_sctp:listen(Sb, true),
+
+ {ok,Sa} = gen_sctp:open(),
+ {ok,Pa} = inet:port(Sa),
+ {ok,#sctp_assoc_change{state=comm_up,
+ error=0,
+ outbound_streams=SaOutboundStreams,
+ inbound_streams=SaInboundStreams,
+ assoc_id=SaAssocId}=SaAssocChange} =
gen_sctp:connect(Sa, Loopback, Pb, []),
- ?line {SbAssocId,SaOutboundStreams,SaInboundStreams} =
+ {SbAssocId,SaOutboundStreams,SaInboundStreams} =
case recv_event(log_ok(gen_sctp:recv(Sb, infinity))) of
{Loopback,Pa,
#sctp_assoc_change{state=comm_up,
@@ -142,184 +153,195 @@ xfer_min(Config) when is_list(Config) ->
outbound_streams=SbOutboundStreams,
inbound_streams=SbInboundStreams,
assoc_id=AssocId}} =
- ?line recv_event(log_ok(gen_sctp:recv(Sb, infinity))),
+ recv_event(log_ok(gen_sctp:recv(Sb, infinity))),
{AssocId,SbInboundStreams,SbOutboundStreams}
end,
- ?line ok = gen_sctp:send(Sa, SaAssocId, 0, Data),
- ?line case log_ok(gen_sctp:recv(Sb, infinity)) of
- {Loopback,
- Pa,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SbAssocId}],
- Data} -> ok;
- Event1 ->
- case recv_event(Event1) of
- {Loopback,Pa,
- #sctp_paddr_change{addr = {Loopback,_},
- state = State,
- error = 0,
- assoc_id = SbAssocId}}
- when State =:= addr_available;
- State =:= addr_confirmed ->
- {Loopback,
- Pa,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SbAssocId}],
- Data} = log_ok(gen_sctp:recv(Sb, infinity))
- end
- end,
- ?line ok = gen_sctp:send(Sb, SbAssocId, 0, Data),
- ?line case log_ok(gen_sctp:recv(Sa, infinity)) of
- {Loopback,Pb,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SaAssocId}],
- Data} ->
- ok;
- Event2 ->
- {Loopback,Pb,
- #sctp_paddr_change{addr={_,Pb},
- state=addr_confirmed,
- error=0,
- assoc_id=SaAssocId}} =
- ?line recv_event(Event2),
- ?line {Loopback,
- Pb,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SaAssocId}],
- Data} =
- log_ok(gen_sctp:recv(Sa, infinity))
- end,
+ ok = gen_sctp:send(Sa, SaAssocId, 0, Data),
+ case log_ok(gen_sctp:recv(Sb, infinity)) of
+ {Loopback,
+ Pa,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SbAssocId}],
+ Data} -> ok;
+ Event1 ->
+ case recv_event(Event1) of
+ {Loopback,Pa,
+ #sctp_paddr_change{addr = {Loopback,_},
+ state = State,
+ error = 0,
+ assoc_id = SbAssocId}}
+ when State =:= addr_available;
+ State =:= addr_confirmed ->
+ {Loopback,
+ Pa,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SbAssocId}],
+ Data} = log_ok(gen_sctp:recv(Sb, infinity))
+ end
+ end,
+ ok = gen_sctp:send(Sb, SbAssocId, 0, Data),
+ case log_ok(gen_sctp:recv(Sa, infinity)) of
+ {Loopback,Pb,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SaAssocId}],
+ Data} ->
+ ok;
+ Event2 ->
+ {Loopback,Pb,
+ #sctp_paddr_change{addr={_,Pb},
+ state=addr_confirmed,
+ error=0,
+ assoc_id=SaAssocId}} =
+ recv_event(Event2),
+ {Loopback,
+ Pb,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SaAssocId}],
+ Data} =
+ log_ok(gen_sctp:recv(Sa, infinity))
+ end,
%%
- ?line ok = gen_sctp:eof(Sa, SaAssocChange),
- ?line {Loopback,Pa,#sctp_shutdown_event{assoc_id=SbAssocId}} =
+ ok = gen_sctp:eof(Sa, SaAssocChange),
+ {Loopback,Pa,#sctp_shutdown_event{assoc_id=SbAssocId}} =
recv_event(log_ok(gen_sctp:recv(Sb, infinity))),
- ?line {Loopback,Pb,
- #sctp_assoc_change{state=shutdown_comp,
- error=0,
- assoc_id=SaAssocId}} =
+ {Loopback,Pb,
+ #sctp_assoc_change{state=shutdown_comp,
+ error=0,
+ assoc_id=SaAssocId}} =
recv_event(log_ok(gen_sctp:recv(Sa, infinity))),
- ?line {Loopback,Pa,
- #sctp_assoc_change{state=shutdown_comp,
- error=0,
- assoc_id=SbAssocId}} =
+ {Loopback,Pa,
+ #sctp_assoc_change{state=shutdown_comp,
+ error=0,
+ assoc_id=SbAssocId}} =
recv_event(log_ok(gen_sctp:recv(Sb, infinity))),
- ?line ok = gen_sctp:close(Sa),
- ?line ok = gen_sctp:close(Sb),
+ ok = gen_sctp:close(Sa),
+ {ok,SbStat2} = inet:getstat(Sb, StatOpts),
+ [] = filter_stat_eq(SbStat1, SbStat2),
+ ok = gen_sctp:close(Sb),
- ?line receive
- Msg -> test_server:fail({received,Msg})
- after 17 -> ok
- end,
+ receive
+ Msg -> ct:fail({received,Msg})
+ after 17 -> ok
+ end,
ok.
-xfer_active(doc) ->
- "Minimal data transfer in active mode";
-xfer_active(suite) ->
+filter_stat_eq([], []) ->
[];
+filter_stat_eq([{Tag,Val1}=Stat|SbStat1], [{Tag,Val2}|SbStat2]) ->
+ if
+ Val1 == Val2 ->
+ [Stat|filter_stat_eq(SbStat1, SbStat2)];
+ true ->
+ filter_stat_eq(SbStat1, SbStat2)
+ end.
+
+
+
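
The reworked xfer_min above also snapshots the listening socket's counters with inet:getstat/2 before and after the transfer and requires that every counter moved, via [] = filter_stat_eq(SbStat1, SbStat2). A small sketch of how the helper behaves, with invented counter values:

%% filter_stat_eq/2 keeps the {Tag,Value} pairs that are equal in both
%% snapshots, so an empty result means every statistic changed between
%% the two inet:getstat/2 calls. The numbers below are made up.
stat_example() ->
    Before = [{recv_cnt,0},{recv_oct,0},{send_cnt,0},{send_oct,0}],
    After  = [{recv_cnt,2},{recv_oct,106},{send_cnt,2},{send_oct,106}],
    [] = filter_stat_eq(Before, After),
    [{recv_cnt,0}] =
        filter_stat_eq(Before,
                       [{recv_cnt,0},{recv_oct,1},{send_cnt,1},{send_oct,1}]),
    ok.
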
+%% Minimal data transfer in active mode.
xfer_active(Config) when is_list(Config) ->
- ?line Timeout = 2000,
- ?line Stream = 0,
- ?line Data = <<"The quick brown fox jumps over a lazy dog 0123456789">>,
- ?line Loopback = {127,0,0,1},
- ?line {ok,Sb} = gen_sctp:open([{active,true}]),
- ?line {ok,Pb} = inet:port(Sb),
- ?line ok = gen_sctp:listen(Sb, true),
-
- ?line {ok,Sa} = gen_sctp:open([{active,true}]),
- ?line {ok,Pa} = inet:port(Sa),
- ?line ok = gen_sctp:connect_init(Sa, Loopback, Pb, []),
- ?line #sctp_assoc_change{state=comm_up,
- error=0,
- outbound_streams=SaOutboundStreams,
- inbound_streams=SaInboundStreams,
- assoc_id=SaAssocId} = SaAssocChange =
+ Timeout = 2000,
+ Stream = 0,
+ Data = <<"The quick brown fox jumps over a lazy dog 0123456789">>,
+ Loopback = {127,0,0,1},
+ {ok,Sb} = gen_sctp:open([{active,true}]),
+ {ok,Pb} = inet:port(Sb),
+ ok = gen_sctp:listen(Sb, true),
+
+ {ok,Sa} = gen_sctp:open([{active,true}]),
+ {ok,Pa} = inet:port(Sa),
+ ok = gen_sctp:connect_init(Sa, Loopback, Pb, []),
+ #sctp_assoc_change{state=comm_up,
+ error=0,
+ outbound_streams=SaOutboundStreams,
+ inbound_streams=SaInboundStreams,
+ assoc_id=SaAssocId} = SaAssocChange =
recv_assoc_change(Sa, Loopback, Pb, Timeout),
- ?line io:format("Sa=~p, Pa=~p, Sb=~p, Pb=~p, SaAssocId=~p, "
- "SaOutboundStreams=~p, SaInboundStreams=~p~n",
- [Sa,Pa,Sb,Pb,SaAssocId,
- SaOutboundStreams,SaInboundStreams]),
- ?line #sctp_assoc_change{state=comm_up,
- error=0,
- outbound_streams=SbOutboundStreams,
- inbound_streams=SbInboundStreams,
- assoc_id=SbAssocId} =
+ io:format("Sa=~p, Pa=~p, Sb=~p, Pb=~p, SaAssocId=~p, "
+ "SaOutboundStreams=~p, SaInboundStreams=~p~n",
+ [Sa,Pa,Sb,Pb,SaAssocId,
+ SaOutboundStreams,SaInboundStreams]),
+ #sctp_assoc_change{state=comm_up,
+ error=0,
+ outbound_streams=SbOutboundStreams,
+ inbound_streams=SbInboundStreams,
+ assoc_id=SbAssocId} =
recv_assoc_change(Sb, Loopback, Pa, Timeout),
- ?line SbOutboundStreams = SaInboundStreams,
- ?line SbInboundStreams = SaOutboundStreams,
- ?line io:format("SbAssocId=~p~n", [SbAssocId]),
-
- ?line case recv_paddr_change(Sa, Loopback, Pb, 314) of
- #sctp_paddr_change{state=addr_confirmed,
- addr={_,Pb},
- error=0,
- assoc_id=SaAssocId} -> ok;
- #sctp_paddr_change{state=addr_available,
- addr={_,Pb},
- error=0,
- assoc_id=SaAssocId} -> ok;
- timeout -> ok
- end,
- ?line case recv_paddr_change(Sb, Loopback, Pa, 314) of
- #sctp_paddr_change{state=addr_confirmed,
- addr={Loopback,Pa},
- error=0,
- assoc_id=SbAssocId} -> ok;
- #sctp_paddr_change{state=addr_available,
- addr={Loopback,P},
- error=0,
- assoc_id=SbAssocId} ->
- ?line match_unless_solaris(Pa, P);
- timeout -> ok
- end,
- ?line [] = flush(),
-
- ?line ok =
+ SbOutboundStreams = SaInboundStreams,
+ SbInboundStreams = SaOutboundStreams,
+ io:format("SbAssocId=~p~n", [SbAssocId]),
+
+ case recv_paddr_change(Sa, Loopback, Pb, 314) of
+ #sctp_paddr_change{state=addr_confirmed,
+ addr={_,Pb},
+ error=0,
+ assoc_id=SaAssocId} -> ok;
+ #sctp_paddr_change{state=addr_available,
+ addr={_,Pb},
+ error=0,
+ assoc_id=SaAssocId} -> ok;
+ timeout -> ok
+ end,
+ case recv_paddr_change(Sb, Loopback, Pa, 314) of
+ #sctp_paddr_change{state=addr_confirmed,
+ addr={Loopback,Pa},
+ error=0,
+ assoc_id=SbAssocId} -> ok;
+ #sctp_paddr_change{state=addr_available,
+ addr={Loopback,P},
+ error=0,
+ assoc_id=SbAssocId} ->
+ match_unless_solaris(Pa, P);
+ timeout -> ok
+ end,
+ [] = flush(),
+
+ ok =
do_from_other_process(
fun () -> gen_sctp:send(Sa, SaAssocId, 0, Data) end),
- ?line receive
- {sctp,Sb,Loopback,Pa,
- {[#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SbAssocId}],
- Data}} -> ok
- after Timeout ->
- ?line test_server:fail({timeout,flush()})
- end,
- ?line ok = gen_sctp:send(Sb, SbAssocId, 0, Data),
- ?line receive
- {sctp,Sa,Loopback,Pb,
- {[#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SaAssocId}],
- Data}} -> ok
- after Timeout ->
- ?line test_server:fail({timeout,flush()})
- end,
+ receive
+ {sctp,Sb,Loopback,Pa,
+ {[#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SbAssocId}],
+ Data}} -> ok
+ after Timeout ->
+ ct:fail({timeout,flush()})
+ end,
+ ok = gen_sctp:send(Sb, SbAssocId, 0, Data),
+ receive
+ {sctp,Sa,Loopback,Pb,
+ {[#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SaAssocId}],
+ Data}} -> ok
+ after Timeout ->
+ ct:fail({timeout,flush()})
+ end,
%%
- ?line ok = gen_sctp:abort(Sa, SaAssocChange),
- ?line case recv_assoc_change(Sb, Loopback, Pa, Timeout) of
- #sctp_assoc_change{state=comm_lost,
- assoc_id=SbAssocId} -> ok;
- timeout ->
- ?line test_server:fail({timeout,flush()})
- end,
- ?line ok = gen_sctp:close(Sb),
- ?line case recv_assoc_change(Sa, Loopback, Pb, Timeout) of
- #sctp_assoc_change{state=comm_lost,
- assoc_id=SaAssocId} -> ok;
- timeout ->
- ?line io:format("timeout waiting for comm_lost on Sa~n"),
- ?line match_unless_solaris(ok, {timeout,flush()})
- end,
- ?line receive
- {sctp_error,Sa,enotconn} -> ok % Solaris
- after 17 -> ok
- end,
- ?line ok = gen_sctp:close(Sa),
+ ok = gen_sctp:abort(Sa, SaAssocChange),
+ case recv_assoc_change(Sb, Loopback, Pa, Timeout) of
+ #sctp_assoc_change{state=comm_lost,
+ assoc_id=SbAssocId} -> ok;
+ timeout ->
+ ct:fail({timeout,flush()})
+ end,
+ ok = gen_sctp:close(Sb),
+ case recv_assoc_change(Sa, Loopback, Pb, Timeout) of
+ #sctp_assoc_change{state=comm_lost,
+ assoc_id=SaAssocId} -> ok;
+ timeout ->
+ io:format("timeout waiting for comm_lost on Sa~n"),
+ match_unless_solaris(ok, {timeout,flush()})
+ end,
+ receive
+ {sctp_error,Sa,enotconn} -> ok % Solaris
+ after 17 -> ok
+ end,
+ ok = gen_sctp:close(Sa),
%%
- ?line receive
- Msg -> test_server:fail({unexpected,[Msg]++flush()})
- after 17 -> ok
- end,
+ receive
+ Msg -> ct:fail({unexpected,[Msg]++flush()})
+ after 17 -> ok
+ end,
ok.
recv_assoc_change(S, Addr, Port, Timeout) ->
@@ -346,142 +368,141 @@ recv_paddr_change(S, Addr, Port, Timeout) ->
timeout
end.
-def_sndrcvinfo(doc) ->
- "Test that #sctp_sndrcvinfo{} parameters set on a socket "
- "are used by gen_sctp:send/4";
-def_sndrcvinfo(suite) ->
- [];
+%% Test that #sctp_sndrcvinfo{} parameters set on a socket
+%% are used by gen_sctp:send/4.
def_sndrcvinfo(Config) when is_list(Config) ->
- ?line Loopback = {127,0,0,1},
- ?line Data = <<"What goes up, must come down.">>,
+ Loopback = {127,0,0,1},
+ Data = <<"What goes up, must come down.">>,
%%
- ?line S1 =
+ S1 =
log_ok(gen_sctp:open(
0, [{sctp_default_send_param,#sctp_sndrcvinfo{ppid=17}}])),
?LOGVAR(S1),
- ?line P1 =
+ P1 =
log_ok(inet:port(S1)),
?LOGVAR(P1),
- ?line #sctp_sndrcvinfo{ppid=17, context=0, timetolive=0, assoc_id=0} =
+ #sctp_sndrcvinfo{ppid=17, context=0, timetolive=0, assoc_id=0} =
getopt(S1, sctp_default_send_param),
- ?line ok =
+ ok =
gen_sctp:listen(S1, true),
%%
- ?line S2 =
+ S2 =
log_ok(gen_sctp:open()),
?LOGVAR(S2),
- ?line P2 =
+ P2 =
log_ok(inet:port(S2)),
?LOGVAR(P2),
- ?line #sctp_sndrcvinfo{ppid=0, context=0, timetolive=0, assoc_id=0} =
+ #sctp_sndrcvinfo{ppid=0, context=0, timetolive=0, assoc_id=0} =
getopt(S2, sctp_default_send_param),
%%
- ?line #sctp_assoc_change{
+ #sctp_assoc_change{
state=comm_up,
error=0,
assoc_id=S2AssocId} = S2AssocChange =
log_ok(gen_sctp:connect(S2, Loopback, P1, [])),
?LOGVAR(S2AssocChange),
- ?line case recv_event(log_ok(gen_sctp:recv(S1))) of
- {Loopback,P2,
- #sctp_assoc_change{
- state=comm_up,
- error=0,
- assoc_id=S1AssocId}} ->
- ?LOGVAR(S1AssocId);
- {Loopback,P2,
- #sctp_paddr_change{
- state=addr_confirmed,
- error=0,
- assoc_id=S1AssocId}} ->
- ?LOGVAR(S1AssocId),
- {Loopback,P2,
- #sctp_assoc_change{
- state=comm_up,
- error=0,
- assoc_id=S1AssocId}} =
- recv_event(log_ok(gen_sctp:recv(S1)))
- end,
-
- ?line #sctp_sndrcvinfo{
+ S1AssocId =
+ case recv_event(log_ok(gen_sctp:recv(S1))) of
+ {Loopback,P2,
+ #sctp_assoc_change{
+ state=comm_up,
+ error=0,
+ assoc_id=AssocId}} ->
+ AssocId;
+ {Loopback,P2,
+ #sctp_paddr_change{
+ state=addr_confirmed,
+ error=0,
+ assoc_id=AssocId}} ->
+ {Loopback,P2,
+ #sctp_assoc_change{
+ state=comm_up,
+ error=0,
+ assoc_id=AssocId}} =
+ recv_event(log_ok(gen_sctp:recv(S1))),
+ AssocId
+ end,
+ ?LOGVAR(S1AssocId),
+
+ #sctp_sndrcvinfo{
ppid=17, context=0, timetolive=0} = %, assoc_id=S1AssocId} =
getopt(
S1, sctp_default_send_param, #sctp_sndrcvinfo{assoc_id=S1AssocId}),
- ?line #sctp_sndrcvinfo{
+ #sctp_sndrcvinfo{
ppid=0, context=0, timetolive=0} = %, assoc_id=S2AssocId} =
getopt(
S2, sctp_default_send_param, #sctp_sndrcvinfo{assoc_id=S2AssocId}),
%%
- ?line ok =
+ ok =
gen_sctp:send(S1, S1AssocId, 1, <<"1: ",Data/binary>>),
- ?line case log_ok(gen_sctp:recv(S2)) of
- {Loopback,P1,
- [#sctp_sndrcvinfo{
- stream=1, ppid=17, context=0, assoc_id=S2AssocId}],
- <<"1: ",Data/binary>>} -> ok;
- Event1 ->
- ?line {Loopback,P1,
- #sctp_paddr_change{state=addr_confirmed,
- addr={_,P1},
- error=0,
- assoc_id=S2AssocId}} =
- recv_event(Event1),
- ?line {Loopback,P1,
- [#sctp_sndrcvinfo{
- stream=1, ppid=17, context=0, assoc_id=S2AssocId}],
- <<"1: ",Data/binary>>} =
- log_ok(gen_sctp:recv(S2))
- end,
+ case log_ok(gen_sctp:recv(S2)) of
+ {Loopback,P1,
+ [#sctp_sndrcvinfo{
+ stream=1, ppid=17, context=0, assoc_id=S2AssocId}],
+ <<"1: ",Data/binary>>} -> ok;
+ Event1 ->
+ {Loopback,P1,
+ #sctp_paddr_change{state=addr_confirmed,
+ addr={_,P1},
+ error=0,
+ assoc_id=S2AssocId}} =
+ recv_event(Event1),
+ {Loopback,P1,
+ [#sctp_sndrcvinfo{
+ stream=1, ppid=17, context=0, assoc_id=S2AssocId}],
+ <<"1: ",Data/binary>>} =
+ log_ok(gen_sctp:recv(S2))
+ end,
%%
- ?line ok =
+ ok =
setopt(
S1, sctp_default_send_param, #sctp_sndrcvinfo{ppid=18}),
- ?line ok =
+ ok =
setopt(
S1, sctp_default_send_param,
#sctp_sndrcvinfo{ppid=19, assoc_id=S1AssocId}),
- ?line #sctp_sndrcvinfo{
+ #sctp_sndrcvinfo{
ppid=18, context=0, timetolive=0, assoc_id=0} =
getopt(S1, sctp_default_send_param),
- ?line #sctp_sndrcvinfo{
+ #sctp_sndrcvinfo{
ppid=19, context=0, timetolive=0, assoc_id=S1AssocId} =
getopt(
S1, sctp_default_send_param, #sctp_sndrcvinfo{assoc_id=S1AssocId}),
%%
- ?line ok =
+ ok =
gen_sctp:send(S1, S1AssocId, 0, <<"2: ",Data/binary>>),
- ?line case log_ok(gen_sctp:recv(S2)) of
- {Loopback,P1,
- [#sctp_sndrcvinfo{
- stream=0, ppid=19, context=0, assoc_id=S2AssocId}],
- <<"2: ",Data/binary>>} -> ok
- end,
- ?line ok =
+ case log_ok(gen_sctp:recv(S2)) of
+ {Loopback,P1,
+ [#sctp_sndrcvinfo{
+ stream=0, ppid=19, context=0, assoc_id=S2AssocId}],
+ <<"2: ",Data/binary>>} -> ok
+ end,
+ ok =
gen_sctp:send(S2, S2AssocChange, 1, <<"3: ",Data/binary>>),
- ?line case log_ok(gen_sctp:recv(S1)) of
- {Loopback,P2,
- [#sctp_sndrcvinfo{
- stream=1, ppid=0, context=0, assoc_id=S1AssocId}],
- <<"3: ",Data/binary>>} -> ok;
- Event2 ->
- case recv_event(Event2) of
- {Loopback,P2,
- #sctp_paddr_change{
- addr={Loopback,_},
- state=State,
- error=0, assoc_id=S1AssocId}}
- when State =:= addr_available;
- State =:= addr_confirmed ->
- ?line case log_ok(gen_sctp:recv(S1)) of
- {Loopback,P2,
- [#sctp_sndrcvinfo{
- stream=1, ppid=0, context=0,
- assoc_id=S1AssocId}],
- <<"3: ",Data/binary>>} -> ok
- end
- end
- end,
- ?line ok =
+ case log_ok(gen_sctp:recv(S1)) of
+ {Loopback,P2,
+ [#sctp_sndrcvinfo{
+ stream=1, ppid=0, context=0, assoc_id=S1AssocId}],
+ <<"3: ",Data/binary>>} -> ok;
+ Event2 ->
+ case recv_event(Event2) of
+ {Loopback,P2,
+ #sctp_paddr_change{
+ addr={Loopback,_},
+ state=State,
+ error=0, assoc_id=S1AssocId}}
+ when State =:= addr_available;
+ State =:= addr_confirmed ->
+ case log_ok(gen_sctp:recv(S1)) of
+ {Loopback,P2,
+ [#sctp_sndrcvinfo{
+ stream=1, ppid=0, context=0,
+ assoc_id=S1AssocId}],
+ <<"3: ",Data/binary>>} -> ok
+ end
+ end
+ end,
+ ok =
do_from_other_process(
fun () ->
gen_sctp:send(
@@ -489,22 +510,22 @@ def_sndrcvinfo(Config) when is_list(Config) ->
#sctp_sndrcvinfo{stream=0, ppid=20, assoc_id=S2AssocId},
<<"4: ",Data/binary>>)
end),
- ?line case log_ok(do_from_other_process(fun() -> gen_sctp:recv(S1) end)) of
- {Loopback,P2,
- [#sctp_sndrcvinfo{
- stream=0, ppid=20, context=0, assoc_id=S1AssocId}],
- <<"4: ",Data/binary>>} -> ok
- end,
+ case log_ok(do_from_other_process(fun() -> gen_sctp:recv(S1) end)) of
+ {Loopback,P2,
+ [#sctp_sndrcvinfo{
+ stream=0, ppid=20, context=0, assoc_id=S1AssocId}],
+ <<"4: ",Data/binary>>} -> ok
+ end,
%%
- ?line ok =
+ ok =
gen_sctp:close(S1),
- ?line ok =
+ ok =
gen_sctp:close(S2),
- ?line receive
- Msg ->
- test_server:fail({received,Msg})
- after 17 -> ok
- end,
+ receive
+ Msg ->
+ ct:fail({received,Msg})
+ after 17 -> ok
+ end,
ok.
getopt(S, Opt) ->
@@ -541,147 +562,138 @@ flush() ->
[]
end.
-api_open_close(doc) ->
- "Test the API function open/1,2 and close/1";
-api_open_close(suite) ->
- [];
+%% Test the API function open/1,2 and close/1.
api_open_close(Config) when is_list(Config) ->
- ?line {ok,S1} = gen_sctp:open(0),
- ?line {ok,P} = inet:port(S1),
- ?line ok = gen_sctp:close(S1),
+ {ok,S1} = gen_sctp:open(0),
+ {ok,P} = inet:port(S1),
+ ok = gen_sctp:close(S1),
- ?line {ok,S2} = gen_sctp:open(P),
- ?line {ok,P} = inet:port(S2),
- ?line ok = gen_sctp:close(S2),
+ {ok,S2} = gen_sctp:open(P),
+ {ok,P} = inet:port(S2),
+ ok = gen_sctp:close(S2),
- ?line {ok,S3} = gen_sctp:open([{port,P}]),
- ?line {ok,P} = inet:port(S3),
- ?line ok = gen_sctp:close(S3),
+ {ok,S3} = gen_sctp:open([{port,P}]),
+ {ok,P} = inet:port(S3),
+ ok = gen_sctp:close(S3),
- ?line {ok,S4} = gen_sctp:open(P, []),
- ?line {ok,P} = inet:port(S4),
- ?line ok = gen_sctp:close(S4),
+ {ok,S4} = gen_sctp:open(P, []),
+ {ok,P} = inet:port(S4),
+ ok = gen_sctp:close(S4),
- ?line {ok,S5} = gen_sctp:open(P, [{ifaddr,any}]),
- ?line {ok,P} = inet:port(S5),
- ?line ok = gen_sctp:close(S5),
+ {ok,S5} = gen_sctp:open(P, [{ifaddr,any}]),
+ {ok,P} = inet:port(S5),
+ ok = gen_sctp:close(S5),
- ?line ok = gen_sctp:close(S5),
+ ok = gen_sctp:close(S5),
- ?line try gen_sctp:close(0)
- catch error:badarg -> ok
- end,
+ try gen_sctp:close(0)
+ catch error:badarg -> ok
+ end,
- ?line try gen_sctp:open({})
- catch error:badarg -> ok
- end,
+ try gen_sctp:open({})
+ catch error:badarg -> ok
+ end,
- ?line try gen_sctp:open(-1)
- catch error:badarg -> ok
- end,
+ try gen_sctp:open(-1)
+ catch error:badarg -> ok
+ end,
- ?line try gen_sctp:open(65536)
- catch error:badarg -> ok
- end,
+ try gen_sctp:open(65536)
+ catch error:badarg -> ok
+ end,
- ?line try gen_sctp:open(make_ref(), [])
- catch error:badarg -> ok
- end,
+ try gen_sctp:open(make_ref(), [])
+ catch error:badarg -> ok
+ end,
- ?line try gen_sctp:open(0, {})
- catch error:badarg -> ok
- end,
+ try gen_sctp:open(0, {})
+ catch error:badarg -> ok
+ end,
- ?line try gen_sctp:open(0, [make_ref()])
- catch error:badarg -> ok
- end,
+ try gen_sctp:open(0, [make_ref()])
+ catch error:badarg -> ok
+ end,
- ?line try gen_sctp:open([{invalid_option,0}])
- catch error:badarg -> ok
- end,
+ try gen_sctp:open([{invalid_option,0}])
+ catch error:badarg -> ok
+ end,
- ?line try gen_sctp:open(0, [{mode,invalid_mode}])
- catch error:badarg -> ok
- end,
+ try gen_sctp:open(0, [{mode,invalid_mode}])
+ catch error:badarg -> ok
+ end,
ok.
-api_listen(doc) ->
- "Test the API function listen/2";
-api_listen(suite) ->
- [];
+%% Test the API function listen/2.
api_listen(Config) when is_list(Config) ->
- ?line Localhost = {127,0,0,1},
-
- ?line try gen_sctp:listen(0, true)
- catch error:badarg -> ok
- end,
-
- ?line {ok,S} = gen_sctp:open(),
- ?line {ok,Pb} = inet:port(S),
- ?line try gen_sctp:listen(S, not_allowed_for_listen)
- catch error:badarg -> ok
- end,
- ?line ok = gen_sctp:close(S),
- ?line {error,closed} = gen_sctp:listen(S, true),
-
- ?line {ok,Sb} = gen_sctp:open(Pb),
- ?line {ok,Sa} = gen_sctp:open(),
- ?line case gen_sctp:connect(Sa, localhost, Pb, []) of
- {error,econnrefused} ->
- ?line {ok,{Localhost,
- Pb,[],
- #sctp_assoc_change{
- state=comm_lost}}} =
- gen_sctp:recv(Sa, infinity);
- {error,#sctp_assoc_change{state=cant_assoc}} ->
- ok%;
- %% {error,{Localhost,Pb,_,#sctp_assoc_change{state=cant_assoc}}} ->
- %% ok
- end,
- ?line ok = gen_sctp:listen(Sb, true),
- ?line {ok,#sctp_assoc_change{state=comm_up,
- error=0}} =
+ Localhost = {127,0,0,1},
+
+ try gen_sctp:listen(0, true)
+ catch error:badarg -> ok
+ end,
+
+ {ok,S} = gen_sctp:open(),
+ {ok,Pb} = inet:port(S),
+ try gen_sctp:listen(S, not_allowed_for_listen)
+ catch error:badarg -> ok
+ end,
+ ok = gen_sctp:close(S),
+ {error,closed} = gen_sctp:listen(S, true),
+
+ {ok,Sb} = gen_sctp:open(Pb),
+ {ok,Sa} = gen_sctp:open(),
+ case gen_sctp:connect(Sa, localhost, Pb, []) of
+ {error,econnrefused} ->
+ {ok,{Localhost,
+ Pb,[],
+ #sctp_assoc_change{
+ state=comm_lost}}} =
+ gen_sctp:recv(Sa, infinity);
+ {error,#sctp_assoc_change{state=cant_assoc}} ->
+ ok%;
+ %% {error,{Localhost,Pb,_,#sctp_assoc_change{state=cant_assoc}}} ->
+ %% ok
+ end,
+ ok = gen_sctp:listen(Sb, true),
+ {ok,#sctp_assoc_change{state=comm_up,
+ error=0}} =
gen_sctp:connect(Sa, localhost, Pb, []),
- ?line ok = gen_sctp:close(Sa),
- ?line ok = gen_sctp:close(Sb),
+ ok = gen_sctp:close(Sa),
+ ok = gen_sctp:close(Sb),
ok.
-api_connect_init(doc) ->
- "Test the API function connect_init/4";
-api_connect_init(suite) ->
- [];
+%% Test the API function connect_init/4.
api_connect_init(Config) when is_list(Config) ->
- ?line Localhost = {127,0,0,1},
-
- ?line {ok,S} = gen_sctp:open(),
- ?line {ok,Pb} = inet:port(S),
- ?line try gen_sctp:connect_init(S, Localhost, not_allowed_for_port, [])
- catch error:badarg -> ok
- end,
- ?line try gen_sctp:connect_init(S, Localhost, 12345, not_allowed_for_opts)
- catch error:badarg -> ok
- end,
- ?line ok = gen_sctp:close(S),
- ?line {error,closed} = gen_sctp:connect_init(S, Localhost, 12345, []),
-
- ?line {ok,Sb} = gen_sctp:open(Pb),
- ?line {ok,Sa} = gen_sctp:open(),
- ?line case gen_sctp:connect_init(Sa, localhost, Pb, []) of
- {error,econnrefused} ->
- ?line {Localhost,Pb,#sctp_assoc_change{state=comm_lost}} =
- recv_event(log_ok(gen_sctp:recv(Sa, infinity)));
- ok ->
- ?line {Localhost,Pb,#sctp_assoc_change{state=cant_assoc}} =
- recv_event(log_ok(gen_sctp:recv(Sa, infinity)))
- end,
- ?line ok = gen_sctp:listen(Sb, true),
- ?line case gen_sctp:connect_init(Sa, localhost, Pb, []) of
- ok ->
- ?line {Localhost,Pb,#sctp_assoc_change{state=comm_up}} =
- recv_event(log_ok(gen_sctp:recv(Sa, infinity)))
- end,
- ?line ok = gen_sctp:close(Sa),
- ?line ok = gen_sctp:close(Sb),
+ Localhost = {127,0,0,1},
+
+ {ok,S} = gen_sctp:open(),
+ {ok,Pb} = inet:port(S),
+ try gen_sctp:connect_init(S, Localhost, not_allowed_for_port, [])
+ catch error:badarg -> ok
+ end,
+ try gen_sctp:connect_init(S, Localhost, 12345, not_allowed_for_opts)
+ catch error:badarg -> ok
+ end,
+ ok = gen_sctp:close(S),
+ {error,closed} = gen_sctp:connect_init(S, Localhost, 12345, []),
+
+ {ok,Sb} = gen_sctp:open(Pb),
+ {ok,Sa} = gen_sctp:open(),
+ case gen_sctp:connect_init(Sa, localhost, Pb, []) of
+ {error,econnrefused} ->
+ {Localhost,Pb,#sctp_assoc_change{state=comm_lost}} =
+ recv_event(log_ok(gen_sctp:recv(Sa, infinity)));
+ ok ->
+ {Localhost,Pb,#sctp_assoc_change{state=cant_assoc}} =
+ recv_event(log_ok(gen_sctp:recv(Sa, infinity)))
+ end,
+ ok = gen_sctp:listen(Sb, true),
+ case gen_sctp:connect_init(Sa, localhost, Pb, []) of
+ ok ->
+ {Localhost,Pb,#sctp_assoc_change{state=comm_up}} =
+ recv_event(log_ok(gen_sctp:recv(Sa, infinity)))
+ end,
+ ok = gen_sctp:close(Sa),
+ ok = gen_sctp:close(Sb),
ok.
recv_event({Addr,Port,[],#sctp_assoc_change{}=AssocChange}) ->
@@ -703,94 +715,86 @@ recv_event({Addr,Port,
#sctp_shutdown_event{assoc_id=Assoc}=ShutdownEvent}) ->
{Addr,Port,ShutdownEvent}.
-api_opts(doc) ->
- "Test socket options";
-api_opts(suite) ->
- [];
+%% Test socket options.
api_opts(Config) when is_list(Config) ->
- ?line Sndbuf = 32768,
- ?line Recbuf = 65536,
- ?line {ok,S} = gen_sctp:open(0),
- ?line OSType = os:type(),
- ?line case {inet:setopts(S, [{linger,{true,2}}]),OSType} of
- {ok,_} ->
- ok;
- {{error,einval},{unix,sunos}} ->
- ok
- end,
- ?line ok = inet:setopts(S, [{sndbuf,Sndbuf}]),
- ?line ok = inet:setopts(S, [{recbuf,Recbuf}]),
- ?line case inet:getopts(S, [sndbuf]) of
- {ok,[{sndbuf,SB}]} when SB >= Sndbuf -> ok
- end,
- ?line case inet:getopts(S, [recbuf]) of
- {ok,[{recbuf,RB}]} when RB >= Recbuf -> ok
- end.
+ Sndbuf = 32768,
+ Recbuf = 65536,
+ {ok,S} = gen_sctp:open(0),
+ OSType = os:type(),
+ case {inet:setopts(S, [{linger,{true,2}}]),OSType} of
+ {ok,_} ->
+ ok;
+ {{error,einval},{unix,sunos}} ->
+ ok
+ end,
+ ok = inet:setopts(S, [{sndbuf,Sndbuf}]),
+ ok = inet:setopts(S, [{recbuf,Recbuf}]),
+ case inet:getopts(S, [sndbuf]) of
+ {ok,[{sndbuf,SB}]} when SB >= Sndbuf -> ok
+ end,
+ case inet:getopts(S, [recbuf]) of
+ {ok,[{recbuf,RB}]} when RB >= Recbuf -> ok
+ end.
implicit_inet6(Config) when is_list(Config) ->
- ?line Hostname = log_ok(inet:gethostname()),
- ?line
- case gen_sctp:open(0, [inet6]) of
- {ok,S1} ->
- ?line
- case inet:getaddr(Hostname, inet6) of
- {ok,Host} ->
- ?line Loopback = {0,0,0,0,0,0,0,1},
- ?line io:format("~s ~p~n", ["Loopback",Loopback]),
- ?line implicit_inet6(S1, Loopback),
- ?line ok = gen_sctp:close(S1),
- %%
- ?line Localhost =
- log_ok(inet:getaddr("localhost", inet6)),
- ?line io:format("~s ~p~n", ["localhost",Localhost]),
- ?line S2 =
- log_ok(gen_sctp:open(0, [{ip,Localhost}])),
- ?line implicit_inet6(S2, Localhost),
- ?line ok = gen_sctp:close(S2),
- %%
- ?line io:format("~s ~p~n", [Hostname,Host]),
- ?line S3 =
- log_ok(gen_sctp:open(0, [{ifaddr,Host}])),
- ?line implicit_inet6(S3, Host),
- ?line ok = gen_sctp:close(S1);
- {error,eafnosupport} ->
- ?line ok = gen_sctp:close(S1),
- {skip,"Can not look up IPv6 address"}
- end;
- _ ->
- {skip,"IPv6 not supported"}
- end.
+ Hostname = log_ok(inet:gethostname()),
+ case gen_sctp:open(0, [inet6]) of
+ {ok,S1} ->
+ case inet:getaddr(Hostname, inet6) of
+ {ok,Host} ->
+ Loopback = {0,0,0,0,0,0,0,1},
+ io:format("~s ~p~n", ["Loopback",Loopback]),
+ implicit_inet6(S1, Loopback),
+ ok = gen_sctp:close(S1),
+ %%
+ Localhost =
+ log_ok(inet:getaddr("localhost", inet6)),
+ io:format("~s ~p~n", ["localhost",Localhost]),
+ S2 =
+ log_ok(gen_sctp:open(0, [{ip,Localhost}])),
+ implicit_inet6(S2, Localhost),
+ ok = gen_sctp:close(S2),
+ %%
+ io:format("~s ~p~n", [Hostname,Host]),
+ S3 =
+ log_ok(gen_sctp:open(0, [{ifaddr,Host}])),
+ implicit_inet6(S3, Host),
+ ok = gen_sctp:close(S1);
+ {error,eafnosupport} ->
+ ok = gen_sctp:close(S1),
+ {skip,"Can not look up IPv6 address"}
+ end;
+ _ ->
+ {skip,"IPv6 not supported"}
+ end.
implicit_inet6(S1, Addr) ->
- ?line ok = gen_sctp:listen(S1, true),
- ?line P1 = log_ok(inet:port(S1)),
- ?line S2 = log_ok(gen_sctp:open(0, [inet6])),
- ?line P2 = log_ok(inet:port(S2)),
- ?line #sctp_assoc_change{state=comm_up} =
+ ok = gen_sctp:listen(S1, true),
+ P1 = log_ok(inet:port(S1)),
+ S2 = log_ok(gen_sctp:open(0, [inet6])),
+ P2 = log_ok(inet:port(S2)),
+ #sctp_assoc_change{state=comm_up} =
log_ok(gen_sctp:connect(S2, Addr, P1, [])),
- ?line case recv_event(log_ok(gen_sctp:recv(S1))) of
- {Addr,P2,#sctp_assoc_change{state=comm_up}} ->
- ok;
- {Addr,P2,#sctp_paddr_change{state=addr_confirmed,
- addr={Addr,P2},
- error=0}} ->
- {Addr,P2,#sctp_assoc_change{state=comm_up}} =
- recv_event(log_ok(gen_sctp:recv(S1)))
- end,
- ?line case log_ok(inet:sockname(S1)) of
- {Addr,P1} -> ok;
- {{0,0,0,0,0,0,0,0},P1} -> ok
- end,
- ?line case log_ok(inet:sockname(S2)) of
- {Addr,P2} -> ok;
- {{0,0,0,0,0,0,0,0},P2} -> ok
- end,
- ?line ok = gen_sctp:close(S2).
-
-active_n(doc) ->
- "Verify {active,N} socket management";
-active_n(suite) ->
- [];
+ case recv_event(log_ok(gen_sctp:recv(S1))) of
+ {Addr,P2,#sctp_assoc_change{state=comm_up}} ->
+ ok;
+ {Addr,P2,#sctp_paddr_change{state=addr_confirmed,
+ addr={Addr,P2},
+ error=0}} ->
+ {Addr,P2,#sctp_assoc_change{state=comm_up}} =
+ recv_event(log_ok(gen_sctp:recv(S1)))
+ end,
+ case log_ok(inet:sockname(S1)) of
+ {Addr,P1} -> ok;
+ {{0,0,0,0,0,0,0,0},P1} -> ok
+ end,
+ case log_ok(inet:sockname(S2)) of
+ {Addr,P2} -> ok;
+ {{0,0,0,0,0,0,0,0},P2} -> ok
+ end,
+ ok = gen_sctp:close(S2).
+
+%% Verify {active,N} socket management.
active_n(Config) when is_list(Config) ->
N = 3,
S1 = ok(gen_sctp:open([{active,N}])),
@@ -887,51 +891,45 @@ active_n(Config) when is_list(Config) ->
ok = gen_sctp:close(S1),
ok.
-basic_stream(doc) ->
- "Hello world stream socket";
-basic_stream(suite) ->
- [];
+%% Hello world stream socket.
basic_stream(Config) when is_list(Config) ->
- ?line {ok,S} = gen_sctp:open([{type,stream}]),
- ?line ok = gen_sctp:listen(S, true),
- ?line ok =
+ {ok,S} = gen_sctp:open([{type,stream}]),
+ ok = gen_sctp:listen(S, true),
+ ok =
do_from_other_process(
fun () -> gen_sctp:listen(S, 10) end),
- ?line ok = gen_sctp:close(S),
+ ok = gen_sctp:close(S),
ok.
-xfer_stream_min(doc) ->
- "Minimal data transfer";
-xfer_stream_min(suite) ->
- [];
+%% Minimal data transfer.
xfer_stream_min(Config) when is_list(Config) ->
- ?line Stream = 0,
- ?line Data = <<"The quick brown fox jumps over a lazy dog 0123456789">>,
- ?line Loopback = {127,0,0,1},
- ?line {ok,Sb} = gen_sctp:open([{type,seqpacket}]),
- ?line ?LOGVAR(Sb),
- ?line {ok,Pb} = inet:port(Sb),
- ?line ?LOGVAR(Pb),
- ?line ok = gen_sctp:listen(Sb, true),
-
- ?line {ok,Sa} = gen_sctp:open([{type,stream}]),
- ?line ?LOGVAR(Sa),
- ?line {ok,Pa} = inet:port(Sa),
- ?line ?LOGVAR(Pa),
- ?line #sctp_assoc_change{state=comm_up,
- error=0,
- outbound_streams=SaOutboundStreams,
- inbound_streams=SaInboundStreams,
- assoc_id=SaAssocId_X} =
+ Stream = 0,
+ Data = <<"The quick brown fox jumps over a lazy dog 0123456789">>,
+ Loopback = {127,0,0,1},
+ {ok,Sb} = gen_sctp:open([{type,seqpacket}]),
+ ?LOGVAR(Sb),
+ {ok,Pb} = inet:port(Sb),
+ ?LOGVAR(Pb),
+ ok = gen_sctp:listen(Sb, true),
+
+ {ok,Sa} = gen_sctp:open([{type,stream}]),
+ ?LOGVAR(Sa),
+ {ok,Pa} = inet:port(Sa),
+ ?LOGVAR(Pa),
+ #sctp_assoc_change{state=comm_up,
+ error=0,
+ outbound_streams=SaOutboundStreams,
+ inbound_streams=SaInboundStreams,
+ assoc_id=SaAssocId_X} =
log_ok(gen_sctp:connect(Sa, Loopback, Pb, [])),
- ?line ?LOGVAR(SaAssocId_X),
- ?line [{_,#sctp_paddrinfo{assoc_id=SaAssocId,state=active}}] =
+ ?LOGVAR(SaAssocId_X),
+ [{_,#sctp_paddrinfo{assoc_id=SaAssocId,state=active}}] =
log_ok(inet:getopts(Sa, [{sctp_get_peer_addr_info,
#sctp_paddrinfo{address={Loopback,Pb}}}])),
- ?line ?LOGVAR(SaAssocId),
- ?line match_unless_solaris(SaAssocId_X, SaAssocId),
+ ?LOGVAR(SaAssocId),
+ match_unless_solaris(SaAssocId_X, SaAssocId),
- ?line {SbOutboundStreams,SbInboundStreams,SbAssocId} =
+ {SbOutboundStreams,SbInboundStreams,SbAssocId} =
case recv_event(log_ok(gen_sctp:recv(Sb, infinity))) of
{Loopback,Pa,
#sctp_assoc_change{state=comm_up,
@@ -946,87 +944,87 @@ xfer_stream_min(Config) when is_list(Config) ->
error=0,
assoc_id=AI}} ->
{Loopback,Pa,
- ?line #sctp_assoc_change{state=comm_up,
- error=0,
- outbound_streams=OS,
- inbound_streams=IS,
- assoc_id=AI}} =
+ #sctp_assoc_change{state=comm_up,
+ error=0,
+ outbound_streams=OS,
+ inbound_streams=IS,
+ assoc_id=AI}} =
recv_event(log_ok(gen_sctp:recv(Sb, infinity))),
{OS,IS,AI}
end,
- ?line ?LOGVAR(SbAssocId),
- ?line SaOutboundStreams = SbInboundStreams,
- ?line ?LOGVAR(SaOutboundStreams),
- ?line SbOutboundStreams = SaInboundStreams,
- ?line ?LOGVAR(SbOutboundStreams),
- ?line ok = gen_sctp:send(Sa, SaAssocId, 0, Data),
- ?line case log_ok(gen_sctp:recv(Sb, infinity)) of
- {Loopback,
- Pa,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SbAssocId}],
- Data} -> ok;
- {Loopback,
- Pa,[],
- #sctp_paddr_change{addr = {Loopback,_},
- state = addr_available,
- error = 0,
- assoc_id = SbAssocId}} ->
- {Loopback,
- Pa,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SbAssocId}],
- Data} = log_ok(gen_sctp:recv(Sb, infinity));
- {Loopback,
- Pa,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SbAssocId}],
- #sctp_paddr_change{addr = {Loopback,_},
- state = addr_confirmed,
- error = 0,
- assoc_id = SbAssocId}} ->
- {Loopback,
- Pa,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SbAssocId}],
- Data} = log_ok(gen_sctp:recv(Sb, infinity))
- end,
- ?line ok =
+ ?LOGVAR(SbAssocId),
+ SaOutboundStreams = SbInboundStreams,
+ ?LOGVAR(SaOutboundStreams),
+ SbOutboundStreams = SaInboundStreams,
+ ?LOGVAR(SbOutboundStreams),
+ ok = gen_sctp:send(Sa, SaAssocId, 0, Data),
+ case log_ok(gen_sctp:recv(Sb, infinity)) of
+ {Loopback,
+ Pa,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SbAssocId}],
+ Data} -> ok;
+ {Loopback,
+ Pa,[],
+ #sctp_paddr_change{addr = {Loopback,_},
+ state = addr_available,
+ error = 0,
+ assoc_id = SbAssocId}} ->
+ {Loopback,
+ Pa,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SbAssocId}],
+ Data} = log_ok(gen_sctp:recv(Sb, infinity));
+ {Loopback,
+ Pa,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SbAssocId}],
+ #sctp_paddr_change{addr = {Loopback,_},
+ state = addr_confirmed,
+ error = 0,
+ assoc_id = SbAssocId}} ->
+ {Loopback,
+ Pa,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SbAssocId}],
+ Data} = log_ok(gen_sctp:recv(Sb, infinity))
+ end,
+ ok =
do_from_other_process(
fun () -> gen_sctp:send(Sb, SbAssocId, 0, Data) end),
- ?line case log_ok(gen_sctp:recv(Sa, infinity)) of
- {Loopback,Pb,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SaAssocId}],
- Data} -> ok;
- Event1 ->
- ?line {Loopback,Pb,
- #sctp_paddr_change{state=addr_confirmed,
- addr={_,Pb},
- error=0,
- assoc_id=SaAssocId}} =
- recv_event(Event1),
- ?line {Loopback,Pb,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SaAssocId}],
- Data} =
- log_ok(gen_sctp:recv(Sa, infinity))
- end,
- ?line ok = gen_sctp:close(Sa),
- ?line {Loopback,Pa,
- #sctp_shutdown_event{assoc_id=SbAssocId}} =
+ case log_ok(gen_sctp:recv(Sa, infinity)) of
+ {Loopback,Pb,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SaAssocId}],
+ Data} -> ok;
+ Event1 ->
+ {Loopback,Pb,
+ #sctp_paddr_change{state=addr_confirmed,
+ addr={_,Pb},
+ error=0,
+ assoc_id=SaAssocId}} =
+ recv_event(Event1),
+ {Loopback,Pb,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SaAssocId}],
+ Data} =
+ log_ok(gen_sctp:recv(Sa, infinity))
+ end,
+ ok = gen_sctp:close(Sa),
+ {Loopback,Pa,
+ #sctp_shutdown_event{assoc_id=SbAssocId}} =
recv_event(log_ok(gen_sctp:recv(Sb, infinity))),
- ?line {Loopback,Pa,
- #sctp_assoc_change{state=shutdown_comp,
- error=0,
- assoc_id=SbAssocId}} =
+ {Loopback,Pa,
+ #sctp_assoc_change{state=shutdown_comp,
+ error=0,
+ assoc_id=SbAssocId}} =
recv_event(log_ok(gen_sctp:recv(Sb, infinity))),
- ?line ok = gen_sctp:close(Sb),
+ ok = gen_sctp:close(Sb),
- ?line receive
- Msg -> test_server:fail({received,Msg})
- after 17 -> ok
- end,
+ receive
+ Msg -> ct:fail({received,Msg})
+ after 17 -> ok
+ end,
ok.
@@ -1040,8 +1038,7 @@ do_from_other_process(Fun) ->
Result ->
Parent ! {Ref,Result}
catch
- Class:Reason ->
- Stacktrace = erlang:get_stacktrace(),
+ Class:Reason:Stacktrace ->
Parent ! {Ref,Class,Reason,Stacktrace}
end
end),
@@ -1058,205 +1055,186 @@ do_from_other_process(Fun) ->
end.
-peeloff_active_once(doc) ->
- "Peel off an SCTP stream socket ({active,once})";
-peeloff_active_once(suite) ->
- [];
+%% Peel off an SCTP stream socket ({active,once}).
peeloff_active_once(Config) ->
peeloff(Config, [{active,once}]).
-peeloff_active_true(doc) ->
- "Peel off an SCTP stream socket ({active,true})";
-peeloff_active_true(suite) ->
- [];
+%% Peel off an SCTP stream socket ({active,true}).
peeloff_active_true(Config) ->
peeloff(Config, [{active,true}]).
-peeloff_active_n(doc) ->
- "Peel off an SCTP stream socket ({active,N})";
-peeloff_active_n(suite) ->
- [];
+%% Peel off an SCTP stream socket ({active,N}).
peeloff_active_n(Config) ->
peeloff(Config, [{active,1}]).
peeloff(Config, SockOpts) when is_list(Config) ->
- ?line Addr = {127,0,0,1},
- ?line Stream = 0,
- ?line Timeout = 333,
- ?line S1 = socket_open([{ifaddr,Addr}|SockOpts], Timeout),
- ?line ?LOGVAR(S1),
- ?line P1 = socket_call(S1, get_port),
- ?line ?LOGVAR(P1),
- ?line Socket1 = socket_call(S1, get_socket),
- ?line ?LOGVAR(Socket1),
- ?line socket_call(S1, {listen,true}),
- ?line S2 = socket_open([{ifaddr,Addr}|SockOpts], Timeout),
- ?line ?LOGVAR(S2),
- ?line P2 = socket_call(S2, get_port),
- ?line ?LOGVAR(P2),
- ?line Socket2 = socket_call(S2, get_socket),
- ?line ?LOGVAR(Socket2),
+ Addr = {127,0,0,1},
+ Stream = 0,
+ Timeout = 333,
+ StartTime = timestamp(),
+ S1 = socket_open([{ifaddr,Addr}|SockOpts], Timeout),
+ ?LOGVAR(S1),
+ P1 = socket_call(S1, get_port),
+ ?LOGVAR(P1),
+ Socket1 = socket_call(S1, get_socket),
+ ?LOGVAR(Socket1),
+ socket_call(S1, {listen,true}),
+ S2 = socket_open([{ifaddr,Addr}|SockOpts], Timeout),
+ ?LOGVAR(S2),
+ P2 = socket_call(S2, get_port),
+ ?LOGVAR(P2),
+ Socket2 = socket_call(S2, get_socket),
+ ?LOGVAR(Socket2),
%%
- ?line socket_call(S2, {connect_init,Addr,P1,[]}),
- ?line S2Ai =
+ socket_call(S2, {connect_init,Addr,P1,[]}),
+ S2Ai =
receive
{S2,{Addr,P1,
#sctp_assoc_change{
- state=comm_up,
- assoc_id=AssocId2}}} -> AssocId2
+ state=comm_up,
+ assoc_id=AssocId2}}} -> AssocId2
after Timeout ->
- socket_bailout([S1,S2])
+ socket_bailout([S1,S2], StartTime)
end,
- ?line ?LOGVAR(S2Ai),
- ?line S1Ai =
+ ?LOGVAR(S2Ai),
+ S1Ai =
receive
{S1,{Addr,P2,
#sctp_assoc_change{
- state=comm_up,
- assoc_id=AssocId1}}} -> AssocId1
+ state=comm_up,
+ assoc_id=AssocId1}}} -> AssocId1
after Timeout ->
- socket_bailout([S1,S2])
+ socket_bailout([S1,S2], StartTime)
end,
- ?line ?LOGVAR(S1Ai),
+ ?LOGVAR(S1Ai),
%%
- ?line socket_call(S2, {send,S2Ai,Stream,<<"Number one">>}),
- ?line
- receive
- {S1,{Addr,P2,S1Ai,Stream,<<"Number one">>}} -> ok
- after Timeout ->
- socket_bailout([S1,S2])
- end,
- ?line socket_call(S2, {send,Socket1,S1Ai,Stream,<<"Number two">>}),
- ?line
- receive
- {S2,{Addr,P1,S2Ai,Stream,<<"Number two">>}} -> ok
- after Timeout ->
- socket_bailout([S1,S2])
- end,
+ socket_call(S2, {send,S2Ai,Stream,<<"Number one">>}),
+ receive
+ {S1,{Addr,P2,S1Ai,Stream,<<"Number one">>}} -> ok
+ after Timeout ->
+ socket_bailout([S1,S2], StartTime)
+ end,
+ socket_call(S2, {send,Socket1,S1Ai,Stream,<<"Number two">>}),
+ receive
+ {S2,{Addr,P1,S2Ai,Stream,<<"Number two">>}} -> ok
+ after Timeout ->
+ socket_bailout([S1,S2], StartTime)
+ end,
%%
- ?line S3 = socket_peeloff(Socket1, S1Ai, SockOpts, Timeout),
- ?line ?LOGVAR(S3),
- ?line P3_X = socket_call(S3, get_port),
- ?line ?LOGVAR(P3_X),
- ?line P3 = case P3_X of 0 -> P1; _ -> P3_X end,
- ?line [{_,#sctp_paddrinfo{assoc_id=S3Ai,state=active}}] =
+ S3 = socket_peeloff(Socket1, S1Ai, SockOpts, Timeout),
+ ?LOGVAR(S3),
+ P3_X = socket_call(S3, get_port),
+ ?LOGVAR(P3_X),
+ P3 = case P3_X of 0 -> P1; _ -> P3_X end,
+ [{_,#sctp_paddrinfo{assoc_id=S3Ai,state=active}}] =
socket_call(S3,
{getopts,[{sctp_get_peer_addr_info,
#sctp_paddrinfo{address={Addr,P2}}}]}),
- %%?line S3Ai = S1Ai,
- ?line ?LOGVAR(S3Ai),
+ %%S3Ai = S1Ai,
+ ?LOGVAR(S3Ai),
%%
- ?line socket_call(S3, {send,S3Ai,Stream,<<"Number three">>}),
- ?line
- receive
- {S2,{Addr,P3,S2Ai,Stream,<<"Number three">>}} -> ok
- after Timeout ->
- socket_bailout([S1,S2,S3])
- end,
- ?line socket_call(S3, {send,Socket2,S2Ai,Stream,<<"Number four">>}),
- ?line
- receive
- {S3,{Addr,P2,S3Ai,Stream,<<"Number four">>}} -> ok
- after Timeout ->
- socket_bailout([S1,S2,S3])
- end,
+ socket_call(S3, {send,S3Ai,Stream,<<"Number three">>}),
+ receive
+ {S2,{Addr,P3,S2Ai,Stream,<<"Number three">>}} -> ok
+ after Timeout ->
+ socket_bailout([S1,S2,S3], StartTime)
+ end,
+ socket_call(S3, {send,Socket2,S2Ai,Stream,<<"Number four">>}),
+ receive
+ {S3,{Addr,P2,S3Ai,Stream,<<"Number four">>}} -> ok
+ after Timeout ->
+ socket_bailout([S1,S2,S3], StartTime)
+ end,
%%
- ?line inet:i(sctp),
- ?line socket_close_verbose(S1),
- ?line socket_close_verbose(S2),
- ?line
- receive
- {S3,{Addr,P2,#sctp_shutdown_event{assoc_id=S3Ai_X}}} ->
- ?line match_unless_solaris(S3Ai, S3Ai_X)
- after Timeout ->
- socket_bailout([S3])
- end,
- ?line
- receive
- {S3,{Addr,P2,#sctp_assoc_change{state=shutdown_comp,
- assoc_id=S3Ai}}} -> ok
- after Timeout ->
- socket_bailout([S3])
- end,
- ?line socket_close_verbose(S3),
- ?line [] = flush(),
+ inet:i(sctp),
+ socket_close_verbose(S1, StartTime),
+ socket_close_verbose(S2, StartTime),
+ receive
+ {S3,{Addr,P2,#sctp_shutdown_event{assoc_id=S3Ai_X}}} ->
+ match_unless_solaris(S3Ai, S3Ai_X)
+ after Timeout ->
+ socket_bailout([S3], StartTime)
+ end,
+ receive
+ {S3,{Addr,P2,#sctp_assoc_change{state=shutdown_comp,
+ assoc_id=S3Ai}}} -> ok
+ after Timeout ->
+ socket_bailout([S3], StartTime)
+ end,
+ socket_close_verbose(S3, StartTime),
+ [] = flush(),
ok.
-buffers(doc) ->
- ["Check sndbuf and recbuf behaviour"];
-buffers(suite) ->
- [];
+%% Check sndbuf and recbuf behaviour.
buffers(Config) when is_list(Config) ->
- ?line Limit = 4096,
- ?line Addr = {127,0,0,1},
- ?line Stream = 1,
- ?line Timeout = 3333,
- ?line S1 = socket_open([{ip,Addr}], Timeout),
- ?line ?LOGVAR(S1),
- ?line P1 = socket_call(S1, get_port),
- ?line ?LOGVAR(P1),
- ?line ok = socket_call(S1, {listen,true}),
- ?line S2 = socket_open([{ip,Addr}], Timeout),
- ?line ?LOGVAR(S2),
- ?line P2 = socket_call(S2, get_port),
- ?line ?LOGVAR(P2),
+ Limit = 4096,
+ Addr = {127,0,0,1},
+ Stream = 1,
+ Timeout = 3333,
+ StartTime = timestamp(),
+ S1 = socket_open([{ip,Addr}], Timeout),
+ ?LOGVAR(S1),
+ P1 = socket_call(S1, get_port),
+ ?LOGVAR(P1),
+ ok = socket_call(S1, {listen,true}),
+ S2 = socket_open([{ip,Addr}], Timeout),
+ ?LOGVAR(S2),
+ P2 = socket_call(S2, get_port),
+ ?LOGVAR(P2),
%%
- ?line socket_call(S2, {connect_init,Addr,P1,[]}),
- ?line S2Ai =
+ socket_call(S2, {connect_init,Addr,P1,[]}),
+ S2Ai =
receive
{S2,{Addr,P1,
#sctp_assoc_change{
- state=comm_up,
- assoc_id=AssocId2}}} -> AssocId2
+ state=comm_up,
+ assoc_id=AssocId2}}} -> AssocId2
after Timeout ->
- socket_bailout([S1,S2])
+ socket_bailout([S1,S2], StartTime)
end,
- ?line S1Ai =
+ S1Ai =
receive
{S1,{Addr,P2,
#sctp_assoc_change{
- state=comm_up,
- assoc_id=AssocId1}}} -> AssocId1
+ state=comm_up,
+ assoc_id=AssocId1}}} -> AssocId1
after Timeout ->
- socket_bailout([S1,S2])
+ socket_bailout([S1,S2], StartTime)
end,
%%
- ?line socket_call(S1, {setopts,[{recbuf,Limit}]}),
- ?line Recbuf =
+ socket_call(S1, {setopts,[{recbuf,Limit}]}),
+ Recbuf =
case socket_call(S1, {getopts,[recbuf]}) of
[{recbuf,RB1}] when RB1 >= Limit -> RB1
end,
- ?line Data = mk_data(Recbuf+Limit),
- ?line socket_call(S2, {setopts,[{sndbuf,Recbuf+Limit}]}),
- ?line socket_call(S2, {send,S2Ai,Stream,Data}),
- ?line
- receive
- {S1,{Addr,P2,S1Ai,Stream,Data}} -> ok
- after Timeout ->
- socket_bailout([S1,S2])
- end,
+ Data = mk_data(Recbuf+Limit),
+ socket_call(S2, {setopts,[{sndbuf,Recbuf+Limit}]}),
+ socket_call(S2, {send,S2Ai,Stream,Data}),
+ receive
+ {S1,{Addr,P2,S1Ai,Stream,Data}} -> ok
+ after Timeout ->
+ socket_bailout([S1,S2], StartTime)
+ end,
%%
- ?line socket_close_verbose(S1),
- ?line
- receive
- {S2,{Addr,P1,#sctp_shutdown_event{assoc_id=S2Ai}}} -> ok
- after Timeout ->
- socket_bailout([S2])
- end,
- ?line
- receive
- {S2,{Addr,P1,#sctp_assoc_change{state=shutdown_comp,
- assoc_id=S2Ai}}} -> ok
- after Timeout ->
- socket_bailout([S2])
- end,
- ?line socket_close_verbose(S2),
- ?line [] = flush(),
+ socket_close_verbose(S1, StartTime),
+ receive
+ {S2,{Addr,P1,#sctp_shutdown_event{assoc_id=S2Ai}}} -> ok
+ after Timeout ->
+ socket_bailout([S2], StartTime)
+ end,
+ receive
+ {S2,{Addr,P1,#sctp_assoc_change{state=shutdown_comp,
+ assoc_id=S2Ai}}} -> ok
+ after Timeout ->
+ socket_bailout([S2], StartTime)
+ end,
+ socket_close_verbose(S2, StartTime),
+ [] = flush(),
ok.
mk_data(Bytes) ->
@@ -1269,153 +1247,129 @@ mk_data(_, _, Bin) ->
-open_multihoming_ipv4_socket(doc) ->
- "Test opening a multihoming ipv4 socket";
-open_multihoming_ipv4_socket(suite) ->
- [];
+%% Test opening a multihoming ipv4 socket.
open_multihoming_ipv4_socket(Config) when is_list(Config) ->
- ?line case get_addrs_by_family(inet, 2) of
- {ok, [Addr1, Addr2]} ->
- ?line do_open_and_connect([Addr1, Addr2], Addr1);
- {error, Reason} ->
- {skip, Reason}
- end.
-
-open_unihoming_ipv6_socket(doc) ->
- %% This test is mostly aimed to indicate
- %% whether host has a non-working ipv6 setup
- "Test opening a unihoming (non-multihoming) ipv6 socket";
-open_unihoming_ipv6_socket(suite) ->
- [];
+ case get_addrs_by_family(inet, 2) of
+ {ok, [Addr1, Addr2]} ->
+ do_open_and_connect([Addr1, Addr2], Addr1);
+ {error, Reason} ->
+ {skip, Reason}
+ end.
+
+%% This test is mostly aimed at indicating whether the host has a
+%% non-working ipv6 setup. Test opening a unihoming (non-multihoming)
+%% ipv6 socket.
open_unihoming_ipv6_socket(Config) when is_list(Config) ->
- ?line case get_addrs_by_family(inet6, 1) of
- {ok, [Addr]} ->
- ?line do_open_and_connect([Addr], Addr);
- {error, Reason} ->
- {skip, Reason}
- end.
+ case get_addrs_by_family(inet6, 1) of
+ {ok, [Addr]} ->
+ do_open_and_connect([Addr], Addr);
+ {error, Reason} ->
+ {skip, Reason}
+ end.
-open_multihoming_ipv6_socket(doc) ->
- "Test opening a multihoming ipv6 socket";
-open_multihoming_ipv6_socket(suite) ->
- [];
+%% Test opening a multihoming ipv6 socket.
open_multihoming_ipv6_socket(Config) when is_list(Config) ->
- ?line case get_addrs_by_family(inet6, 2) of
- {ok, [Addr1, Addr2]} ->
- ?line do_open_and_connect([Addr1, Addr2], Addr1);
- {error, Reason} ->
- {skip, Reason}
- end.
-
-open_multihoming_ipv4_and_ipv6_socket(doc) ->
- "Test opening a multihoming ipv6 socket with ipv4 and ipv6 addresses";
-open_multihoming_ipv4_and_ipv6_socket(suite) ->
- [];
+ case get_addrs_by_family(inet6, 2) of
+ {ok, [Addr1, Addr2]} ->
+ do_open_and_connect([Addr1, Addr2], Addr1);
+ {error, Reason} ->
+ {skip, Reason}
+ end.
+
+%% Test opening a multihoming ipv6 socket with ipv4 and ipv6 addresses.
open_multihoming_ipv4_and_ipv6_socket(Config) when is_list(Config) ->
- ?line case get_addrs_by_family(inet_and_inet6, 2) of
- {ok, [[InetAddr1, InetAddr2], [Inet6Addr1, Inet6Addr2]]} ->
- %% Connect to the first address to test bind
- ?line do_open_and_connect([InetAddr1, Inet6Addr1, InetAddr2],
- InetAddr1),
- ?line do_open_and_connect([Inet6Addr1, InetAddr1],
- Inet6Addr1),
-
- %% Connect an address, not the first,
- %% to test sctp_bindx
- ?line do_open_and_connect([Inet6Addr1, Inet6Addr2, InetAddr1],
- Inet6Addr2),
- ?line do_open_and_connect([Inet6Addr1, Inet6Addr2, InetAddr1],
- InetAddr1);
- {error, Reason} ->
- {skip, Reason}
- end.
-
-names_unihoming_ipv4(doc) ->
- "Test inet:socknames/peernames on unihoming IPv4 sockets";
-names_unihoming_ipv4(suite) ->
- [];
+ case get_addrs_by_family(inet_and_inet6, 2) of
+ {ok, [[InetAddr1, InetAddr2], [Inet6Addr1, Inet6Addr2]]} ->
+ %% Connect to the first address to test bind
+ do_open_and_connect([InetAddr1, Inet6Addr1, InetAddr2],
+ InetAddr1),
+ do_open_and_connect([Inet6Addr1, InetAddr1],
+ Inet6Addr1),
+
+ %% Connect an address, not the first,
+ %% to test sctp_bindx
+ do_open_and_connect([Inet6Addr1, Inet6Addr2, InetAddr1],
+ Inet6Addr2),
+ do_open_and_connect([Inet6Addr1, Inet6Addr2, InetAddr1],
+ InetAddr1);
+ {error, Reason} ->
+ {skip, Reason}
+ end.
+
+%% Test inet:socknames/peernames on unihoming IPv4 sockets.
names_unihoming_ipv4(Config) when is_list(Config) ->
- ?line do_names(Config, inet, 1).
+ do_names(Config, inet, 1).
-names_unihoming_ipv6(doc) ->
- "Test inet:socknames/peernames on unihoming IPv6 sockets";
-names_unihoming_ipv6(suite) ->
- [];
+%% Test inet:socknames/peernames on unihoming IPv6 sockets.
names_unihoming_ipv6(Config) when is_list(Config) ->
- ?line do_names(Config, inet6, 1).
+ do_names(Config, inet6, 1).
-names_multihoming_ipv4(doc) ->
- "Test inet:socknames/peernames on multihoming IPv4 sockets";
-names_multihoming_ipv4(suite) ->
- [];
+%% Test inet:socknames/peernames on multihoming IPv4 sockets.
names_multihoming_ipv4(Config) when is_list(Config) ->
- ?line do_names(Config, inet, 2).
+ do_names(Config, inet, 2).
-names_multihoming_ipv6(doc) ->
- "Test inet:socknames/peernames on multihoming IPv6 sockets";
-names_multihoming_ipv6(suite) ->
- [];
+%% Test inet:socknames/peernames on multihoming IPv6 sockets.
names_multihoming_ipv6(Config) when is_list(Config) ->
- ?line do_names(Config, inet6, 2).
+ do_names(Config, inet6, 2).
do_names(_, FamilySpec, AddressCount) ->
Fun =
fun (ServerSocket, _, ServerAssoc, ClientSocket, _, ClientAssoc) ->
- ?line ServerSocknamesNoassoc =
+ ServerSocknamesNoassoc =
lists:sort(ok(inet:socknames(ServerSocket))),
- ?line ?LOGVAR(ServerSocknamesNoassoc),
- ?line ServerSocknames =
+ ?LOGVAR(ServerSocknamesNoassoc),
+ ServerSocknames =
lists:sort(ok(inet:socknames(ServerSocket, ServerAssoc))),
- ?line ?LOGVAR(ServerSocknames),
- ?line [_|_] =
+ ?LOGVAR(ServerSocknames),
+ [_|_] =
ordsets:intersection
(ServerSocknamesNoassoc, ServerSocknames),
- ?line ClientSocknamesNoassoc =
+ ClientSocknamesNoassoc =
lists:sort(ok(inet:socknames(ClientSocket))),
- ?line ?LOGVAR(ClientSocknamesNoassoc),
- ?line ClientSocknames =
+ ?LOGVAR(ClientSocknamesNoassoc),
+ ClientSocknames =
lists:sort(ok(inet:socknames(ClientSocket, ClientAssoc))),
- ?line ?LOGVAR(ClientSocknames),
- ?line [_|_] =
+ ?LOGVAR(ClientSocknames),
+ [_|_] =
ordsets:intersection
(ClientSocknamesNoassoc, ClientSocknames),
- ?line err([einval,enotconn], inet:peernames(ServerSocket)),
- ?line ServerPeernames =
+ err([einval,enotconn], inet:peernames(ServerSocket)),
+ ServerPeernames =
lists:sort(ok(inet:peernames(ServerSocket, ServerAssoc))),
- ?line ?LOGVAR(ServerPeernames),
- ?line err([einval,enotconn], inet:peernames(ClientSocket)),
- ?line ClientPeernames =
+ ?LOGVAR(ServerPeernames),
+ err([einval,enotconn], inet:peernames(ClientSocket)),
+ ClientPeernames =
lists:sort(ok(inet:peernames(ClientSocket, ClientAssoc))),
- ?line ?LOGVAR(ClientPeernames),
- ?line ServerSocknames = ClientPeernames,
- ?line ClientSocknames = ServerPeernames,
- ?line {ok,Socket} =
+ ?LOGVAR(ClientPeernames),
+ ServerSocknames = ClientPeernames,
+ ClientSocknames = ServerPeernames,
+ {ok,Socket} =
gen_sctp:peeloff(ServerSocket, ServerAssoc),
- ?line SocknamesNoassoc =
+ SocknamesNoassoc =
lists:sort(ok(inet:socknames(Socket))),
- ?line ?LOGVAR(SocknamesNoassoc),
- ?line Socknames =
+ ?LOGVAR(SocknamesNoassoc),
+ Socknames =
lists:sort(ok(inet:socknames(Socket, ServerAssoc))),
- ?line ?LOGVAR(Socknames),
- ?line true =
+ ?LOGVAR(Socknames),
+ true =
ordsets:is_subset(SocknamesNoassoc, Socknames),
- ?line Peernames =
+ Peernames =
lists:sort(ok(inet:peernames(Socket, ServerAssoc))),
- ?line ?LOGVAR(Peernames),
- ?line ok = gen_sctp:close(Socket),
- ?line Socknames = ClientPeernames,
- ?line ClientSocknames = Peernames,
+ ?LOGVAR(Peernames),
+ ok = gen_sctp:close(Socket),
+ Socknames = ClientPeernames,
+ ClientSocknames = Peernames,
ok
end,
- ?line case get_addrs_by_family(FamilySpec, AddressCount) of
- {ok, Addresses} when length(Addresses) =:= AddressCount ->
- ?line do_open_and_connect(Addresses, hd(Addresses), Fun);
- {error, Reason} ->
- {skip, Reason}
- end.
+ case get_addrs_by_family(FamilySpec, AddressCount) of
+ {ok, Addresses} when length(Addresses) =:= AddressCount ->
+ do_open_and_connect(Addresses, hd(Addresses), Fun);
+ {error, Reason} ->
+ {skip, Reason}
+ end.
@@ -1451,29 +1405,28 @@ get_addrs_by_family(Family, NumAddrs) ->
get_addrs_by_family_aux(Family, NumAddrs) when Family =:= inet;
Family =:= inet6 ->
- ?line
- case inet:getaddr(localhost, Family) of
- {error,eafnosupport} ->
- {skip, f("No support for ~p", Family)};
- {ok, _} ->
- ?line IfAddrs = ok(inet:getifaddrs()),
- ?line case filter_addrs_by_family(IfAddrs, Family) of
- Addrs when length(Addrs) >= NumAddrs ->
- {ok, lists:sublist(Addrs, NumAddrs)};
- [] ->
- {error, f("Need ~p ~p address(es) found none~n",
- [NumAddrs, Family])};
- Addrs ->
- {error,
- f("Need ~p ~p address(es) found only ~p: ~p~n",
- [NumAddrs, Family, length(Addrs), Addrs])}
- end
- end;
+ case inet:getaddr(localhost, Family) of
+ {error,eafnosupport} ->
+            {skip, f("No support for ~p", [Family])};
+ {ok, _} ->
+ IfAddrs = ok(inet:getifaddrs()),
+ case filter_addrs_by_family(IfAddrs, Family) of
+ Addrs when length(Addrs) >= NumAddrs ->
+ {ok, lists:sublist(Addrs, NumAddrs)};
+ [] ->
+ {error, f("Need ~p ~p address(es) found none~n",
+ [NumAddrs, Family])};
+ Addrs ->
+ {error,
+ f("Need ~p ~p address(es) found only ~p: ~p~n",
+ [NumAddrs, Family, length(Addrs), Addrs])}
+ end
+ end;
get_addrs_by_family_aux(inet_and_inet6, NumAddrs) ->
- ?line catch {ok, [case get_addrs_by_family_aux(Family, NumAddrs) of
- {ok, Addrs} -> Addrs;
- {error, Reason} -> throw({error, Reason})
- end || Family <- [inet, inet6]]}.
+ catch {ok, [case get_addrs_by_family_aux(Family, NumAddrs) of
+ {ok, Addrs} -> Addrs;
+ {error, Reason} -> throw({error, Reason})
+ end || Family <- [inet, inet6]]}.
filter_addrs_by_family(IfAddrs, Family) ->
lists:flatten([[Addr || {addr, Addr} <- Info,
@@ -1502,21 +1455,21 @@ f(F, A) ->
lists:flatten(io_lib:format(F, A)).
do_open_and_connect(ServerAddresses, AddressToConnectTo) ->
- ?line Fun = fun (_, _, _, _, _, _) -> ok end,
- ?line do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun).
+ Fun = fun (_, _, _, _, _, _) -> ok end,
+ do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun).
%%
do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun) ->
- ?line ServerFamily = get_family_by_addrs(ServerAddresses),
- ?line io:format("Serving ~p addresses: ~p~n",
- [ServerFamily, ServerAddresses]),
- ?line S1 = ok(gen_sctp:open(0, [{ip,Addr} || Addr <- ServerAddresses] ++
- [ServerFamily])),
- ?line ok = gen_sctp:listen(S1, true),
- ?line P1 = ok(inet:port(S1)),
- ?line ClientFamily = get_family_by_addr(AddressToConnectTo),
- ?line io:format("Connecting to ~p ~p~n",
- [ClientFamily, AddressToConnectTo]),
- ?line ClientOpts =
+ ServerFamily = get_family_by_addrs(ServerAddresses),
+ io:format("Serving ~p addresses: ~p~n",
+ [ServerFamily, ServerAddresses]),
+ S1 = ok(gen_sctp:open(0, [{ip,Addr} || Addr <- ServerAddresses] ++
+ [ServerFamily])),
+ ok = gen_sctp:listen(S1, true),
+ P1 = ok(inet:port(S1)),
+ ClientFamily = get_family_by_addr(AddressToConnectTo),
+ io:format("Connecting to ~p ~p~n",
+ [ClientFamily, AddressToConnectTo]),
+ ClientOpts =
[ClientFamily |
case ClientFamily of
inet6 ->
@@ -1524,39 +1477,39 @@ do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun) ->
_ ->
[]
end],
- ?line S2 = ok(gen_sctp:open(0, ClientOpts)),
+ S2 = ok(gen_sctp:open(0, ClientOpts)),
log(open),
%% Verify client can connect
- ?line #sctp_assoc_change{state=comm_up} = S2Assoc =
+ #sctp_assoc_change{state=comm_up} = S2Assoc =
ok(gen_sctp:connect(S2, AddressToConnectTo, P1, [])),
log(comm_up),
%% verify server side also receives comm_up from client
- ?line S1Assoc = recv_comm_up_eventually(S1),
- ?line Result = Fun(S1, ServerFamily, S1Assoc, S2, ClientFamily, S2Assoc),
- ?line ok = gen_sctp:close(S2),
- ?line ok = gen_sctp:close(S1),
+ S1Assoc = recv_comm_up_eventually(S1),
+ Result = Fun(S1, ServerFamily, S1Assoc, S2, ClientFamily, S2Assoc),
+ ok = gen_sctp:close(S2),
+ ok = gen_sctp:close(S1),
Result.
%% If at least one of the addresses is an ipv6 address, return inet6, else inet.
get_family_by_addrs(Addresses) ->
- ?line case lists:usort([get_family_by_addr(Addr) || Addr <- Addresses]) of
- [inet, inet6] -> inet6;
- [inet] -> inet;
- [inet6] -> inet6
- end.
+ case lists:usort([get_family_by_addr(Addr) || Addr <- Addresses]) of
+ [inet, inet6] -> inet6;
+ [inet] -> inet;
+ [inet6] -> inet6
+ end.
get_family_by_addr(Addr) when tuple_size(Addr) =:= 4 -> inet;
get_family_by_addr(Addr) when tuple_size(Addr) =:= 8 -> inet6.
recv_comm_up_eventually(S) ->
- ?line case ok(gen_sctp:recv(S)) of
- {_Addr, _Port, _Info,
- #sctp_assoc_change{state=comm_up} = Assoc} ->
- Assoc;
- {_Addr, _Port, _Info, _OtherSctpMsg} = Msg ->
- ?line log({unexpected,Msg}),
- ?line recv_comm_up_eventually(S)
- end.
+ case ok(gen_sctp:recv(S)) of
+ {_Addr, _Port, _Info,
+ #sctp_assoc_change{state=comm_up} = Assoc} ->
+ Assoc;
+ {_Addr, _Port, _Info, _OtherSctpMsg} = Msg ->
+ log({unexpected,Msg}),
+ recv_comm_up_eventually(S)
+ end.
%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% socket gen_server ultra light
@@ -1589,8 +1542,8 @@ socket_peeloff(Socket, AssocId, SocketOpts, Timeout) ->
end,
s_start(Starter, Timeout).
-socket_close_verbose(S) ->
- History = socket_history(socket_close(S)),
+socket_close_verbose(S, StartTime) ->
+ History = socket_history(socket_close(S), StartTime),
io:format("socket_close ~p:~n ~p.~n", [S,History]),
History.
@@ -1603,19 +1556,19 @@ socket_call(S, Request) ->
%% socket_get(S, Key) ->
%% s_req(S, {get,Key}).
-socket_bailout([S|Ss]) ->
- History = socket_history(socket_close(S)),
+socket_bailout([S|Ss], StartTime) ->
+ History = socket_history(socket_close(S), StartTime),
io:format("bailout ~p:~n ~p.~n", [S,History]),
- socket_bailout(Ss);
-socket_bailout([]) ->
+ socket_bailout(Ss, StartTime);
+socket_bailout([], _) ->
io:format("flush: ~p.~n", [flush()]),
- test_server:fail(socket_bailout).
+ ct:fail(socket_bailout).
-socket_history({State,Flush}) ->
+socket_history({State,Flush}, StartTime) ->
{lists:keysort(
2,
lists:flatten(
- [[{Key,Val} || Val <- Vals]
+ [[{Key,{TS-StartTime,Val}} || {TS,Val} <- Vals]
|| {Key,Vals} <- gb_trees:to_list(State)])),
Flush}.
@@ -1663,8 +1616,7 @@ s_start(Socket, Timeout, Parent) ->
try
s_loop(Socket, Timeout, Parent, Handler, gb_trees:empty())
catch
- Class:Reason ->
- Stacktrace = erlang:get_stacktrace(),
+ Class:Reason:Stacktrace ->
io:format(?MODULE_STRING":socket exception ~w:~w at~n"
"~p.~n", [Class,Reason,Stacktrace]),
erlang:raise(Class, Reason, Stacktrace)
@@ -1678,14 +1630,12 @@ s_loop(Socket, Timeout, Parent, Handler, State) ->
{Parent,Ref,exit} ->
ok = gen_sctp:close(Socket),
Key = exit,
- Val = {now(),Socket},
- NewState = gb_push(Key, Val, State),
+ NewState = gb_push(Key, Socket, State),
Parent ! {self(),Ref,{NewState,flush()}};
{Parent,Ref,{Msg}} ->
Result = Handler(Msg),
Key = req,
- Val = {now(),{Msg,Result}},
- NewState = gb_push(Key, Val, State),
+ NewState = gb_push(Key, {Msg,Result}, State),
Parent ! {self(),Ref,Result},
s_loop(Socket, Timeout, Parent, Handler, NewState);
%% {Parent,Ref,{get,Key}} ->
@@ -1695,16 +1645,15 @@ s_loop(Socket, Timeout, Parent, Handler, State) ->
{[#sctp_sndrcvinfo{stream=Stream,assoc_id=AssocId}=SRI],Data}}
when not is_tuple(Data) ->
case gb_get({assoc_change,AssocId}, State) of
- [{_,{Addr,Port,
- #sctp_assoc_change{
- state=comm_up,
- inbound_streams=Is}}}|_]
+ [{Addr,Port,
+ #sctp_assoc_change{
+ state=comm_up,
+ inbound_streams=Is}}|_]
when 0 =< Stream, Stream < Is-> ok;
[] -> ok
end,
Key = {msg,AssocId,Stream},
- Val = {now(),{Addr,Port,SRI,Data}},
- NewState = gb_push(Key, Val, State),
+ NewState = gb_push(Key, {Addr,Port,SRI,Data}, State),
Parent ! {self(),{Addr,Port,AssocId,Stream,Data}},
again(Socket),
s_loop(Socket, Timeout, Parent, Handler, NewState);
@@ -1715,13 +1664,12 @@ s_loop(Socket, Timeout, Parent, Handler, State) ->
[] -> ok
end,
Key = {assoc_change,AssocId},
- Val = {now(),{Addr,Port,SAC}},
case {gb_get(Key, State),St} of
{[],_} -> ok;
- {[{_,{Addr,Port,#sctp_assoc_change{state=comm_up}}}|_],_}
+ {[{Addr,Port,#sctp_assoc_change{state=comm_up}}|_],_}
when St =:= comm_lost; St =:= shutdown_comp -> ok
end,
- NewState = gb_push(Key, Val, State),
+ NewState = gb_push(Key, {Addr,Port,SAC}, State),
Parent ! {self(),{Addr,Port,SAC}},
again(Socket),
s_loop(Socket, Timeout, Parent, Handler, NewState);
@@ -1735,14 +1683,13 @@ s_loop(Socket, Timeout, Parent, Handler, State) ->
[] -> ok
end,
case {gb_get({assoc_change,AssocId}, State),St} of
- {[{_,{Addr,Port,#sctp_assoc_change{state=comm_up}}}|_],_}
+ {[{Addr,Port,#sctp_assoc_change{state=comm_up}}|_],_}
when St =:= addr_available;
St =:= addr_confirmed -> ok;
{[],addr_confirmed} -> ok
end,
Key = {paddr_change,AssocId},
- Val = {now(),{Addr,Port,SPC}},
- NewState = gb_push(Key, Val, State),
+ NewState = gb_push(Key, {Addr,Port,SPC}, State),
again(Socket),
s_loop(Socket, Timeout, Parent, Handler, NewState);
{sctp,Socket,Addr,Port,
@@ -1752,12 +1699,11 @@ s_loop(Socket, Timeout, Parent, Handler, State) ->
[] -> ok
end,
case gb_get({assoc_change,AssocId}, State) of
- [{_,{Addr,Port,#sctp_assoc_change{state=comm_up}}}|_] -> ok;
+ [{Addr,Port,#sctp_assoc_change{state=comm_up}}|_] -> ok;
[] -> ok
end,
Key = {shutdown_event,AssocId},
- Val = {now(),{Addr,Port}},
- NewState = gb_push(Key, Val, State),
+ NewState = gb_push(Key, {Addr,Port}, State),
Parent ! {self(), {Addr,Port,SSE}},
again(Socket),
s_loop(Socket, Timeout, Parent, Handler, NewState);
@@ -1775,11 +1721,12 @@ again(Socket) ->
end.
gb_push(Key, Val, GBT) ->
+ TS = timestamp(),
case gb_trees:lookup(Key, GBT) of
none ->
- gb_trees:insert(Key, [Val], GBT);
+ gb_trees:insert(Key, [{TS,Val}], GBT);
{value,V} ->
- gb_trees:update(Key, [Val|V], GBT)
+ gb_trees:update(Key, [{TS,Val}|V], GBT)
end.
gb_get(Key, GBT) ->
@@ -1787,7 +1734,7 @@ gb_get(Key, GBT) ->
none ->
[];
{value,V} ->
- V
+ [Val || {_TS,Val} <- V]
end.
match_unless_solaris(A, B) ->
@@ -1795,3 +1742,6 @@ match_unless_solaris(A, B) ->
{unix,sunos} -> B;
_ -> A = B
end.
+
+timestamp() ->
+ erlang:monotonic_time().
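
The hunks above also replace the legacy now/0 timestamps with a timestamp/0
helper based on erlang:monotonic_time/0, and socket_history/2 now reports each
recorded event as a native-unit delta from the test case's StartTime. As an
illustrative aside (not part of the patch; the helper name is hypothetical),
such a delta can be rendered in milliseconds with erlang:convert_time_unit/3:

    %% Hypothetical helper: convert a recorded native-unit delta to milliseconds.
    delta_to_ms(NativeDelta) ->
        erlang:convert_time_unit(NativeDelta, native, millisecond).
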
diff --git a/lib/kernel/test/gen_tcp_api_SUITE.erl b/lib/kernel/test/gen_tcp_api_SUITE.erl
index 962471c20c..1be016444f 100644
--- a/lib/kernel/test/gen_tcp_api_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_api_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -34,21 +34,34 @@
t_recv_timeout/1, t_recv_eof/1, t_recv_delim/1,
t_shutdown_write/1, t_shutdown_both/1, t_shutdown_error/1,
t_shutdown_async/1,
- t_fdopen/1, t_fdconnect/1, t_implicit_inet6/1]).
+ t_fdopen/1, t_fdconnect/1, t_implicit_inet6/1,
+ t_local_basic/1, t_local_unbound/1, t_local_fdopen/1,
+ t_local_fdopen_listen/1, t_local_fdopen_listen_unbound/1,
+ t_local_fdopen_connect/1, t_local_fdopen_connect_unbound/1,
+ t_local_abstract/1, t_accept_inet6_tclass/1]).
-export([getsockfd/0,closesockfd/1]).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[{group, t_accept}, {group, t_connect}, {group, t_recv},
t_shutdown_write, t_shutdown_both, t_shutdown_error,
- t_shutdown_async, t_fdopen, t_fdconnect, t_implicit_inet6].
+ t_shutdown_async, t_fdopen, t_fdconnect, t_implicit_inet6,
+ t_accept_inet6_tclass,
+ {group, t_local}].
groups() ->
[{t_accept, [], [t_accept_timeout]},
{t_connect, [], [t_connect_timeout, t_connect_bad]},
- {t_recv, [], [t_recv_timeout, t_recv_eof, t_recv_delim]}].
+ {t_recv, [], [t_recv_timeout, t_recv_eof, t_recv_delim]},
+ {t_local, [],
+ [t_local_basic, t_local_unbound, t_local_fdopen,
+ t_local_fdopen_listen, t_local_fdopen_listen_unbound,
+ t_local_fdopen_connect, t_local_fdopen_connect_unbound,
+ t_local_abstract]}].
@@ -58,81 +71,93 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
ok.
+init_per_group(t_local, Config) ->
+ case gen_tcp:connect({local,<<"/">>}, 0, []) of
+ {error,eafnosupport} ->
+ {skip, "AF_LOCAL not supported"};
+ {error,_} ->
+ Config
+ end;
init_per_group(_GroupName, Config) ->
Config.
-end_per_group(_,_Config) ->
+end_per_group(t_local, _Config) ->
+ delete_local_filenames();
+end_per_group(_, _Config) ->
ok.
+
+init_per_testcase(Func, Config)
+ when Func =:= undefined -> % Insert your testcase name here
+ dbg:tracer(),
+ dbg:p(self(), c),
+ dbg:tpl(prim_inet, cx),
+ dbg:tpl(local_tcp, cx),
+ dbg:tpl(inet, cx),
+ dbg:tpl(gen_tcp, cx),
+ Config;
init_per_testcase(_Func, Config) ->
- Dog = test_server:timetrap(test_server:seconds(60)),
- [{watchdog, Dog}|Config].
-end_per_testcase(_Func, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog).
+ Config.
+
+end_per_testcase(_Func, _Config) ->
+ dbg:stop().
%%% gen_tcp:accept/1,2
-t_accept_timeout(doc) -> "Test that gen_tcp:accept/2 (with timeout) works.";
-t_accept_timeout(suite) -> [];
+%% Test that gen_tcp:accept/2 (with timeout) works.
t_accept_timeout(Config) when is_list(Config) ->
- ?line {ok, L} = gen_tcp:listen(0, []),
- ?line timeout({gen_tcp, accept, [L, 200]}, 0.2, 1.0).
+ {ok, L} = gen_tcp:listen(0, []),
+ timeout({gen_tcp, accept, [L, 200]}, 0.2, 1.0).
%%% gen_tcp:connect/X
-t_connect_timeout(doc) -> "Test that gen_tcp:connect/4 (with timeout) works.";
+%% Test that gen_tcp:connect/4 (with timeout) works.
t_connect_timeout(Config) when is_list(Config) ->
- %%?line BadAddr = {134,138,177,16},
- %%?line TcpPort = 80,
- ?line {ok, BadAddr} = unused_ip(),
- ?line TcpPort = 45638,
- ?line ok = io:format("Connecting to ~p, port ~p", [BadAddr, TcpPort]),
- ?line connect_timeout({gen_tcp,connect,[BadAddr,TcpPort,[],200]}, 0.2, 5.0).
-
-t_connect_bad(doc) ->
- ["Test that gen_tcp:connect/3 handles non-existings hosts, and other ",
- "invalid things."];
-t_connect_bad(suite) -> [];
+ %%BadAddr = {134,138,177,16},
+ %%TcpPort = 80,
+ {ok, BadAddr} = unused_ip(),
+ TcpPort = 45638,
+ ok = io:format("Connecting to ~p, port ~p", [BadAddr, TcpPort]),
+ connect_timeout({gen_tcp,connect,[BadAddr,TcpPort,[],200]}, 0.2, 5.0).
+
+%% Test that gen_tcp:connect/3 handles non-existing hosts, and other
+%% invalid things.
t_connect_bad(Config) when is_list(Config) ->
- ?line NonExistingPort = 45638, % Not in use, I hope.
- ?line {error, Reason1} = gen_tcp:connect(localhost, NonExistingPort, []),
- ?line io:format("Error for connection attempt to port not in use: ~p",
- [Reason1]),
-
- ?line {error, Reason2} = gen_tcp:connect("non-existing-host-xxx", 7, []),
- ?line io:format("Error for connection attempt to non-existing host: ~p",
- [Reason2]),
+ NonExistingPort = 45638, % Not in use, I hope.
+ {error, Reason1} = gen_tcp:connect(localhost, NonExistingPort, []),
+ io:format("Error for connection attempt to port not in use: ~p",
+ [Reason1]),
+
+ {error, Reason2} = gen_tcp:connect("non-existing-host-xxx", 7, []),
+ io:format("Error for connection attempt to non-existing host: ~p",
+ [Reason2]),
ok.
%%% gen_tcp:recv/X
-t_recv_timeout(doc) -> "Test that gen_tcp:recv/3 (with timeout works).";
-t_recv_timeout(suite) -> [];
+%% Test that gen_tcp:recv/3 (with timeout) works.
t_recv_timeout(Config) when is_list(Config) ->
- ?line {ok, L} = gen_tcp:listen(0, []),
- ?line {ok, Port} = inet:port(L),
- ?line {ok, Client} = gen_tcp:connect(localhost, Port, [{active, false}]),
- ?line {ok, _A} = gen_tcp:accept(L),
- ?line timeout({gen_tcp, recv, [Client, 0, 200]}, 0.2, 5.0).
-
-t_recv_eof(doc) -> "Test that end of file on a socket is reported correctly.";
-t_recv_eof(suite) -> [];
+ {ok, L} = gen_tcp:listen(0, []),
+ {ok, Port} = inet:port(L),
+ {ok, Client} = gen_tcp:connect(localhost, Port, [{active, false}]),
+ {ok, _A} = gen_tcp:accept(L),
+ timeout({gen_tcp, recv, [Client, 0, 200]}, 0.2, 5.0).
+
+%% Test that end of file on a socket is reported correctly.
t_recv_eof(Config) when is_list(Config) ->
- ?line {ok, L} = gen_tcp:listen(0, []),
- ?line {ok, Port} = inet:port(L),
- ?line {ok, Client} = gen_tcp:connect(localhost, Port, [{active, false}]),
- ?line {ok, A} = gen_tcp:accept(L),
- ?line ok = gen_tcp:close(A),
- ?line {error, closed} = gen_tcp:recv(Client, 0),
+ {ok, L} = gen_tcp:listen(0, []),
+ {ok, Port} = inet:port(L),
+ {ok, Client} = gen_tcp:connect(localhost, Port, [{active, false}]),
+ {ok, A} = gen_tcp:accept(L),
+ ok = gen_tcp:close(A),
+ {error, closed} = gen_tcp:recv(Client, 0),
ok.
-t_recv_delim(doc) -> "Test using message delimiter $X";
-t_recv_delim(suite) -> [];
+%% Test using message delimiter $X.
t_recv_delim(Config) when is_list(Config) ->
{ok, L} = gen_tcp:listen(0, []),
{ok, Port} = inet:port(L),
@@ -140,8 +165,8 @@ t_recv_delim(Config) when is_list(Config) ->
{ok, Client} = gen_tcp:connect(localhost, Port, Opts),
{ok, A} = gen_tcp:accept(L),
ok = gen_tcp:send(A, "abcXefgX"),
- {ok, "abcX"} = gen_tcp:recv(Client, 0, 0),
- {ok, "efgX"} = gen_tcp:recv(Client, 0, 0),
+ {ok, "abcX"} = gen_tcp:recv(Client, 0, 200),
+ {ok, "efgX"} = gen_tcp:recv(Client, 0, 200),
ok = gen_tcp:close(Client),
ok = gen_tcp:close(A),
ok.
@@ -149,96 +174,96 @@ t_recv_delim(Config) when is_list(Config) ->
%%% gen_tcp:shutdown/2
t_shutdown_write(Config) when is_list(Config) ->
- ?line {ok, L} = gen_tcp:listen(0, []),
- ?line {ok, Port} = inet:port(L),
- ?line {ok, Client} = gen_tcp:connect(localhost, Port, [{active, false}]),
- ?line {ok, A} = gen_tcp:accept(L),
- ?line ok = gen_tcp:shutdown(A, write),
- ?line {error, closed} = gen_tcp:recv(Client, 0),
+ {ok, L} = gen_tcp:listen(0, []),
+ {ok, Port} = inet:port(L),
+ {ok, Client} = gen_tcp:connect(localhost, Port, [{active, false}]),
+ {ok, A} = gen_tcp:accept(L),
+ ok = gen_tcp:shutdown(A, write),
+ {error, closed} = gen_tcp:recv(Client, 0),
ok.
t_shutdown_both(Config) when is_list(Config) ->
- ?line {ok, L} = gen_tcp:listen(0, []),
- ?line {ok, Port} = inet:port(L),
- ?line {ok, Client} = gen_tcp:connect(localhost, Port, [{active, false}]),
- ?line {ok, A} = gen_tcp:accept(L),
- ?line ok = gen_tcp:shutdown(A, read_write),
- ?line {error, closed} = gen_tcp:recv(Client, 0),
+ {ok, L} = gen_tcp:listen(0, []),
+ {ok, Port} = inet:port(L),
+ {ok, Client} = gen_tcp:connect(localhost, Port, [{active, false}]),
+ {ok, A} = gen_tcp:accept(L),
+ ok = gen_tcp:shutdown(A, read_write),
+ {error, closed} = gen_tcp:recv(Client, 0),
ok.
t_shutdown_error(Config) when is_list(Config) ->
- ?line {ok, L} = gen_tcp:listen(0, []),
- ?line {error, enotconn} = gen_tcp:shutdown(L, read_write),
- ?line ok = gen_tcp:close(L),
- ?line {error, closed} = gen_tcp:shutdown(L, read_write),
+ {ok, L} = gen_tcp:listen(0, []),
+ {error, enotconn} = gen_tcp:shutdown(L, read_write),
+ ok = gen_tcp:close(L),
+ {error, closed} = gen_tcp:shutdown(L, read_write),
ok.
t_shutdown_async(Config) when is_list(Config) ->
- ?line {OS, _} = os:type(),
- ?line {ok, L} = gen_tcp:listen(0, [{sndbuf, 4096}]),
- ?line {ok, Port} = inet:port(L),
- ?line {ok, Client} = gen_tcp:connect(localhost, Port,
- [{recbuf, 4096},
- {active, false}]),
- ?line {ok, S} = gen_tcp:accept(L),
- ?line PayloadSize = 1024 * 1024,
- ?line Payload = lists:duplicate(PayloadSize, $.),
- ?line ok = gen_tcp:send(S, Payload),
- ?line case erlang:port_info(S, queue_size) of
- {queue_size, N} when N > 0 -> ok;
- {queue_size, 0} when OS =:= win32 -> ok;
- {queue_size, 0} = T -> ?t:fail({unexpected, T})
- end,
-
- ?line ok = gen_tcp:shutdown(S, write),
- ?line {ok, Buf} = gen_tcp:recv(Client, PayloadSize),
- ?line {error, closed} = gen_tcp:recv(Client, 0),
- ?line case length(Buf) of
- PayloadSize -> ok;
- Sz -> ?t:fail({payload_size,
- {expected, PayloadSize},
- {received, Sz}})
- end.
+ {OS, _} = os:type(),
+ {ok, L} = gen_tcp:listen(0, [{sndbuf, 4096}]),
+ {ok, Port} = inet:port(L),
+ {ok, Client} = gen_tcp:connect(localhost, Port,
+ [{recbuf, 4096},
+ {active, false}]),
+ {ok, S} = gen_tcp:accept(L),
+ PayloadSize = 1024 * 1024,
+ Payload = lists:duplicate(PayloadSize, $.),
+ ok = gen_tcp:send(S, Payload),
+ case erlang:port_info(S, queue_size) of
+ {queue_size, N} when N > 0 -> ok;
+ {queue_size, 0} when OS =:= win32 -> ok;
+ {queue_size, 0} = T -> ct:fail({unexpected, T})
+ end,
+
+ ok = gen_tcp:shutdown(S, write),
+ {ok, Buf} = gen_tcp:recv(Client, PayloadSize),
+ {error, closed} = gen_tcp:recv(Client, 0),
+ case length(Buf) of
+ PayloadSize -> ok;
+ Sz -> ct:fail({payload_size,
+ {expected, PayloadSize},
+ {received, Sz}})
+ end.
%%% gen_tcp:fdopen/2
t_fdopen(Config) when is_list(Config) ->
- ?line Question = "Aaaa... Long time ago in a small town in Germany,",
- ?line Question1 = list_to_binary(Question),
- ?line Question2 = [<<"Aaaa">>, "... ", $L, <<>>, $o, "ng time ago ",
- ["in ", [], <<"a small town">>, [" in Germany,", <<>>]]],
- ?line Question1 = iolist_to_binary(Question2),
- ?line Answer = "there was a shoemaker, Schumacher was his name.",
- ?line {ok, L} = gen_tcp:listen(0, [{active, false}]),
- ?line {ok, Port} = inet:port(L),
- ?line {ok, Client} = gen_tcp:connect(localhost, Port, [{active, false}]),
- ?line {ok, A} = gen_tcp:accept(L),
- ?line {ok, FD} = prim_inet:getfd(A),
- ?line {ok, Server} = gen_tcp:fdopen(FD, []),
- ?line ok = gen_tcp:send(Client, Question),
- ?line {ok, Question} = gen_tcp:recv(Server, length(Question), 2000),
- ?line ok = gen_tcp:send(Client, Question1),
- ?line {ok, Question} = gen_tcp:recv(Server, length(Question), 2000),
- ?line ok = gen_tcp:send(Client, Question2),
- ?line {ok, Question} = gen_tcp:recv(Server, length(Question), 2000),
- ?line ok = gen_tcp:send(Server, Answer),
- ?line {ok, Answer} = gen_tcp:recv(Client, length(Answer), 2000),
- ?line ok = gen_tcp:close(Client),
- ?line {error,closed} = gen_tcp:recv(A, 1, 2000),
- ?line ok = gen_tcp:close(Server),
- ?line ok = gen_tcp:close(A),
- ?line ok = gen_tcp:close(L),
+ Question = "Aaaa... Long time ago in a small town in Germany,",
+ Question1 = list_to_binary(Question),
+ Question2 = [<<"Aaaa">>, "... ", $L, <<>>, $o, "ng time ago ",
+ ["in ", [], <<"a small town">>, [" in Germany,", <<>>]]],
+ Question1 = iolist_to_binary(Question2),
+ Answer = "there was a shoemaker, Schumacher was his name.",
+ {ok, L} = gen_tcp:listen(0, [{active, false}]),
+ {ok, Port} = inet:port(L),
+ {ok, Client} = gen_tcp:connect(localhost, Port, [{active, false}]),
+ {ok, A} = gen_tcp:accept(L),
+ {ok, FD} = prim_inet:getfd(A),
+ {ok, Server} = gen_tcp:fdopen(FD, []),
+ ok = gen_tcp:send(Client, Question),
+ {ok, Question} = gen_tcp:recv(Server, length(Question), 2000),
+ ok = gen_tcp:send(Client, Question1),
+ {ok, Question} = gen_tcp:recv(Server, length(Question), 2000),
+ ok = gen_tcp:send(Client, Question2),
+ {ok, Question} = gen_tcp:recv(Server, length(Question), 2000),
+ ok = gen_tcp:send(Server, Answer),
+ {ok, Answer} = gen_tcp:recv(Client, length(Answer), 2000),
+ ok = gen_tcp:close(Client),
+ {error,closed} = gen_tcp:recv(A, 1, 2000),
+ ok = gen_tcp:close(Server),
+ ok = gen_tcp:close(A),
+ ok = gen_tcp:close(L),
ok.
t_fdconnect(Config) when is_list(Config) ->
Question = "Aaaa... Long time ago in a small town in Germany,",
Question1 = list_to_binary(Question),
Question2 = [<<"Aaaa">>, "... ", $L, <<>>, $o, "ng time ago ",
- ["in ", [], <<"a small town">>, [" in Germany,", <<>>]]],
+ ["in ", [], <<"a small town">>, [" in Germany,", <<>>]]],
Question1 = iolist_to_binary(Question2),
Answer = "there was a shoemaker, Schumacher was his name.",
- Path = ?config(data_dir, Config),
+ Path = proplists:get_value(data_dir, Config),
Lib = "gen_tcp_api_SUITE",
ok = erlang:load_nif(filename:join(Path,Lib), []),
{ok, L} = gen_tcp:listen(0, [{active, false}]),
@@ -266,53 +291,253 @@ t_fdconnect(Config) when is_list(Config) ->
%%% implicit inet6 option to api functions
t_implicit_inet6(Config) when is_list(Config) ->
- ?line Host = ok(inet:gethostname()),
- ?line
- case inet:getaddr(Host, inet6) of
- {ok,Addr} ->
- ?line t_implicit_inet6(Host, Addr);
- {error,Reason} ->
- {skip,
- "Can not look up IPv6 address: "
- ++atom_to_list(Reason)}
- end.
+ Host = ok(inet:gethostname()),
+ case inet:getaddr(Host, inet6) of
+ {ok,Addr} ->
+ t_implicit_inet6(Host, Addr);
+ {error,Reason} ->
+ {skip,
+ "Can not look up IPv6 address: "
+ ++atom_to_list(Reason)}
+ end.
t_implicit_inet6(Host, Addr) ->
- ?line
- case gen_tcp:listen(0, [inet6]) of
- {ok,S1} ->
- ?line Loopback = {0,0,0,0,0,0,0,1},
- ?line io:format("~s ~p~n", ["::1",Loopback]),
- ?line implicit_inet6(S1, Loopback),
- ?line ok = gen_tcp:close(S1),
- %%
- ?line Localhost = "localhost",
- ?line Localaddr = ok(inet:getaddr(Localhost, inet6)),
- ?line io:format("~s ~p~n", [Localhost,Localaddr]),
- ?line S2 = ok(gen_tcp:listen(0, [{ip,Localaddr}])),
- ?line implicit_inet6(S2, Localaddr),
- ?line ok = gen_tcp:close(S2),
- %%
- ?line io:format("~s ~p~n", [Host,Addr]),
- ?line S3 = ok(gen_tcp:listen(0, [{ifaddr,Addr}])),
- ?line implicit_inet6(S3, Addr),
- ?line ok = gen_tcp:close(S3);
- {error,_} ->
- {skip,"IPv6 not supported"}
- end.
+ Loopback = {0,0,0,0,0,0,0,1},
+ case gen_tcp:listen(0, [inet6, {ip,Loopback}]) of
+ {ok,S1} ->
+ io:format("~s ~p~n", ["::1",Loopback]),
+ implicit_inet6(S1, Loopback),
+ ok = gen_tcp:close(S1),
+ %%
+ Localaddr = ok(get_localaddr()),
+ S2 = ok(gen_tcp:listen(0, [{ip,Localaddr}])),
+ implicit_inet6(S2, Localaddr),
+ ok = gen_tcp:close(S2),
+ %%
+ io:format("~s ~p~n", [Host,Addr]),
+ S3 = ok(gen_tcp:listen(0, [{ifaddr,Addr}])),
+ implicit_inet6(S3, Addr),
+ ok = gen_tcp:close(S3);
+ {error,_} ->
+ {skip,"IPv6 not supported"}
+ end.
implicit_inet6(S, Addr) ->
- ?line P = ok(inet:port(S)),
- ?line S2 = ok(gen_tcp:connect(Addr, P, [])),
- ?line P2 = ok(inet:port(S2)),
- ?line S1 = ok(gen_tcp:accept(S)),
- ?line P1 = P = ok(inet:port(S1)),
- ?line {Addr,P2} = ok(inet:peername(S1)),
- ?line {Addr,P1} = ok(inet:peername(S2)),
- ?line {Addr,P1} = ok(inet:sockname(S1)),
- ?line {Addr,P2} = ok(inet:sockname(S2)),
- ?line ok = gen_tcp:close(S2),
- ?line ok = gen_tcp:close(S1).
+ P = ok(inet:port(S)),
+ S2 = ok(gen_tcp:connect(Addr, P, [])),
+ P2 = ok(inet:port(S2)),
+ S1 = ok(gen_tcp:accept(S)),
+ P1 = P = ok(inet:port(S1)),
+ {Addr,P2} = ok(inet:peername(S1)),
+ {Addr,P1} = ok(inet:peername(S2)),
+ {Addr,P1} = ok(inet:sockname(S1)),
+ {Addr,P2} = ok(inet:sockname(S2)),
+ ok = gen_tcp:close(S2),
+ ok = gen_tcp:close(S1).
+
+
+
+t_local_basic(_Config) ->
+ SFile = local_filename(server),
+ SAddr = {local,bin_filename(SFile)},
+ CFile = local_filename(client),
+ CAddr = {local,bin_filename(CFile)},
+ _ = file:delete(SFile),
+ _ = file:delete(CFile),
+ %%
+ L =
+ ok(
+ gen_tcp:listen(0, [{ifaddr,{local,SFile}},{active,false}])),
+ C =
+ ok(
+ gen_tcp:connect(
+ {local,SFile}, 0, [{ifaddr,{local,CFile}},{active,false}])),
+ S = ok(gen_tcp:accept(L)),
+ SAddr = ok(inet:sockname(L)),
+ {error,enotconn} = inet:peername(L),
+ local_handshake(S, SAddr, C, CAddr),
+ ok = gen_tcp:close(L),
+ ok = gen_tcp:close(S),
+ ok = gen_tcp:close(C),
+ %%
+ ok = file:delete(SFile),
+ ok = file:delete(CFile),
+ ok.
+
+t_local_unbound(_Config) ->
+ SFile = local_filename(server),
+ SAddr = {local,bin_filename(SFile)},
+ _ = file:delete(SFile),
+ %%
+ L = ok(gen_tcp:listen(0, [{ifaddr,SAddr},{active,false}])),
+ C = ok(gen_tcp:connect(SAddr, 0, [{active,false}])),
+ S = ok(gen_tcp:accept(L)),
+ SAddr = ok(inet:sockname(L)),
+ {error,enotconn} = inet:peername(L),
+ local_handshake(S, SAddr, C, {local,<<>>}),
+ ok = gen_tcp:close(L),
+ ok = gen_tcp:close(S),
+ ok = gen_tcp:close(C),
+ ok = file:delete(SFile),
+ ok.
+
+t_local_fdopen(_Config) ->
+ SFile = local_filename(server),
+ SAddr = {local,bin_filename(SFile)},
+ _ = file:delete(SFile),
+ %%
+ L = ok(gen_tcp:listen(0, [{ifaddr,SAddr},{active,false}])),
+ C0 = ok(gen_tcp:connect(SAddr, 0, [{active,false}])),
+ Fd = ok(prim_inet:getfd(C0)),
+ ok = prim_inet:ignorefd(C0, true),
+ C = ok(gen_tcp:fdopen(Fd, [local])),
+ S = ok(gen_tcp:accept(L)),
+ SAddr = ok(inet:sockname(L)),
+ {error,enotconn} = inet:peername(L),
+ local_handshake(S, SAddr, C, {local,<<>>}),
+ ok = gen_tcp:close(L),
+ ok = gen_tcp:close(S),
+ ok = gen_tcp:close(C),
+ ok = gen_tcp:close(C0),
+ ok = file:delete(SFile),
+ ok.
+
+t_local_fdopen_listen(_Config) ->
+ SFile = local_filename(server),
+ SAddr = {local,bin_filename(SFile)},
+ _ = file:delete(SFile),
+ L0 = ok(gen_tcp:listen(0, [{ifaddr,SAddr},{active,false}])),
+ Fd = ok(prim_inet:getfd(L0)),
+ L = ok(gen_tcp:listen(0, [{fd,Fd},local,{active,false}])),
+ C = ok(gen_tcp:connect(SAddr, 0, [{active,false}])),
+ S = ok(gen_tcp:accept(L)),
+ SAddr = ok(inet:sockname(L)),
+ {error,enotconn} = inet:peername(L),
+ local_handshake(S, SAddr, C, {local,<<>>}),
+ ok = gen_tcp:close(L),
+ ok = gen_tcp:close(L0),
+ ok = gen_tcp:close(S),
+ ok = gen_tcp:close(C),
+ ok = file:delete(SFile),
+ ok.
+
+t_local_fdopen_listen_unbound(_Config) ->
+ SFile = local_filename(server),
+ SAddr = {local,bin_filename(SFile)},
+ _ = file:delete(SFile),
+ P = ok(prim_inet:open(tcp, local, stream)),
+ Fd = ok(prim_inet:getfd(P)),
+ L =
+ ok(gen_tcp:listen(
+ 0, [{fd,Fd},{ifaddr,SAddr},{active,false}])),
+ C = ok(gen_tcp:connect(SAddr, 0, [{active,false}])),
+ S = ok(gen_tcp:accept(L)),
+ SAddr = ok(inet:sockname(L)),
+ {error,enotconn} = inet:peername(L),
+ local_handshake(S, SAddr, C, {local,<<>>}),
+ ok = gen_tcp:close(L),
+ ok = gen_tcp:close(P),
+ ok = gen_tcp:close(S),
+ ok = gen_tcp:close(C),
+ ok = file:delete(SFile),
+ ok.
+
+t_local_fdopen_connect(_Config) ->
+ SFile = local_filename(server),
+ SAddr = {local,bin_filename(SFile)},
+ CFile = local_filename(client),
+ CAddr = {local,bin_filename(CFile)},
+ _ = file:delete(SFile),
+ _ = file:delete(CFile),
+ L = ok(gen_tcp:listen(0, [{ifaddr,SAddr},{active,false}])),
+ P = ok(prim_inet:open(tcp, local, stream)),
+ Fd = ok(prim_inet:getfd(P)),
+ C =
+ ok(gen_tcp:connect(
+ SAddr, 0, [{fd,Fd},{ifaddr,CAddr},{active,false}])),
+ S = ok(gen_tcp:accept(L)),
+ SAddr = ok(inet:sockname(L)),
+ {error,enotconn} = inet:peername(L),
+ local_handshake(S, SAddr, C, CAddr),
+ ok = gen_tcp:close(L),
+ ok = gen_tcp:close(S),
+ ok = gen_tcp:close(C),
+ ok = gen_tcp:close(P),
+ ok = file:delete(SFile),
+ ok.
+
+t_local_fdopen_connect_unbound(_Config) ->
+ SFile = local_filename(server),
+ SAddr = {local,bin_filename(SFile)},
+ _ = file:delete(SFile),
+ L = ok(gen_tcp:listen(0, [{ifaddr,SAddr},{active,false}])),
+ P = ok(prim_inet:open(tcp, local, stream)),
+ Fd = ok(prim_inet:getfd(P)),
+ C = ok(gen_tcp:connect(SAddr, 0, [{fd,Fd},{active,false}])),
+ S = ok(gen_tcp:accept(L)),
+ SAddr = ok(inet:sockname(L)),
+ {error,enotconn} = inet:peername(L),
+ local_handshake(S, SAddr, C, {local,<<>>}),
+ ok = gen_tcp:close(L),
+ ok = gen_tcp:close(S),
+ ok = gen_tcp:close(C),
+ ok = gen_tcp:close(P),
+ ok = file:delete(SFile),
+ ok.
+
+t_local_abstract(_Config) ->
+ case os:type() of
+ {unix,linux} ->
+ AbstAddr = {local,<<>>},
+ L =
+ ok(gen_tcp:listen(
+ 0, [{ifaddr,AbstAddr},{active,false}])),
+ {local,_} = SAddr = ok(inet:sockname(L)),
+ C =
+ ok(gen_tcp:connect(
+ SAddr, 0, [{ifaddr,AbstAddr},{active,false}])),
+ {local,_} = CAddr = ok(inet:sockname(C)),
+ S = ok(gen_tcp:accept(L)),
+ {error,enotconn} = inet:peername(L),
+ local_handshake(S, SAddr, C, CAddr),
+ ok = gen_tcp:close(L),
+ ok = gen_tcp:close(S),
+ ok = gen_tcp:close(C),
+ ok;
+ _ ->
+ {skip,"AF_LOCAL Abstract Addresses only supported on Linux"}
+ end.
+
+
+local_handshake(S, SAddr, C, CAddr) ->
+ SData = "9876543210",
+ CData = "0123456789",
+ SAddr = ok(inet:sockname(S)),
+ CAddr = ok(inet:sockname(C)),
+ CAddr = ok(inet:peername(S)),
+ SAddr = ok(inet:peername(C)),
+ ok = gen_tcp:send(C, CData),
+ ok = gen_tcp:send(S, SData),
+ CData = ok(gen_tcp:recv(S, length(CData))),
+ SData = ok(gen_tcp:recv(C, length(SData))),
+ ok.
+
+t_accept_inet6_tclass(Config) when is_list(Config) ->
+ TClassOpt = {tclass,8#56 bsl 2}, % Expedited forwarding
+ Loopback = {0,0,0,0,0,0,0,1},
+ case gen_tcp:listen(0, [inet6, {ip, Loopback}, TClassOpt]) of
+ {ok,L} ->
+ LPort = ok(inet:port(L)),
+ Sa = ok(gen_tcp:connect(Loopback, LPort, [])),
+ Sb = ok(gen_tcp:accept(L)),
+ [TClassOpt] = ok(inet:getopts(Sb, [tclass])),
+ ok = gen_tcp:close(Sb),
+ ok = gen_tcp:close(Sa),
+ ok = gen_tcp:close(L),
+ ok;
+ {error,_} ->
+ {skip,"IPv6 TCLASS not supported"}
+ end.
%%% Utilities
@@ -323,13 +548,13 @@ implicit_inet6(S, Addr) ->
timeout({M,F,A}, Lower, Upper) ->
case test_server:timecall(M, F, A) of
{Time, Result} when Time < Lower ->
- test_server:fail({too_short_time, Time, Result});
+ ct:fail({too_short_time, Time, Result});
{Time, Result} when Time > Upper ->
- test_server:fail({too_long_time, Time, Result});
+ ct:fail({too_long_time, Time, Result});
{_, {error, timeout}} ->
ok;
{_, Result} ->
- test_server:fail({unexpected_result, Result})
+ ct:fail({unexpected_result, Result})
end.
connect_timeout({M,F,A}, Lower, Upper) ->
@@ -344,28 +569,28 @@ connect_timeout({M,F,A}, Lower, Upper) ->
Pinfo = erlang:port_info(Socket),
Db = inet_db:lookup_socket(Socket),
Peer = inet:peername(Socket),
- test_server:fail({too_short_time, Time,
- [Result,Pinfo,Db,Peer]});
+ ct:fail({too_short_time, Time,
+ [Result,Pinfo,Db,Peer]});
_ ->
- test_server:fail({too_short_time, Time, Result})
+ ct:fail({too_short_time, Time, Result})
end;
{Time, Result} when Time > Upper ->
- test_server:fail({too_long_time, Time, Result});
+ ct:fail({too_long_time, Time, Result});
{_, {error, timeout}} ->
ok;
{_, Result} ->
- test_server:fail({unexpected_result, Result})
+ ct:fail({unexpected_result, Result})
end.
%% Try to obtain an unused IP address in the local network.
unused_ip() ->
- ?line {ok, Host} = inet:gethostname(),
- ?line {ok, Hent} = inet:gethostbyname(Host),
- ?line #hostent{h_addr_list=[{A, B, C, _D}|_]} = Hent,
+ {ok, Host} = inet:gethostname(),
+ {ok, Hent} = inet:gethostbyname(Host),
+ #hostent{h_addr_list=[{A, B, C, _D}|_]} = Hent,
%% Note: In our net, addresses below 16 are reserved for routers and
%% other strange creatures.
- ?line IP = unused_ip(A, B, C, 16),
+ IP = unused_ip(A, B, C, 16),
io:format("we = ~p, unused_ip = ~p~n", [Hent, IP]),
IP.
@@ -376,8 +601,42 @@ unused_ip(A, B, C, D) ->
{error, _} -> {ok, {A, B, C, D}}
end.
-ok({ok,V}) -> V.
+ok({ok,V}) -> V;
+ok(NotOk) ->
+ try throw(not_ok)
+ catch
+ throw:Thrown:Stacktrace ->
+ erlang:raise(
+ error, {Thrown, NotOk}, tl(Stacktrace))
+ end.
+get_localaddr() ->
+ get_localaddr(["localhost", "localhost6", "ip6-localhost"]).
+
+get_localaddr([]) ->
+ {error, localaddr_not_found};
+get_localaddr([Localhost|Ls]) ->
+ case inet:getaddr(Localhost, inet6) of
+ {ok, LocalAddr} ->
+ io:format("~s ~p~n", [Localhost, LocalAddr]),
+ {ok, LocalAddr};
+ _ ->
+ get_localaddr(Ls)
+ end.
getsockfd() -> undefined.
closesockfd(_FD) -> undefined.
+
+local_filename(Tag) ->
+ "/tmp/" ?MODULE_STRING "_" ++ os:getpid() ++ "_" ++ atom_to_list(Tag).
+
+bin_filename(String) ->
+ unicode:characters_to_binary(String, file:native_name_encoding()).
+
+delete_local_filenames() ->
+ _ =
+ [file:delete(F) ||
+ F <-
+ filelib:wildcard(
+ "/tmp/" ?MODULE_STRING "_" ++ os:getpid() ++ "_*")],
+ ok.
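
The new t_local test group above drives gen_tcp over AF_LOCAL (Unix domain)
sockets using {local,Binary} addresses with port 0. A minimal standalone sketch
of the capability probe that init_per_group/2 performs (the function name is
illustrative only, not part of the suite):

    %% Illustrative probe: false when the OS rejects AF_LOCAL entirely,
    %% true when the connect fails for any other reason (e.g. "/" is a directory).
    has_af_local() ->
        case gen_tcp:connect({local,<<"/">>}, 0, []) of
            {error, eafnosupport} -> false;
            {error, _Other} -> true
        end.
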
diff --git a/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c b/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
index ca8eacdf40..b91dca61d4 100644
--- a/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
+++ b/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 2009-2013. All Rights Reserved.
+ * Copyright Ericsson AB 2009-2016. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
*
* %CopyrightEnd%
*/
-#include "erl_nif.h"
+#include <erl_nif.h>
#include <stdio.h>
#include <string.h>
diff --git a/lib/kernel/test/gen_tcp_echo_SUITE.erl b/lib/kernel/test/gen_tcp_echo_SUITE.erl
index 6dcb21758b..57525f8015 100644
--- a/lib/kernel/test/gen_tcp_echo_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_echo_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%%
-module(gen_tcp_echo_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
%%-compile(export_all).
@@ -34,7 +34,9 @@
-define(TPKT_VRSN, 3).
-define(LINE_LENGTH, 1023). % (default value of gen_tcp option 'recbuf') - 1
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,5}}].
all() ->
[active_echo, passive_echo, active_once_echo,
@@ -59,94 +61,75 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(_Func, Config) ->
- Dog = test_server:timetrap(test_server:minutes(5)),
- [{watchdog, Dog}|Config].
-end_per_testcase(_Func, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog).
-
-active_echo(doc) ->
- ["Test sending packets of various sizes and various packet types ",
- "to the echo port and receiving them again (socket in active mode)."];
-active_echo(suite) -> [];
+ Config.
+
+end_per_testcase(_Func, _Config) ->
+ ok.
+
+%% Test sending packets of various sizes and various packet types
+%% to the echo port and receiving them again (socket in active mode).
active_echo(Config) when is_list(Config) ->
- ?line echo_test([], fun active_echo/4, [{echo, fun echo_server/0}]).
+ echo_test([], fun active_echo/4, [{echo, fun echo_server/0}]).
-passive_echo(doc) ->
- ["Test sending packets of various sizes and various packet types ",
- "to the echo port and receiving them again (socket in passive mode)."];
-passive_echo(suite) -> [];
+%% Test sending packets of various sizes and various packet types
+%% to the echo port and receiving them again (socket in passive mode).
passive_echo(Config) when is_list(Config) ->
- ?line echo_test([{active, false}], fun passive_echo/4,
- [{echo, fun echo_server/0}]).
+ echo_test([{active, false}], fun passive_echo/4,
+ [{echo, fun echo_server/0}]).
-active_once_echo(doc) ->
- ["Test sending packets of various sizes and various packet types ",
- "to the echo port and receiving them again (socket in active once mode)."];
-active_once_echo(suite) -> [];
+%% Test sending packets of various sizes and various packet types
+%% to the echo port and receiving them again (socket in active once mode).
active_once_echo(Config) when is_list(Config) ->
- ?line echo_test([{active, once}], fun active_once_echo/4,
- [{echo, fun echo_server/0}]).
-
-slow_active_echo(doc) ->
- ["Test sending packets of various sizes and various packet types ",
- "to the echo port and receiving them again (socket in active mode). ",
- "The echo server is a special one that delays between every character."];
-slow_active_echo(suite) -> [];
+ echo_test([{active, once}], fun active_once_echo/4,
+ [{echo, fun echo_server/0}]).
+
+%% Test sending packets of various sizes and various packet types
+%% to the echo port and receiving them again (socket in active mode).
+%% The echo server is a special one that delays between every character.
slow_active_echo(Config) when is_list(Config) ->
- ?line echo_test([], fun active_echo/4,
- [slow_echo, {echo, fun slow_echo_server/0}]).
-
-slow_passive_echo(doc) ->
- ["Test sending packets of various sizes and various packet types ",
- "to an echo server and receiving them again (socket in passive mode).",
- "The echo server is a special one that delays between every character."];
-slow_passive_echo(suite) -> [];
+ echo_test([], fun active_echo/4,
+ [slow_echo, {echo, fun slow_echo_server/0}]).
+
+%% Test sending packets of various sizes and various packet types
+%% to an echo server and receiving them again (socket in passive mode).
+%% The echo server is a special one that delays between every character.
slow_passive_echo(Config) when is_list(Config) ->
- ?line echo_test([{active, false}], fun passive_echo/4,
- [slow_echo, {echo, fun slow_echo_server/0}]).
-
-limit_active_echo(doc) ->
- ["Test sending packets of various sizes and various packet types ",
- "to the echo port and receiving them again (socket in active mode) "
- "with packet_size limitation."];
-limit_active_echo(suite) -> [];
+ echo_test([{active, false}], fun passive_echo/4,
+ [slow_echo, {echo, fun slow_echo_server/0}]).
+
+%% Test sending packets of various sizes and various packet types
+%% to the echo port and receiving them again (socket in active mode)
+%% with packet_size limitation.
limit_active_echo(Config) when is_list(Config) ->
- ?line echo_test([{packet_size, 10}],
- fun active_echo/4,
- [{packet_size, 10}, {echo, fun echo_server/0}]).
-
-limit_passive_echo(doc) ->
- ["Test sending packets of various sizes and various packet types ",
- "to the echo port and receiving them again (socket in passive mode) ",
- "with packet_size limitation."];
-limit_passive_echo(suite) -> [];
+ echo_test([{packet_size, 10}],
+ fun active_echo/4,
+ [{packet_size, 10}, {echo, fun echo_server/0}]).
+
+%% Test sending packets of various sizes and various packet types
+%% to the echo port and receiving them again (socket in passive mode)
+%% with packet_size limitation.
limit_passive_echo(Config) when is_list(Config) ->
- ?line echo_test([{packet_size, 10},{active, false}],
- fun passive_echo/4,
- [{packet_size, 10}, {echo, fun echo_server/0}]).
-
-large_limit_active_echo(doc) ->
- ["Test sending packets of various sizes and various packet types ",
- "to the echo port and receiving them again (socket in active mode) "
- "with large packet_size limitation."];
-large_limit_active_echo(suite) -> [];
+ echo_test([{packet_size, 10},{active, false}],
+ fun passive_echo/4,
+ [{packet_size, 10}, {echo, fun echo_server/0}]).
+
+%% Test sending packets of various sizes and various packet types
+%% to the echo port and receiving them again (socket in active mode)
+%% with large packet_size limitation.
large_limit_active_echo(Config) when is_list(Config) ->
- ?line echo_test([{packet_size, 10}],
- fun active_echo/4,
- [{packet_size, (1 bsl 32)-1},
- {echo, fun echo_server/0}]).
-
-large_limit_passive_echo(doc) ->
- ["Test sending packets of various sizes and various packet types ",
- "to the echo port and receiving them again (socket in passive mode) ",
- "with large packet_size limitation."];
-large_limit_passive_echo(suite) -> [];
+ echo_test([{packet_size, 10}],
+ fun active_echo/4,
+ [{packet_size, (1 bsl 32)-1},
+ {echo, fun echo_server/0}]).
+
+%% Test sending packets of various sizes and various packet types
+%% to the echo port and receiving them again (socket in passive mode)
+%% with large packet_size limitation.
large_limit_passive_echo(Config) when is_list(Config) ->
- ?line echo_test([{packet_size, 10},{active, false}],
- fun passive_echo/4,
- [{packet_size, (1 bsl 32) -1},
- {echo, fun echo_server/0}]).
+ echo_test([{packet_size, 10},{active, false}],
+ fun passive_echo/4,
+ [{packet_size, (1 bsl 32) -1},
+ {echo, fun echo_server/0}]).
echo_test(SockOpts, EchoFun, Config0) ->
echo_test_1(SockOpts, EchoFun, Config0),
@@ -154,53 +137,53 @@ echo_test(SockOpts, EchoFun, Config0) ->
echo_test_1([{delay_send,true}|SockOpts], EchoFun, Config0).
echo_test_1(SockOpts, EchoFun, Config0) ->
- ?line EchoSrvFun = ?config(echo, Config0),
- ?line {ok, EchoPort} = EchoSrvFun(),
- ?line Config = [{echo_port, EchoPort}|Config0],
-
- ?line echo_packet([{packet, 1}|SockOpts], EchoFun, Config),
- ?line echo_packet([{packet, 2}|SockOpts], EchoFun, Config),
- ?line echo_packet([{packet, 4}|SockOpts], EchoFun, Config),
- ?line echo_packet([{packet, sunrm}|SockOpts], EchoFun, Config),
- ?line echo_packet([{packet, cdr}|SockOpts], EchoFun,
- [{type, {cdr, big}}|Config]),
- ?line echo_packet([{packet, cdr}|SockOpts], EchoFun,
- [{type, {cdr, little}}|Config]),
- ?line case lists:keymember(packet_size, 1, SockOpts) of
- false ->
- % This is cheating, we should test that packet_size
- % also works for line and http.
- echo_packet([{packet, line}|SockOpts], EchoFun, Config),
- echo_packet([{packet, http}|SockOpts], EchoFun, Config),
- echo_packet([{packet, http_bin}|SockOpts], EchoFun, Config);
-
- true -> ok
- end,
- ?line echo_packet([{packet, tpkt}|SockOpts], EchoFun, Config),
-
- ?line ShortTag = [16#E0],
- ?line LongTag = [16#1F, 16#83, 16#27],
- ?line echo_packet([{packet, asn1}|SockOpts], EchoFun,
- [{type, {asn1, short, ShortTag}}|Config]),
- ?line echo_packet([{packet, asn1}|SockOpts], EchoFun,
- [{type, {asn1, long, ShortTag}}|Config]),
- ?line echo_packet([{packet, asn1}|SockOpts], EchoFun,
- [{type, {asn1, short, LongTag}}|Config]),
- ?line echo_packet([{packet, asn1}|SockOpts], EchoFun,
- [{type, {asn1, long, LongTag}}|Config]),
+ EchoSrvFun = proplists:get_value(echo, Config0),
+ {ok, EchoPort} = EchoSrvFun(),
+ Config = [{echo_port, EchoPort}|Config0],
+
+ echo_packet([{packet, 1}|SockOpts], EchoFun, Config),
+ echo_packet([{packet, 2}|SockOpts], EchoFun, Config),
+ echo_packet([{packet, 4}|SockOpts], EchoFun, Config),
+ echo_packet([{packet, sunrm}|SockOpts], EchoFun, Config),
+ echo_packet([{packet, cdr}|SockOpts], EchoFun,
+ [{type, {cdr, big}}|Config]),
+ echo_packet([{packet, cdr}|SockOpts], EchoFun,
+ [{type, {cdr, little}}|Config]),
+ case lists:keymember(packet_size, 1, SockOpts) of
+ false ->
+ %% This is cheating, we should test that packet_size
+ %% also works for line and http.
+ echo_packet([{packet, line}|SockOpts], EchoFun, Config),
+ echo_packet([{packet, http}|SockOpts], EchoFun, Config),
+ echo_packet([{packet, http_bin}|SockOpts], EchoFun, Config);
+
+ true -> ok
+ end,
+ echo_packet([{packet, tpkt}|SockOpts], EchoFun, Config),
+
+ ShortTag = [16#E0],
+ LongTag = [16#1F, 16#83, 16#27],
+ echo_packet([{packet, asn1}|SockOpts], EchoFun,
+ [{type, {asn1, short, ShortTag}}|Config]),
+ echo_packet([{packet, asn1}|SockOpts], EchoFun,
+ [{type, {asn1, long, ShortTag}}|Config]),
+ echo_packet([{packet, asn1}|SockOpts], EchoFun,
+ [{type, {asn1, short, LongTag}}|Config]),
+ echo_packet([{packet, asn1}|SockOpts], EchoFun,
+ [{type, {asn1, long, LongTag}}|Config]),
ok.
echo_packet(SockOpts, EchoFun, Opts) ->
Type = case lists:keysearch(type, 1, Opts) of
- {value, {type, T}} ->
- T;
- _ ->
- {value, {packet, T}} = lists:keysearch(packet, 1, SockOpts),
- T
- end,
+ {value, {type, T}} ->
+ T;
+ _ ->
+ {value, {packet, T}} = lists:keysearch(packet, 1, SockOpts),
+ T
+ end,
%% Connect to the echo server.
- EchoPort = ?config(echo_port, Opts),
+ EchoPort = proplists:get_value(echo_port, Opts),
{ok, Echo} = gen_tcp:connect(localhost, EchoPort, SockOpts),
SlowEcho = lists:member(slow_echo, Opts),
@@ -223,83 +206,78 @@ echo_packet_http(Echo, Type, EchoFun) ->
EchoFun(Echo, Type, P2, http_reply(P2, Type)).
echo_packet0(Echo, Type, EchoFun, SlowEcho, Opts) ->
- ?line PacketSize =
+ PacketSize =
case lists:keysearch(packet_size, 1, Opts) of
{value,{packet_size,Sz}} when Sz < 10 -> Sz;
{value,{packet_size,_}} -> 10;
false -> 0
end,
%% Echo small packets first.
- ?line echo_packet1(Echo, Type, EchoFun, 0),
- ?line echo_packet1(Echo, Type, EchoFun, 1),
- ?line echo_packet1(Echo, Type, EchoFun, 2),
- ?line echo_packet1(Echo, Type, EchoFun, 3),
- ?line echo_packet1(Echo, Type, EchoFun, 4),
- ?line echo_packet1(Echo, Type, EchoFun, 7),
+ echo_packet1(Echo, Type, EchoFun, 0),
+ echo_packet1(Echo, Type, EchoFun, 1),
+ echo_packet1(Echo, Type, EchoFun, 2),
+ echo_packet1(Echo, Type, EchoFun, 3),
+ echo_packet1(Echo, Type, EchoFun, 4),
+ echo_packet1(Echo, Type, EchoFun, 7),
if PacketSize =/= 0 ->
- ?line echo_packet1(Echo, Type, EchoFun,
- {PacketSize-1, PacketSize}),
- ?line echo_packet1(Echo, Type, EchoFun,
- {PacketSize, PacketSize}),
- ?line echo_packet1(Echo, Type, EchoFun,
- {PacketSize+1, PacketSize});
+ echo_packet1(Echo, Type, EchoFun,
+ {PacketSize-1, PacketSize}),
+ echo_packet1(Echo, Type, EchoFun,
+ {PacketSize, PacketSize}),
+ echo_packet1(Echo, Type, EchoFun,
+ {PacketSize+1, PacketSize});
not SlowEcho -> % Go on with bigger packets if not slow echo server.
- ?line echo_packet1(Echo, Type, EchoFun, 10),
- ?line echo_packet1(Echo, Type, EchoFun, 13),
- ?line echo_packet1(Echo, Type, EchoFun, 126),
- ?line echo_packet1(Echo, Type, EchoFun, 127),
- ?line echo_packet1(Echo, Type, EchoFun, 128),
- ?line echo_packet1(Echo, Type, EchoFun, 255),
- ?line echo_packet1(Echo, Type, EchoFun, 256),
- ?line echo_packet1(Echo, Type, EchoFun, 1023),
- ?line echo_packet1(Echo, Type, EchoFun, 3747),
- ?line echo_packet1(Echo, Type, EchoFun, 32767),
- ?line echo_packet1(Echo, Type, EchoFun, 32768),
- ?line echo_packet1(Echo, Type, EchoFun, 65531),
- ?line echo_packet1(Echo, Type, EchoFun, 65535),
- ?line echo_packet1(Echo, Type, EchoFun, 65536),
- ?line echo_packet1(Echo, Type, EchoFun, 70000),
- ?line echo_packet1(Echo, Type, EchoFun, infinite);
+ echo_packet1(Echo, Type, EchoFun, 10),
+ echo_packet1(Echo, Type, EchoFun, 13),
+ echo_packet1(Echo, Type, EchoFun, 126),
+ echo_packet1(Echo, Type, EchoFun, 127),
+ echo_packet1(Echo, Type, EchoFun, 128),
+ echo_packet1(Echo, Type, EchoFun, 255),
+ echo_packet1(Echo, Type, EchoFun, 256),
+ echo_packet1(Echo, Type, EchoFun, 1023),
+ echo_packet1(Echo, Type, EchoFun, 3747),
+ echo_packet1(Echo, Type, EchoFun, 32767),
+ echo_packet1(Echo, Type, EchoFun, 32768),
+ echo_packet1(Echo, Type, EchoFun, 65531),
+ echo_packet1(Echo, Type, EchoFun, 65535),
+ echo_packet1(Echo, Type, EchoFun, 65536),
+ echo_packet1(Echo, Type, EchoFun, 70000),
+ echo_packet1(Echo, Type, EchoFun, infinite);
true -> ok
end,
- ?line gen_tcp:close(Echo),
+ gen_tcp:close(Echo),
ok.
echo_packet1(EchoSock, Type, EchoFun, Size) ->
- ?line case packet(Size, Type) of
- false ->
- ok;
- Packet ->
- ?line io:format("Type ~p, size ~p, time ~p",
- [Type, Size, time()]),
- ?line
- case EchoFun(EchoSock, Type, Packet, [Packet]) of
- ok ->
- ?line
- case Size of
- {N, Max} when N > Max ->
- ?line
- test_server:fail(
- {packet_through, {N, Max}});
- _ -> ok
- end;
- {error, emsgsize} ->
- ?line
- case Size of
- {N, Max} when N > Max ->
- io:format(" Blocked!");
- _ ->
- ?line
- test_server:fail(
- {packet_blocked, Size})
- end;
- Error ->
- ?line test_server:fail(Error)
- end
- end.
+ case packet(Size, Type) of
+ false ->
+ ok;
+ Packet ->
+ io:format("Type ~p, size ~p, time ~p",
+ [Type, Size, time()]),
+ case EchoFun(EchoSock, Type, Packet, [Packet]) of
+ ok ->
+ case Size of
+ {N, Max} when N > Max ->
+ ct:fail(
+ {packet_through, {N, Max}});
+ _ -> ok
+ end;
+ {error, emsgsize} ->
+ case Size of
+ {N, Max} when N > Max ->
+ io:format(" Blocked!");
+ _ ->
+ ct:fail(
+ {packet_blocked, Size})
+ end;
+ Error ->
+ ct:fail(Error)
+ end
+ end.
active_echo(Sock, Type, Packet, PacketEchos) ->
- ?line ok = gen_tcp:send(Sock, Packet),
+ ok = gen_tcp:send(Sock, Packet),
active_recv(Sock, Type, PacketEchos).
active_recv(_, _, []) ->
@@ -310,21 +288,21 @@ active_recv(Sock, Type, [PacketEcho|Tail]) ->
http_bin -> http;
_ -> tcp
end,
- ?line receive Recv->Recv end,
+ receive Recv->Recv end,
%%io:format("Active received: ~p\n",[Recv]),
- ?line case Recv of
- {Tag, Sock, PacketEcho} ->
- active_recv(Sock, Type, Tail);
- {Tag, Sock, Bad} ->
- ?line test_server:fail({wrong_data, Bad, expected, PacketEcho});
- {tcp_error, Sock, Reason} ->
- {error, Reason};
- Other ->
- ?line test_server:fail({unexpected_message, Other, Tag})
- end.
+ case Recv of
+ {Tag, Sock, PacketEcho} ->
+ active_recv(Sock, Type, Tail);
+ {Tag, Sock, Bad} ->
+ ct:fail({wrong_data, Bad, expected, PacketEcho});
+ {tcp_error, Sock, Reason} ->
+ {error, Reason};
+ Other ->
+ ct:fail({unexpected_message, Other, Tag})
+ end.
passive_echo(Sock, _Type, Packet, PacketEchos) ->
- ?line ok = gen_tcp:send(Sock, Packet),
+ ok = gen_tcp:send(Sock, Packet),
passive_recv(Sock, PacketEchos).
passive_recv(_, []) ->
@@ -332,22 +310,22 @@ passive_recv(_, []) ->
passive_recv(Sock, [PacketEcho | Tail]) ->
Recv = gen_tcp:recv(Sock, 0),
%%io:format("Passive received: ~p\n",[Recv]),
- ?line case Recv of
- {ok, PacketEcho} ->
- passive_recv(Sock, Tail);
- {ok, Bad} ->
- io:format("Expected: ~p\nGot: ~p\n",[PacketEcho,Bad]),
- ?line test_server:fail({wrong_data, Bad});
- {error,PacketEcho} ->
- passive_recv(Sock, Tail); % expected error
- {error, _}=Error ->
- Error;
- Other ->
- ?line test_server:fail({unexpected_message, Other})
- end.
+ case Recv of
+ {ok, PacketEcho} ->
+ passive_recv(Sock, Tail);
+ {ok, Bad} ->
+ io:format("Expected: ~p\nGot: ~p\n",[PacketEcho,Bad]),
+ ct:fail({wrong_data, Bad});
+ {error,PacketEcho} ->
+ passive_recv(Sock, Tail); % expected error
+ {error, _}=Error ->
+ Error;
+ Other ->
+ ct:fail({unexpected_message, Other})
+ end.
active_once_echo(Sock, Type, Packet, PacketEchos) ->
- ?line ok = gen_tcp:send(Sock, Packet),
+ ok = gen_tcp:send(Sock, Packet),
active_once_recv(Sock, Type, PacketEchos).
active_once_recv(_, _, []) ->
@@ -358,17 +336,17 @@ active_once_recv(Sock, Type, [PacketEcho | Tail]) ->
http_bin -> http;
_ -> tcp
end,
- ?line receive
- {Tag, Sock, PacketEcho} ->
- inet:setopts(Sock, [{active, once}]),
- active_once_recv(Sock, Type, Tail);
- {Tag, Sock, Bad} ->
- ?line test_server:fail({wrong_data, Bad});
- {tcp_error, Sock, Reason} ->
- {error, Reason};
- Other ->
- ?line test_server:fail({unexpected_message, Other, expected, {Tag, Sock, PacketEcho}})
- end.
+ receive
+ {Tag, Sock, PacketEcho} ->
+ inet:setopts(Sock, [{active, once}]),
+ active_once_recv(Sock, Type, Tail);
+ {Tag, Sock, Bad} ->
+ ct:fail({wrong_data, Bad});
+ {tcp_error, Sock, Reason} ->
+ {error, Reason};
+ Other ->
+ ct:fail({unexpected_message, Other, expected, {Tag, Sock, PacketEcho}})
+ end.
%%% Building of random packets.
@@ -442,14 +420,7 @@ random_char(Chars) ->
lists:nth(uniform(length(Chars)), Chars).
uniform(N) ->
- case get(random_seed) of
- undefined ->
- {X, Y, Z} = time(),
- random:seed(X, Y, Z);
- _ ->
- ok
- end,
- random:uniform(N).
+ rand:uniform(N).
put_int32(X, big, List) ->
[ (X bsr 24) band 16#ff,
@@ -458,9 +429,9 @@ put_int32(X, big, List) ->
(X) band 16#ff | List ];
put_int32(X, little, List) ->
[ (X) band 16#ff,
- (X bsr 8) band 16#ff,
- (X bsr 16) band 16#ff,
- (X bsr 24) band 16#ff | List].
+ (X bsr 8) band 16#ff,
+ (X bsr 16) band 16#ff,
+ (X bsr 24) band 16#ff | List].
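+%% Editor's note (not part of the patch): put_int32/3 and put_int16/3
+%% hand-pack big- or little-endian bytes onto the head of a character
+%% list, matching the io-list style of the packet generator. With
+%% binaries the equivalent one-liners would be:
+%%
+%% put_int32_bin(X, big)    -> <<X:32/big>>;
+%% put_int32_bin(X, little) -> <<X:32/little>>.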
put_int16(X, ByteOrder) ->
put_int16(X, ByteOrder, []).
@@ -470,16 +441,16 @@ put_int16(X, big, List) ->
(X) band 16#ff | List ];
put_int16(X, little, List) ->
[ (X) band 16#ff,
- (X bsr 8) band 16#ff | List ].
+ (X bsr 8) band 16#ff | List ].
%%% A normal echo server, for systems that don't have one.
echo_server() ->
Self = self(),
- ?line spawn_link(fun() -> echo_server(Self) end),
- ?line receive
- {echo_port, Port} ->
- {ok, Port}
+ spawn_link(fun() -> echo_server(Self) end),
+ receive
+ {echo_port, Port} ->
+ {ok, Port}
end.
echo_server(ReplyTo) ->
@@ -512,11 +483,11 @@ echoer_loop(Sock) ->
slow_echo_server() ->
Self = self(),
- ?line spawn_link(fun() -> slow_echo_server(Self) end),
- ?line receive
- {echo_port, Port} ->
- {ok, Port}
- end.
+ spawn_link(fun() -> slow_echo_server(Self) end),
+ receive
+ {echo_port, Port} ->
+ {ok, Port}
+ end.
slow_echo_server(ReplyTo) ->
{ok, S} = gen_tcp:listen(0, [{active, false}, {nodelay, true}]),
@@ -552,17 +523,17 @@ slow_send(_, []) ->
http_request(Uri) ->
list_to_binary(["POST ", Uri, <<" HTTP/1.1\r\n"
- "Connection: close\r\n"
- "Host: localhost:8000\r\n"
- "User-Agent: perl post\r\n"
- "Content-Length: 4\r\n"
- "Content-Type: text/xml; charset=utf-8\r\n"
- "Other-Field: with some text\r\n"
- "Multi-Line: Once upon a time in a land far far away,\r\n"
- " there lived a princess imprisoned in the highest tower\r\n"
- " of the most haunted castle.\r\n"
- "Invalid line without a colon\r\n"
- "\r\n">>]).
+ "Connection: close\r\n"
+ "Host: localhost:8000\r\n"
+ "User-Agent: perl post\r\n"
+ "Content-Length: 4\r\n"
+ "Content-Type: text/xml; charset=utf-8\r\n"
+ "Other-Field: with some text\r\n"
+ "Multi-Line: Once upon a time in a land far far away,\r\n"
+ " there lived a princess imprisoned in the highest tower\r\n"
+ " of the most haunted castle.\r\n"
+ "Invalid line without a colon\r\n"
+ "\r\n">>]).
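+%% Editor's sketch (not part of the patch): the request above is fed
+%% through the 'http'/'http_bin' packet modes; erlang:decode_packet/3
+%% splits it into an {http_request,...} tuple followed by
+%% {http_header,...} tuples, which is what the echo tests compare
+%% against. A stand-alone illustration (the expected terms in the
+%% comments are the editor's reading, not output from a run):
+%%
+%% decode_sketch() ->
+%%     Bin = http_request("/index.html"),
+%%     {ok, Req, Rest} = erlang:decode_packet(http, Bin, []),
+%%     %% Req should match {http_request,'POST',{abs_path,"/index.html"},{1,1}}
+%%     {ok, Hdr, _} = erlang:decode_packet(httph, Rest, []),
+%%     %% Hdr should match {http_header,_,'Connection',_,"close"}
+%%     {Req, Hdr}.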
http_uri_variants() ->
["*",
@@ -575,11 +546,11 @@ http_uri_variants() ->
http_response() ->
<<"HTTP/1.0 404 Object Not Found\r\n"
- "Server: inets/4.7.16\r\n"
- "Date: Fri, 04 Jul 2008 17:16:22 GMT\r\n"
- "Content-Type: text/html\r\n"
- "Content-Length: 207\r\n"
- "\r\n">>.
+ "Server: inets/4.7.16\r\n"
+ "Date: Fri, 04 Jul 2008 17:16:22 GMT\r\n"
+ "Content-Type: text/html\r\n"
+ "Content-Length: 207\r\n"
+ "\r\n">>.
http_reply(Bin, Type) ->
{ok, Line, Rest} = erlang:decode_packet(Type,Bin,[]),
@@ -596,7 +567,3 @@ http_reply(<<>>, Acc, _) ->
http_reply(Bin, Acc, HType) ->
{ok, Line, Rest} = erlang:decode_packet(HType,Bin,[]),
http_reply(Rest, [Line | Acc], HType).
-
-
-
-
diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl
index 81c6dcd0fd..04c0c48e3a 100644
--- a/lib/kernel/test/gen_tcp_misc_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,15 +19,15 @@
%%
-module(gen_tcp_misc_SUITE).
--include_lib("test_server/include/test_server.hrl").
-
-%-compile(export_all).
+-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
controlling_process/1, controlling_process_self/1,
no_accept/1, close_with_pending_output/1, active_n/1,
- data_before_close/1, iter_max_socks/1, get_status/1,
+ data_before_close/1,
+ iter_max_socks/0, iter_max_socks/1,
+ get_status/1,
passive_sockets/1, accept_closed_by_other_process/1,
init_per_testcase/2, end_per_testcase/2,
otp_3924/1, otp_3924_sender/4, closed_socket/1,
@@ -41,7 +41,8 @@
busy_send/1, busy_disconnect_passive/1, busy_disconnect_active/1,
fill_sendq/1, partial_recv_and_close/1,
partial_recv_and_close_2/1,partial_recv_and_close_3/1,so_priority/1,
- % Accept tests
+ recvtos/1, recvttl/1, recvtosttl/1, recvtclass/1,
+ %% Accept tests
primitive_accept/1,multi_accept_close_listen/1,accept_timeout/1,
accept_timeouts_in_order/1,accept_timeouts_in_order2/1,
accept_timeouts_in_order3/1,accept_timeouts_in_order4/1,
@@ -50,49 +51,24 @@
killing_acceptor/1,killing_multi_acceptors/1,killing_multi_acceptors2/1,
several_accepts_in_one_go/1, accept_system_limit/1,
active_once_closed/1, send_timeout/1, send_timeout_active/1,
- otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1, wrapping_oct/1,
- otp_9389/1]).
+ otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1,
+ wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1,
+ otp_12242/1]).
%% Internal exports.
-export([sender/3, not_owner/1, passive_sockets_server/2, priority_server/1,
oct_acceptor/1,
otp_7731_server/1, zombie_server/2, do_iter_max_socks/2]).
-init_per_testcase(wrapping_oct, Config) when is_list(Config) ->
- Dog = case os:type() of
- {ose,_} ->
- test_server:timetrap(test_server:minutes(20));
- _Else ->
- test_server:timetrap(test_server:seconds(600))
- end,
- [{watchdog, Dog}|Config];
-init_per_testcase(iter_max_socks, Config) when is_list(Config) ->
- Dog = case os:type() of
- {win32,_} ->
- test_server:timetrap(test_server:minutes(30));
- _Else ->
- test_server:timetrap(test_server:seconds(240))
- end,
- [{watchdog, Dog}|Config];
-init_per_testcase(accept_system_limit, Config) when is_list(Config) ->
- case os:type() of
- {ose,_} ->
- {skip,"Skip in OSE"};
- _ ->
- Dog = test_server:timetrap(test_server:seconds(240)),
- [{watchdog,Dog}|Config]
- end;
-init_per_testcase(wrapping_oct, Config) when is_list(Config) ->
- Dog = test_server:timetrap(test_server:seconds(600)),
- [{watchdog, Dog}|Config];
-init_per_testcase(_Func, Config) when is_list(Config) ->
- Dog = test_server:timetrap(test_server:seconds(240)),
- [{watchdog, Dog}|Config].
-end_per_testcase(_Func, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog).
-
-suite() -> [{ct_hooks,[ts_install_cth]}].
+init_per_testcase(_Func, Config) ->
+ Config.
+
+end_per_testcase(_Func, _Config) ->
+ ok.
+
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,4}}].
all() ->
[controlling_process, controlling_process_self, no_accept,
@@ -109,7 +85,8 @@ all() ->
busy_disconnect_passive, busy_disconnect_active,
fill_sendq, partial_recv_and_close,
partial_recv_and_close_2, partial_recv_and_close_3,
- so_priority, primitive_accept,
+ so_priority, recvtos, recvttl, recvtosttl,
+ recvtclass, primitive_accept,
multi_accept_close_listen, accept_timeout,
accept_timeouts_in_order, accept_timeouts_in_order2,
accept_timeouts_in_order3, accept_timeouts_in_order4,
@@ -119,7 +96,8 @@ all() ->
killing_multi_acceptors2, several_accepts_in_one_go, accept_system_limit,
active_once_closed, send_timeout, send_timeout_active, otp_7731,
wrapping_oct,
- zombie_sockets, otp_7816, otp_8102, otp_9389].
+ zombie_sockets, otp_7816, otp_8102, otp_9389,
+ otp_12242].
groups() ->
[].
@@ -136,11 +114,13 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
-default_options(doc) ->
- ["Tests kernel application variables inet_default_listen_options and "
- "inet_default_connect_options"];
-default_options(suite) ->
- [];
+-define(UNIQ_NODE_NAME,
+ list_to_atom(?MODULE_STRING ++ "__" ++
+ atom_to_list(?FUNCTION_NAME) ++ "_" ++
+ integer_to_list(erlang:unique_integer([positive])))).
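+%% Editor's sketch (not part of the patch): the macro above derives a
+%% slave node name from the module, the calling function and a unique
+%% integer, so repeated or parallel test cases never reuse a node name.
+%% A hypothetical use inside a test case, modelled on
+%% do_delay_on_other_node/2 below:
+%%
+%% node_sketch() ->
+%%     Pa = filename:dirname(code:which(?MODULE)),
+%%     {ok, Node} = test_server:start_node(?UNIQ_NODE_NAME, slave,
+%%                                         [{args, "-pa " ++ Pa}]),
+%%     pong = net_adm:ping(Node),
+%%     test_server:stop_node(Node).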
+
+%% Tests kernel application variables inet_default_listen_options and
+%% inet_default_connect_options.
default_options(Config) when is_list(Config) ->
%% First check the delay_send option
{true,true,true}=do_delay_send_1(),
@@ -215,7 +195,7 @@ default_options(Config) when is_list(Config) ->
do_delay_on_other_node(XArgs, Function) ->
Dir = filename:dirname(code:which(?MODULE)),
- {ok,Node} = test_server:start_node(test_default_options_slave,slave,
+ {ok,Node} = test_server:start_node(?UNIQ_NODE_NAME, slave,
[{args,"-pa " ++ Dir ++ " " ++ XArgs}]),
Res = rpc:call(Node,erlang,apply,[Function,[]]),
test_server:stop_node(Node),
@@ -312,11 +292,9 @@ do_delay_send_7() ->
gen_tcp:close(LS),
{B1,B2,B3}.
-controlling_process(doc) ->
- ["Open a listen port and change controlling_process for it",
- "The result should be ok of done by the owner process,"
- "Otherwise is should return {error,not_owner} or similar"];
-controlling_process(suite) -> [];
+%% Open a listen port and change controlling_process for it.
+%% The result should be ok if done by the owner process,
+%% otherwise it should return {error,not_owner} or similar.
controlling_process(Config) when is_list(Config) ->
{ok,S} = gen_tcp:listen(0,[]),
Pid2 = spawn(?MODULE,not_owner,[S]),
@@ -345,9 +323,8 @@ not_owner(S) ->
ok
end.
-controlling_process_self(doc) ->
- ["Open a listen port and assign the controlling process to "
- "it self, then exit and make sure the port is closed properly."];
+%% Open a listen port and assign the controlling process to
+%% itself, then exit and make sure the port is closed properly.
controlling_process_self(Config) when is_list(Config) ->
S = self(),
process_flag(trap_exit,true),
@@ -371,11 +348,9 @@ controlling_process_self(Config) when is_list(Config) ->
end.
-no_accept(doc) ->
- ["Open a listen port and connect to it, then close the listen port ",
- "without doing any accept. The connected socket should receive ",
- "a tcp_closed message."];
-no_accept(suite) -> [];
+%% Open a listen port and connect to it, then close the listen port
+%% without doing any accept. The connected socket should receive
+%% a tcp_closed message.
no_accept(Config) when is_list(Config) ->
{ok, L} = gen_tcp:listen(0, []),
{ok, {_, Port}} = inet:sockname(L),
@@ -385,14 +360,12 @@ no_accept(Config) when is_list(Config) ->
{tcp_closed, Client} ->
ok
after 5000 ->
- test_server:fail(never_closed)
+ ct:fail(never_closed)
end.
-close_with_pending_output(doc) ->
- ["Send several packets to a socket and close it. All packets should arrive ",
- "to the other end."];
-close_with_pending_output(suite) -> [];
+%% Send several packets to a socket and close it. All packets should
+%% arrive at the other end.
close_with_pending_output(Config) when is_list(Config) ->
{ok, L} = gen_tcp:listen(0, [binary, {active, false}]),
{ok, {_, Port}} = inet:sockname(L),
@@ -408,16 +381,16 @@ close_with_pending_output(Config) when is_list(Config) ->
gen_tcp:close(A),
gen_tcp:close(L);
{ok, Bin} ->
- test_server:fail({small_packet,
+ ct:fail({small_packet,
byte_size(Bin)});
Error ->
- test_server:fail({unexpected, Error})
+ ct:fail({unexpected, Error})
end,
ok;
{error, no_remote_hosts} ->
{skipped,"No remote hosts"};
{error, Other} ->
- ?t:fail({failed_to_start_slave_node, Other})
+ ct:fail({failed_to_start_slave_node, Other})
end.
sender(Port, Packets, Host) ->
@@ -435,9 +408,7 @@ send_loop(Sock, Data, Left) ->
send_loop(Sock, Data, Left-1).
%% Test {active,N} option
-active_n(doc) ->
- ["Verify operation of the {active,N} option."];
-active_n(suite) -> [];
+%% Verify operation of the {active,N} option.
active_n(Config) when is_list(Config) ->
N = 3,
LS = ok(gen_tcp:listen(0, [{active,N}])),
@@ -547,9 +518,7 @@ active_n(Config) when is_list(Config) ->
%% I expect propagation of a close to be quite fast
%% so 100 ms seems reasonable.
-otp_3924(doc) ->
- ["Tests that a socket can be closed fast enough."];
-otp_3924(suite) -> [];
+%% Tests that a socket can be closed fast enough.
otp_3924(Config) when is_list(Config) ->
MaxDelay = (case has_superfluous_schedulers() of
true -> 4;
@@ -567,8 +536,8 @@ otp_3924_1(MaxDelay) ->
{ok, Node} = start_node(otp_3924),
DataLen = 100*1024,
Data = otp_3924_data(DataLen),
- % Repeat the test a couple of times to prevent the test from passing
- % by chance.
+ %% Repeat the test a couple of times to prevent the test from passing
+ %% by chance.
repeat(10, fun(N) ->
ok = otp_3924(MaxDelay, Node, Data, DataLen, N)
end),
@@ -607,17 +576,17 @@ otp_3924_receive_data(LSock, Sender, MaxDelay, Len, N) ->
process_flag(priority, OP),
receive
{'EXIT', _, TimeoutRef} ->
- test_server:fail({close_not_fast_enough,MaxDelay,N});
+ ct:fail({close_not_fast_enough,MaxDelay,N});
{'EXIT', Sender, Reason} ->
- test_server:fail({sender_exited, Reason});
+ ct:fail({sender_exited, Reason});
{'EXIT', _Other, Reason} ->
- test_server:fail({linked_process_exited, Reason})
+ ct:fail({linked_process_exited, Reason})
after 0 ->
case Data of
{'EXIT', {A,B}} ->
- test_server:fail({A,B,N});
+ ct:fail({A,B,N});
{'EXIT', Failure} ->
- test_server:fail(Failure);
+ ct:fail(Failure);
_ ->
Data
end
@@ -676,8 +645,7 @@ otp_3924_sender(Receiver, Host, Port, Data) ->
end.
-data_before_close(doc) ->
- ["Tests that a huge amount of data can be received before a close."];
+%% Tests that a huge amount of data can be received before a close.
data_before_close(Config) when is_list(Config) ->
{ok, L} = gen_tcp:listen(0, [binary]),
{ok, {_, TcpPort}} = inet:sockname(L),
@@ -689,7 +657,7 @@ data_before_close(Config) when is_list(Config) ->
io:format("Result: ~p", [Result]);
{Wrong, Result} ->
io:format("Result: ~p", [Result]),
- test_server:fail({wrong_count, Wrong})
+ ct:fail({wrong_count, Wrong})
end,
ok.
@@ -714,11 +682,9 @@ make_zero_packet(N) when N rem 2 == 0 ->
make_zero_packet(N) ->
P = make_zero_packet(N div 2),
[0, P|P].
-get_status(doc) ->
- ["OTP-2924",
- "test that the socket process does not crash when sys:get_status(Pid)",
- "is called."];
-get_status(suite) -> [];
+
+%% OTP-2924. Test that the socket process does not crash when
+%% sys:get_status(Pid) is called.
get_status(Config) when is_list(Config) ->
{ok,{socket,Pid,_,_}} = gen_tcp:listen(5678,[]),
{status,Pid,_,_} = sys:get_status(Pid).
@@ -726,9 +692,11 @@ get_status(Config) when is_list(Config) ->
-define(RECOVER_SLEEP, 60000).
-define(RETRY_SLEEP, 15000).
-iter_max_socks(doc) ->
- ["Open as many sockets as possible. Do this several times and check ",
- "that we get the same number of sockets every time."];
+iter_max_socks() ->
+ [{timetrap,{minutes,30}}].
+
+%% Open as many sockets as possible. Do this several times and check
+%% that we get the same number of sockets every time.
iter_max_socks(Config) when is_list(Config) ->
N = case os:type() of {win32,_} -> 10; _ -> 20 end,
%% Run on a different node in order to limit the effect if this test fails.
@@ -757,7 +725,7 @@ do_iter_max_socks(N, First) when is_integer(First) ->
true ->
io:format("Sleeping for ~p seconds...~n",
[?RETRY_SLEEP/1000]),
- ?t:sleep(?RETRY_SLEEP),
+ ct:sleep(?RETRY_SLEEP),
io:format("Trying again...~n", []),
RetryMS = max_socks(),
if RetryMS == First ->
@@ -775,10 +743,10 @@ all_equal([Rule | T]) ->
all_equal(Rule, [Rule | T]) ->
all_equal(Rule, T);
all_equal(_, [_ | _]) ->
- ?t:sleep(?RECOVER_SLEEP), % Wait a while and *hope* that we'll
- % recover so other tests won't be
- % affected.
- ?t:fail(max_socket_mismatch);
+ ct:sleep(?RECOVER_SLEEP), % Wait a while and *hope* that we'll
+ %% recover so other tests won't be
+ %% affected.
+ ct:fail(max_socket_mismatch);
all_equal(_Rule, []) ->
ok.
@@ -820,21 +788,20 @@ start_remote(Name) ->
Pa = filename:dirname(code:which(?MODULE)),
test_server:start_node(Name, slave, [{remote, true}, {args, "-pa " ++ Pa}]).
-passive_sockets(doc) ->
- ["Tests that when 'the other side' on a passive socket closes, the connecting",
- "side still can read until the end of data."];
+%% Tests that when 'the other side' on a passive socket closes, the
+%% connecting side can still read until the end of data.
passive_sockets(Config) when is_list(Config) ->
spawn_link(?MODULE, passive_sockets_server,
[[{active,false}],self()]),
receive
{socket,Port} -> ok
end,
- ?t:sleep(500),
+ ct:sleep(500),
case gen_tcp:connect("localhost", Port, [{active, false}]) of
{ok, Sock} ->
passive_sockets_read(Sock);
Error ->
- ?t:fail({"Could not connect to server", Error})
+ ct:fail({"Could not connect to server", Error})
end.
%%
@@ -850,7 +817,7 @@ passive_sockets_read(Sock) ->
gen_tcp:close(Sock);
Error ->
gen_tcp:close(Sock),
- ?t:fail({"Did not get {error, closed} before other error", Error})
+ ct:fail({"Did not get {error, closed} before other error", Error})
end.
passive_sockets_server(Opts, Parent) ->
@@ -860,17 +827,17 @@ passive_sockets_server(Opts, Parent) ->
Parent ! {socket,Port},
passive_sockets_server_accept(LSock);
Error ->
- ?t:fail({"Could not create listen socket", Error})
+ ct:fail({"Could not create listen socket", Error})
end.
passive_sockets_server_accept(Sock) ->
case gen_tcp:accept(Sock) of
{ok, Socket} ->
- ?t:sleep(500), % Simulate latency
+ timer:sleep(500), % Simulate latency
passive_sockets_server_send(Socket, 5),
passive_sockets_server_accept(Sock);
Error ->
- ?t:fail({"Could not accept connection", Error})
+ ct:fail({"Could not accept connection", Error})
end.
passive_sockets_server_send(Socket, 0) ->
@@ -880,16 +847,15 @@ passive_sockets_server_send(Socket, X) ->
Data = lists:duplicate(1024*X, $a),
case gen_tcp:send(Socket, Data) of
ok ->
- ?t:sleep(50), % Simulate some processing.
+ ct:sleep(50), % Simulate some processing.
passive_sockets_server_send(Socket, X-1);
{error, _Reason} ->
- ?t:fail("Failed to send data")
+ ct:fail("Failed to send data")
end.
-accept_closed_by_other_process(doc) ->
- ["Tests the return value from gen_tcp:accept when ",
- "the socket is closed from another process. (OTP-3817)"];
+%% Tests the return value from gen_tcp:accept when
+%% the socket is closed from another process. (OTP-3817)
accept_closed_by_other_process(Config) when is_list(Config) ->
Parent = self(),
{ok, ListenSocket} = gen_tcp:listen(0, []),
@@ -904,7 +870,7 @@ accept_closed_by_other_process(Config) when is_list(Config) ->
{Child, {error, closed}} ->
ok;
{Child, Other} ->
- ?t:fail({"Wrong result of gen_tcp:accept", Other})
+ ct:fail({"Wrong result of gen_tcp:accept", Other})
end.
repeat(N, Fun) ->
@@ -917,10 +883,7 @@ repeat(_, _, _) ->
ok.
-closed_socket(suite) ->
- [];
-closed_socket(doc) ->
- ["Tests the response when using a closed socket as argument"];
+%% Tests the response when using a closed socket as argument.
closed_socket(Config) when is_list(Config) ->
{ok, LS1} = gen_tcp:listen(0, []),
erlang:yield(),
@@ -932,7 +895,7 @@ closed_socket(Config) when is_list(Config) ->
%% in inet_db processes the 'EXIT' message from the port,
%% the socket is unregistered.
%%
- %% test_server:sleep(test_server:seconds(2)),
+ %% ct:sleep({seconds,2})
%%
{error, R_send} = gen_tcp:send(LS1, "data"),
{error, R_recv} = gen_tcp:recv(LS1, 17),
@@ -974,7 +937,7 @@ shutdown_common(Active) ->
do_sort(P, []),
receive
Any ->
- ?t:fail({unexpected_message,Any})
+ ct:fail({unexpected_message,Any})
after 0 -> ok
end.
@@ -1055,7 +1018,7 @@ shutdown_pending(Config) when is_list(Config) ->
io:format("~p\n", [Msg]),
N = list_to_integer(Msg) - 5;
Other ->
- ?t:fail({unexpected,Other})
+ ct:fail({unexpected,Other})
end,
ok.
@@ -1107,9 +1070,9 @@ show_econnreset_active(Config) when is_list(Config) ->
{tcp_closed, S} ->
ok;
Other ->
- ?t:fail({unexpected1, Other})
+ ct:fail({unexpected1, Other})
after 1000 ->
- ?t:fail({timeout, {server, no_tcp_closed}})
+ ct:fail({timeout, {server, no_tcp_closed}})
end,
%% Now test with option switched on.
@@ -1128,14 +1091,14 @@ show_econnreset_active(Config) when is_list(Config) ->
{tcp_closed, S1} ->
ok;
Other1 ->
- ?t:fail({unexpected2, Other1})
+ ct:fail({unexpected2, Other1})
after 1 ->
- ?t:fail({timeout, {server, no_tcp_closed}})
+ ct:fail({timeout, {server, no_tcp_closed}})
end;
Other2 ->
- ?t:fail({unexpected3, Other2})
+ ct:fail({unexpected3, Other2})
after 1000 ->
- ?t:fail({timeout, {server, no_tcp_error}})
+ ct:fail({timeout, {server, no_tcp_error}})
end.
show_econnreset_active_once(Config) when is_list(Config) ->
@@ -1149,7 +1112,7 @@ show_econnreset_active_once(Config) when is_list(Config) ->
ok = gen_tcp:close(L),
ok = inet:setopts(Client, [{linger, {true, 0}}]),
ok = gen_tcp:close(Client),
- ok = ?t:sleep(20),
+ ok = ct:sleep(20),
ok = receive Msg -> {unexpected_msg, Msg} after 0 -> ok end,
ok = inet:setopts(S, [{active, once}]),
receive
@@ -1158,14 +1121,14 @@ show_econnreset_active_once(Config) when is_list(Config) ->
{tcp_closed, S} ->
ok;
Other1 ->
- ?t:fail({unexpected1, Other1})
+ ct:fail({unexpected1, Other1})
after 1 ->
- ?t:fail({timeout, {server, no_tcp_closed}})
+ ct:fail({timeout, {server, no_tcp_closed}})
end;
Other2 ->
- ?t:fail({unexpected2, Other2})
+ ct:fail({unexpected2, Other2})
after 1000 ->
- ?t:fail({timeout, {server, no_tcp_error}})
+ ct:fail({timeout, {server, no_tcp_error}})
end.
show_econnreset_passive(Config) when is_list(Config) ->
@@ -1177,7 +1140,7 @@ show_econnreset_passive(Config) when is_list(Config) ->
ok = gen_tcp:close(L),
ok = inet:setopts(S, [{linger, {true, 0}}]),
ok = gen_tcp:close(S),
- ok = ?t:sleep(1),
+ ok = ct:sleep(1),
{error, closed} = gen_tcp:recv(Client, 0),
%% Now test with option switched on.
@@ -1190,7 +1153,7 @@ show_econnreset_passive(Config) when is_list(Config) ->
ok = gen_tcp:close(L1),
ok = inet:setopts(S1, [{linger, {true, 0}}]),
ok = gen_tcp:close(S1),
- ok = ?t:sleep(1),
+ ok = ct:sleep(1),
{error, econnreset} = gen_tcp:recv(Client1, 0).
econnreset_after_sync_send(Config) when is_list(Config) ->
@@ -1202,7 +1165,7 @@ econnreset_after_sync_send(Config) when is_list(Config) ->
ok = gen_tcp:close(L),
ok = inet:setopts(S, [{linger, {true, 0}}]),
ok = gen_tcp:close(S),
- ok = ?t:sleep(20),
+ ok = ct:sleep(20),
{error, closed} = gen_tcp:send(Client, "Whatever"),
%% Now test with option switched on.
@@ -1215,7 +1178,7 @@ econnreset_after_sync_send(Config) when is_list(Config) ->
ok = gen_tcp:close(L1),
ok = inet:setopts(S1, [{linger, {true, 0}}]),
ok = gen_tcp:close(S1),
- ok = ?t:sleep(20),
+ ok = ct:sleep(20),
{error, econnreset} = gen_tcp:send(Client1, "Whatever").
econnreset_after_async_send_active(Config) when is_list(Config) ->
@@ -1232,23 +1195,23 @@ econnreset_after_async_send_active(Config) when is_list(Config) ->
case erlang:port_info(Client, queue_size) of
{queue_size, N} when N > 0 -> ok;
{queue_size, 0} when OS =:= win32 -> ok;
- {queue_size, 0} = T -> ?t:fail(T)
+ {queue_size, 0} = T -> ct:fail(T)
end,
ok = gen_tcp:send(S, "Whatever"),
- ok = ?t:sleep(20),
+ ok = ct:sleep(20),
ok = inet:setopts(S, [{linger, {true, 0}}]),
ok = gen_tcp:close(S),
- ok = ?t:sleep(20),
+ ok = ct:sleep(20),
receive
{tcp, Client, "Whatever"} ->
receive
{tcp_closed, Client} ->
ok;
Other1 ->
- ?t:fail({unexpected1, Other1})
+ ct:fail({unexpected1, Other1})
end;
Other2 ->
- ?t:fail({unexpected2, Other2})
+ ct:fail({unexpected2, Other2})
end,
%% Now test with option switched on.
@@ -1263,13 +1226,13 @@ econnreset_after_async_send_active(Config) when is_list(Config) ->
case erlang:port_info(Client1, queue_size) of
{queue_size, N1} when N1 > 0 -> ok;
{queue_size, 0} when OS =:= win32 -> ok;
- {queue_size, 0} = T1 -> ?t:fail(T1)
+ {queue_size, 0} = T1 -> ct:fail(T1)
end,
ok = gen_tcp:send(S1, "Whatever"),
- ok = ?t:sleep(20),
+ ok = ct:sleep(20),
ok = inet:setopts(S1, [{linger, {true, 0}}]),
ok = gen_tcp:close(S1),
- ok = ?t:sleep(20),
+ ok = ct:sleep(20),
receive
{tcp, Client1, "Whatever"} ->
receive
@@ -1278,13 +1241,13 @@ econnreset_after_async_send_active(Config) when is_list(Config) ->
{tcp_closed, Client1} ->
ok;
Other3 ->
- ?t:fail({unexpected3, Other3})
+ ct:fail({unexpected3, Other3})
end;
Other4 ->
- ?t:fail({unexpected4, Other4})
+ ct:fail({unexpected4, Other4})
end;
Other5 ->
- ?t:fail({unexpected5, Other5})
+ ct:fail({unexpected5, Other5})
end.
econnreset_after_async_send_active_once(Config) when is_list(Config) ->
@@ -1302,13 +1265,13 @@ econnreset_after_async_send_active_once(Config) when is_list(Config) ->
case erlang:port_info(Client, queue_size) of
{queue_size, N} when N > 0 -> ok;
{queue_size, 0} when OS =:= win32 -> ok;
- {queue_size, 0} = T -> ?t:fail(T)
+ {queue_size, 0} = T -> ct:fail(T)
end,
ok = gen_tcp:send(S, "Whatever"),
- ok = ?t:sleep(20),
+ ok = ct:sleep(20),
ok = inet:setopts(S, [{linger, {true, 0}}]),
ok = gen_tcp:close(S),
- ok = ?t:sleep(20),
+ ok = ct:sleep(20),
ok = receive Msg -> {unexpected_msg, Msg} after 0 -> ok end,
ok = inet:setopts(Client, [{active, once}]),
receive
@@ -1317,10 +1280,10 @@ econnreset_after_async_send_active_once(Config) when is_list(Config) ->
{tcp_closed, Client} ->
ok;
Other ->
- ?t:fail({unexpected1, Other})
+ ct:fail({unexpected1, Other})
end;
Other ->
- ?t:fail({unexpected2, Other})
+ ct:fail({unexpected2, Other})
end.
econnreset_after_async_send_passive(Config) when is_list(Config) ->
@@ -1341,10 +1304,10 @@ econnreset_after_async_send_passive(Config) when is_list(Config) ->
case erlang:port_info(Client, queue_size) of
{queue_size, N} when N > 0 -> ok;
{queue_size, 0} when OS =:= win32 -> ok;
- {queue_size, 0} = T -> ?t:fail(T)
+ {queue_size, 0} = T -> ct:fail(T)
end,
ok = gen_tcp:close(S),
- ok = ?t:sleep(20),
+ ok = ct:sleep(20),
{error, closed} = gen_tcp:recv(Client, 0),
%% Now test with option switched on.
@@ -1360,7 +1323,7 @@ econnreset_after_async_send_passive(Config) when is_list(Config) ->
ok = gen_tcp:send(S1, "Whatever"),
ok = gen_tcp:send(Client1, Payload),
ok = gen_tcp:close(S1),
- ok = ?t:sleep(20),
+ ok = ct:sleep(20),
{error, econnreset} = gen_tcp:recv(Client1, 0).
%%
@@ -1387,11 +1350,11 @@ linger_zero(Config) when is_list(Config) ->
case erlang:port_info(Client, queue_size) of
{queue_size, N} when N > 0 -> ok;
{queue_size, 0} when OS =:= win32 -> ok;
- {queue_size, 0} = T -> ?t:fail(T)
+ {queue_size, 0} = T -> ct:fail(T)
end,
ok = inet:setopts(Client, [{linger, {true, 0}}]),
ok = gen_tcp:close(Client),
- ok = ?t:sleep(1),
+ ok = ct:sleep(1),
undefined = erlang:port_info(Client, connected),
{error, econnreset} = gen_tcp:recv(S, PayloadSize).
@@ -1466,7 +1429,7 @@ busy_send_loop(Server, Client, N) ->
busy_send_2(Server, Client, N+1)
after 10000 ->
%% If this happens, see busy_send_srv
- ?t:fail({timeout,{server,not_send,flush([])}})
+ ct:fail({timeout,{server,not_send,flush([])}})
end
end.
@@ -1477,7 +1440,7 @@ busy_send_2(Server, Client, _N) ->
{Server,[closed]} ->
receive {Client,[0,{error,closed}]} -> ok end
after 10000 ->
- ?t:fail({timeout,{server,not_closed,flush([])}})
+ ct:fail({timeout,{server,not_closed,flush([])}})
end.
busy_send_srv(L, Master, Msg) ->
@@ -1567,7 +1530,7 @@ busy_disconnect_active_send(S, Data) ->
{error,closed} ->
receive
{tcp_closed,S} -> ok;
- _Other -> ?t:fail()
+ _Other -> ct:fail(failed)
end
end.
@@ -1613,52 +1576,56 @@ fill_sendq(Config) when is_list(Config) ->
Master = self(),
Server =
spawn_link(fun () ->
- {ok,L} = gen_tcp:listen
- (0, [{active,false},binary,
- {reuseaddr,true},{packet,0}]),
+ {ok,L} = gen_tcp:listen(0, [{active,false},binary,
+ {reuseaddr,true},{packet,0}]),
{ok,Port} = inet:port(L),
Master ! {self(),client,
fill_sendq_client(Port, Master)},
fill_sendq_srv(L, Master)
end),
io:format("~p Server~n", [Server]),
- receive {Server,client,Client} ->
- io:format("~p Client~n", [Client]),
- receive {Server,reader,Reader} ->
- io:format("~p Reader~n", [Reader]),
- fill_sendq_loop(Server, Client, Reader)
+ receive
+ {Server,client,Client} ->
+ io:format("~p Client~n", [Client]),
+ receive
+ {Server,reader,Reader} ->
+ io:format("~p Reader~n", [Reader]),
+ fill_sendq_loop(Server, Client, Reader)
end
end.
fill_sendq_loop(Server, Client, Reader) ->
%% Master
%%
- receive {Server,send} ->
+ receive
+ {Server,send} ->
fill_sendq_loop(Server, Client, Reader)
after 2000 ->
%% Send queue full, sender blocked -> close client.
io:format("Send timeout, closing Client...~n", []),
Client ! {self(),close},
- receive {Server,[{error,closed}]} ->
- io:format("Got server closed.~n"),
- receive {Reader,[{error,closed}]} ->
- io:format
- ("Got reader closed.~n"),
- ok
- after 3000 ->
- ?t:fail({timeout,{closed,reader}})
- end;
- {Reader,[{error,closed}]} ->
- io:format("Got reader closed.~n"),
- receive {Server,[{error,closed}]} ->
- io:format("Got server closed~n"),
- ok
- after 3000 ->
- ?t:fail({timeout,{closed,server}})
- end
- after 3000 ->
- ?t:fail({timeout,{closed,[server,reader]}})
- end
+ receive
+ {Server,[{error,closed}]} ->
+ io:format("Got server closed.~n"),
+ receive
+ {Reader,[{error,closed}]} ->
+ io:format("Got reader closed.~n"),
+ ok
+ after 3000 ->
+ ct:fail({timeout,{closed,reader}})
+ end;
+ {Reader,[{error,closed}]} ->
+ io:format("Got reader closed.~n"),
+ receive
+ {Server,[{error,closed}]} ->
+ io:format("Got server closed~n"),
+ ok
+ after 3000 ->
+ ct:fail({timeout,{closed,server}})
+ end
+ after 3000 ->
+ ct:fail({timeout,{closed,[server,reader]}})
+ end
end.
fill_sendq_srv(L, Master) ->
@@ -1865,7 +1832,7 @@ test_prio_accept_async() ->
spawn(?MODULE,priority_server,[{self(),Ref}]),
Port = receive
{Ref,P} -> P
- after 5000 -> ?t:fail({error,"helper process timeout"})
+ after 5000 -> ct:fail({error,"helper process timeout"})
end,
receive
after 3000 -> ok
@@ -1879,15 +1846,15 @@ test_prio_accept_async() ->
{Ref,{ok,[{priority,4},{tos,Tos1}]}} ->
ok;
{Ref,Error} ->
- ?t:fail({missmatch,Error})
- after 5000 -> ?t:fail({error,"helper process timeout"})
+ ct:fail({missmatch,Error})
+ after 5000 -> ct:fail({error,"helper process timeout"})
end,
receive
{Ref,{ok,[{priority,4},{tos,Tos1}]}} ->
ok;
{Ref,Error2} ->
- ?t:fail({missmatch,Error2})
- after 5000 -> ?t:fail({error,"helper process timeout"})
+ ct:fail({missmatch,Error2})
+ after 5000 -> ct:fail({error,"helper process timeout"})
end,
{ok,[{priority,4},{tos,Tos2}]} = inet:getopts(Sock2,[priority,tos]),
@@ -1909,10 +1876,6 @@ priority_server({Parent,Ref}) ->
test_prio_fail() ->
{ok,L} = gen_tcp:listen(0, [{active,false}]),
{error,_} = inet:setopts(L,[{priority,1000}]),
-% This error could only happen in linux kernels earlier than 2.6.24.4
-% Privilege check is now disabled and IP_TOS can never fail (only silently
-% be masked).
-% {error,_} = inet:setopts(L,[{tos,6 bsl 5}]),
gen_tcp:close(L),
ok.
@@ -1924,10 +1887,7 @@ test_prio_udp() ->
gen_udp:close(S),
ok.
-so_priority(doc) ->
- ["Tests the so_priority and ip_tos options on sockets when applicable."];
-so_priority(suite) ->
- [];
+%% Tests the so_priority and ip_tos options on sockets when applicable.
so_priority(Config) when is_list(Config) ->
{ok,L} = gen_tcp:listen(0, [{active,false}]),
ok = inet:setopts(L,[{priority,1}]),
@@ -1947,7 +1907,7 @@ so_priority(Config) when is_list(Config) ->
{unix,linux} ->
case os:version() of
{X,Y,_} when (X > 2) or ((X =:= 2) and (Y >= 4)) ->
- ?t:fail({error,
+ ct:fail({error,
"so_priority should work on this "
"OS, but does not"});
_ ->
@@ -1958,10 +1918,236 @@ so_priority(Config) when is_list(Config) ->
end
end.
+
+
+%% IP_RECVTOS and IP_RECVTCLASS for IP_PKTOPTIONS
+%% do not seem to be implemented in Linux until kernel 3.1
+%%
+%% It seems pktoptions does not return valid values
+%% for IPv4 connect sockets. On the accept socket
+%% we get valid values, but on the connect socket we get
+%% the default values for TOS and TTL.
+%%
+%% Therefore the argument CheckConnect enables
+%% checking the returned values for the connect socket.
+%% It is only used for recvtclass, which is an IPv6 option,
+%% and there we get valid values from both socket ends.
+
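+%% Editor's sketch (not part of the patch): the tests below check that
+%% TOS/TTL/TCLASS values configured on the peer show up in the
+%% 'pktoptions' pseudo option of the receiving socket once the matching
+%% recv* option is enabled. Reduced to its core, and with addresses and
+%% values assumed for the illustration, that reads roughly as:
+%%
+%% pktoptions_sketch() ->
+%%     {ok, L} = gen_tcp:listen(0, [inet, {active,false}, {recvtos,true}]),
+%%     {ok, P} = inet:port(L),
+%%     {ok, C} = gen_tcp:connect({127,0,0,1}, P, [inet, {tos,96}]),
+%%     {ok, A} = gen_tcp:accept(L),
+%%     %% On platforms where pktoptions works for IPv4, the accept side
+%%     %% reports the TOS value the connect side was configured with.
+%%     {ok, [{pktoptions, Opts}]} = inet:getopts(A, [pktoptions]),
+%%     io:format("pktoptions: ~p~n", [Opts]),     % e.g. [{tos,96}]
+%%     [gen_tcp:close(S) || S <- [C, A, L]],
+%%     ok.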
+recvtos(_Config) ->
+ test_pktoptions(
+ inet, [{recvtos,tos,96}],
+ fun recvtos_ok/2,
+ false).
+
+recvtosttl(_Config) ->
+ test_pktoptions(
+ inet, [{recvtos,tos,96},{recvttl,ttl,33}],
+ fun (OSType, OSVer) ->
+ recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer)
+ end,
+ false).
+
+recvttl(_Config) ->
+ test_pktoptions(
+ inet, [{recvttl,ttl,33}],
+ fun recvttl_ok/2,
+ false).
+
+recvtclass(_Config) ->
+ {ok,IFs} = inet:getifaddrs(),
+ case
+ [Name ||
+ {Name,Opts} <- IFs,
+ lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)]
+ of
+ [_] ->
+ test_pktoptions(
+ inet6, [{recvtclass,tclass,224}],
+ fun recvtclass_ok/2,
+ true);
+ [] ->
+ {skip,{ipv6_not_supported,IFs}}
+ end.
+
+%% These version numbers are the highest noted in daily tests
+%% where the test fails for a plausible reason, so
+%% skip on that platform.
+%%
+%% On newer versions it might be fixed, but we'll see about that
+%% when machines with newer versions get installed...
+%% If the test still fails for a plausible reason, these
+%% version numbers should simply be increased.
+%% Or maybe we should change to only test on known good
+%% platforms - change {unix,_} to false?
+
+%% pktoptions is not supported for IPv4
+recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
+recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Does not return any value - not implemented for pktoptions
+recvtos_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0});
+%%
+recvtos_ok({unix,_}, _) -> true;
+recvtos_ok(_, _) -> false.
+
+%% pktoptions is not supported for IPv4
+recvttl_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+recvttl_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0});
+%% Using the option returns einval, so it is not implemented.
+recvttl_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
+recvttl_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%%
+recvttl_ok({unix,linux}, _) -> true;
+recvttl_ok({unix,_}, _) -> true;
+recvttl_ok(_, _) -> false.
+
+%% pktoptions is not supported for IPv6
+recvtclass_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0});
+recvtclass_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Using the option returns einval, so it is not implemented.
+recvtclass_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
+%% Does not return any value - not implemented for pktoptions
+recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0});
+%%
+recvtclass_ok({unix,_}, _) -> true;
+recvtclass_ok(_, _) -> false.
+
+semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) ->
+ if
+ X1 > X2 -> false;
+ X1 < X2 -> true;
+ Y1 > Y2 -> false;
+ Y1 < Y2 -> true;
+ Z1 > Z2 -> false;
+ Z1 < Z2 -> true;
+ true -> false
+ end;
+semver_lt(_, {_,_,_}) -> false.
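+%% Editor's note (not part of the patch): semver_lt/2 above is a plain
+%% lexicographic compare of {Major,Minor,Patch} tuples; the final clause
+%% makes any malformed first argument compare as "not less", so the test
+%% is not skipped by accident. For well-formed 3-tuples the same result
+%% could be had from Erlang's built-in term order:
+%%
+%% semver_lt_sketch({_,_,_} = V1, {_,_,_} = V2) -> V1 < V2;
+%% semver_lt_sketch(_, {_,_,_})                 -> false.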
+
+test_pktoptions(Family, Spec, OSFilter, CheckConnect) ->
+ OSType = os:type(),
+ OSVer = os:version(),
+ case OSFilter(OSType, OSVer) of
+ true ->
+ io:format("Os: ~p, ~p~n", [OSType,OSVer]),
+ test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer);
+ false ->
+ {skip,{not_supported_for_os_version,{OSType,OSVer}}}
+ end.
+%%
+test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer) ->
+ Timeout = 5000,
+ RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec],
+ TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec],
+ FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec],
+ Opts = [Opt || {_,Opt,_} <- Spec],
+ OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec],
+ Address =
+ case Family of
+ inet ->
+ {127,0,0,1};
+ inet6 ->
+ {0,0,0,0,0,0,0,1}
+ end,
+ %%
+ %% Set RecvOpts on listen socket
+ {ok,L} =
+ gen_tcp:listen(
+ 0,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts]),
+ {ok,P} = inet:port(L),
+ {ok,TrueRecvOpts} = inet:getopts(L, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(L, Opts),
+ %%
+ %% Set RecvOpts and Option values on connect socket
+ {ok,S2} =
+ gen_tcp:connect(
+ Address, P,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts ++ OptsVals],
+ Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts),
+ {ok,OptsVals} = inet:getopts(S2, Opts),
+ %%
+ %% Accept socket inherits the options from listen socket
+ {ok,S1} = gen_tcp:accept(L, Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(S1, Opts),
+%%% %%
+%%% %% Handshake
+%%% ok = gen_tcp:send(S1, <<"hello">>),
+%%% {ok,<<"hello">>} = gen_tcp:recv(S2, 5, Timeout),
+%%% ok = gen_tcp:send(S2, <<"hi">>),
+%%% {ok,<<"hi">>} = gen_tcp:recv(S1, 2, Timeout),
+ %%
+ %% Verify returned remote options
+ {ok,[{pktoptions,OptsVals1}]} = inet:getopts(S1, [pktoptions]),
+ {ok,[{pktoptions,OptsVals2}]} = inet:getopts(S2, [pktoptions]),
+ (Result1 = sets_eq(OptsVals1, OptsVals))
+ orelse io:format(
+ "Accept differs: ~p neq ~p~n", [OptsVals1,OptsVals]),
+ (Result2 = sets_eq(OptsVals2, OptsValsDefault))
+ orelse io:format(
+ "Connect differs: ~p neq ~p~n",
+ [OptsVals2,OptsValsDefault]),
+ %%
+ ok = gen_tcp:close(S2),
+ ok = gen_tcp:close(S1),
+ %%
+ %%
+ %% Clear RecvOpts on listen socket and set Option values
+ ok = inet:setopts(L, FalseRecvOpts ++ OptsVals),
+ {ok,FalseRecvOpts} = inet:getopts(L, RecvOpts),
+ {ok,OptsVals} = inet:getopts(L, Opts),
+ %%
+ %% Set RecvOpts on connecting socket
+ %%
+ {ok,S4} =
+ gen_tcp:connect(
+ Address, P,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts],
+ Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S4, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(S4, Opts),
+ %%
+ %% Accept socket inherits the options from listen socket
+ {ok,S3} = gen_tcp:accept(L, Timeout),
+ {ok,FalseRecvOpts} = inet:getopts(S3, RecvOpts),
+ {ok,OptsVals} = inet:getopts(S3, Opts),
+ %%
+ %% Verify returned remote options
+ {ok,[{pktoptions,[]}]} = inet:getopts(S3, [pktoptions]),
+ {ok,[{pktoptions,OptsVals4}]} = inet:getopts(S4, [pktoptions]),
+ (Result3 = sets_eq(OptsVals4, OptsVals))
+ orelse io:format(
+ "Accept2 differs: ~p neq ~p~n", [OptsVals4,OptsVals]),
+ %%
+ ok = gen_tcp:close(S4),
+ ok = gen_tcp:close(S3),
+ ok = gen_tcp:close(L),
+ (Result1 and ((not CheckConnect) or (Result2 and Result3)))
+ orelse
+ exit({failed,
+ [{OptsVals1,OptsVals4,OptsVals},
+ {OptsVals2,OptsValsDefault}],
+ {OSType,OSVer}}),
+%% exit({{OSType,OSVer},success}), % In search for the truth
+ ok.
+
+sets_eq(L1, L2) ->
+ lists:sort(L1) == lists:sort(L2).
+
+
+
%% Accept test utilities (suites are below)
millis() ->
- erlang:monotonic_time(milli_seconds).
+ erlang:monotonic_time(millisecond).
collect_accepts(0,_) -> [];
collect_accepts(N,Tmo) ->
@@ -2006,10 +2192,7 @@ mktmofun(Tmo,Parent,LS) ->
fun() -> Parent ! {accepted,self(), catch gen_tcp:accept(LS,Tmo)} end.
%% Accept tests
-primitive_accept(suite) ->
- [];
-primitive_accept(doc) ->
- ["Test singular accept"];
+%% Test singular accept.
primitive_accept(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
{ok,PortNo}=inet:port(LS),
@@ -2027,10 +2210,7 @@ primitive_accept(Config) when is_list(Config) ->
end.
-multi_accept_close_listen(suite) ->
- [];
-multi_accept_close_listen(doc) ->
- ["Closing listen socket when multi-accepting"];
+%% Closing listen socket when multi-accepting.
multi_accept_close_listen(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2043,10 +2223,7 @@ multi_accept_close_listen(Config) when is_list(Config) ->
ok = ?EXPECT_ACCEPTS([{_,{error,closed}},{_,{error,closed}},
{_,{error,closed}},{_,{error,closed}}],4,500).
-accept_timeout(suite) ->
- [];
-accept_timeout(doc) ->
- ["Single accept with timeout"];
+%% Single accept with timeout.
accept_timeout(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2054,10 +2231,7 @@ accept_timeout(Config) when is_list(Config) ->
P = spawn(F),
ok = ?EXPECT_ACCEPTS([{P,{error,timeout}}],1,2000).
-accept_timeouts_in_order(suite) ->
- [];
-accept_timeouts_in_order(doc) ->
- ["Check that multi-accept timeouts happen in the correct order"];
+%% Check that multi-accept timeouts happen in the correct order.
accept_timeouts_in_order(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2068,10 +2242,7 @@ accept_timeouts_in_order(Config) when is_list(Config) ->
ok = ?EXPECT_ACCEPTS([{P1,{error,timeout}},{P2,{error,timeout}},
{P3,{error,timeout}},{P4,{error,timeout}}],infinity,2000).
-accept_timeouts_in_order2(suite) ->
- [];
-accept_timeouts_in_order2(doc) ->
- ["Check that multi-accept timeouts happen in the correct order (more)"];
+%% Check that multi-accept timeouts happen in the correct order (more).
accept_timeouts_in_order2(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2082,10 +2253,7 @@ accept_timeouts_in_order2(Config) when is_list(Config) ->
ok = ?EXPECT_ACCEPTS([{P4,{error,timeout}},{P3,{error,timeout}},
{P2,{error,timeout}},{P1,{error,timeout}}],infinity,2000).
-accept_timeouts_in_order3(suite) ->
- [];
-accept_timeouts_in_order3(doc) ->
- ["Check that multi-accept timeouts happen in the correct order (even more)"];
+%% Check that multi-accept timeouts happen in the correct order (even more).
accept_timeouts_in_order3(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2096,11 +2264,8 @@ accept_timeouts_in_order3(Config) when is_list(Config) ->
ok = ?EXPECT_ACCEPTS([{P4,{error,timeout}},{P1,{error,timeout}},
{P3,{error,timeout}},{P2,{error,timeout}}],infinity,2000).
-accept_timeouts_in_order4(suite) ->
- [];
-accept_timeouts_in_order4(doc) ->
- ["Check that multi-accept timeouts happen in the correct order after "
- "mixing millsec and sec timeouts"];
+%% Check that multi-accept timeouts happen in the correct order after
+%% mixing millisecond and second timeouts.
accept_timeouts_in_order4(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2111,11 +2276,8 @@ accept_timeouts_in_order4(Config) when is_list(Config) ->
ok = ?EXPECT_ACCEPTS([{P1,{error,timeout}},{P2,{error,timeout}},
{P4,{error,timeout}},{P3,{error,timeout}}],infinity,2000).
-accept_timeouts_in_order5(suite) ->
- [];
-accept_timeouts_in_order5(doc) ->
- ["Check that multi-accept timeouts happen in the correct order after "
- "mixing millsec and sec timeouts (more)"];
+%% Check that multi-accept timeouts happen in the correct order after
+%% mixing millisecond and second timeouts (more).
accept_timeouts_in_order5(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2126,11 +2288,8 @@ accept_timeouts_in_order5(Config) when is_list(Config) ->
ok = ?EXPECT_ACCEPTS([{P4,{error,timeout}},{P1,{error,timeout}},
{P3,{error,timeout}},{P2,{error,timeout}}],infinity,2000).
-accept_timeouts_in_order6(suite) ->
- [];
-accept_timeouts_in_order6(doc) ->
- ["Check that multi-accept timeouts happen in the correct order after "
- "mixing millsec and sec timeouts (even more)"];
+%% Check that multi-accept timeouts happen in the correct order after
+%% mixing millisecond and second timeouts (even more).
accept_timeouts_in_order6(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2141,11 +2300,8 @@ accept_timeouts_in_order6(Config) when is_list(Config) ->
ok = ?EXPECT_ACCEPTS([{P4,{error,timeout}},{P2,{error,timeout}},
{P3,{error,timeout}},{P1,{error,timeout}}],infinity,2000).
-accept_timeouts_in_order7(suite) ->
- [];
-accept_timeouts_in_order7(doc) ->
- ["Check that multi-accept timeouts happen in the correct order after "
- "mixing millsec and sec timeouts (even more++)"];
+%% Check that multi-accept timeouts happen in the correct order after
+%% mixing millisecond and second timeouts (even more++).
accept_timeouts_in_order7(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2162,10 +2318,7 @@ accept_timeouts_in_order7(Config) when is_list(Config) ->
{P1,{error,timeout}},{P3,{error,timeout}},
{P8,{error,timeout}},{P7,{error,timeout}}],infinity,2000).
-accept_timeouts_mixed(suite) ->
- [];
-accept_timeouts_mixed(doc) ->
- ["Check that multi-accept timeouts behave correctly when mixed with successful timeouts"];
+%% Check that multi-accept timeouts behave correctly when mixed with successful accepts.
accept_timeouts_mixed(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2185,10 +2338,7 @@ accept_timeouts_mixed(Config) when is_list(Config) ->
gen_tcp:connect("localhost",PortNo,[]),
ok = ?EXPECT_ACCEPTS([{P4,{ok,Port1}}] when is_port(Port1),infinity,100).
-killing_acceptor(suite) ->
- [];
-killing_acceptor(doc) ->
- ["Check that single acceptor behaves as expected when killed"];
+%% Check that a single acceptor behaves as expected when killed.
killing_acceptor(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Pid = spawn(fun() -> erlang:display({accepted,self(),gen_tcp:accept(LS)}) end),
@@ -2203,10 +2353,7 @@ killing_acceptor(Config) when is_list(Config) ->
false = lists:member(accepting, L2),
ok.
-killing_multi_acceptors(suite) ->
- [];
-killing_multi_acceptors(doc) ->
- ["Check that multi acceptors behaves as expected when killed"];
+%% Check that multiple acceptors behave as expected when killed.
killing_multi_acceptors(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2228,10 +2375,7 @@ killing_multi_acceptors(Config) when is_list(Config) ->
false = lists:member(accepting, L3),
ok.
-killing_multi_acceptors2(suite) ->
- [];
-killing_multi_acceptors2(doc) ->
- ["Check that multi acceptors behaves as expected when killed (more)"];
+%% Check that multiple acceptors behave as expected when killed (more).
killing_multi_acceptors2(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2265,11 +2409,8 @@ killing_multi_acceptors2(Config) when is_list(Config) ->
false = lists:member(accepting, L5),
ok.
-several_accepts_in_one_go(suite) ->
- [];
-several_accepts_in_one_go(doc) ->
- ["checks that multi-accept works when more than one accept can be "
- "done at once (wb test of inet_driver)"];
+%% Check that multi-accept works when more than one accept can be
+%% done at once (whitebox test of inet_driver).
several_accepts_in_one_go(Config) when is_list(Config) ->
{ok,LS}=gen_tcp:listen(0,[]),
Parent = self(),
@@ -2294,7 +2435,7 @@ wait_until_accepting(Proc,0) ->
exit({timeout_waiting_for_accepting,Proc});
wait_until_accepting(Proc,N) ->
case process_info(Proc,current_function) of
- {current_function,{prim_inet,accept0,2}} ->
+ {current_function,{prim_inet,accept0,3}} ->
case process_info(Proc,status) of
{status,waiting} ->
ok;
@@ -2312,11 +2453,8 @@ wait_until_accepting(Proc,N) ->
end.
-accept_system_limit(suite) ->
- [];
-accept_system_limit(doc) ->
- ["Check that accept returns {error, system_limit} "
- "(and not {error, enfile}) when running out of ports"];
+%% Check that accept returns {error, system_limit}
+%% (and not {error, enfile}) when running out of ports.
accept_system_limit(Config) when is_list(Config) ->
{ok, LS} = gen_tcp:listen(0, []),
{ok, TcpPort} = inet:port(LS),
@@ -2365,10 +2503,7 @@ open_ports(L) ->
end.
-active_once_closed(suite) ->
- [];
-active_once_closed(doc) ->
- ["Check that active once and tcp_close messages behave as expected"];
+%% Check that active once and tcp_close messages behave as expected.
active_once_closed(Config) when is_list(Config) ->
(fun() ->
{Loop,A} = setup_closed_ao(),
@@ -2416,75 +2551,84 @@ active_once_closed(Config) when is_list(Config) ->
ok = receive {tcp_closed, A} -> ok after 1000 -> error end
end)().
-send_timeout(suite) ->
- [];
-send_timeout(doc) ->
- ["Test the send_timeout socket option"];
+%% Test the send_timeout socket option.
send_timeout(Config) when is_list(Config) ->
+ Dir = filename:dirname(code:which(?MODULE)),
+ {ok,RNode} = test_server:start_node(?UNIQ_NODE_NAME, slave,
+ [{args,"-pa " ++ Dir}]),
+
%% Basic
- BasicFun =
- fun(AutoClose) ->
- {Loop,A,RNode} = setup_timeout_sink(1000, AutoClose),
- {error,timeout} =
- Loop(fun() ->
- Res = gen_tcp:send(A,<<1:10000>>),
- %%erlang:display(Res),
- Res
- end),
- %% Check that the socket is not busy/closed...
- Error = after_send_timeout(AutoClose),
- {error,Error} = gen_tcp:send(A,<<"Hej">>),
- test_server:stop_node(RNode)
- end,
- BasicFun(false),
- BasicFun(true),
- %% Check timeout length
+ send_timeout_basic(false, RNode),
+ send_timeout_basic(true, RNode),
+
+ BinData = <<1:10000>>,
+
+ %% Check timeout length.
Self = self(),
Pid = spawn(fun() ->
- {Loop,A,RNode} = setup_timeout_sink(1000, true),
- {error,timeout} = Loop(fun() ->
- Res = gen_tcp:send(A,<<1:10000>>),
- %%erlang:display(Res),
- Self ! Res,
- Res
- end),
- test_server:stop_node(RNode)
+ A = setup_timeout_sink(RNode, 1000, true),
+ Send = fun() ->
+ Res = gen_tcp:send(A, BinData),
+ Self ! Res,
+ Res
+ end,
+ {error,timeout} = timeout_sink_loop(Send)
end),
Diff = get_max_diff(),
io:format("Max time for send: ~p~n",[Diff]),
true = (Diff > 500) and (Diff < 1500),
- %% Let test_server slave die...
+
+ %% Wait for the process to die.
Mon = erlang:monitor(process, Pid),
receive {'DOWN',Mon,process,Pid,_} -> ok end,
+
%% Check that parallell writers do not hang forever
- ParaFun =
- fun(AutoClose) ->
- {Loop,A,RNode} = setup_timeout_sink(1000, AutoClose),
- SenderFun = fun() ->
- {error,Error} =
- Loop(fun() ->
- gen_tcp:send(A, <<1:10000>>)
- end),
- Self ! {error,Error}
- end,
- spawn_link(SenderFun),
- spawn_link(SenderFun),
- receive
- {error,timeout} -> ok
- after 10000 ->
- exit(timeout)
- end,
- NextErr = after_send_timeout(AutoClose),
- receive
- {error,NextErr} -> ok
- after 10000 ->
- exit(timeout)
- end,
- {error,NextErr} = gen_tcp:send(A,<<"Hej">>),
- test_server:stop_node(RNode)
- end,
- ParaFun(false),
- ParaFun(true),
+ send_timeout_para(false, RNode),
+ send_timeout_para(true, RNode),
+
+ test_server:stop_node(RNode),
+
+ ok.
+
+send_timeout_basic(AutoClose, RNode) ->
+ BinData = <<1:10000>>,
+
+ A = setup_timeout_sink(RNode, 1000, AutoClose),
+ Send = fun() -> gen_tcp:send(A, BinData) end,
+ {error,timeout} = timeout_sink_loop(Send),
+
+ %% Check that the socket is not busy/closed...
+ Error = after_send_timeout(AutoClose),
+ {error,Error} = gen_tcp:send(A, <<"Hej">>),
+ ok.
+
+send_timeout_para(AutoClose, RNode) ->
+ BinData = <<1:10000>>,
+
+ A = setup_timeout_sink(RNode, 1000, AutoClose),
+ Self = self(),
+ SenderFun = fun() ->
+ Send = fun() -> gen_tcp:send(A, BinData) end,
+ {error,Error} = timeout_sink_loop(Send),
+ Self ! {error,Error}
+ end,
+ spawn_link(SenderFun),
+ spawn_link(SenderFun),
+
+ receive
+ {error,timeout} -> ok
+ after 10000 ->
+ exit(timeout)
+ end,
+
+ NextErr = after_send_timeout(AutoClose),
+ receive
+ {error,NextErr} -> ok
+ after 10000 ->
+ exit(timeout)
+ end,
+
+ {error,NextErr} = gen_tcp:send(A, <<"Hej">>),
ok.
mad_sender(S) ->
@@ -2500,46 +2644,41 @@ mad_sender(S) ->
flush() ->
receive
_X ->
- %erlang:display(_X),
flush()
after 0 ->
ok
end.
-send_timeout_active(suite) ->
- [];
-send_timeout_active(doc) ->
- ["Test the send_timeout socket option for active sockets"];
+%% Test the send_timeout socket option for active sockets.
send_timeout_active(Config) when is_list(Config) ->
- Dog = test_server:timetrap(test_server:seconds(20)),
- %% Basic
- BasicFun =
- fun(AutoClose) ->
- {Loop,A,RNode,C} = setup_active_timeout_sink(1, AutoClose),
- inet:setopts(A, [{active, once}]),
- Mad = spawn_link(RNode,fun() -> mad_sender(C) end),
- {error,timeout} =
- Loop(fun() ->
- receive
- {tcp, _Sock, _Data} ->
- inet:setopts(A, [{active, once}]),
- Res = gen_tcp:send(A,lists:duplicate(1000, $a)),
- %erlang:display(Res),
- Res;
- Err ->
- io:format("sock closed: ~p~n", [Err]),
- Err
- end
- end),
- unlink(Mad),
- exit(Mad,kill),
- test_server:stop_node(RNode)
+ Dir = filename:dirname(code:which(?MODULE)),
+ {ok,RNode} = test_server:start_node(?UNIQ_NODE_NAME, slave,
+ [{args,"-pa " ++ Dir}]),
+ do_send_timeout_active(false, RNode),
+ do_send_timeout_active(true, RNode),
+ test_server:stop_node(RNode),
+ ok.
+
+do_send_timeout_active(AutoClose, RNode) ->
+ {A,C} = setup_active_timeout_sink(RNode, 1, AutoClose),
+ inet:setopts(A, [{active, once}]),
+ Mad = spawn_link(RNode, fun() -> mad_sender(C) end),
+ ListData = lists:duplicate(1000, $a),
+ F = fun() ->
+ receive
+ {tcp, _Sock, _Data} ->
+ inet:setopts(A, [{active, once}]),
+ Res = gen_tcp:send(A, ListData),
+ Res;
+ Err ->
+ io:format("sock closed: ~p~n", [Err]),
+ Err
+ end
end,
- BasicFun(false),
- flush(),
- BasicFun(true),
+ {error,timeout} = timeout_sink_loop(F),
+ unlink(Mad),
+ exit(Mad, kill),
flush(),
- test_server:timetrap_cancel(Dog),
ok.
after_send_timeout(AutoClose) ->
@@ -2581,9 +2720,9 @@ get_max_diff(Max) ->
setup_closed_ao() ->
Dir = filename:dirname(code:which(?MODULE)),
- {ok,R} = test_server:start_node(test_default_options_slave,slave,
+ {ok,R} = test_server:start_node(?UNIQ_NODE_NAME, slave,
[{args,"-pa " ++ Dir}]),
- Host = list_to_atom(lists:nth(2,string:tokens(atom_to_list(node()),"@"))),
+ Host = get_hostname(node()),
{ok, L} = gen_tcp:listen(0, [{active,false},{packet,2}]),
Fun = fun(F) ->
receive
@@ -2622,11 +2761,8 @@ setup_closed_ao() ->
test_server:stop_node(R),
{Loop,A}.
-setup_timeout_sink(Timeout, AutoClose) ->
- Dir = filename:dirname(code:which(?MODULE)),
- {ok,R} = test_server:start_node(test_default_options_slave,slave,
- [{args,"-pa " ++ Dir}]),
- Host = list_to_atom(lists:nth(2,string:tokens(atom_to_list(node()),"@"))),
+setup_timeout_sink(RNode, Timeout, AutoClose) ->
+ Host = get_hostname(node()),
{ok, L} = gen_tcp:listen(0, [{active,false},{packet,2},
{send_timeout,Timeout},
{send_timeout_close,AutoClose}]),
@@ -2637,7 +2773,7 @@ setup_timeout_sink(Timeout, AutoClose) ->
die -> ok
end
end,
- Pid = rpc:call(R,erlang,spawn,[fun() -> Fun(Fun) end]),
+ Pid = rpc:call(RNode, erlang, spawn, [fun() -> Fun(Fun) end]),
{ok, Port} = inet:port(L),
Remote = fun(Fu) ->
Pid ! {self(), Fu},
@@ -2651,36 +2787,23 @@ setup_timeout_sink(Timeout, AutoClose) ->
{ok,A} = gen_tcp:accept(L),
gen_tcp:send(A,"Hello"),
{ok, "Hello"} = Remote(fun() -> gen_tcp:recv(C,0) end),
- Loop2 = fun(_,_,0) ->
- {failure, timeout};
- (L2,F2,N) ->
- Ret = F2(),
- io:format("~p~n",[Ret]),
- case Ret of
- ok -> receive after 1 -> ok end,
- L2(L2,F2,N-1);
- Other -> Other
- end
- end,
- Loop = fun(F3) -> Loop2(Loop2,F3,1000) end,
- {Loop,A,R}.
-
-setup_active_timeout_sink(Timeout, AutoClose) ->
- Dir = filename:dirname(code:which(?MODULE)),
- {ok,R} = test_server:start_node(test_default_options_slave,slave,
- [{args,"-pa " ++ Dir}]),
- Host = list_to_atom(lists:nth(2,string:tokens(atom_to_list(node()),"@"))),
- {ok, L} = gen_tcp:listen(0, [binary,{active,false},{packet,0},{nodelay, true},{keepalive, true},
- {send_timeout,Timeout},
- {send_timeout_close,AutoClose}]),
+ A.
+
+setup_active_timeout_sink(RNode, Timeout, AutoClose) ->
+ Host = get_hostname(node()),
+ ListenOpts = [binary,{active,false},{packet,0},
+ {nodelay,true},{keepalive,true},
+ {send_timeout,Timeout},{send_timeout_close,AutoClose}],
+ {ok, L} = gen_tcp:listen(0, ListenOpts),
Fun = fun(F) ->
receive
{From,X} when is_function(X) ->
- From ! {self(),X()}, F(F);
+ From ! {self(),X()},
+ F(F);
die -> ok
end
end,
- Pid = rpc:call(R,erlang,spawn,[fun() -> Fun(Fun) end]),
+ Pid = rpc:call(RNode, erlang, spawn, [fun() -> Fun(Fun) end]),
{ok, Port} = inet:port(L),
Remote = fun(Fu) ->
Pid ! {self(), Fu},
@@ -2688,26 +2811,22 @@ setup_active_timeout_sink(Timeout, AutoClose) ->
end
end,
{ok, C} = Remote(fun() ->
- gen_tcp:connect(Host,Port,
- [{active,false}])
+ gen_tcp:connect(Host, Port, [{active,false}])
end),
{ok,A} = gen_tcp:accept(L),
- gen_tcp:send(A,"Hello"),
- {ok, "H"++_} = Remote(fun() -> gen_tcp:recv(C,0) end),
- Loop2 = fun(_,_,0) ->
- {failure, timeout};
- (L2,F2,N) ->
- Ret = F2(),
- io:format("~p~n",[Ret]),
- case Ret of
- ok -> receive after 1 -> ok end,
- L2(L2,F2,N-1);
- Other -> Other
- end
- end,
- Loop = fun(F3) -> Loop2(Loop2,F3,1000) end,
- {Loop,A,R,C}.
+ gen_tcp:send(A, "Hello"),
+ {ok, "H"++_} = Remote(fun() -> gen_tcp:recv(C, 0) end),
+ {A,C}.
+timeout_sink_loop(Action) ->
+ Ret = Action(),
+ case Ret of
+ ok ->
+ receive after 1 -> ok end,
+ timeout_sink_loop(Action);
+ Other ->
+ Other
+ end.
has_superfluous_schedulers() ->
case {erlang:system_info(schedulers),
@@ -2718,10 +2837,8 @@ has_superfluous_schedulers() ->
end.
-otp_7731(suite) -> [];
-otp_7731(doc) ->
- "Leaking message from inet_drv {inet_reply,P,ok} "
- "when a socket sending resumes working after a send_timeout";
+%% Leaking message from inet_drv {inet_reply,P,ok}
+%% when a socket sending resumes working after a send_timeout.
otp_7731(Config) when is_list(Config) ->
ServerPid = spawn_link(?MODULE, otp_7731_server, [self()]),
receive {ServerPid, ready, PortNum} -> ok end,
@@ -2738,7 +2855,7 @@ otp_7731(Config) when is_list(Config) ->
%% Now make sure inet_drv does not leak any internal messages.
receive Msg ->
- test_server:fail({unexpected, Msg})
+ ct:fail({unexpected, Msg})
after 1000 ->
ok
end,
@@ -2791,8 +2908,7 @@ otp_7731_recv(Socket) ->
%% OTP-7615: TCP-ports hanging in CLOSING state when sending large
%% buffer followed by a recv() that returns error due to closed
%% connection.
-zombie_sockets(suite) -> [];
-zombie_sockets(doc) -> ["OTP-7615 Leaking closed ports."];
+%% OTP-7615 Leaking closed ports.
zombie_sockets(Config) when is_list(Config) ->
register(zombie_collector,self()),
Calls = 10,
@@ -2872,9 +2988,7 @@ zombie_serve_client(Socket, Bin) ->
gen_tcp:close(Socket),
zombie_collector ! {closed, Socket}.
-otp_7816(suite) -> [];
-otp_7816(doc) ->
- "Hanging send on windows when sending iolist with more than 16 binaries.";
+%% Hanging send on Windows when sending an iolist with more than 16 binaries.
otp_7816(Config) when is_list(Config) ->
Client = self(),
Server = spawn_link(fun()-> otp_7816_server(Client) end),
@@ -2963,8 +3077,7 @@ otp_7816_recv(CSocket, BytesLeft) ->
error
end.
-otp_8102(doc) -> ["Receive a packet with a faulty packet header"];
-otp_8102(suite) -> [];
+%% Receive a packet with a faulty packet header.
otp_8102(Config) when is_list(Config) ->
{ok, LSocket} = gen_tcp:listen(0, []),
{ok, {_, PortNum}} = inet:sockname(LSocket),
@@ -3004,8 +3117,7 @@ otp_8102_do(LSocket, PortNum, {Bin,PType}) ->
gen_tcp:close(SSocket),
gen_tcp:close(RSocket).
-otp_9389(doc) -> ["Verify packet_size handles long HTTP header lines"];
-otp_9389(suite) -> [];
+%% Verify packet_size handles long HTTP header lines.
otp_9389(Config) when is_list(Config) ->
{ok, LS} = gen_tcp:listen(0, [{active,false}]),
{ok, {_, PortNum}} = inet:sockname(LS),
@@ -3064,10 +3176,10 @@ otp_9389_loop(S, OrigLinkHdr, State) ->
error({timeout,header})
end.
-wrapping_oct(doc) ->
- "Check that 64bit octet counters work.";
-wrapping_oct(suite) ->
- [];
+wrapping_oct() ->
+ [{timetrap,{minutes,10}}].
+
+%% Check that 64bit octet counters work.
wrapping_oct(Config) when is_list(Config) ->
{ok,Sock} = gen_tcp:listen(0,[{active,false},{mode,binary}]),
{ok,Port} = inet:port(Sock),
@@ -3131,3 +3243,186 @@ oct_aloop(S,X,Times) ->
end.
ok({ok,V}) -> V.
+
+get_hostname(Name) ->
+ "@"++Host = lists:dropwhile(fun(C) -> C =/= $@ end, atom_to_list(Name)),
+ Host.
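%% Illustrative example (hypothetical node name): for a node 'test@durin',
%%   get_hostname('test@durin') -> "durin"
%% i.e. everything after the first $@ of the node name, as a string.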
+
+%% Check that writing to a remotely closed socket doesn't block forever
+%% when exit_on_close is false.
+otp_13939(Config) when is_list(Config) ->
+ {Pid, Ref} = spawn_opt(
+ fun() ->
+ {ok, Listener} = gen_tcp:listen(0, [{exit_on_close, false}]),
+ {ok, Port} = inet:port(Listener),
+
+ spawn_link(
+ fun() ->
+ {ok, Client} = gen_tcp:connect("localhost", Port,
+ [{active, false}]),
+ ok = gen_tcp:close(Client)
+ end),
+
+ {ok, Accepted} = gen_tcp:accept(Listener),
+
+ ok = gen_tcp:send(Accepted, <<0:(10*1024*1024*8)>>),
+
+ %% The bug surfaces when there's a delay between the send
+ %% operations; inet:getstat is a red herring.
+ timer:sleep(100),
+
+ {error, Code} = gen_tcp:send(Accepted, <<0:(10*1024*1024*8)>>),
+ ct:pal("gen_tcp:send returned ~p~n", [Code])
+ end, [link, monitor]),
+
+ receive
+ {'DOWN', Ref, process, Pid, normal} ->
+ ok
+ after 1000 ->
+ demonitor(Ref, [flush]),
+ exit(Pid, normal),
+ ct:fail("Server process blocked on send.")
+ end.
+
+otp_12242(Config) when is_list(Config) ->
+ case os:type() of
+ {win32,_} ->
+            %% Even if we set sndbuf and recbuf to small sizes,
+            %% Windows either happily accepts gigabytes of data
+            %% in no time, so the second send below that is supposed
+            %% to time out just succeeds, or the first send that
+            %% is supposed to fill the inet_drv I/O queue and
+            %% start waiting until more data can be sent
+            %% instead sends all data but suffers a send
+            %% failure that closes the socket.
+ {skipped,backpressure_broken_on_win32};
+ _ ->
+ %% Find the IPv4 address of an up and running interface
+ %% that is not loopback nor pointtopoint
+ {ok,IFList} = inet:getifaddrs(),
+ ct:pal("IFList ~p~n", [IFList]),
+ case
+ lists:flatten(
+ [lists:filtermap(
+ fun ({addr,Addr}) when tuple_size(Addr) =:= 4 ->
+ {true,Addr};
+ (_) ->
+ false
+ end, Opts)
+ || {_,Opts} <- IFList,
+ case lists:keyfind(flags, 1, Opts) of
+ {_,Flags} ->
+ lists:member(up, Flags)
+ andalso
+ lists:member(running, Flags)
+ andalso
+ not lists:member(loopback, Flags)
+ andalso
+ not lists:member(pointtopoint, Flags);
+ false ->
+ false
+ end])
+ of
+ [Addr|_] ->
+ otp_12242(Addr);
+ Other ->
+ {skipped,{no_external_address,Other}}
+ end
+ end;
+%%
+otp_12242(Addr) when tuple_size(Addr) =:= 4 ->
+ ct:timetrap(30000),
+ ct:pal("Using address ~p~n", [Addr]),
+ Bufsize = 16 * 1024,
+ Datasize = 128 * 1024 * 1024, % At least 1 s on GBit interface
+ Blob = binary:copy(<<$x>>, Datasize),
+ LOpts =
+ [{backlog,4},{reuseaddr,true},{ip,Addr},
+ binary,{active,false},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ COpts =
+ [binary,{active,false},{ip,Addr},
+ {linger,{true,1}}, % 1 s
+ {send_timeout,500},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ Dir = filename:dirname(code:which(?MODULE)),
+ {ok,ListenerNode} =
+ test_server:start_node(
+ ?UNIQ_NODE_NAME, slave, [{args,"-pa " ++ Dir}]),
+ Tester = self(),
+ Listener =
+ spawn(
+ ListenerNode,
+ fun () ->
+ {ok,L} = gen_tcp:listen(0, LOpts),
+ {ok,LPort} = inet:port(L),
+ Tester ! {self(),port,LPort},
+ {ok,A} = gen_tcp:accept(L),
+ ok = gen_tcp:close(L),
+ receive
+ {Tester,stop} ->
+ ok = gen_tcp:close(A)
+ end
+ end),
+ ListenerMref = monitor(process, Listener),
+ LPort = receive {Listener,port,P} -> P end,
+ {ok,C} = gen_tcp:connect(Addr, LPort, COpts, infinity),
+ {ok,ReadCOpts} = inet:getopts(C, [recbuf,sndbuf,buffer]),
+ ct:pal("ReadCOpts ~p~n", [ReadCOpts]),
+ %%
+ %% Fill the buffers
+ ct:pal("Sending ~p bytes~n", [Datasize]),
+ ok = gen_tcp:send(C, Blob),
+ ct:pal("Sent ~p bytes~n", [Datasize]),
+ %% Spawn the Closer,
+ %% try to ensure that the close call is in progress
+ %% before the owner proceeds with sending
+ Owner = self(),
+ {_Closer,CloserMref} =
+ spawn_opt(
+ fun () ->
+ Owner ! {tref, erlang:start_timer(50, Owner, closing)},
+ ct:pal("Calling gen_tcp:close(C)~n"),
+ try gen_tcp:close(C) of
+ Result ->
+ ct:pal("gen_tcp:close(C) -> ~p~n", [Result]),
+ ok = Result
+ catch
+ Class:Reason:Stacktrace ->
+ ct:pal(
+ "gen_tcp:close(C) >< ~p:~p~n ~p~n",
+ [Class,Reason,Stacktrace]),
+ erlang:raise(Class, Reason, Stacktrace)
+ end
+ end, [link,monitor]),
+ receive
+ {tref,Tref} ->
+ receive {timeout,Tref,_} -> ok end,
+ ct:pal("Sending ~p bytes again~n", [Datasize]),
+            %% Now the close should be in progress...
+            %% All buffers are full, the remote end is not reading,
+            %% and the send timeout is 500 ms, so this send will time out:
+ {error,timeout} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes again timed out~n", [Datasize]),
+ ok = inet:setopts(C, [{send_timeout,10000}]),
+ %% There is a hidden timeout here. Port close is sampled
+ %% every 5 s by prim_inet:send_recv_reply.
+            %% Linger is 1 s, so the Closer will finish during this send:
+ ct:pal("Sending ~p bytes with 10 s timeout~n", [Datasize]),
+ {error,closed} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes with 10 s timeout was closed~n",
+ [Datasize]),
+ normal = wait(CloserMref),
+ ct:pal("The Closer has exited~n"),
+ Listener ! {Tester,stop},
+ receive {'DOWN',ListenerMref,_,_,_} -> ok end,
+ ct:pal("The Listener has exited~n"),
+ test_server:stop_node(ListenerNode),
+ ok
+ end.
+
+wait(Mref) ->
+ receive {'DOWN',Mref,_,_,Reason} -> Reason end.
diff --git a/lib/kernel/test/gen_udp_SUITE.erl b/lib/kernel/test/gen_udp_SUITE.erl
index 8d8c953303..af9985de45 100644
--- a/lib/kernel/test/gen_udp_SUITE.erl
+++ b/lib/kernel/test/gen_udp_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -17,17 +17,16 @@
%%
%% %CopyrightEnd%
%%
-%
-% test the behavior of gen_udp. Testing udp is really a very unfunny task,
-% because udp is not deterministic.
-%
--module(gen_udp_SUITE).
--include_lib("test_server/include/test_server.hrl").
+%%
+%% Test the behavior of gen_udp. Testing udp is really a very unfunny task,
+%% because udp is not deterministic.
+%%
+-module(gen_udp_SUITE).
+-include_lib("common_test/include/ct.hrl").
--define(default_timeout, ?t:minutes(1)).
-% XXX - we should pick a port that we _know_ is closed. That's pretty hard.
+%% XXX - we should pick a port that we _know_ is closed. That's pretty hard.
-define(CLOSED_PORT, 6666).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -35,18 +34,27 @@
-export([init_per_testcase/2, end_per_testcase/2]).
-export([send_to_closed/1, active_n/1,
- buffer_size/1, binary_passive_recv/1, bad_address/1,
- read_packets/1, open_fd/1, connect/1, implicit_inet6/1]).
+ buffer_size/1, binary_passive_recv/1, max_buffer_size/1, bad_address/1,
+ read_packets/1, open_fd/1, connect/1, implicit_inet6/1,
+ recvtos/1, recvtosttl/1, recvttl/1, recvtclass/1,
+ local_basic/1, local_unbound/1,
+ local_fdopen/1, local_fdopen_unbound/1, local_abstract/1]).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
- [send_to_closed, buffer_size, binary_passive_recv,
+ [send_to_closed, buffer_size, binary_passive_recv, max_buffer_size,
bad_address, read_packets, open_fd, connect,
- implicit_inet6, active_n].
+ implicit_inet6, active_n,
+ recvtos, recvtosttl, recvttl, recvtclass,
+ {group, local}].
groups() ->
- [].
+ [{local, [],
+ [local_basic, local_unbound,
+ local_fdopen, local_fdopen_unbound, local_abstract]}].
init_per_suite(Config) ->
Config.
@@ -54,36 +62,40 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
ok.
+init_per_group(local, Config) ->
+ case gen_udp:open(0, [local]) of
+ {ok,S} ->
+ ok = gen_udp:close(S),
+ Config;
+ {error,eafnosupport} ->
+ {skip, "AF_LOCAL not supported"}
+ end;
init_per_group(_GroupName, Config) ->
Config.
+end_per_group(local, _Config) ->
+ delete_local_filenames();
end_per_group(_GroupName, Config) ->
Config.
init_per_testcase(_Case, Config) ->
- ?line Dog=test_server:timetrap(?default_timeout),
- [{watchdog, Dog}|Config].
+ Config.
-end_per_testcase(_Case, Config) ->
- Dog=?config(watchdog, Config),
- test_server:timetrap_cancel(Dog),
+end_per_testcase(_Case, _Config) ->
ok.
%%-------------------------------------------------------------
%% Send two packets to a closed port (on some systems this causes the socket
%% to be closed).
-send_to_closed(doc) ->
- ["Tests core functionality."];
-send_to_closed(suite) ->
- [];
+%% Tests core functionality.
send_to_closed(Config) when is_list(Config) ->
- ?line {ok, Sock} = gen_udp:open(0),
- ?line ok = gen_udp:send(Sock, {127,0,0,1}, ?CLOSED_PORT, "foo"),
+ {ok, Sock} = gen_udp:open(0),
+ ok = gen_udp:send(Sock, {127,0,0,1}, ?CLOSED_PORT, "foo"),
timer:sleep(2),
- ?line ok = gen_udp:send(Sock, {127,0,0,1}, ?CLOSED_PORT, "foo"),
- ?line ok = gen_udp:close(Sock),
+ ok = gen_udp:send(Sock, {127,0,0,1}, ?CLOSED_PORT, "foo"),
+ ok = gen_udp:close(Sock),
ok.
@@ -91,19 +103,16 @@ send_to_closed(Config) when is_list(Config) ->
%%-------------------------------------------------------------
%% Test that the UDP socket buffer sizes are settable
-buffer_size(suite) ->
- [];
-buffer_size(doc) ->
- ["Test UDP buffer size setting."];
+%% Test UDP buffer size setting.
buffer_size(Config) when is_list(Config) ->
- ?line Len = 256,
- ?line Bin = list_to_binary(lists:seq(0, Len-1)),
- ?line M = 8192 div Len,
- ?line Spec0 =
+ Len = 256,
+ Bin = list_to_binary(lists:seq(0, Len-1)),
+ M = 8192 div Len,
+ Spec0 =
[{opt,M},{safe,M-3},{long,M+1},
{opt,2*M},{safe,2*M-3},{long,2*M+1},
{opt,4*M},{safe,4*M-3},{long,4*M+1}],
- ?line Spec =
+ Spec =
[case Tag of
opt ->
[{recbuf,Val*Len},{sndbuf,(Val + 2)*Len}];
@@ -115,12 +124,12 @@ buffer_size(Config) when is_list(Config) ->
[truncated,emsgsize,timeout]}
end || {Tag,Val} <- Spec0],
%%
- ?line {ok, ClientSocket} = gen_udp:open(0, [binary]),
- ?line {ok, ClientPort} = inet:port(ClientSocket),
- ?line Client = self(),
- ?line ClientIP = {127,0,0,1},
- ?line ServerIP = {127,0,0,1},
- ?line Server =
+ {ok, ClientSocket} = gen_udp:open(0, [binary]),
+ {ok, ClientPort} = inet:port(ClientSocket),
+ Client = self(),
+ ClientIP = {127,0,0,1},
+ ServerIP = {127,0,0,1},
+ Server =
spawn_link(
fun () ->
{ok, ServerSocket} = gen_udp:open(0, [binary]),
@@ -130,78 +139,77 @@ buffer_size(Config) when is_list(Config) ->
ServerSocket, 1, Spec),
ok = gen_udp:close(ServerSocket)
end),
- ?line Mref = erlang:monitor(process, Server),
- ?line receive
- {Server,port,ServerPort} ->
- ?line buffer_size_client(Server, ServerIP, ServerPort,
- ClientSocket, 1, Spec)
- end,
- ?line ok = gen_udp:close(ClientSocket),
- ?line receive
- {'DOWN',Mref,_,_,normal} ->
- ?line ok
- end.
+ Mref = erlang:monitor(process, Server),
+ receive
+ {Server,port,ServerPort} ->
+ buffer_size_client(Server, ServerIP, ServerPort,
+ ClientSocket, 1, Spec)
+ end,
+ ok = gen_udp:close(ClientSocket),
+ receive
+ {'DOWN',Mref,_,_,normal} ->
+ ok
+ end.
buffer_size_client(_, _, _, _, _, []) ->
- ?line ok;
+ ok;
buffer_size_client(Server, IP, Port,
Socket, Cnt, [Opts|T]) when is_list(Opts) ->
- ?line io:format("buffer_size_client Cnt=~w setopts ~p.~n", [Cnt,Opts]),
- ?line ok = inet:setopts(Socket, Opts),
- ?line Server ! {self(),setopts,Cnt},
- ?line receive {Server,setopts,Cnt} -> ok end,
- ?line buffer_size_client(Server, IP, Port, Socket, Cnt+1, T);
+ io:format("buffer_size_client Cnt=~w setopts ~p.~n", [Cnt,Opts]),
+ ok = inet:setopts(Socket, Opts),
+ Server ! {self(),setopts,Cnt},
+ receive {Server,setopts,Cnt} -> ok end,
+ buffer_size_client(Server, IP, Port, Socket, Cnt+1, T);
buffer_size_client(Server, IP, Port,
Socket, Cnt, [{B,Replies}|T]=Opts) when is_binary(B) ->
- ?line io:format(
- "buffer_size_client Cnt=~w send size ~w expecting ~p.~n",
- [Cnt,size(B),Replies]),
- ?line ok = gen_udp:send(Socket, IP, Port, <<Cnt,B/binary>>),
- ?line receive
- {Server,Cnt,Reply} ->
- ?line Tag =
- if
- is_tuple(Reply) ->
- element(1, Reply);
- is_atom(Reply) ->
- Reply
- end,
- ?line case lists:member(Tag, Replies) of
- true -> ok;
- false ->
- ?line
- ?t:fail({reply_mismatch,Cnt,Reply,Replies,
- byte_size(B),
- inet:getopts(Socket,
- [sndbuf,recbuf])})
- end,
- ?line buffer_size_client(Server, IP, Port, Socket, Cnt+1, T)
- after 1313 ->
- ?line buffer_size_client(Server, IP, Port, Socket, Cnt, Opts)
- end.
+ io:format(
+ "buffer_size_client Cnt=~w send size ~w expecting ~p.~n",
+ [Cnt,size(B),Replies]),
+ ok = gen_udp:send(Socket, IP, Port, <<Cnt,B/binary>>),
+ receive
+ {Server,Cnt,Reply} ->
+ Tag =
+ if
+ is_tuple(Reply) ->
+ element(1, Reply);
+ is_atom(Reply) ->
+ Reply
+ end,
+ case lists:member(Tag, Replies) of
+ true -> ok;
+ false ->
+ ct:fail({reply_mismatch,Cnt,Reply,Replies,
+ byte_size(B),
+ inet:getopts(Socket,
+ [sndbuf,recbuf])})
+ end,
+ buffer_size_client(Server, IP, Port, Socket, Cnt+1, T)
+ after 1313 ->
+ buffer_size_client(Server, IP, Port, Socket, Cnt, Opts)
+ end.
buffer_size_server(_, _, _, _, _, []) ->
ok;
buffer_size_server(Client, IP, Port,
Socket, Cnt, [Opts|T]) when is_list(Opts) ->
receive {Client,setopts,Cnt} -> ok end,
- ?line io:format("buffer_size_server Cnt=~w setopts ~p.~n", [Cnt,Opts]),
+ io:format("buffer_size_server Cnt=~w setopts ~p.~n", [Cnt,Opts]),
ok = inet:setopts(Socket, Opts),
Client ! {self(),setopts,Cnt},
buffer_size_server(Client, IP, Port, Socket, Cnt+1, T);
buffer_size_server(Client, IP, Port,
Socket, Cnt, [{B,_}|T]) when is_binary(B) ->
- ?line io:format(
- "buffer_size_server Cnt=~w expecting size ~w.~n",
- [Cnt,size(B)]),
+ io:format(
+ "buffer_size_server Cnt=~w expecting size ~w.~n",
+ [Cnt,size(B)]),
Client !
{self(),Cnt,
case buffer_size_server_recv(Socket, IP, Port, Cnt) of
D when is_binary(D) ->
SizeD = byte_size(D),
- ?line io:format(
- "buffer_size_server Cnt=~w received size ~w.~n",
- [Cnt,SizeD]),
+ io:format(
+ "buffer_size_server Cnt=~w received size ~w.~n",
+ [Cnt,SizeD]),
case B of
D ->
correct;
@@ -211,9 +219,9 @@ buffer_size_server(Client, IP, Port,
{unexpected,D}
end;
Error ->
- ?line io:format(
- "buffer_size_server Cnt=~w received error ~w.~n",
- [Cnt,Error]),
+ io:format(
+ "buffer_size_server Cnt=~w received error ~w.~n",
+ [Cnt,Error]),
Error
end},
buffer_size_server(Client, IP, Port, Socket, Cnt+1, T).
@@ -231,55 +239,57 @@ buffer_size_server_recv(Socket, IP, Port, Cnt) ->
end.
+%%-------------------------------------------------------------
+%% OTP-15206: Keep buffer small for udp
+%%-------------------------------------------------------------
+max_buffer_size(Config) when is_list(Config) ->
+ {ok, Socket} = gen_udp:open(0, [binary]),
+ ok = inet:setopts(Socket,[{recbuf, 1 bsl 20}]),
+ {ok, [{buffer, 65536}]} = inet:getopts(Socket,[buffer]),
+ gen_udp:close(Socket).
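%% Background sketch (an assumption based on OTP-15206, not asserted by this
%% test): for UDP the user-level 'buffer' option no longer follows a large
%% 'recbuf' blindly but is capped at 65536, since a single datagram cannot
%% exceed 64 kB anyway; a caller that really wants a larger user-space
%% buffer would have to set {buffer,N} explicitly.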
%%-------------------------------------------------------------
%% OTP-3823 gen_udp:recv does not return address in binary mode
%%
-binary_passive_recv(suite) ->
- [];
-binary_passive_recv(doc) ->
- ["OTP-3823 gen_udp:recv does not return address in binary mode"];
+%% OTP-3823 gen_udp:recv does not return address in binary mode.
binary_passive_recv(Config) when is_list(Config) ->
- ?line D1 = "The quick brown fox jumps over a lazy dog",
- ?line D2 = list_to_binary(D1),
- ?line D3 = ["The quick", <<" brown ">>, "fox jumps ", <<"over ">>,
- <<>>, $a, [[], " lazy ", <<"dog">>]],
- ?line D2 = iolist_to_binary(D3),
- ?line B = D2,
- ?line {ok, R} = gen_udp:open(0, [binary, {active, false}]),
- ?line {ok, RP} = inet:port(R),
- ?line {ok, S} = gen_udp:open(0),
- ?line {ok, SP} = inet:port(S),
- ?line ok = gen_udp:send(S, localhost, RP, D1),
- ?line {ok, {{127, 0, 0, 1}, SP, B}} = gen_udp:recv(R, byte_size(B)+1),
- ?line ok = gen_udp:send(S, localhost, RP, D2),
- ?line {ok, {{127, 0, 0, 1}, SP, B}} = gen_udp:recv(R, byte_size(B)+1),
- ?line ok = gen_udp:send(S, localhost, RP, D3),
- ?line {ok, {{127, 0, 0, 1}, SP, B}} = gen_udp:recv(R, byte_size(B)+1),
- ?line ok = gen_udp:close(S),
- ?line ok = gen_udp:close(R),
+ D1 = "The quick brown fox jumps over a lazy dog",
+ D2 = list_to_binary(D1),
+ D3 = ["The quick", <<" brown ">>, "fox jumps ", <<"over ">>,
+ <<>>, $a, [[], " lazy ", <<"dog">>]],
+ D2 = iolist_to_binary(D3),
+ B = D2,
+ {ok, R} = gen_udp:open(0, [binary, {active, false}]),
+ {ok, RP} = inet:port(R),
+ {ok, S} = gen_udp:open(0),
+ {ok, SP} = inet:port(S),
+ ok = gen_udp:send(S, localhost, RP, D1),
+ {ok, {{127, 0, 0, 1}, SP, B}} = gen_udp:recv(R, byte_size(B)+1),
+ ok = gen_udp:send(S, localhost, RP, D2),
+ {ok, {{127, 0, 0, 1}, SP, B}} = gen_udp:recv(R, byte_size(B)+1),
+ ok = gen_udp:send(S, localhost, RP, D3),
+ {ok, {{127, 0, 0, 1}, SP, B}} = gen_udp:recv(R, byte_size(B)+1),
+ ok = gen_udp:close(S),
+ ok = gen_udp:close(R),
ok.
%%-------------------------------------------------------------
%% OTP-3836 inet_udp crashes when IP-address is larger than 255.
-bad_address(suite) ->
- [];
-bad_address(doc) ->
- ["OTP-3836 inet_udp crashes when IP-address is larger than 255."];
+%% OTP-3836 inet_udp crashes when IP-address is larger than 255.
bad_address(Config) when is_list(Config) ->
- ?line {ok, R} = gen_udp:open(0),
- ?line {ok, RP} = inet:port(R),
- ?line {ok, S} = gen_udp:open(0),
- ?line {ok, _SP} = inet:port(S),
- ?line {'EXIT', badarg} =
+ {ok, R} = gen_udp:open(0),
+ {ok, RP} = inet:port(R),
+ {ok, S} = gen_udp:open(0),
+ {ok, _SP} = inet:port(S),
+ {'EXIT', badarg} =
(catch gen_udp:send(S, {127,0,0,1,0}, RP, "void")),
- ?line {'EXIT', badarg} =
+ {'EXIT', badarg} =
(catch gen_udp:send(S, {127,0,0,256}, RP, "void")),
- ?line ok = gen_udp:close(S),
- ?line ok = gen_udp:close(R),
+ ok = gen_udp:close(S),
+ ok = gen_udp:close(R),
ok.
@@ -288,58 +298,55 @@ bad_address(Config) when is_list(Config) ->
%%
%% Starts a slave node that on command sends a bunch of messages
%% to our UDP port. The receiving process just receives and
-%% ignores the incoming messages, but counts them.
-%% A tracing process traces the receiving process for
-%% 'receive' and scheduling events. From the trace,
-%% message contents is verified; and, how many messages
-%% are received per in/out scheduling, which should be
-%% the same as the read_packets parameter.
-%%
-%% What happens on the SMP emulator remains to be seen...
-%%
+%% ignores the incoming messages.
+%% A tracing process traces the receiving port for
+%% 'send' and scheduling events. From the trace we compute
+%% how many messages are received per in/out scheduling,
+%% which should never be more than the read_packets parameter.
-read_packets(doc) ->
- ["OTP-6249 UDP option for number of packet reads."];
+%% OTP-6249 UDP option for number of packet reads.
read_packets(Config) when is_list(Config) ->
- case erlang:system_info(smp_support) of
- false ->
- read_packets_1();
- true ->
- %% We would need some new sort of tracing to test this
- %% option reliably in an SMP emulator.
- {skip,"SMP emulator"}
- end.
-
-read_packets_1() ->
- ?line N1 = 5,
- ?line N2 = 7,
- ?line {ok,R} = gen_udp:open(0, [{read_packets,N1}]),
- ?line {ok,RP} = inet:port(R),
- ?line {ok,Node} = start_node(gen_udp_SUITE_read_packets),
- ?line Die = make_ref(),
- ?line Loop = erlang:spawn_link(fun () -> infinite_loop(Die) end),
+ N1 = 5,
+ N2 = 1,
+ Msgs = 30000,
+ {ok,R} = gen_udp:open(0, [{read_packets,N1}]),
+ {ok,RP} = inet:port(R),
+ {ok,Node} = start_node(gen_udp_SUITE_read_packets),
+ Die = make_ref(),
%%
- ?line Msgs1 = [erlang:integer_to_list(M) || M <- lists:seq(1, N1*3)],
- ?line [V1|_] = read_packets_test(R, RP, Msgs1, Node),
- ?line {ok,[{read_packets,N1}]} = inet:getopts(R, [read_packets]),
+ {V1, Trace1} = read_packets_test(R, RP, Msgs, Node),
+ {ok,[{read_packets,N1}]} = inet:getopts(R, [read_packets]),
%%
- ?line ok = inet:setopts(R, [{read_packets,N2}]),
- ?line Msgs2 = [erlang:integer_to_list(M) || M <- lists:seq(1, N2*3)],
- ?line [V2|_] = read_packets_test(R, RP, Msgs2, Node),
- ?line {ok,[{read_packets,N2}]} = inet:getopts(R, [read_packets]),
+ ok = inet:setopts(R, [{read_packets,N2}]),
+ {V2, Trace2} = read_packets_test(R, RP, Msgs, Node),
+ {ok,[{read_packets,N2}]} = inet:getopts(R, [read_packets]),
%%
- ?line stop_node(Node),
- ?line Mref = erlang:monitor(process, Loop),
- ?line Loop ! Die,
- ?line receive
- {'DOWN',Mref,_,_, normal} ->
- case {V1,V2} of
- {N1,N2} ->
- ok;
- _ when V1 =/= N1, V2 =/= N2 ->
- ok
- end
- end.
+ stop_node(Node),
+ ct:log("N1=~p, V1=~p vs N2=~p, V2=~p",[N1,V1,N2,V2]),
+
+    dump_terms(Config, "trace1.terms", Trace1),
+ dump_terms(Config, "trace2.terms", Trace2),
+
+    %% Because of the inherent raciness of the feature it is
+ %% hard to test that it behaves correctly.
+ %% Right now (OTP 21) a port task takes 5% of the
+ %% allotted port task reductions to execute, so
+ %% the max number of executions a port is allowed to
+ %% do before being re-scheduled is N * 20
+
+ if
+ V1 > (N1 * 20) ->
+ ct:fail("Got ~p msgs, max was ~p", [V1, N1]);
+ V2 > (N2 * 20) ->
+ ct:fail("Got ~p msgs, max was ~p", [V2, N2]);
+ true ->
+ ok
+ end.
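%% Worked numbers for the bound above (assuming the 5%-per-port-task figure
%% from the comment): with {read_packets,5} at most 5 * 20 = 100 datagrams
%% should be seen per in/out scheduling, and with {read_packets,1} at most
%% 1 * 20 = 20.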
+
+dump_terms(Config, Name, Terms) ->
+ FName = filename:join(proplists:get_value(priv_dir, Config),Name),
+ file:write_file(FName, term_to_binary(Terms)),
+ ct:log("Logged terms to ~s",[FName]).
infinite_loop(Die) ->
receive
@@ -351,7 +358,6 @@ infinite_loop(Die) ->
end.
read_packets_test(R, RP, Msgs, Node) ->
- Len = length(Msgs),
Receiver = self(),
Tracer =
spawn_link(
@@ -376,24 +382,24 @@ read_packets_test(R, RP, Msgs, Node) ->
[link,{priority,high}]),
receive
{Sender,{port,SP}} ->
- erlang:trace(self(), true,
- [running,'receive',{tracer,Tracer}]),
+ erlang:trace(R, true,
+ [running_ports,'send',{tracer,Tracer}]),
erlang:yield(),
Sender ! {Receiver,go},
- read_packets_recv(Len),
- erlang:trace(self(), false, [all]),
+ read_packets_recv(Msgs),
+ erlang:trace(R, false, [all]),
Tracer ! {Receiver,get_trace},
receive
{Tracer,{trace,Trace}} ->
- read_packets_verify(R, SP, Msgs, Trace)
+ {read_packets_verify(R, SP, Trace), Trace}
end
end.
-read_packets_send(S, RP, [Msg|Msgs]) ->
- ok = gen_udp:send(S, localhost, RP, Msg),
- read_packets_send(S, RP, Msgs);
-read_packets_send(_S, _RP, []) ->
- ok.
+read_packets_send(_S, _RP, 0) ->
+ ok;
+read_packets_send(S, RP, Msgs) ->
+ ok = gen_udp:send(S, localhost, RP, "UDP FLOOOOOOD"),
+ read_packets_send(S, RP, Msgs - 1).
read_packets_recv(0) ->
ok;
@@ -405,23 +411,24 @@ read_packets_recv(N) ->
timeout
end.
-read_packets_verify(R, SP, Msg, Trace) ->
- lists:reverse(
- lists:sort(read_packets_verify(R, SP, Msg, Trace, 0))).
-
-read_packets_verify(R, SP, Msgs, [{trace,Self,OutIn,_}|Trace], M)
- when Self =:= self(), OutIn =:= out;
- Self =:= self(), OutIn =:= in ->
- push(M, read_packets_verify(R, SP, Msgs, Trace, 0));
-read_packets_verify(R, SP, [Msg|Msgs],
- [{trace,Self,'receive',{udp,R,{127,0,0,1},SP,Msg}}
- |Trace], M)
+read_packets_verify(R, SP, Trace) ->
+ [Max | _] = Pkts = lists:reverse(lists:sort(read_packets_verify(R, SP, Trace, 0))),
+ ct:pal("~p",[lists:sublist(Pkts,10)]),
+ Max.
+
+read_packets_verify(R, SP, [{trace,R,OutIn,_}|Trace], M)
+ when OutIn =:= out; OutIn =:= in ->
+ push(M, read_packets_verify(R, SP, Trace, 0));
+read_packets_verify(R, SP, [{trace, R,'receive',timeout}|Trace], M) ->
+ push(M, read_packets_verify(R, SP, Trace, 0));
+read_packets_verify(R, SP,
+ [{trace,R,'send',{udp,R,{127,0,0,1},SP,_Msg}, Self} | Trace], M)
when Self =:= self() ->
- read_packets_verify(R, SP, Msgs, Trace, M+1);
-read_packets_verify(_R, _SP, [], [], M) ->
+ read_packets_verify(R, SP, Trace, M+1);
+read_packets_verify(_R, _SP, [], M) ->
push(M, []);
-read_packets_verify(_R, _SP, Msgs, Trace, M) ->
- ?t:fail({read_packets_verify,mismatch,Msgs,Trace,M}).
+read_packets_verify(_R, _SP, Trace, M) ->
+ ct:fail({read_packets_verify,mismatch,Trace,M}).
push(0, Vs) ->
Vs;
@@ -438,10 +445,7 @@ flush() ->
-open_fd(suite) ->
- [];
-open_fd(doc) ->
- ["Test that the 'fd' option works"];
+%% Test that the 'fd' option works.
open_fd(Config) when is_list(Config) ->
Msg = "Det gör ont när knoppar brista. Varför skulle annars våren tveka?",
Addr = {127,0,0,1},
@@ -460,10 +464,10 @@ open_fd(Config) when is_list(Config) ->
{udp,S3,Addr,P2,Msg} ->
ok
after 1000 ->
- ?t:fail(io_lib:format("~w", [flush()]))
+ ct:fail(io_lib:format("~w", [flush()]))
end
after 1000 ->
- ?t:fail(io_lib:format("~w", [flush()]))
+ ct:fail(io_lib:format("~w", [flush()]))
end.
active_n(Config) when is_list(Config) ->
@@ -569,88 +573,390 @@ active_n(Config) when is_list(Config) ->
ok = gen_udp:close(S1),
ok.
-%
-% Utils
-%
+
+
+recvtos(_Config) ->
+ test_recv_opts(
+ inet, [{recvtos,tos,96}],
+ fun recvtos_ok/2).
+
+recvtosttl(_Config) ->
+ test_recv_opts(
+ inet, [{recvtos,tos,96},{recvttl,ttl,33}],
+ fun (OSType, OSVer) ->
+ recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer)
+ end).
+
+recvttl(_Config) ->
+ test_recv_opts(
+ inet, [{recvttl,ttl,33}],
+ fun recvttl_ok/2).
+
+recvtclass(_Config) ->
+ {ok,IFs} = inet:getifaddrs(),
+ case
+ [Name ||
+ {Name,Opts} <- IFs,
+ lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)]
+ of
+ [_] ->
+ test_recv_opts(
+ inet6, [{recvtclass,tclass,224}],
+ fun recvtclass_ok/2);
+ [] ->
+            {skip,{ipv6_not_supported,IFs}}
+ end.
+
+%% These version numbers are just above the highest noted in daily tests
+%% where the test fails for a plausible reason, that is, the lowest
+%% versions where we can expect that the test might succeed, so we
+%% skip on platforms older than this.
+%%
+%% On newer versions it might be fixed, but we'll see about that
+%% when machines with newer versions get installed...
+%% If the test still fails for a plausible reason, these
+%% version numbers should simply be increased.
+%% Or maybe we should change to only test on known good platforms?
+
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%%
+recvtos_ok({unix,_}, _) -> true;
+recvtos_ok(_, _) -> false.
+
+recvttl_ok({unix,_}, _) -> true;
+recvttl_ok(_, _) -> false.
+
+%% Using the option returns einval, so it is not implemented.
+recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {9,9,0});
+recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {2,6,11});
+%%
+recvtclass_ok({unix,_}, _) -> true;
+recvtclass_ok(_, _) -> false.
+
+semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) ->
+ if
+ X1 > X2 -> false;
+ X1 < X2 -> true;
+ Y1 > Y2 -> false;
+ Y1 < Y2 -> true;
+ Z1 > Z2 -> false;
+ Z1 < Z2 -> true;
+ true -> false
+ end;
+semver_lt(_, {_,_,_}) -> false.
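%% Illustrative only: semver_lt/2 is a lexicographic {Major,Minor,Patch}
%% comparison, and anything that is not a 3-tuple is never considered lower:
%%   semver_lt({2,6,10}, {2,6,11}) -> true
%%   semver_lt({3,0,0},  {2,6,11}) -> false
%%   semver_lt("unknown", {1,0,0}) -> false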
+
+test_recv_opts(Family, Spec, OSFilter) ->
+ OSType = os:type(),
+ OSVer = os:version(),
+ case OSFilter(OSType, OSVer) of
+ true ->
+ io:format("Os: ~p, ~p~n", [OSType,OSVer]),
+ test_recv_opts(Family, Spec, OSType, OSVer);
+ false ->
+ {skip,{not_supported_for_os_version,{OSType,OSVer}}}
+ end.
+%%
+test_recv_opts(Family, Spec, _OSType, _OSVer) ->
+ Timeout = 5000,
+ RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec],
+ TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec],
+ FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec],
+ Opts = [Opt || {_,Opt,_} <- Spec],
+ OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec],
+ TrueRecvOpts_OptsVals = TrueRecvOpts ++ OptsVals,
+ Addr =
+ case Family of
+ inet ->
+ {127,0,0,1};
+ inet6 ->
+ {0,0,0,0,0,0,0,1}
+ end,
+ %%
+ {ok,S1} =
+ gen_udp:open(0, [Family,binary,{active,false}|TrueRecvOpts]),
+ {ok,P1} = inet:port(S1),
+ {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S1, FalseRecvOpts),
+ {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S1, TrueRecvOpts_OptsVals),
+ {ok,TrueRecvOpts_OptsVals} = inet:getopts(S1, RecvOpts ++ Opts),
+ %%
+ {ok,S2} =
+ gen_udp:open(0, [Family,binary,{active,true}|FalseRecvOpts]),
+ {ok,P2} = inet:port(S2),
+ {ok,FalseRecvOpts_OptsVals2} = inet:getopts(S2, RecvOpts ++ Opts),
+ OptsVals2 = FalseRecvOpts_OptsVals2 -- FalseRecvOpts,
+ %%
+ ok = gen_udp:send(S2, Addr, P1, <<"abcde">>),
+ ok = gen_udp:send(S1, Addr, P2, <<"fghij">>),
+ {ok,{_,P2,OptsVals3,<<"abcde">>}} = gen_udp:recv(S1, 0, Timeout),
+ verify_sets_eq(OptsVals3, OptsVals2),
+ receive
+ {udp,S2,_,P1,<<"fghij">>} ->
+ ok;
+ Other1 ->
+ exit({unexpected,Other1})
+ after Timeout ->
+ exit(timeout)
+ end,
+ %%
+ ok = inet:setopts(S1, FalseRecvOpts),
+ {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S2, TrueRecvOpts),
+ {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts),
+ %%
+ ok = gen_udp:send(S2, Addr, P1, <<"klmno">>),
+ ok = gen_udp:send(S1, Addr, P2, <<"pqrst">>),
+ {ok,{_,P2,<<"klmno">>}} = gen_udp:recv(S1, 0, Timeout),
+ receive
+ {udp,S2,_,P1,OptsVals4,<<"pqrst">>} ->
+ verify_sets_eq(OptsVals4, OptsVals);
+ Other2 ->
+ exit({unexpected,Other2})
+ after Timeout ->
+ exit(timeout)
+ end,
+ ok = gen_udp:close(S1),
+ ok = gen_udp:close(S2),
+%% exit({{OSType,OSVer},success}), % In search for the truth
+ ok.
+
+verify_sets_eq(L1, L2) ->
+ L = lists:sort(L1),
+ case lists:sort(L2) of
+ L ->
+ ok;
+ _ ->
+ exit({sets_neq,L1,L2})
+ end.
+
+
+local_basic(_Config) ->
+ SFile = local_filename(server),
+ SAddr = {local,bin_filename(SFile)},
+ CFile = local_filename(client),
+ CAddr = {local,bin_filename(CFile)},
+ _ = file:delete(SFile),
+ _ = file:delete(CFile),
+ %%
+ S = ok(gen_udp:open(0, [{ifaddr,{local,SFile}},{active,false}])),
+ C = ok(gen_udp:open(0, [{ifaddr,{local,CFile}},{active,false}])),
+ SAddr = ok(inet:sockname(S)),
+ CAddr = ok(inet:sockname(C)),
+ local_handshake(S, SAddr, C, CAddr),
+ ok = gen_udp:close(S),
+ ok = gen_udp:close(C),
+ %%
+ ok = file:delete(SFile),
+ ok = file:delete(CFile),
+ ok.
+
+local_unbound(_Config) ->
+ SFile = local_filename(server),
+ SAddr = {local,bin_filename(SFile)},
+ _ = file:delete(SFile),
+ %%
+ S = ok(gen_udp:open(0, [{ifaddr,SAddr},{active,false}])),
+ C = ok(gen_udp:open(0, [local,{active,false}])),
+ SAddr = ok(inet:sockname(S)),
+ local_handshake(S, SAddr, C, undefined),
+ ok = gen_udp:close(S),
+ ok = gen_udp:close(C),
+ %%
+ ok = file:delete(SFile),
+ ok.
+
+local_fdopen(_Config) ->
+ SFile = local_filename(server),
+ SAddr = {local,bin_filename(SFile)},
+ CFile = local_filename(client),
+ CAddr = {local,bin_filename(CFile)},
+ _ = file:delete(SFile),
+ _ = file:delete(CFile),
+ %%
+ S0 = ok(gen_udp:open(0, [{ifaddr,SAddr},{active,false}])),
+ C = ok(gen_udp:open(0, [{ifaddr,{local,CFile}},{active,false}])),
+ SAddr = ok(inet:sockname(S0)),
+ CAddr = ok(inet:sockname(C)),
+ Fd = ok(prim_inet:getfd(S0)),
+ S = ok(gen_udp:open(0, [{fd,Fd},local,{active,false}])),
+ SAddr = ok(inet:sockname(S)),
+ local_handshake(S, SAddr, C, CAddr),
+ ok = gen_udp:close(S),
+ ok = gen_udp:close(S0),
+ ok = gen_udp:close(C),
+ %%
+ ok = file:delete(SFile),
+ ok = file:delete(CFile),
+ ok.
+
+local_fdopen_unbound(_Config) ->
+ SFile = local_filename(server),
+ SAddr = {local,bin_filename(SFile)},
+ _ = file:delete(SFile),
+ %%
+ S = ok(gen_udp:open(0, [{ifaddr,SAddr},{active,false}])),
+ C0 = ok(gen_udp:open(0, [local,{active,false}])),
+ SAddr = ok(inet:sockname(S)),
+ Fd = ok(prim_inet:getfd(C0)),
+ C = ok(gen_udp:open(0, [{fd,Fd},local,{active,false}])),
+ local_handshake(S, SAddr, C, undefined),
+ ok = gen_udp:close(S),
+ ok = gen_udp:close(C),
+ ok = gen_udp:close(C0),
+ %%
+ ok = file:delete(SFile),
+ ok.
+
+local_abstract(_Config) ->
+ case os:type() of
+ {unix,linux} ->
+ S = ok(gen_udp:open(0, [{ifaddr,{local,<<>>}},{active,false}])),
+ C = ok(gen_udp:open(0, [{ifaddr,{local,<<>>}},{active,false}])),
+ {local,_} = SAddr = ok(inet:sockname(S)),
+ {local,_} = CAddr = ok(inet:sockname(C)),
+ local_handshake(S, SAddr, C, CAddr),
+ ok = gen_udp:close(S),
+ ok = gen_udp:close(C),
+ ok;
+ _ ->
+ {skip,"AF_LOCAL Abstract Addresses only supported on Linux"}
+ end.
+
+
+local_handshake(S, SAddr, C, CAddr) ->
+ SData = "9876543210",
+ CData = "0123456789",
+ ok = gen_udp:send(C, SAddr, 0, CData),
+    case ok(gen_udp:recv(S, 112)) of
+ {{unspec,<<>>}, 0, CData} when CAddr =:= undefined ->
+ ok;
+ {{local,<<>>}, 0, CData} when CAddr =:= undefined ->
+ ok;
+ {CAddr, 0, CData} when CAddr =/= undefined ->
+ ok = gen_udp:send(S, CAddr, 0, SData),
+            {SAddr, 0, SData} = ok(gen_udp:recv(C, 112)),
+ ok
+
+ end.
+
+%%
+%% Utils
+%%
+
start_node(Name) ->
Pa = filename:dirname(code:which(?MODULE)),
- ?t:start_node(Name, slave, [{args, "-pa " ++ Pa}]).
+ test_server:start_node(Name, slave, [{args, "-pa " ++ Pa}]).
stop_node(Node) ->
- ?t:stop_node(Node).
+ test_server:stop_node(Node).
-connect(suite) ->
- [];
-connect(doc) ->
- ["Test that connect/3 has effect"];
+%% Test that connect/3 has effect.
connect(Config) when is_list(Config) ->
- ?line Addr = {127,0,0,1},
- ?line {ok,S1} = gen_udp:open(0),
- ?line {ok,P1} = inet:port(S1),
- ?line {ok,S2} = gen_udp:open(0),
- ?line ok = inet:setopts(S2, [{active,false}]),
- ?line ok = gen_udp:close(S1),
- ?line ok = gen_udp:connect(S2, Addr, P1),
- ?line ok = gen_udp:send(S2, <<16#deadbeef:32>>),
- ?line ok = case gen_udp:recv(S2, 0, 5) of
- {error,econnrefused} -> ok;
- {error,econnreset} -> ok;
- Other -> Other
- end,
+ Addr = {127,0,0,1},
+ {ok,S1} = gen_udp:open(0),
+ {ok,P1} = inet:port(S1),
+ {ok,S2} = gen_udp:open(0),
+ ok = inet:setopts(S2, [{active,false}]),
+ ok = gen_udp:close(S1),
+ ok = gen_udp:connect(S2, Addr, P1),
+ ok = gen_udp:send(S2, <<16#deadbeef:32>>),
+ ok = case gen_udp:recv(S2, 0, 500) of
+ {error,econnrefused} -> ok;
+ {error,econnreset} -> ok;
+ Other -> Other
+ end,
ok.
implicit_inet6(Config) when is_list(Config) ->
- ?line Host = ok(inet:gethostname()),
- ?line
- case inet:getaddr(Host, inet6) of
- {ok,Addr} ->
- ?line implicit_inet6(Host, Addr);
- {error,Reason} ->
- {skip,
- "Can not look up IPv6 address: "
- ++atom_to_list(Reason)}
- end.
+ Host = ok(inet:gethostname()),
+ case inet:getaddr(Host, inet6) of
+ {ok,Addr} ->
+ implicit_inet6(Host, Addr);
+ {error,Reason} ->
+ {skip,
+ "Can not look up IPv6 address: "
+ ++atom_to_list(Reason)}
+ end.
implicit_inet6(Host, Addr) ->
- ?line Active = {active,false},
- ?line
- case gen_udp:open(0, [inet6,Active]) of
- {ok,S1} ->
- ?line Loopback = {0,0,0,0,0,0,0,1},
- ?line io:format("~s ~p~n", ["::1",Loopback]),
- ?line implicit_inet6(S1, Active, Loopback),
- ?line ok = gen_udp:close(S1),
- %%
- ?line Localhost = "localhost",
- ?line Localaddr = ok(inet:getaddr(Localhost, inet6)),
- ?line io:format("~s ~p~n", [Localhost,Localaddr]),
- ?line S2 = ok(gen_udp:open(0, [{ip,Localaddr},Active])),
- ?line implicit_inet6(S2, Active, Localaddr),
- ?line ok = gen_udp:close(S2),
- %%
- ?line io:format("~s ~p~n", [Host,Addr]),
- ?line S3 = ok(gen_udp:open(0, [{ifaddr,Addr},Active])),
- ?line implicit_inet6(S3, Active, Addr),
- ?line ok = gen_udp:close(S3);
- _ ->
- {skip,"IPv6 not supported"}
- end.
+ Active = {active,false},
+ Loopback = {0,0,0,0,0,0,0,1},
+ case gen_udp:open(0, [inet6,Active,{ip, Loopback}]) of
+ {ok,S1} ->
+ io:format("~s ~p~n", ["::1",Loopback]),
+ implicit_inet6(S1, Active, Loopback),
+ ok = gen_udp:close(S1),
+ %%
+ Localaddr = ok(get_localaddr()),
+ S2 = ok(gen_udp:open(0, [{ip,Localaddr},Active])),
+ implicit_inet6(S2, Active, Localaddr),
+ ok = gen_udp:close(S2),
+ %%
+ io:format("~s ~p~n", [Host,Addr]),
+ S3 = ok(gen_udp:open(0, [{ifaddr,Addr},Active])),
+ implicit_inet6(S3, Active, Addr),
+ ok = gen_udp:close(S3);
+ _ ->
+ {skip,"IPv6 not supported"}
+ end.
implicit_inet6(S1, Active, Addr) ->
- ?line P1 = ok(inet:port(S1)),
- ?line S2 = ok(gen_udp:open(0, [inet6,Active])),
- ?line P2 = ok(inet:port(S2)),
- ?line ok = gen_udp:connect(S2, Addr, P1),
- ?line ok = gen_udp:connect(S1, Addr, P2),
- ?line {Addr,P2} = ok(inet:peername(S1)),
- ?line {Addr,P1} = ok(inet:peername(S2)),
- ?line {Addr,P1} = ok(inet:sockname(S1)),
- ?line {Addr,P2} = ok(inet:sockname(S2)),
- ?line ok = gen_udp:send(S1, Addr, P2, "ping"),
- ?line {Addr,P1,"ping"} = ok(gen_udp:recv(S2, 1024, 1000)),
- ?line ok = gen_udp:send(S2, Addr, P1, "pong"),
- ?line {Addr,P2,"pong"} = ok(gen_udp:recv(S1, 1024)),
- ?line ok = gen_udp:close(S2).
-
-ok({ok,V}) -> V.
+ P1 = ok(inet:port(S1)),
+ S2 = ok(gen_udp:open(0, [inet6,Active])),
+ P2 = ok(inet:port(S2)),
+ ok = gen_udp:connect(S2, Addr, P1),
+ ok = gen_udp:connect(S1, Addr, P2),
+ {Addr,P2} = ok(inet:peername(S1)),
+ {Addr,P1} = ok(inet:peername(S2)),
+ {Addr,P1} = ok(inet:sockname(S1)),
+ {Addr,P2} = ok(inet:sockname(S2)),
+ ok = gen_udp:send(S1, Addr, P2, "ping"),
+ {Addr,P1,"ping"} = ok(gen_udp:recv(S2, 1024, 1000)),
+ ok = gen_udp:send(S2, Addr, P1, "pong"),
+ {Addr,P2,"pong"} = ok(gen_udp:recv(S1, 1024)),
+ ok = gen_udp:close(S2).
+
+ok({ok,V}) -> V;
+ok(NotOk) ->
+ try throw(not_ok)
+ catch
+ throw:Thrown:Stacktrace ->
+ erlang:raise(
+ error, {Thrown, NotOk}, tl(Stacktrace))
+ end.
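%% Illustrative behaviour of ok/1 above (an assumption, not asserted by the
%% suite):
%%   ok({ok,42})        -> 42
%%   ok({error,eacces}) -> raises error:{not_ok,{error,eacces}}, with the
%%                         ok/1 frame dropped so the crash points at the caller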
+
+
+local_filename(Tag) ->
+ "/tmp/" ?MODULE_STRING "_" ++ os:getpid() ++ "_" ++ atom_to_list(Tag).
+
+bin_filename(String) ->
+ unicode:characters_to_binary(String, file:native_name_encoding()).
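%% Illustrative example (the OS pid part varies): local_filename(server)
%% might yield "/tmp/gen_udp_SUITE_12345_server", and bin_filename/1 turns
%% such a name into a binary in the file system's native encoding.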
+
+delete_local_filenames() ->
+ _ =
+ [file:delete(F) ||
+ F <-
+ filelib:wildcard(
+ "/tmp/" ?MODULE_STRING "_" ++ os:getpid() ++ "_*")],
+ ok.
+
+get_localaddr() ->
+ get_localaddr(["localhost", "localhost6", "ip6-localhost"]).
+
+get_localaddr([]) ->
+ {error, localaddr_not_found};
+get_localaddr([Localhost|Ls]) ->
+ case inet:getaddr(Localhost, inet6) of
+ {ok, LocalAddr} ->
+ io:format("~s ~p~n", [Localhost, LocalAddr]),
+ {ok, LocalAddr};
+ _ ->
+ get_localaddr(Ls)
+ end.
diff --git a/lib/kernel/test/global_SUITE.erl b/lib/kernel/test/global_SUITE.erl
index 73ee86eba4..8eab36e308 100644
--- a/lib/kernel/test/global_SUITE.erl
+++ b/lib/kernel/test/global_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,8 +19,6 @@
%%
-module(global_SUITE).
-%-define(line_trace, 1).
-
-export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2,
init_per_suite/1, end_per_suite/1,
names/1, names_hidden/1, locks/1, locks_hidden/1,
@@ -51,7 +49,7 @@
-compile(export_all).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-define(NODES, [node()|nodes()]).
@@ -61,7 +59,8 @@
-define(GLOBAL_LOCK, global).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]}].
all() ->
case init:get_argument(ring_line) of
@@ -86,10 +85,10 @@ groups() ->
ring]}].
init_per_group(_GroupName, Config) ->
- Config.
+ Config.
end_per_group(_GroupName, Config) ->
- Config.
+ Config.
init_per_suite(Config) ->
Config.
@@ -99,9 +98,9 @@ end_per_suite(_Config) ->
-define(TESTCASE, testcase_name).
--define(testcase, ?config(?TESTCASE, Config)).
+-define(testcase, proplists:get_value(?TESTCASE, Config)).
-define(nodes_tag, '$global_nodes').
--define(registered, ?config(registered, Config)).
+-define(registered, proplists:get_value(registered, Config)).
init_per_testcase(Case, Config) when is_atom(Case), is_list(Config) ->
ok = gen_server:call(global_name_server, high_level_trace_start,infinity),
@@ -115,16 +114,16 @@ init_per_testcase(Case, Config) when is_atom(Case), is_list(Config) ->
end_per_testcase(_Case, Config) ->
ct:log("Calling end_per_testcase!",[]),
- ?line write_high_level_trace(Config),
- ?line _ =
+ write_high_level_trace(Config),
+ _ =
gen_server:call(global_name_server, high_level_trace_stop, infinity),
[global:unregister_name(N) || N <- global:registered_names()],
- ?line InitRegistered = ?registered,
- ?line Registered = registered(),
- ?line [io:format("~s local names: ~p~n", [What, N]) ||
- {What, N} <- [{"Added", Registered -- InitRegistered},
- {"Removed", InitRegistered -- Registered}],
- N =/= []],
+ InitRegistered = ?registered,
+ Registered = registered(),
+ [io:format("~s local names: ~p~n", [What, N]) ||
+ {What, N} <- [{"Added", Registered -- InitRegistered},
+ {"Removed", InitRegistered -- Registered}],
+ N =/= []],
ok.
@@ -147,12 +146,11 @@ end_per_testcase(_Case, Config) ->
%%% and releases the lock. Now the name should exist on both our own node
%%% and on the slave node (we wait until that is true; it seems that we
%%% can do rpc calls to another node before the connection is really up).
-register_1(suite) -> [];
register_1(Config) when is_list(Config) ->
Timeout = 15,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
+ init_condition(Config),
P = spawn_link(?MODULE, lock_global, [self(), Config]),
receive
{P, ok} ->
@@ -161,7 +159,7 @@ register_1(Config) when is_list(Config) ->
end,
P ! step2,
io:format("p1: sent step2~n"),
- ?line yes = global:register_name(foo, self()),
+ yes = global:register_name(foo, self()),
io:format("p1: registered~n"),
P ! step3,
receive
@@ -172,11 +170,11 @@ register_1(Config) when is_list(Config) ->
I =:= I2 ->
ok;
true ->
- test_server:fail({notsync, I, I2})
+ ct:fail({notsync, I, I2})
end,
- ?line _ = global:unregister_name(foo),
+ _ = global:unregister_name(foo),
write_high_level_trace(Config),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
lock_global(Parent, Config) ->
@@ -203,7 +201,7 @@ lock_global(Parent, Config) ->
io:format("p2: received step3~n"),
I = global:whereis_name(foo),
io:format("p2: name ~p~n", [I]),
- ?line ?UNTIL(I =:= rpc:call(N1, global, whereis_name, [foo])),
+ ?UNTIL(I =:= rpc:call(N1, global, whereis_name, [foo])),
I2 = I,
slave:stop(N1),
io:format("p2: name2 ~p~n", [I2]),
@@ -216,75 +214,73 @@ lock_global(Parent, Config) ->
%%% 'try_again_locker' would be called, and this time cause both 1 and 2
%%% to obtain a lock for 'global' on node 3, which would keep the
%%% name registry from ever becoming consistent again.
-both_known_1(suite) -> [];
both_known_1(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
+ init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ OrigNames = global:registered_names(),
- ?line [Cp1, Cp2, Cp3] = start_nodes([cp1, cp2, cp3], slave, Config),
+ [Cp1, Cp2, Cp3] = start_nodes([cp1, cp2, cp3], slave, Config),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
- ?line rpc_disconnect_node(Cp1, Cp2, Config),
+ rpc_disconnect_node(Cp1, Cp2, Config),
- ?line {_Pid1, yes} = rpc:call(Cp1, ?MODULE, start_proc, [p1]),
- ?line {_Pid2, yes} = rpc:call(Cp2, ?MODULE, start_proc, [p2]),
+ {_Pid1, yes} = rpc:call(Cp1, ?MODULE, start_proc, [p1]),
+ {_Pid2, yes} = rpc:call(Cp2, ?MODULE, start_proc, [p2]),
- ?line Names10 = rpc:call(Cp1, global, registered_names, []),
- ?line Names20 = rpc:call(Cp2, global, registered_names, []),
- ?line Names30 = rpc:call(Cp3, global, registered_names, []),
+ Names10 = rpc:call(Cp1, global, registered_names, []),
+ Names20 = rpc:call(Cp2, global, registered_names, []),
+ Names30 = rpc:call(Cp3, global, registered_names, []),
Names1 = Names10 -- OrigNames,
Names2 = Names20 -- OrigNames,
Names3 = Names30 -- OrigNames,
- ?line [p1] = lists:sort(Names1),
- ?line [p2] = lists:sort(Names2),
- ?line [p1, p2] = lists:sort(Names3),
+ [p1] = lists:sort(Names1),
+ [p2] = lists:sort(Names2),
+ [p1, p2] = lists:sort(Names3),
- ?line Locker = spawn(Cp3, ?MODULE, lock_global2, [{global, l3},
- self()]),
+ Locker = spawn(Cp3, ?MODULE, lock_global2, [{global, l3},
+ self()]),
- ?line receive
- {locked, S} ->
- true = S
- end,
+ receive
+ {locked, S} ->
+ true = S
+ end,
- ?line pong = rpc:call(Cp1, net_adm, ping, [Cp2]),
+ pong = rpc:call(Cp1, net_adm, ping, [Cp2]),
%% Bring cp1 and cp2 together, while someone has locked global.
%% They will now loop in 'loop_locker'.
- ?line Names10_2 = rpc:call(Cp1, global, registered_names, []),
- ?line Names20_2 = rpc:call(Cp2, global, registered_names, []),
- ?line Names30_2 = rpc:call(Cp3, global, registered_names, []),
+ Names10_2 = rpc:call(Cp1, global, registered_names, []),
+ Names20_2 = rpc:call(Cp2, global, registered_names, []),
+ Names30_2 = rpc:call(Cp3, global, registered_names, []),
Names1_2 = Names10_2 -- OrigNames,
Names2_2 = Names20_2 -- OrigNames,
Names3_2 = Names30_2 -- OrigNames,
- ?line [p1] = lists:sort(Names1_2),
- ?line [p2] = lists:sort(Names2_2),
- ?line [p1, p2] = lists:sort(Names3_2),
+ [p1] = lists:sort(Names1_2),
+ [p2] = lists:sort(Names2_2),
+ [p1, p2] = lists:sort(Names3_2),
%% Let go of the lock, and expect the lockers to resolve the name
%% registry.
Locker ! {ok, self()},
- ?line
?UNTIL(begin
- ?line Names10_3 = rpc:call(Cp1, global, registered_names, []),
- ?line Names20_3 = rpc:call(Cp2, global, registered_names, []),
- ?line Names30_3 = rpc:call(Cp3, global, registered_names, []),
-
+ Names10_3 = rpc:call(Cp1, global, registered_names, []),
+ Names20_3 = rpc:call(Cp2, global, registered_names, []),
+ Names30_3 = rpc:call(Cp3, global, registered_names, []),
+
Names1_3 = Names10_3 -- OrigNames,
Names2_3 = Names20_3 -- OrigNames,
Names3_3 = Names30_3 -- OrigNames,
-
+
N1 = lists:sort(Names1_3),
N2 = lists:sort(Names2_3),
N3 = lists:sort(Names3_3),
@@ -296,51 +292,49 @@ both_known_1(Config) when is_list(Config) ->
stop_node(Cp2),
stop_node(Cp3),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-lost_unregister(suite) -> [];
-lost_unregister(doc) ->
- ["OTP-6428. An unregistered name reappears."];
+%% OTP-6428. An unregistered name reappears.
lost_unregister(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
+ init_condition(Config),
- ?line {ok, B} = start_node(b, Config),
- ?line {ok, C} = start_node(c, Config),
+ {ok, B} = start_node(b, Config),
+ {ok, C} = start_node(c, Config),
Nodes = [node(), B, C],
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
- % start a proc and register it
- ?line {Pid, yes} = start_proc(test),
+ %% start a proc and register it
+ {Pid, yes} = start_proc(test),
- ?line ?UNTIL(Pid =:= global:whereis_name(test)),
- ?line check_everywhere(Nodes, test, Config),
+ ?UNTIL(Pid =:= global:whereis_name(test)),
+ check_everywhere(Nodes, test, Config),
- ?line rpc_disconnect_node(B, C, Config),
- ?line check_everywhere(Nodes, test, Config),
- ?line _ = rpc:call(B, global, unregister_name, [test]),
- ?line ?UNTIL(undefined =:= global:whereis_name(test)),
- ?line Pid = rpc:call(C, global, whereis_name, [test]),
- ?line check_everywhere(Nodes--[C], test, Config),
- ?line pong = rpc:call(B, net_adm, ping, [C]),
+ rpc_disconnect_node(B, C, Config),
+ check_everywhere(Nodes, test, Config),
+ _ = rpc:call(B, global, unregister_name, [test]),
+ ?UNTIL(undefined =:= global:whereis_name(test)),
+ Pid = rpc:call(C, global, whereis_name, [test]),
+ check_everywhere(Nodes--[C], test, Config),
+ pong = rpc:call(B, net_adm, ping, [C]),
%% Now the name has reappeared on node B.
- ?line ?UNTIL(Pid =:= global:whereis_name(test)),
- ?line check_everywhere(Nodes, test, Config),
+ ?UNTIL(Pid =:= global:whereis_name(test)),
+ check_everywhere(Nodes, test, Config),
exit_p(Pid),
- ?line ?UNTIL(undefined =:= global:whereis_name(test)),
- ?line check_everywhere(Nodes, test, Config),
+ ?UNTIL(undefined =:= global:whereis_name(test)),
+ check_everywhere(Nodes, test, Config),
write_high_level_trace(Config),
stop_node(B),
stop_node(C),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-define(UNTIL_LOOP, 300).
@@ -350,7 +344,7 @@ lost_unregister(Config) when is_list(Config) ->
init_high_level_trace(Time) ->
Mul = try
test_server:timetrap_scale_factor()
- catch _:_ -> 1
+ catch _:_ -> 1
end,
put(?end_tag, msec() + Time * Mul * 1000),
%% Assures that started nodes start the high level trace automatically.
@@ -395,7 +389,7 @@ write_high_level_trace(Nodes, Config) ->
%% 'info' returns more than the trace, which is nice.
Data = [{Node, {info, rpc:call(Node, global, info, [])}} ||
Node <- Nodes],
- Dir = ?config(priv_dir, Config),
+ Dir = proplists:get_value(priv_dir, Config),
DataFile = filename:join([Dir, lists:concat(["global_", ?testcase])]),
file:write_file(DataFile, term_to_binary({high_level_trace, When, Data})).
@@ -413,37 +407,35 @@ lock_global2(Id, Parent) ->
%% erl -sname XXX -rsh ctrsh where XX not in [cp1, cp2, cp3]
%%-----------------------------------------------------------------
-%cp1 - cp3 are started, and the name 'test' registered for a process on
-%test_server. Then it is checked that the name is registered on all
-%nodes, using whereis_name. Check that the same
-%name can't be registered with another value. Exit the registered
-%process and check that the name disappears. Register a new process
-%(Pid2) under the name 'test'. Let another new process (Pid3)
-%reregister itself under the same name. Test global:send/2. Test
-%unregister. Kill Pid3. Start a process (Pid6) on cp3,
-%register it as 'test', stop cp1 - cp3 and check that 'test' disappeared.
-%Kill Pid2 and check that 'test' isn't registered.
-
-names(suite) -> [];
+%% cp1 - cp3 are started, and the name 'test' is registered for a process on
+%% test_server. Then it is checked that the name is registered on all
+%% nodes, using whereis_name. Check that the same
+%% name can't be registered with another value. Exit the registered
+%% process and check that the name disappears. Register a new process
+%% (Pid2) under the name 'test'. Let another new process (Pid3)
+%% reregister itself under the same name. Test global:send/2. Test
+%% unregister. Kill Pid3. Start a process (Pid6) on cp3,
+%% register it as 'test', stop cp1 - cp3 and check that 'test' disappeared.
+%% Kill Pid2 and check that 'test' isn't registered.
+
names(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line {ok, Cp1} = start_node(cp1, Config),
- ?line {ok, Cp2} = start_node(cp2, Config),
- ?line {ok, Cp3} = start_node(cp3, Config),
+ {ok, Cp1} = start_node(cp1, Config),
+ {ok, Cp2} = start_node(cp2, Config),
+ {ok, Cp3} = start_node(cp3, Config),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
- % start a proc and register it
- ?line {Pid, yes} = start_proc(test),
+ %% start a proc and register it
+ {Pid, yes} = start_proc(test),
- % test that it is registered at all nodes
- ?line
- ?UNTIL(begin
+ %% test that it is registered at all nodes
+ ?UNTIL(begin
(Pid =:= global:whereis_name(test)) and
(Pid =:= rpc:call(Cp1, global, whereis_name, [test])) and
(Pid =:= rpc:call(Cp2, global, whereis_name, [test])) and
@@ -451,156 +443,148 @@ names(Config) when is_list(Config) ->
([test] =:= global:registered_names() -- OrigNames)
end),
- % try to register the same name
- ?line no = global:register_name(test, self()),
- ?line no = rpc:call(Cp1, global, register_name, [test, self()]),
+ %% try to register the same name
+ no = global:register_name(test, self()),
+ no = rpc:call(Cp1, global, register_name, [test, self()]),
- % let process exit, check that it is unregistered automatically
+ %% let process exit, check that it is unregistered automatically
exit_p(Pid),
- ?line
- ?UNTIL((undefined =:= global:whereis_name(test)) and
+ ?UNTIL((undefined =:= global:whereis_name(test)) and
(undefined =:= rpc:call(Cp1, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp2, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp3, global, whereis_name, [test]))),
- % test re_register
- ?line {Pid2, yes} = start_proc(test),
- ?line ?UNTIL(Pid2 =:= rpc:call(Cp3, global, whereis_name, [test])),
+ %% test re_register
+ {Pid2, yes} = start_proc(test),
+ ?UNTIL(Pid2 =:= rpc:call(Cp3, global, whereis_name, [test])),
Pid3 = rpc:call(Cp3, ?MODULE, start_proc2, [test]),
- ?line ?UNTIL(Pid3 =:= rpc:call(Cp3, global, whereis_name, [test])),
+ ?UNTIL(Pid3 =:= rpc:call(Cp3, global, whereis_name, [test])),
Pid3 = global:whereis_name(test),
- % test sending
+ %% test sending
global:send(test, {ping, self()}),
receive
{pong, Cp3} -> ok
after
- 2000 -> test_server:fail(timeout1)
+ 2000 -> ct:fail(timeout1)
end,
rpc:call(Cp1, global, send, [test, {ping, self()}]),
receive
{pong, Cp3} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line _ = global:unregister_name(test),
- ?line
- ?UNTIL((undefined =:= global:whereis_name(test)) and
+ _ = global:unregister_name(test),
+ ?UNTIL((undefined =:= global:whereis_name(test)) and
(undefined =:= rpc:call(Cp1, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp2, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp3, global, whereis_name, [test]))),
exit_p(Pid3),
- ?line ?UNTIL(undefined =:= global:whereis_name(test)),
+ ?UNTIL(undefined =:= global:whereis_name(test)),
- % register a proc
- ?line {_Pid6, yes} = rpc:call(Cp3, ?MODULE, start_proc, [test]),
+ %% register a proc
+ {_Pid6, yes} = rpc:call(Cp3, ?MODULE, start_proc, [test]),
write_high_level_trace(Config),
- % stop the nodes, and make sure names are released.
+
+ %% stop the nodes, and make sure names are released.
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
- ?line ?UNTIL(undefined =:= global:whereis_name(test)),
+ ?UNTIL(undefined =:= global:whereis_name(test)),
exit_p(Pid2),
- ?line ?UNTIL(undefined =:= global:whereis_name(test)),
- ?line init_condition(Config),
+ ?UNTIL(undefined =:= global:whereis_name(test)),
+ init_condition(Config),
ok.
-names_hidden(suite) -> [];
-names_hidden(doc) ->
- ["Tests that names on a hidden node doesn't interfere with names on "
- "visible nodes."];
+%% Tests that names on a hidden node don't interfere with names on
+%% visible nodes.
names_hidden(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
- ?line OrigNodes = nodes(),
-
- ?line {ok, Cp1} = start_node(cp1, Config),
- ?line {ok, Cp2} = start_node(cp2, Config),
- ?line {ok, Cp3} = start_hidden_node(cp3, Config),
- ?line pong = rpc:call(Cp1, net_adm, ping, [Cp3]),
- ?line pong = rpc:call(Cp3, net_adm, ping, [Cp2]),
- ?line pong = rpc:call(Cp3, net_adm, ping, [node()]),
-
- ?line [] = [Cp1, Cp2 | OrigNodes] -- nodes(),
-
- % start a proc on hidden node and register it
- ?line {HPid, yes} = rpc:call(Cp3, ?MODULE, start_proc, [test]),
- ?line Cp3 = node(HPid),
-
- % Check that it didn't get registered on visible nodes
- ?line
- ?UNTIL((undefined =:= global:whereis_name(test)) and
+ init_condition(Config),
+ OrigNames = global:registered_names(),
+ OrigNodes = nodes(),
+
+ {ok, Cp1} = start_node(cp1, Config),
+ {ok, Cp2} = start_node(cp2, Config),
+ {ok, Cp3} = start_hidden_node(cp3, Config),
+ pong = rpc:call(Cp1, net_adm, ping, [Cp3]),
+ pong = rpc:call(Cp3, net_adm, ping, [Cp2]),
+ pong = rpc:call(Cp3, net_adm, ping, [node()]),
+
+ [] = [Cp1, Cp2 | OrigNodes] -- nodes(),
+
+ %% start a proc on hidden node and register it
+ {HPid, yes} = rpc:call(Cp3, ?MODULE, start_proc, [test]),
+ Cp3 = node(HPid),
+
+ %% Check that it didn't get registered on visible nodes
+ ?UNTIL((undefined =:= global:whereis_name(test)) and
(undefined =:= rpc:call(Cp1, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp2, global, whereis_name, [test]))),
- % start a proc on visible node and register it
- ?line {Pid, yes} = start_proc(test),
- ?line true = (Pid =/= HPid),
+ %% start a proc on visible node and register it
+ {Pid, yes} = start_proc(test),
+ true = (Pid =/= HPid),
- % test that it is registered at all nodes
- ?line
- ?UNTIL((Pid =:= global:whereis_name(test)) and
+ %% test that it is registered at all nodes
+ ?UNTIL((Pid =:= global:whereis_name(test)) and
(Pid =:= rpc:call(Cp1, global, whereis_name, [test])) and
(Pid =:= rpc:call(Cp2, global, whereis_name, [test])) and
(HPid =:= rpc:call(Cp3, global, whereis_name, [test])) and
([test] =:= global:registered_names() -- OrigNames)),
- % try to register the same name
- ?line no = global:register_name(test, self()),
- ?line no = rpc:call(Cp1, global, register_name, [test, self()]),
+ %% try to register the same name
+ no = global:register_name(test, self()),
+ no = rpc:call(Cp1, global, register_name, [test, self()]),
- % let process exit, check that it is unregistered automatically
+ %% let process exit, check that it is unregistered automatically
exit_p(Pid),
- ?line
- ?UNTIL((undefined =:= global:whereis_name(test)) and
+ ?UNTIL((undefined =:= global:whereis_name(test)) and
(undefined =:= rpc:call(Cp1, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp2, global, whereis_name, [test])) and
(HPid =:= rpc:call(Cp3, global, whereis_name, [test]))),
- % test re_register
- ?line {Pid2, yes} = start_proc(test),
- ?line ?UNTIL(Pid2 =:= rpc:call(Cp2, global, whereis_name, [test])),
+ %% test re_register
+ {Pid2, yes} = start_proc(test),
+ ?UNTIL(Pid2 =:= rpc:call(Cp2, global, whereis_name, [test])),
Pid3 = rpc:call(Cp2, ?MODULE, start_proc2, [test]),
- ?line ?UNTIL(Pid3 =:= rpc:call(Cp2, global, whereis_name, [test])),
- ?line Pid3 = global:whereis_name(test),
+ ?UNTIL(Pid3 =:= rpc:call(Cp2, global, whereis_name, [test])),
+ Pid3 = global:whereis_name(test),
- % test sending
- ?line Pid3 = global:send(test, {ping, self()}),
+ %% test sending
+ Pid3 = global:send(test, {ping, self()}),
receive
{pong, Cp2} -> ok
after
- 2000 -> test_server:fail(timeout1)
+ 2000 -> ct:fail(timeout1)
end,
rpc:call(Cp1, global, send, [test, {ping, self()}]),
receive
{pong, Cp2} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line _ = rpc:call(Cp3, global, unregister_name, [test]),
- ?line
- ?UNTIL((Pid3 =:= global:whereis_name(test)) and
+ _ = rpc:call(Cp3, global, unregister_name, [test]),
+ ?UNTIL((Pid3 =:= global:whereis_name(test)) and
(Pid3 =:= rpc:call(Cp1, global, whereis_name, [test])) and
(Pid3 =:= rpc:call(Cp2, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp3, global, whereis_name, [test]))),
- ?line _ = global:unregister_name(test),
- ?line
- ?UNTIL((undefined =:= global:whereis_name(test)) and
+ _ = global:unregister_name(test),
+ ?UNTIL((undefined =:= global:whereis_name(test)) and
(undefined =:= rpc:call(Cp1, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp2, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp3, global, whereis_name, [test]))),
@@ -608,277 +592,297 @@ names_hidden(Config) when is_list(Config) ->
exit_p(Pid3),
exit_p(HPid),
- ?line ?UNTIL(undefined =:= global:whereis_name(test)),
+ ?UNTIL(undefined =:= global:whereis_name(test)),
write_high_level_trace(Config),
- % stop the nodes, and make sure names are released.
+
+ %% stop the nodes, and make sure names are released.
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-locks(suite) -> [];
locks(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line {ok, Cp1} = start_node(cp1, Config),
- ?line {ok, Cp2} = start_node(cp2, Config),
- ?line {ok, Cp3} = start_node(cp3, Config),
-
- ?line wait_for_ready_net(Config),
-
- % start two procs
- ?line Pid = start_proc(),
- ?line Pid2 = rpc:call(Cp1, ?MODULE, start_proc, []),
- % set a lock, and make sure noone else can set the same lock
- ?line true = global:set_lock({test_lock, self()}, ?NODES, 1),
- ?line false = req(Pid, {set_lock, test_lock, self()}),
- ?line false = req(Pid2, {set_lock, test_lock, self()}),
- % delete, and let another proc set the lock
+ init_condition(Config),
+ {ok, Cp1} = start_node(cp1, Config),
+ {ok, Cp2} = start_node(cp2, Config),
+ {ok, Cp3} = start_node(cp3, Config),
+
+ wait_for_ready_net(Config),
+
+ %% start two procs
+ Pid = start_proc(),
+ Pid2 = rpc:call(Cp1, ?MODULE, start_proc, []),
+
+    %% set a lock, and make sure no one else can set the same lock
+ true = global:set_lock({test_lock, self()}, ?NODES, 1),
+ false = req(Pid, {set_lock, test_lock, self()}),
+ false = req(Pid2, {set_lock, test_lock, self()}),
+
+ %% delete, and let another proc set the lock
global:del_lock({test_lock, self()}),
- ?line true = req(Pid, {set_lock, test_lock, self()}),
- ?line false = req(Pid2, {set_lock, test_lock, self()}),
- ?line false = global:set_lock({test_lock, self()}, ?NODES,1),
- % kill lock-holding proc, make sure the lock is released
+ true = req(Pid, {set_lock, test_lock, self()}),
+ false = req(Pid2, {set_lock, test_lock, self()}),
+ false = global:set_lock({test_lock, self()}, ?NODES,1),
+
+ %% kill lock-holding proc, make sure the lock is released
exit_p(Pid),
?UNTIL(true =:= global:set_lock({test_lock, self()}, ?NODES,1)),
Pid2 ! {set_lock_loop, test_lock, self()},
- % make sure we don't have the msg
+
+ %% make sure we don't have the msg
receive
- {got_lock, Pid2} -> test_server:fail(got_lock)
+ {got_lock, Pid2} -> ct:fail(got_lock)
after
1000 -> ok
end,
global:del_lock({test_lock, self()}),
- % make sure pid2 got the lock
+
+ %% make sure pid2 got the lock
receive
{got_lock, Pid2} -> ok
after
- % 12000 >> 5000, which is the max time before a new retry for
- % set_lock
- 12000 -> test_server:fail(got_lock2)
+ %% 12000 >> 5000, which is the max time before a new retry for
+ %% set_lock
+ 12000 -> ct:fail(got_lock2)
end,
- % let proc set the same lock
- ?line true = req(Pid2, {set_lock, test_lock, self()}),
- % let proc set new lock
- ?line true = req(Pid2, {set_lock, test_lock2, self()}),
- ?line false = global:set_lock({test_lock, self()},?NODES,1),
- ?line false = global:set_lock({test_lock2, self()}, ?NODES,1),
+ %% let proc set the same lock
+ true = req(Pid2, {set_lock, test_lock, self()}),
+
+ %% let proc set new lock
+ true = req(Pid2, {set_lock, test_lock2, self()}),
+ false = global:set_lock({test_lock, self()},?NODES,1),
+ false = global:set_lock({test_lock2, self()}, ?NODES,1),
exit_p(Pid2),
-% erlang:display({locks1, ets:tab2list(global_locks)}),
?UNTIL(true =:= global:set_lock({test_lock, self()}, ?NODES, 1)),
?UNTIL(true =:= global:set_lock({test_lock2, self()}, ?NODES, 1)),
- ?line global:del_lock({test_lock, self()}),
- ?line global:del_lock({test_lock2, self()}),
-
- % let proc set two locks
- ?line Pid3 = rpc:call(Cp1, ?MODULE, start_proc, []),
- ?line true = req(Pid3, {set_lock, test_lock, self()}),
- ?line true = req(Pid3, {set_lock, test_lock2, self()}),
- % del one lock
- ?line Pid3 ! {del_lock, test_lock2},
- ?line test_server:sleep(100),
- % check that one lock is still set, but not the other
- ?line false = global:set_lock({test_lock, self()}, ?NODES, 1),
- ?line true = global:set_lock({test_lock2, self()}, ?NODES, 1),
- ?line global:del_lock({test_lock2, self()}),
- % kill lock-holder
+ global:del_lock({test_lock, self()}),
+ global:del_lock({test_lock2, self()}),
+
+ %% let proc set two locks
+ Pid3 = rpc:call(Cp1, ?MODULE, start_proc, []),
+ true = req(Pid3, {set_lock, test_lock, self()}),
+ true = req(Pid3, {set_lock, test_lock2, self()}),
+
+ %% del one lock
+ Pid3 ! {del_lock, test_lock2},
+ ct:sleep(100),
+
+ %% check that one lock is still set, but not the other
+ false = global:set_lock({test_lock, self()}, ?NODES, 1),
+ true = global:set_lock({test_lock2, self()}, ?NODES, 1),
+ global:del_lock({test_lock2, self()}),
+
+ %% kill lock-holder
exit_p(Pid3),
-% erlang:display({locks2, ets:tab2list(global_locks)}),
+
?UNTIL(true =:= global:set_lock({test_lock, self()}, ?NODES, 1)),
- ?line global:del_lock({test_lock, self()}),
+ global:del_lock({test_lock, self()}),
?UNTIL(true =:= global:set_lock({test_lock2, self()}, ?NODES, 1)),
- ?line global:del_lock({test_lock2, self()}),
-
- % start one proc on each node
- ?line Pid4 = start_proc(),
- ?line Pid5 = rpc:call(Cp1, ?MODULE, start_proc, []),
- ?line Pid6 = rpc:call(Cp2, ?MODULE, start_proc, []),
- ?line Pid7 = rpc:call(Cp3, ?MODULE, start_proc, []),
- % set lock on two nodes
- ?line true = req(Pid4, {set_lock, test_lock, self(), [node(), Cp1]}),
- ?line false = req(Pid5, {set_lock, test_lock, self(), [node(), Cp1]}),
- % set same lock on other two nodes
- ?line true = req(Pid6, {set_lock, test_lock, self(), [Cp2, Cp3]}),
- ?line false = req(Pid7, {set_lock, test_lock, self(), [Cp2, Cp3]}),
- % release lock
+ global:del_lock({test_lock2, self()}),
+
+ %% start one proc on each node
+ Pid4 = start_proc(),
+ Pid5 = rpc:call(Cp1, ?MODULE, start_proc, []),
+ Pid6 = rpc:call(Cp2, ?MODULE, start_proc, []),
+ Pid7 = rpc:call(Cp3, ?MODULE, start_proc, []),
+
+ %% set lock on two nodes
+ true = req(Pid4, {set_lock, test_lock, self(), [node(), Cp1]}),
+ false = req(Pid5, {set_lock, test_lock, self(), [node(), Cp1]}),
+
+ %% set same lock on other two nodes
+ true = req(Pid6, {set_lock, test_lock, self(), [Cp2, Cp3]}),
+ false = req(Pid7, {set_lock, test_lock, self(), [Cp2, Cp3]}),
+
+ %% release lock
Pid6 ! {del_lock, test_lock, [Cp2, Cp3]},
- % try to set lock on a node that already has the lock
- ?line false = req(Pid6, {set_lock, test_lock, self(), [Cp1, Cp2, Cp3]}),
- % set lock on a node
+ %% try to set lock on a node that already has the lock
+ false = req(Pid6, {set_lock, test_lock, self(), [Cp1, Cp2, Cp3]}),
+
+ %% set lock on a node
exit_p(Pid4),
?UNTIL(true =:= req(Pid5, {set_lock, test_lock, self(), [node(), Cp1]})),
- ?line Pid8 = start_proc(),
- ?line false = req(Pid8, {set_lock, test_lock, self()}),
+ Pid8 = start_proc(),
+ false = req(Pid8, {set_lock, test_lock, self()}),
write_high_level_trace(Config),
- % stop the nodes, and make sure locks are released.
+
+ %% stop the nodes, and make sure locks are released.
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
- ?line test_server:sleep(100),
- ?line true = req(Pid8, {set_lock, test_lock, self()}),
+ ct:sleep(100),
+ true = req(Pid8, {set_lock, test_lock, self()}),
exit_p(Pid8),
- ?line test_server:sleep(10),
+ ct:sleep(10),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-
-locks_hidden(suite) -> [];
-locks_hidden(doc) ->
- ["Tests that locks on a hidden node doesn't interere with locks on "
- "visible nodes."];
+
+%% Tests that locks on a hidden node don't interfere with locks on
+%% visible nodes.
locks_hidden(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNodes = nodes(),
- ?line {ok, Cp1} = start_node(cp1, Config),
- ?line {ok, Cp2} = start_node(cp2, Config),
- ?line {ok, Cp3} = start_hidden_node(cp3, Config),
- ?line pong = rpc:call(Cp1, net_adm, ping, [Cp3]),
- ?line pong = rpc:call(Cp3, net_adm, ping, [Cp2]),
- ?line pong = rpc:call(Cp3, net_adm, ping, [node()]),
-
- ?line [] = [Cp1, Cp2 | OrigNodes] -- nodes(),
-
- % start two procs
- ?line Pid = start_proc(),
- ?line Pid2 = rpc:call(Cp1, ?MODULE, start_proc, []),
- ?line HPid = rpc:call(Cp3, ?MODULE, start_proc, []),
- % Make sure hidden node doesn't interfere with visible nodes lock
- ?line true = req(HPid, {set_lock, test_lock, self()}),
- ?line true = global:set_lock({test_lock, self()}, ?NODES, 1),
- ?line false = req(Pid, {set_lock, test_lock, self()}),
- ?line true = req(HPid, {del_lock_sync, test_lock, self()}),
- ?line false = req(Pid2, {set_lock, test_lock, self()}),
- % delete, and let another proc set the lock
+ init_condition(Config),
+ OrigNodes = nodes(),
+ {ok, Cp1} = start_node(cp1, Config),
+ {ok, Cp2} = start_node(cp2, Config),
+ {ok, Cp3} = start_hidden_node(cp3, Config),
+ pong = rpc:call(Cp1, net_adm, ping, [Cp3]),
+ pong = rpc:call(Cp3, net_adm, ping, [Cp2]),
+ pong = rpc:call(Cp3, net_adm, ping, [node()]),
+
+ [] = [Cp1, Cp2 | OrigNodes] -- nodes(),
+
+ %% start two procs
+ Pid = start_proc(),
+ Pid2 = rpc:call(Cp1, ?MODULE, start_proc, []),
+ HPid = rpc:call(Cp3, ?MODULE, start_proc, []),
+
+    %% Make sure hidden node doesn't interfere with visible nodes' locks
+ true = req(HPid, {set_lock, test_lock, self()}),
+ true = global:set_lock({test_lock, self()}, ?NODES, 1),
+ false = req(Pid, {set_lock, test_lock, self()}),
+ true = req(HPid, {del_lock_sync, test_lock, self()}),
+ false = req(Pid2, {set_lock, test_lock, self()}),
+
+ %% delete, and let another proc set the lock
global:del_lock({test_lock, self()}),
- ?line true = req(Pid, {set_lock, test_lock, self()}),
- ?line false = req(Pid2, {set_lock, test_lock, self()}),
- ?line false = global:set_lock({test_lock, self()}, ?NODES,1),
- % kill lock-holding proc, make sure the lock is released
+ true = req(Pid, {set_lock, test_lock, self()}),
+ false = req(Pid2, {set_lock, test_lock, self()}),
+ false = global:set_lock({test_lock, self()}, ?NODES,1),
+
+ %% kill lock-holding proc, make sure the lock is released
exit_p(Pid),
?UNTIL(true =:= global:set_lock({test_lock, self()}, ?NODES, 1)),
?UNTIL(true =:= req(HPid, {set_lock, test_lock, self()})),
Pid2 ! {set_lock_loop, test_lock, self()},
- % make sure we don't have the msg
+
+ %% make sure we don't have the msg
receive
- {got_lock, Pid2} -> test_server:fail(got_lock)
+ {got_lock, Pid2} -> ct:fail(got_lock)
after
1000 -> ok
end,
global:del_lock({test_lock, self()}),
- % make sure pid2 got the lock
+
+ %% make sure pid2 got the lock
receive
{got_lock, Pid2} -> ok
after
- % 12000 >> 5000, which is the max time before a new retry for
- % set_lock
- 12000 -> test_server:fail(got_lock2)
+ %% 12000 >> 5000, which is the max time before a new retry for
+ %% set_lock
+ 12000 -> ct:fail(got_lock2)
end,
- ?line true = req(HPid, {del_lock_sync, test_lock, self()}),
-
- % let proc set the same lock
- ?line true = req(Pid2, {set_lock, test_lock, self()}),
- % let proc set new lock
- ?line true = req(Pid2, {set_lock, test_lock2, self()}),
- ?line true = req(HPid, {set_lock, test_lock, self()}),
- ?line true = req(HPid, {set_lock, test_lock2, self()}),
+ true = req(HPid, {del_lock_sync, test_lock, self()}),
+
+ %% let proc set the same lock
+ true = req(Pid2, {set_lock, test_lock, self()}),
+
+ %% let proc set new lock
+ true = req(Pid2, {set_lock, test_lock2, self()}),
+ true = req(HPid, {set_lock, test_lock, self()}),
+ true = req(HPid, {set_lock, test_lock2, self()}),
exit_p(HPid),
- ?line false = global:set_lock({test_lock, self()},?NODES,1),
- ?line false = global:set_lock({test_lock2, self()}, ?NODES,1),
+ false = global:set_lock({test_lock, self()},?NODES,1),
+ false = global:set_lock({test_lock2, self()}, ?NODES,1),
+
exit_p(Pid2),
-% erlang:display({locks1, ets:tab2list(global_locks)}),
?UNTIL(true =:= global:set_lock({test_lock, self()}, ?NODES, 1)),
?UNTIL(true =:= global:set_lock({test_lock2, self()}, ?NODES, 1)),
- ?line global:del_lock({test_lock, self()}),
- ?line global:del_lock({test_lock2, self()}),
+ global:del_lock({test_lock, self()}),
+ global:del_lock({test_lock2, self()}),
write_high_level_trace(Config),
- % stop the nodes, and make sure locks are released.
+
+ %% stop the nodes, and make sure locks are released.
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-
-bad_input(suite) -> [];
+
bad_input(Config) when is_list(Config) ->
Timeout = 15,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
+ init_condition(Config),
Pid = whereis(global_name_server),
- ?line {'EXIT', _} = (catch global:set_lock(bad_id)),
- ?line {'EXIT', _} = (catch global:set_lock({id, self()}, bad_nodes)),
- ?line {'EXIT', _} = (catch global:del_lock(bad_id)),
- ?line {'EXIT', _} = (catch global:del_lock({id, self()}, bad_nodes)),
- ?line {'EXIT', _} = (catch global:register_name(name, bad_pid)),
- ?line {'EXIT', _} = (catch global:reregister_name(name, bad_pid)),
- ?line {'EXIT', _} = (catch global:trans(bad_id, {m,f})),
- ?line {'EXIT', _} = (catch global:trans({id, self()}, {m,f}, [node()], -1)),
- ?line Pid = whereis(global_name_server),
- ?line init_condition(Config),
+ {'EXIT', _} = (catch global:set_lock(bad_id)),
+ {'EXIT', _} = (catch global:set_lock({id, self()}, bad_nodes)),
+ {'EXIT', _} = (catch global:del_lock(bad_id)),
+ {'EXIT', _} = (catch global:del_lock({id, self()}, bad_nodes)),
+ {'EXIT', _} = (catch global:register_name(name, bad_pid)),
+ {'EXIT', _} = (catch global:reregister_name(name, bad_pid)),
+ {'EXIT', _} = (catch global:trans(bad_id, {m,f})),
+ {'EXIT', _} = (catch global:trans({id, self()}, {m,f}, [node()], -1)),
+ Pid = whereis(global_name_server),
+ init_condition(Config),
ok.
-names_and_locks(suite) -> [];
names_and_locks(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
-
- ?line {ok, Cp1} = start_node(cp1, Config),
- ?line {ok, Cp2} = start_node(cp2, Config),
- ?line {ok, Cp3} = start_node(cp3, Config),
-
- % start one proc on each node
- ?line PidTS = start_proc(),
- ?line Pid1 = rpc:call(Cp1, ?MODULE, start_proc, []),
- ?line Pid2 = rpc:call(Cp2, ?MODULE, start_proc, []),
- ?line Pid3 = rpc:call(Cp3, ?MODULE, start_proc, []),
- % register some of them
- ?line yes = global:register_name(test1, Pid1),
- ?line yes = global:register_name(test2, Pid2),
- ?line yes = global:register_name(test3, Pid3),
- ?line no = global:register_name(test3, PidTS),
- ?line yes = global:register_name(test4, PidTS),
-
- % set lock on two nodes
- ?line true = req(PidTS, {set_lock, test_lock, self(), [node(), Cp1]}),
- ?line false = req(Pid1, {set_lock, test_lock, self(), [node(), Cp1]}),
- % set same lock on other two nodes
- ?line true = req(Pid2, {set_lock, test_lock, self(), [Cp2, Cp3]}),
- ?line false = req(Pid3, {set_lock, test_lock, self(), [Cp2, Cp3]}),
- % release lock
+ init_condition(Config),
+ OrigNames = global:registered_names(),
+
+ {ok, Cp1} = start_node(cp1, Config),
+ {ok, Cp2} = start_node(cp2, Config),
+ {ok, Cp3} = start_node(cp3, Config),
+
+ %% start one proc on each node
+ PidTS = start_proc(),
+ Pid1 = rpc:call(Cp1, ?MODULE, start_proc, []),
+ Pid2 = rpc:call(Cp2, ?MODULE, start_proc, []),
+ Pid3 = rpc:call(Cp3, ?MODULE, start_proc, []),
+
+ %% register some of them
+ yes = global:register_name(test1, Pid1),
+ yes = global:register_name(test2, Pid2),
+ yes = global:register_name(test3, Pid3),
+ no = global:register_name(test3, PidTS),
+ yes = global:register_name(test4, PidTS),
+
+ %% set lock on two nodes
+ true = req(PidTS, {set_lock, test_lock, self(), [node(), Cp1]}),
+ false = req(Pid1, {set_lock, test_lock, self(), [node(), Cp1]}),
+
+ %% set same lock on other two nodes
+ true = req(Pid2, {set_lock, test_lock, self(), [Cp2, Cp3]}),
+ false = req(Pid3, {set_lock, test_lock, self(), [Cp2, Cp3]}),
+
+ %% release lock
Pid2 ! {del_lock, test_lock, [Cp2, Cp3]},
- ?line test_server:sleep(100),
- % try to set lock on a node that already has the lock
- ?line false = req(Pid2, {set_lock, test_lock, self(), [Cp1, Cp2, Cp3]}),
- % set two locks
- ?line true = req(Pid2, {set_lock, test_lock, self(), [Cp2, Cp3]}),
- ?line true = req(Pid2, {set_lock, test_lock2, self(), [Cp2, Cp3]}),
-
- % kill some processes, make sure all locks/names are released
+ ct:sleep(100),
+
+ %% try to set lock on a node that already has the lock
+ false = req(Pid2, {set_lock, test_lock, self(), [Cp1, Cp2, Cp3]}),
+
+ %% set two locks
+ true = req(Pid2, {set_lock, test_lock, self(), [Cp2, Cp3]}),
+ true = req(Pid2, {set_lock, test_lock2, self(), [Cp2, Cp3]}),
+
+ %% kill some processes, make sure all locks/names are released
exit_p(PidTS),
- ?line ?UNTIL(undefined =:= global:whereis_name(test4)),
- ?line true = global:set_lock({test_lock, self()}, [node(), Cp1], 1),
+ ?UNTIL(undefined =:= global:whereis_name(test4)),
+ true = global:set_lock({test_lock, self()}, [node(), Cp1], 1),
global:del_lock({test_lock, self()}, [node(), Cp1]),
exit_p(Pid2),
- ?line
- ?UNTIL((undefined =:= global:whereis_name(test2)) and
+ ?UNTIL((undefined =:= global:whereis_name(test2)) and
(true =:= global:set_lock({test_lock, self()}, [Cp2, Cp3], 1)) and
(true =:= global:set_lock({test_lock2, self()}, [Cp2, Cp3], 1))),
@@ -888,117 +892,113 @@ names_and_locks(Config) when is_list(Config) ->
exit_p(Pid1),
exit_p(Pid3),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-
-lock_die(suite) -> [];
-lock_die(doc) ->
- ["OTP-6341. Remove locks using monitors."];
+
+%% OTP-6341. Remove locks using monitors.
lock_die(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line {ok, Cp1} = start_node(cp1, Config),
- ?line {ok, Cp2} = start_node(cp2, Config),
+ {ok, Cp1} = start_node(cp1, Config),
+ {ok, Cp2} = start_node(cp2, Config),
%% First test.
LockId = {id, self()},
- ?line Pid2 = start_proc(),
- ?line true = req(Pid2, {set_lock2, LockId, self()}),
+ Pid2 = start_proc(),
+ true = req(Pid2, {set_lock2, LockId, self()}),
- ?line true = global:set_lock(LockId, [Cp1]),
+ true = global:set_lock(LockId, [Cp1]),
%% Id is locked on Cp1 and Cp2 (by Pid2) but not by self():
%% (there is no mon. ref)
- ?line _ = global:del_lock(LockId, [node(), Cp1, Cp2]),
+ _ = global:del_lock(LockId, [node(), Cp1, Cp2]),
+
+ exit_p(Pid2),
- ?line exit_p(Pid2),
-
%% Second test.
- ?line Pid3 = start_proc(),
- ?line true = req(Pid3, {set_lock, id, self(), [Cp1]}),
+ Pid3 = start_proc(),
+ true = req(Pid3, {set_lock, id, self(), [Cp1]}),
%% The lock is removed from Cp1 thanks to monitors.
- ?line exit_p(Pid3),
-
- ?line true = global:set_lock(LockId, [node(), Cp1]),
- ?line _ = global:del_lock(LockId, [node(), Cp1]),
+ exit_p(Pid3),
+
+ true = global:set_lock(LockId, [node(), Cp1]),
+ _ = global:del_lock(LockId, [node(), Cp1]),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_node(Cp1),
stop_node(Cp2),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-name_die(suite) -> [];
-name_die(doc) ->
- ["OTP-6341. Remove names using monitors."];
+%% OTP-6341. Remove names using monitors.
name_die(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
- ?line [Cp1] = Cps = start_nodes([z], peer, Config), % z > test_server
+ init_condition(Config),
+ OrigNames = global:registered_names(),
+ [Cp1] = Cps = start_nodes([z], peer, Config), % z > test_server
Nodes = lists:sort([node() | Cps]),
- ?line wait_for_ready_net(Config),
-
+ wait_for_ready_net(Config),
+
Name = name_die,
- ?line Pid = rpc:call(Cp1, ?MODULE, start_proc, []),
+ Pid = rpc:call(Cp1, ?MODULE, start_proc, []),
%% Test 1. No resolver is called if the same pid is registered on
%% both partitions.
T1 = node(),
Part1 = [T1],
Part2 = [Cp1],
- ?line rpc_cast(Cp1,
- ?MODULE, part_2_2, [Config,
- Part1,
- Part2,
- []]),
- ?line ?UNTIL(is_ready_partition(Config)),
- ?line ?UNTIL(undefined =:= global:whereis_name(Name)),
- ?line yes = global:register_name(Name, Pid),
-
- ?line pong = net_adm:ping(Cp1),
- ?line wait_for_ready_net(Nodes, Config),
- ?line assert_pid(global:whereis_name(Name)),
+ rpc_cast(Cp1,
+ ?MODULE, part_2_2, [Config,
+ Part1,
+ Part2,
+ []]),
+ ?UNTIL(is_ready_partition(Config)),
+ ?UNTIL(undefined =:= global:whereis_name(Name)),
+ yes = global:register_name(Name, Pid),
+
+ pong = net_adm:ping(Cp1),
+ wait_for_ready_net(Nodes, Config),
+ assert_pid(global:whereis_name(Name)),
exit_p(Pid),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ ?UNTIL(OrigNames =:= global:registered_names()),
%% Test 2. Register a name running outside the current partition.
%% Killing the pid will not remove the name from the current
%% partition, unless monitors are used.
- ?line Pid2 = rpc:call(Cp1, ?MODULE, start_proc, []),
- Dir = ?config(priv_dir, Config),
+ Pid2 = rpc:call(Cp1, ?MODULE, start_proc, []),
+ Dir = proplists:get_value(priv_dir, Config),
KillFile = filename:join([Dir, "kill.txt"]),
file:delete(KillFile),
- ?line erlang:spawn(Cp1, fun() -> kill_pid(Pid2, KillFile, Config) end),
- ?line rpc_cast(Cp1,
- ?MODULE, part_2_2, [Config,
- Part1,
- Part2,
- []]),
- ?line ?UNTIL(is_ready_partition(Config)),
- ?line ?UNTIL(undefined =:= global:whereis_name(Name)),
- ?line yes = global:register_name(Name, Pid2),
- ?line touch(KillFile, "kill"),
- ?line file_contents(KillFile, "done", Config),
+ erlang:spawn(Cp1, fun() -> kill_pid(Pid2, KillFile, Config) end),
+ rpc_cast(Cp1,
+ ?MODULE, part_2_2, [Config,
+ Part1,
+ Part2,
+ []]),
+ ?UNTIL(is_ready_partition(Config)),
+ ?UNTIL(undefined =:= global:whereis_name(Name)),
+ yes = global:register_name(Name, Pid2),
+ touch(KillFile, "kill"),
+ file_contents(KillFile, "done", Config),
file:delete(KillFile),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_nodes(Cps),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
kill_pid(Pid, File, Config) ->
@@ -1006,186 +1006,178 @@ kill_pid(Pid, File, Config) ->
exit_p(Pid),
touch(File, "done").
-basic_partition(suite) -> [];
-basic_partition(doc) ->
- ["Tests that two partitioned networks exchange correct info."];
+%% Tests that two partitioned networks exchange correct info.
basic_partition(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
-
- ?line [Cp1, Cp2, Cp3] = start_nodes([cp1, cp2, cp3], peer, Config),
- ?line [Cp1, Cp2, Cp3] = lists:sort(nodes()),
-
- ?line wait_for_ready_net(Config),
-
- % make cp2 and cp3 connected, partitioned from us and cp1
- ?line rpc_cast(Cp2, ?MODULE, part1, [Config, node(), Cp1, Cp3]),
- ?line ?UNTIL(is_ready_partition(Config)),
-
- % start different processes in both partitions
- ?line {Pid, yes} = start_proc(test),
-
- % connect to other partition
- ?line pong = net_adm:ping(Cp2),
- ?line pong = net_adm:ping(Cp3),
- ?line [Cp1, Cp2, Cp3] = lists:sort(nodes()),
-
- % check names
- ?line ?UNTIL(Pid =:= rpc:call(Cp2, global, whereis_name, [test])),
- ?line ?UNTIL(undefined =/= global:whereis_name(test2)),
- ?line Pid2 = global:whereis_name(test2),
- ?line Pid2 = rpc:call(Cp2, global, whereis_name, [test2]),
- ?line assert_pid(Pid2),
- ?line Pid3 = global:whereis_name(test4),
- ?line ?UNTIL(Pid3 =:= rpc:call(Cp1, global, whereis_name, [test4])),
- ?line assert_pid(Pid3),
-
- % kill all procs
- ?line Pid3 = global:send(test4, die),
- % sleep to let the proc die
+ init_condition(Config),
+ OrigNames = global:registered_names(),
+
+ [Cp1, Cp2, Cp3] = start_nodes([cp1, cp2, cp3], peer, Config),
+ [Cp1, Cp2, Cp3] = lists:sort(nodes()),
+
+ wait_for_ready_net(Config),
+
+ %% make cp2 and cp3 connected, partitioned from us and cp1
+ rpc_cast(Cp2, ?MODULE, part1, [Config, node(), Cp1, Cp3]),
+ ?UNTIL(is_ready_partition(Config)),
+
+ %% start different processes in both partitions
+ {Pid, yes} = start_proc(test),
+
+ %% connect to other partition
+ pong = net_adm:ping(Cp2),
+ pong = net_adm:ping(Cp3),
+ [Cp1, Cp2, Cp3] = lists:sort(nodes()),
+
+ %% check names
+ ?UNTIL(Pid =:= rpc:call(Cp2, global, whereis_name, [test])),
+ ?UNTIL(undefined =/= global:whereis_name(test2)),
+ Pid2 = global:whereis_name(test2),
+ Pid2 = rpc:call(Cp2, global, whereis_name, [test2]),
+ assert_pid(Pid2),
+ Pid3 = global:whereis_name(test4),
+ ?UNTIL(Pid3 =:= rpc:call(Cp1, global, whereis_name, [test4])),
+ assert_pid(Pid3),
+
+ %% kill all procs
+ Pid3 = global:send(test4, die),
+ %% sleep to let the proc die
wait_for_exit(Pid3),
- ?line ?UNTIL(undefined =:= global:whereis_name(test4)),
-
+ ?UNTIL(undefined =:= global:whereis_name(test4)),
+
exit_p(Pid),
exit_p(Pid2),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-basic_name_partition(suite) ->
- [];
-basic_name_partition(doc) ->
- ["Creates two partitions with two nodes in each partition.",
- "Tests that names are exchanged correctly, and that EXITs",
- "during connect phase are handled correctly."];
+%% Creates two partitions with two nodes in each partition.
+%% Tests that names are exchanged correctly, and that EXITs
+%% during connect phase are handled correctly.
basic_name_partition(Config) when is_list(Config) ->
Timeout = 60,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line [Cp1, Cp2, Cp3] = start_nodes([cp1, cp2, cp3], peer, Config),
- ?line [Cp1, Cp2, Cp3] = lists:sort(nodes()),
+ [Cp1, Cp2, Cp3] = start_nodes([cp1, cp2, cp3], peer, Config),
+ [Cp1, Cp2, Cp3] = lists:sort(nodes()),
Nodes = ?NODES,
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
- % There used to be more than one name registered for some
- % processes. That was a mistake; there is no support for more than
- % one name per process, and the manual is quite clear about that
- % ("equivalent to the register/2 and whereis/1 BIFs"). The
- % resolver procedure did not take care of such "duplicated" names,
- % which caused this testcase to fail every now and then.
+ %% There used to be more than one name registered for some
+ %% processes. That was a mistake; there is no support for more than
+ %% one name per process, and the manual is quite clear about that
+ %% ("equivalent to the register/2 and whereis/1 BIFs"). The
+ %% resolver procedure did not take care of such "duplicated" names,
+ %% which caused this testcase to fail every now and then.
- % make cp2 and cp3 connected, partitioned from us and cp1
- % us: register name03
- % cp1: register name12
- % cp2: register name12
- % cp3: register name03
-
- ?line rpc_cast(Cp2, ?MODULE, part1_5, [Config, node(), Cp1, Cp3]),
- ?line ?UNTIL(is_ready_partition(Config)),
-
- % start different processes in both partitions
- ?line {_, yes} = start_proc_basic(name03),
- ?line {_, yes} = rpc:call(Cp1, ?MODULE, start_proc_basic, [name12]),
- test_server:sleep(1000),
+ %% make cp2 and cp3 connected, partitioned from us and cp1
+ %% us: register name03
+ %% cp1: register name12
+ %% cp2: register name12
+ %% cp3: register name03
- % connect to other partition
- ?line pong = net_adm:ping(Cp3),
-
- ?line ?UNTIL([Cp1, Cp2, Cp3] =:= lists:sort(nodes())),
- ?line wait_for_ready_net(Config),
- % check names
- ?line Pid03 = global:whereis_name(name03),
- ?line assert_pid(Pid03),
- ?line true = lists:member(node(Pid03), [node(), Cp3]),
- ?line check_everywhere(Nodes, name03, Config),
-
- ?line Pid12 = global:whereis_name(name12),
- ?line assert_pid(Pid12),
- ?line true = lists:member(node(Pid12), [Cp1, Cp2]),
- ?line check_everywhere(Nodes, name12, Config),
-
- % kill all procs
- ?line Pid12 = global:send(name12, die),
- ?line Pid03 = global:send(name03, die),
- % sleep to let the procs die
+ rpc_cast(Cp2, ?MODULE, part1_5, [Config, node(), Cp1, Cp3]),
+ ?UNTIL(is_ready_partition(Config)),
+
+ %% start different processes in both partitions
+ {_, yes} = start_proc_basic(name03),
+ {_, yes} = rpc:call(Cp1, ?MODULE, start_proc_basic, [name12]),
+ ct:sleep(1000),
+
+ %% connect to other partition
+ pong = net_adm:ping(Cp3),
+
+ ?UNTIL([Cp1, Cp2, Cp3] =:= lists:sort(nodes())),
+ wait_for_ready_net(Config),
+
+ %% check names
+ Pid03 = global:whereis_name(name03),
+ assert_pid(Pid03),
+ true = lists:member(node(Pid03), [node(), Cp3]),
+ check_everywhere(Nodes, name03, Config),
+
+ Pid12 = global:whereis_name(name12),
+ assert_pid(Pid12),
+ true = lists:member(node(Pid12), [Cp1, Cp2]),
+ check_everywhere(Nodes, name12, Config),
+
+ %% kill all procs
+ Pid12 = global:send(name12, die),
+ Pid03 = global:send(name03, die),
+
+ %% sleep to let the procs die
wait_for_exit(Pid12),
wait_for_exit(Pid03),
- ?line
?UNTIL(begin
Names = [name03, name12],
lists:duplicate(length(Names), undefined)
=:= [global:whereis_name(Name) || Name <- Names]
end),
-
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-%Peer nodes cp0 - cp6 are started. Break apart the connections from
-%cp3-cp6 to cp0-cp2 and test_server so we get two partitions.
-%In the cp3-cp6 partition, start one process on each node and register
-%using both erlang:register, and global:register (test1 on cp3, test2 on
-%cp4, test3 on cp5, test4 on cp6), using different resolution functions:
-%default for test1, notify_all_name for test2, random_notify_name for test3
-%and one for test4 that sends a message to test_server and keeps the
-%process which is greater in the standard ordering. In the other partition,
-%do the same (test1 on test_server, test2 on cp0, test3 on cp1, test4 on cp2).
-%Sleep a little, then from test_server, connect to cp3-cp6 in order.
-%Check that the values for the registered names are the expected ones, and
-%that the messages from test4 arrive.
-
-advanced_partition(suite) ->
- [];
-advanced_partition(doc) ->
- ["Test that names are resolved correctly when two",
- "partitioned networks connect."];
+%% Peer nodes cp0 - cp6 are started. Break apart the connections from
+%% cp3-cp6 to cp0-cp2 and test_server so we get two partitions.
+%% In the cp3-cp6 partition, start one process on each node and register
+%% using both erlang:register, and global:register (test1 on cp3, test2 on
+%% cp4, test3 on cp5, test4 on cp6), using different resolution functions:
+%% default for test1, notify_all_name for test2, random_notify_name for test3
+%% and one for test4 that sends a message to test_server and keeps the
+%% process which is greater in the standard ordering. In the other partition,
+%% do the same (test1 on test_server, test2 on cp0, test3 on cp1, test4 on cp2).
+%% Sleep a little, then from test_server, connect to cp3-cp6 in order.
+%% Check that the values for the registered names are the expected ones, and
+%% that the messages from test4 arrive.
+
+%% Test that names are resolved correctly when two
+%% partitioned networks connect.
advanced_partition(Config) when is_list(Config) ->
Timeout = 60,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6]
+ [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6]
= start_nodes([cp0, cp1, cp2, cp3, cp4, cp5, cp6], peer, Config),
Nodes = lists:sort([node(), Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6]),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
- % make cp3-cp6 connected, partitioned from us and cp0-cp2
- ?line rpc_cast(Cp3, ?MODULE, part2,
- [Config, self(), node(), Cp0, Cp1, Cp2, Cp3, Cp4, Cp5,Cp6]),
- ?line ?UNTIL(is_ready_partition(Config)),
-
- % start different processes in this partition
- ?line start_procs(self(), Cp0, Cp1, Cp2, Config),
-
- % connect to other partition
- ?line pong = net_adm:ping(Cp3),
- ?line pong = net_adm:ping(Cp4),
- ?line pong = net_adm:ping(Cp5),
- ?line pong = net_adm:ping(Cp6),
-
- ?line wait_for_ready_net(Config),
+ %% make cp3-cp6 connected, partitioned from us and cp0-cp2
+ rpc_cast(Cp3, ?MODULE, part2,
+ [Config, self(), node(), Cp0, Cp1, Cp2, Cp3, Cp4, Cp5,Cp6]),
+ ?UNTIL(is_ready_partition(Config)),
+
+ %% start different processes in this partition
+ start_procs(self(), Cp0, Cp1, Cp2, Config),
+
+ %% connect to other partition
+ pong = net_adm:ping(Cp3),
+ pong = net_adm:ping(Cp4),
+ pong = net_adm:ping(Cp5),
+ pong = net_adm:ping(Cp6),
+
+ wait_for_ready_net(Config),
- ?line
?UNTIL(lists:member(undefined,
[rpc:call(Cp3, erlang, whereis, [test1]),
rpc:call(node(), erlang, whereis, [test1])])),
@@ -1200,43 +1192,42 @@ advanced_partition(Config) when is_list(Config) ->
Mt3 = rpc:call(Cp1, erlang, whereis, [test3]),
_Mt4 = rpc:call(Cp2, erlang, whereis, [test4]),
- % check names
- ?line Pid1 = global:whereis_name(test1),
- ?line Pid1 = rpc:call(Cp3, global, whereis_name, [test1]),
- ?line assert_pid(Pid1),
- ?line true = lists:member(Pid1, [Nt1, Mt1]),
- ?line true = lists:member(undefined, [Nt1, Mt1]),
- ?line check_everywhere(Nodes, test1, Config),
-
- ?line undefined = global:whereis_name(test2),
- ?line undefined = rpc:call(Cp3, global, whereis_name, [test2]),
- ?line yes = sreq(Nt2, {got_notify, self()}),
- ?line yes = sreq(Mt2, {got_notify, self()}),
- ?line check_everywhere(Nodes, test2, Config),
-
- ?line Pid3 = global:whereis_name(test3),
- ?line Pid3 = rpc:call(Cp3, global, whereis_name, [test3]),
- ?line assert_pid(Pid3),
- ?line true = lists:member(Pid3, [Nt3, Mt3]),
- ?line no = sreq(Pid3, {got_notify, self()}),
- ?line yes = sreq(other(Pid3, [Nt2, Nt3]), {got_notify, self()}),
- ?line check_everywhere(Nodes, test3, Config),
-
- ?line Pid4 = global:whereis_name(test4),
- ?line Pid4 = rpc:call(Cp3, global, whereis_name, [test4]),
- ?line assert_pid(Pid4),
-% ?line true = lists:member(Pid4, [Nt4, Mt4]),
- ?line Pid4 = Nt4,
- ?line check_everywhere(Nodes, test4, Config),
-
- ?line 1 = collect_resolves(),
-
- ?line Pid1 = global:send(test1, die),
+ %% check names
+ Pid1 = global:whereis_name(test1),
+ Pid1 = rpc:call(Cp3, global, whereis_name, [test1]),
+ assert_pid(Pid1),
+ true = lists:member(Pid1, [Nt1, Mt1]),
+ true = lists:member(undefined, [Nt1, Mt1]),
+ check_everywhere(Nodes, test1, Config),
+
+ undefined = global:whereis_name(test2),
+ undefined = rpc:call(Cp3, global, whereis_name, [test2]),
+ yes = sreq(Nt2, {got_notify, self()}),
+ yes = sreq(Mt2, {got_notify, self()}),
+ check_everywhere(Nodes, test2, Config),
+
+ Pid3 = global:whereis_name(test3),
+ Pid3 = rpc:call(Cp3, global, whereis_name, [test3]),
+ assert_pid(Pid3),
+ true = lists:member(Pid3, [Nt3, Mt3]),
+ no = sreq(Pid3, {got_notify, self()}),
+ yes = sreq(other(Pid3, [Nt2, Nt3]), {got_notify, self()}),
+ check_everywhere(Nodes, test3, Config),
+
+ Pid4 = global:whereis_name(test4),
+ Pid4 = rpc:call(Cp3, global, whereis_name, [test4]),
+ assert_pid(Pid4),
+ Pid4 = Nt4,
+ check_everywhere(Nodes, test4, Config),
+
+ 1 = collect_resolves(),
+
+ Pid1 = global:send(test1, die),
exit_p(Pid3),
exit_p(Pid4),
wait_for_exit(Pid1),
wait_for_exit(Pid3),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_node(Cp0),
@@ -1246,93 +1237,90 @@ advanced_partition(Config) when is_list(Config) ->
stop_node(Cp4),
stop_node(Cp5),
stop_node(Cp6),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-
-%Peer nodes cp0 - cp6 are started, and partitioned just like in
-%advanced_partition. Start cp8, only connected to test_server. Let cp6
-%break apart from the rest, and 12 s later, ping cp0 and cp3, and
-%register the name test5. After the same 12 s, let cp5 halt.
-%Wait for the death of cp5. Ping cp3 (at the same time as cp6 does).
-%Take down cp2. Start cp7, restart cp2. Ping cp4, cp6 and cp8.
-%Now, expect all nodes to be connected and have the same picture of all
-%registered names.
-
-stress_partition(suite) ->
- [];
-stress_partition(doc) ->
- ["Stress global, make a partitioned net, make some nodes",
- "go up/down a bit."];
+
+%% Peer nodes cp0 - cp6 are started, and partitioned just like in
+%% advanced_partition. Start cp8, only connected to test_server. Let cp6
+%% break apart from the rest, and 12 s later, ping cp0 and cp3, and
+%% register the name test5. After the same 12 s, let cp5 halt.
+%% Wait for the death of cp5. Ping cp3 (at the same time as cp6 does).
+%% Take down cp2. Start cp7, restart cp2. Ping cp4, cp6 and cp8.
+%% Now, expect all nodes to be connected and have the same picture of all
+%% registered names.
+
+%% Stress global, make a partitioned net, make some nodes
+%% go up/down a bit.
stress_partition(Config) when is_list(Config) ->
Timeout = 90,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6]
+ [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6]
= start_nodes([cp0, cp1, cp2, cp3, cp4, cp5, cp6], peer, Config),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
- % make cp3-cp5 connected, partitioned from us and cp0-cp2
- % cp6 is alone (single node). cp6 pings cp0 and cp3 in 12 secs...
- ?line rpc_cast(Cp3, ?MODULE, part3,
- [Config, self(), node(), Cp0, Cp1, Cp2, Cp3, Cp4, Cp5,Cp6]),
- ?line ?UNTIL(is_ready_partition(Config)),
-
- % start different processes in this partition
- ?line start_procs(self(), Cp0, Cp1, Cp2, Config),
+ %% make cp3-cp5 connected, partitioned from us and cp0-cp2
+ %% cp6 is alone (single node). cp6 pings cp0 and cp3 in 12 secs...
+ rpc_cast(Cp3, ?MODULE, part3,
+ [Config, self(), node(), Cp0, Cp1, Cp2, Cp3, Cp4, Cp5,Cp6]),
+ ?UNTIL(is_ready_partition(Config)),
+
+ %% start different processes in this partition
+ start_procs(self(), Cp0, Cp1, Cp2, Config),
+
+ {ok, Cp8} = start_peer_node(cp8, Config),
- ?line {ok, Cp8} = start_peer_node(cp8, Config),
-
monitor_node(Cp5, true),
receive
{nodedown, Cp5} -> ok
after
- 20000 -> test_server:fail({no_nodedown, Cp5})
+ 20000 -> ct:fail({no_nodedown, Cp5})
end,
monitor_node(Cp5, false),
- % Ok, now cp6 pings us, and cp5 will go down.
-
- % connect to other partition
- ?line pong = net_adm:ping(Cp3),
- ?line rpc_cast(Cp2, ?MODULE, crash, [0]),
-
- % Start new nodes
- ?line {ok, Cp7} = start_peer_node(cp7, Config),
- ?line {ok, Cp2_2} = start_peer_node(cp2, Config),
+ %% Ok, now cp6 pings us, and cp5 will go down.
+
+ %% connect to other partition
+ pong = net_adm:ping(Cp3),
+ rpc_cast(Cp2, ?MODULE, crash, [0]),
+
+ %% Start new nodes
+ {ok, Cp7} = start_peer_node(cp7, Config),
+ {ok, Cp2_2} = start_peer_node(cp2, Config),
Nodes = lists:sort([node(), Cp0, Cp1, Cp2_2, Cp3, Cp4, Cp6, Cp7, Cp8]),
put(?nodes_tag, Nodes),
- ?line pong = net_adm:ping(Cp4),
- ?line pong = net_adm:ping(Cp6),
- ?line pong = net_adm:ping(Cp8),
+ pong = net_adm:ping(Cp4),
+ pong = net_adm:ping(Cp6),
+ pong = net_adm:ping(Cp8),
- ?line wait_for_ready_net(Nodes, Config),
+ wait_for_ready_net(Nodes, Config),
- % Make sure that all nodes have the same picture of all names
- ?line check_everywhere(Nodes, test1, Config),
- ?line assert_pid(global:whereis_name(test1)),
-
- ?line check_everywhere(Nodes, test2, Config),
- ?line undefined = global:whereis_name(test2),
+ %% Make sure that all nodes have the same picture of all names
+ check_everywhere(Nodes, test1, Config),
+ assert_pid(global:whereis_name(test1)),
- ?line check_everywhere(Nodes, test3, Config),
- ?line assert_pid(global:whereis_name(test3)),
+ check_everywhere(Nodes, test2, Config),
+ undefined = global:whereis_name(test2),
- ?line check_everywhere(Nodes, test4, Config),
- ?line assert_pid(global:whereis_name(test4)),
+ check_everywhere(Nodes, test3, Config),
+ assert_pid(global:whereis_name(test3)),
- ?line check_everywhere(Nodes, test5, Config),
- ?line ?UNTIL(undefined =:= global:whereis_name(test5)),
-
- ?line assert_pid(global:send(test1, die)),
- ?line assert_pid(global:send(test3, die)),
- ?line assert_pid(global:send(test4, die)),
+ check_everywhere(Nodes, test4, Config),
+ assert_pid(global:whereis_name(test4)),
+
+ check_everywhere(Nodes, test5, Config),
+ ?UNTIL(undefined =:= global:whereis_name(test5)),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ assert_pid(global:send(test1, die)),
+ assert_pid(global:send(test3, die)),
+ assert_pid(global:send(test4, die)),
+
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_node(Cp0),
@@ -1344,15 +1332,13 @@ stress_partition(Config) when is_list(Config) ->
stop_node(Cp6),
stop_node(Cp7),
stop_node(Cp8),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
 %% Use this one to run a lot of connection tests
-%% erl -sname ts -rsh ctrsh -pa /clearcase/otp/internal_tools/test_server/ebin/ -ring_line 10000 -s test_server run_test global_SUITE
+%% erl -sname ts -ring_line 10000 -s test_server run_test global_SUITE
-ring_line(suite) -> [];
-ring_line(doc) -> [""];
ring_line(Config) when is_list(Config) ->
{ok, [[N]]} = init:get_argument(ring_line),
loop_it(list_to_integer(N), Config).
@@ -1361,74 +1347,70 @@ loop_it(N, Config) -> loop_it(N,N, Config).
loop_it(0,_, _Config) -> ok;
loop_it(N,M, Config) ->
- test_server:format(1, "Round: ~w", [M-N]),
+ ct:pal(?HI_VERBOSITY, "Round: ~w", [M-N]),
ring(Config),
line(Config),
loop_it(N-1,M, Config).
-ring(suite) ->
- [];
-ring(doc) ->
- ["Make 10 single nodes, all having the same name.",
- "Make all ping its predecessor, pinging in a ring.",
- "Make sure that there's just one winner."];
+%% Make 10 single nodes, all having the same name.
+%% Make each node ping its predecessor, pinging in a ring.
+%% Make sure that there's just one winner.
ring(Config) when is_list(Config) ->
Timeout = 60,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6, Cp7, Cp8]
+ [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6, Cp7, Cp8]
= start_nodes([cp0, cp1, cp2, cp3, cp4, cp5, cp6, cp7, cp8],
peer, Config),
Nodes = lists:sort([node(), Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6, Cp7, Cp8]),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
Time = msec() + 7000,
- ?line rpc_cast(Cp0, ?MODULE, single_node, [Time, Cp8, Config]),
- ?line rpc_cast(Cp1, ?MODULE, single_node, [Time, Cp0, Config]),
- ?line rpc_cast(Cp2, ?MODULE, single_node, [Time, Cp1, Config]),
- ?line rpc_cast(Cp3, ?MODULE, single_node, [Time, Cp2, Config]),
- ?line rpc_cast(Cp4, ?MODULE, single_node, [Time, Cp3, Config]),
- ?line rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
- ?line rpc_cast(Cp6, ?MODULE, single_node, [Time, Cp5, Config]),
- ?line rpc_cast(Cp7, ?MODULE, single_node, [Time, Cp6, Config]),
- ?line rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]),
-
- % sleep to make the partitioned net ready
- test_server:sleep(Time - msec()),
-
- ?line pong = net_adm:ping(Cp0),
- ?line pong = net_adm:ping(Cp1),
- ?line pong = net_adm:ping(Cp2),
- ?line pong = net_adm:ping(Cp3),
- ?line pong = net_adm:ping(Cp4),
- ?line pong = net_adm:ping(Cp5),
- ?line pong = net_adm:ping(Cp6),
- ?line pong = net_adm:ping(Cp7),
- ?line pong = net_adm:ping(Cp8),
-
- ?line pong = net_adm:ping(Cp0),
- ?line pong = net_adm:ping(Cp1),
- ?line pong = net_adm:ping(Cp2),
- ?line pong = net_adm:ping(Cp3),
- ?line pong = net_adm:ping(Cp4),
- ?line pong = net_adm:ping(Cp5),
- ?line pong = net_adm:ping(Cp6),
- ?line pong = net_adm:ping(Cp7),
- ?line pong = net_adm:ping(Cp8),
-
- ?line wait_for_ready_net(Nodes, Config),
-
- % Just make sure that all nodes have the same picture of all names
- ?line check_everywhere(Nodes, single_name, Config),
- ?line assert_pid(global:whereis_name(single_name)),
-
- ?line
+ rpc_cast(Cp0, ?MODULE, single_node, [Time, Cp8, Config]),
+ rpc_cast(Cp1, ?MODULE, single_node, [Time, Cp0, Config]),
+ rpc_cast(Cp2, ?MODULE, single_node, [Time, Cp1, Config]),
+ rpc_cast(Cp3, ?MODULE, single_node, [Time, Cp2, Config]),
+ rpc_cast(Cp4, ?MODULE, single_node, [Time, Cp3, Config]),
+ rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
+ rpc_cast(Cp6, ?MODULE, single_node, [Time, Cp5, Config]),
+ rpc_cast(Cp7, ?MODULE, single_node, [Time, Cp6, Config]),
+ rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]),
+
+ %% sleep to make the partitioned net ready
+ sleep(Time - msec()),
+
+ pong = net_adm:ping(Cp0),
+ pong = net_adm:ping(Cp1),
+ pong = net_adm:ping(Cp2),
+ pong = net_adm:ping(Cp3),
+ pong = net_adm:ping(Cp4),
+ pong = net_adm:ping(Cp5),
+ pong = net_adm:ping(Cp6),
+ pong = net_adm:ping(Cp7),
+ pong = net_adm:ping(Cp8),
+
+ pong = net_adm:ping(Cp0),
+ pong = net_adm:ping(Cp1),
+ pong = net_adm:ping(Cp2),
+ pong = net_adm:ping(Cp3),
+ pong = net_adm:ping(Cp4),
+ pong = net_adm:ping(Cp5),
+ pong = net_adm:ping(Cp6),
+ pong = net_adm:ping(Cp7),
+ pong = net_adm:ping(Cp8),
+
+ wait_for_ready_net(Nodes, Config),
+
+ %% Just make sure that all nodes have the same picture of all names
+ check_everywhere(Nodes, single_name, Config),
+ assert_pid(global:whereis_name(single_name)),
+
?UNTIL(begin
{Ns2, []} = rpc:multicall(Nodes, erlang, whereis,
[single_name]),
@@ -1437,10 +1419,10 @@ ring(Config) when is_list(Config) ->
end,
0, Ns2)
end),
-
- ?line assert_pid(global:send(single_name, die)),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ assert_pid(global:send(single_name, die)),
+
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_node(Cp0),
@@ -1452,64 +1434,60 @@ ring(Config) when is_list(Config) ->
stop_node(Cp6),
stop_node(Cp7),
stop_node(Cp8),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
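
The ring and line cases turn on how global resolves a name clash once the isolated nodes reconnect. For orientation only (not part of the patch): global:register_name/2 uses the resolver fun global:random_exit_name/3 by default, and a custom three-argument fun may choose the survivor. keep_first below is a hypothetical resolver, shown purely as a sketch.

%% Hypothetical resolver: when two partitions both registered the same
%% name, keep the lexically smaller pid. global calls the resolver with
%% the name and the two clashing pids as the partitions merge; the
%% default, fun global:random_exit_name/3, also terminates the loser.
keep_first(_Name, Pid1, Pid2) when Pid1 < Pid2 -> Pid1;
keep_first(_Name, _Pid1, Pid2) -> Pid2.

%% Used as: global:register_name(single_name, self(), fun keep_first/3).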
-simple_ring(suite) ->
- [];
-simple_ring(doc) ->
- ["Simpler version of the ring case. Used because there are some",
- "distribution problems with many nodes.",
- "Make 6 single nodes, all having the same name.",
- "Make all ping its predecessor, pinging in a ring.",
- "Make sure that there's just one winner."];
+%% Simpler version of the ring case. Used because there are some
+%% distribution problems with many nodes.
+%% Make 6 single nodes, all having the same name.
+%% Make each node ping its predecessor, pinging in a ring.
+%% Make sure that there's just one winner.
simple_ring(Config) when is_list(Config) ->
Timeout = 60,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
Names = [cp0, cp1, cp2, cp3, cp4, cp5],
- ?line [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5]
+ [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5]
= start_nodes(Names, peer, Config),
Nodes = lists:sort([node(), Cp0, Cp1, Cp2, Cp3, Cp4, Cp5]),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
Time = msec() + 5000,
- ?line rpc_cast(Cp0, ?MODULE, single_node, [Time, Cp5, Config]),
- ?line rpc_cast(Cp1, ?MODULE, single_node, [Time, Cp0, Config]),
- ?line rpc_cast(Cp2, ?MODULE, single_node, [Time, Cp1, Config]),
- ?line rpc_cast(Cp3, ?MODULE, single_node, [Time, Cp2, Config]),
- ?line rpc_cast(Cp4, ?MODULE, single_node, [Time, Cp3, Config]),
- ?line rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
-
- % sleep to make the partitioned net ready
- test_server:sleep(Time - msec()),
-
- ?line pong = net_adm:ping(Cp0),
- ?line pong = net_adm:ping(Cp1),
- ?line pong = net_adm:ping(Cp2),
- ?line pong = net_adm:ping(Cp3),
- ?line pong = net_adm:ping(Cp4),
- ?line pong = net_adm:ping(Cp5),
-
- ?line pong = net_adm:ping(Cp0),
- ?line pong = net_adm:ping(Cp1),
- ?line pong = net_adm:ping(Cp2),
- ?line pong = net_adm:ping(Cp3),
- ?line pong = net_adm:ping(Cp4),
- ?line pong = net_adm:ping(Cp5),
-
- ?line wait_for_ready_net(Nodes, Config),
-
- % Just make sure that all nodes have the same picture of all names
- ?line check_everywhere(Nodes, single_name, Config),
- ?line assert_pid(global:whereis_name(single_name)),
-
- ?line
+ rpc_cast(Cp0, ?MODULE, single_node, [Time, Cp5, Config]),
+ rpc_cast(Cp1, ?MODULE, single_node, [Time, Cp0, Config]),
+ rpc_cast(Cp2, ?MODULE, single_node, [Time, Cp1, Config]),
+ rpc_cast(Cp3, ?MODULE, single_node, [Time, Cp2, Config]),
+ rpc_cast(Cp4, ?MODULE, single_node, [Time, Cp3, Config]),
+ rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
+
+ %% sleep to make the partitioned net ready
+ sleep(Time - msec()),
+
+ pong = net_adm:ping(Cp0),
+ pong = net_adm:ping(Cp1),
+ pong = net_adm:ping(Cp2),
+ pong = net_adm:ping(Cp3),
+ pong = net_adm:ping(Cp4),
+ pong = net_adm:ping(Cp5),
+
+ pong = net_adm:ping(Cp0),
+ pong = net_adm:ping(Cp1),
+ pong = net_adm:ping(Cp2),
+ pong = net_adm:ping(Cp3),
+ pong = net_adm:ping(Cp4),
+ pong = net_adm:ping(Cp5),
+
+ wait_for_ready_net(Nodes, Config),
+
+ %% Just make sure that all nodes have the same picture of all names
+ check_everywhere(Nodes, single_name, Config),
+ assert_pid(global:whereis_name(single_name)),
+
?UNTIL(begin
{Ns2, []} = rpc:multicall(Nodes, erlang, whereis,
[single_name]),
@@ -1518,10 +1496,10 @@ simple_ring(Config) when is_list(Config) ->
end,
0, Ns2)
end),
-
- ?line assert_pid(global:send(single_name, die)),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ assert_pid(global:send(single_name, die)),
+
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_node(Cp0),
@@ -1530,72 +1508,68 @@ simple_ring(Config) when is_list(Config) ->
stop_node(Cp3),
stop_node(Cp4),
stop_node(Cp5),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-line(suite) ->
- [];
-line(doc) ->
- ["Make 6 single nodes, all having the same name.",
- "Make all ping its predecessor, pinging in a line.",
- "Make sure that there's just one winner."];
+%% Make 6 single nodes, all having the same name.
+%% Make each node ping its predecessor, pinging in a line.
+%% Make sure that there's just one winner.
line(Config) when is_list(Config) ->
Timeout = 60,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6, Cp7, Cp8]
+ [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6, Cp7, Cp8]
= start_nodes([cp0, cp1, cp2, cp3, cp4, cp5, cp6, cp7, cp8],
peer, Config),
Nodes = lists:sort([node(), Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6, Cp7, Cp8]),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
Time = msec() + 7000,
- ?line rpc_cast(Cp0, ?MODULE, single_node,
- [Time, Cp0, Config]), % ping ourself!
- ?line rpc_cast(Cp1, ?MODULE, single_node, [Time, Cp0, Config]),
- ?line rpc_cast(Cp2, ?MODULE, single_node, [Time, Cp1, Config]),
- ?line rpc_cast(Cp3, ?MODULE, single_node, [Time, Cp2, Config]),
- ?line rpc_cast(Cp4, ?MODULE, single_node, [Time, Cp3, Config]),
- ?line rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
- ?line rpc_cast(Cp6, ?MODULE, single_node, [Time, Cp5, Config]),
- ?line rpc_cast(Cp7, ?MODULE, single_node, [Time, Cp6, Config]),
- ?line rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]),
-
- % sleep to make the partitioned net ready
- test_server:sleep(Time - msec()),
-
- ?line pong = net_adm:ping(Cp0),
- ?line pong = net_adm:ping(Cp1),
- ?line pong = net_adm:ping(Cp2),
- ?line pong = net_adm:ping(Cp3),
- ?line pong = net_adm:ping(Cp4),
- ?line pong = net_adm:ping(Cp5),
- ?line pong = net_adm:ping(Cp6),
- ?line pong = net_adm:ping(Cp7),
- ?line pong = net_adm:ping(Cp8),
-
- ?line pong = net_adm:ping(Cp0),
- ?line pong = net_adm:ping(Cp1),
- ?line pong = net_adm:ping(Cp2),
- ?line pong = net_adm:ping(Cp3),
- ?line pong = net_adm:ping(Cp4),
- ?line pong = net_adm:ping(Cp5),
- ?line pong = net_adm:ping(Cp6),
- ?line pong = net_adm:ping(Cp7),
- ?line pong = net_adm:ping(Cp8),
-
- ?line wait_for_ready_net(Nodes, Config),
-
- % Just make sure that all nodes have the same picture of all names
- ?line check_everywhere(Nodes, single_name, Config),
- ?line assert_pid(global:whereis_name(single_name)),
-
- ?line
+ rpc_cast(Cp0, ?MODULE, single_node,
+ [Time, Cp0, Config]), % ping ourself!
+ rpc_cast(Cp1, ?MODULE, single_node, [Time, Cp0, Config]),
+ rpc_cast(Cp2, ?MODULE, single_node, [Time, Cp1, Config]),
+ rpc_cast(Cp3, ?MODULE, single_node, [Time, Cp2, Config]),
+ rpc_cast(Cp4, ?MODULE, single_node, [Time, Cp3, Config]),
+ rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
+ rpc_cast(Cp6, ?MODULE, single_node, [Time, Cp5, Config]),
+ rpc_cast(Cp7, ?MODULE, single_node, [Time, Cp6, Config]),
+ rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]),
+
+ %% Sleep to make the partitioned net ready
+ sleep(Time - msec()),
+
+ pong = net_adm:ping(Cp0),
+ pong = net_adm:ping(Cp1),
+ pong = net_adm:ping(Cp2),
+ pong = net_adm:ping(Cp3),
+ pong = net_adm:ping(Cp4),
+ pong = net_adm:ping(Cp5),
+ pong = net_adm:ping(Cp6),
+ pong = net_adm:ping(Cp7),
+ pong = net_adm:ping(Cp8),
+
+ pong = net_adm:ping(Cp0),
+ pong = net_adm:ping(Cp1),
+ pong = net_adm:ping(Cp2),
+ pong = net_adm:ping(Cp3),
+ pong = net_adm:ping(Cp4),
+ pong = net_adm:ping(Cp5),
+ pong = net_adm:ping(Cp6),
+ pong = net_adm:ping(Cp7),
+ pong = net_adm:ping(Cp8),
+
+ wait_for_ready_net(Nodes, Config),
+
+ %% Just make sure that all nodes have the same picture of all names
+ check_everywhere(Nodes, single_name, Config),
+ assert_pid(global:whereis_name(single_name)),
+
?UNTIL(begin
{Ns2, []} = rpc:multicall(Nodes, erlang, whereis,
[single_name]),
@@ -1604,10 +1578,10 @@ line(Config) when is_list(Config) ->
end,
0, Ns2)
end),
-
- ?line assert_pid(global:send(single_name, die)),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ assert_pid(global:send(single_name, die)),
+
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_node(Cp0),
@@ -1619,65 +1593,61 @@ line(Config) when is_list(Config) ->
stop_node(Cp6),
stop_node(Cp7),
stop_node(Cp8),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-simple_line(suite) ->
- [];
-simple_line(doc) ->
- ["Simpler version of the line case. Used because there are some",
- "distribution problems with many nodes.",
- "Make 6 single nodes, all having the same name.",
- "Make all ping its predecessor, pinging in a line.",
- "Make sure that there's just one winner."];
+%% Simpler version of the line case. Used because there are some
+%% distribution problems with many nodes.
+%% Make 6 single nodes, all having the same name.
+%% Make each node ping its predecessor, pinging in a line.
+%% Make sure that there's just one winner.
simple_line(Config) when is_list(Config) ->
Timeout = 60,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5]
+ [Cp0, Cp1, Cp2, Cp3, Cp4, Cp5]
= start_nodes([cp0, cp1, cp2, cp3, cp4, cp5], peer, Config),
Nodes = lists:sort([node(), Cp0, Cp1, Cp2, Cp3, Cp4, Cp5]),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
Time = msec() + 5000,
- ?line rpc_cast(Cp0, ?MODULE, single_node,
- [Time, Cp0, Config]), % ping ourself!
- ?line rpc_cast(Cp1, ?MODULE, single_node, [Time, Cp0, Config]),
- ?line rpc_cast(Cp2, ?MODULE, single_node, [Time, Cp1, Config]),
- ?line rpc_cast(Cp3, ?MODULE, single_node, [Time, Cp2, Config]),
- ?line rpc_cast(Cp4, ?MODULE, single_node, [Time, Cp3, Config]),
- ?line rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
-
- % sleep to make the partitioned net ready
- test_server:sleep(Time - msec()),
-
- ?line pong = net_adm:ping(Cp0),
- ?line pong = net_adm:ping(Cp1),
- ?line pong = net_adm:ping(Cp2),
- ?line pong = net_adm:ping(Cp3),
- ?line pong = net_adm:ping(Cp4),
- ?line pong = net_adm:ping(Cp5),
-
- ?line pong = net_adm:ping(Cp0),
- ?line pong = net_adm:ping(Cp1),
- ?line pong = net_adm:ping(Cp2),
- ?line pong = net_adm:ping(Cp3),
- ?line pong = net_adm:ping(Cp4),
- ?line pong = net_adm:ping(Cp5),
-
- ?line wait_for_ready_net(Nodes, Config),
-
- % Just make sure that all nodes have the same picture of all names
- ?line check_everywhere(Nodes, single_name, Config),
- ?line assert_pid(global:whereis_name(single_name)),
-
- ?line
+ rpc_cast(Cp0, ?MODULE, single_node,
+ [Time, Cp0, Config]), % ping ourself!
+ rpc_cast(Cp1, ?MODULE, single_node, [Time, Cp0, Config]),
+ rpc_cast(Cp2, ?MODULE, single_node, [Time, Cp1, Config]),
+ rpc_cast(Cp3, ?MODULE, single_node, [Time, Cp2, Config]),
+ rpc_cast(Cp4, ?MODULE, single_node, [Time, Cp3, Config]),
+ rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
+
+ %% sleep to make the partitioned net ready
+ sleep(Time - msec()),
+
+ pong = net_adm:ping(Cp0),
+ pong = net_adm:ping(Cp1),
+ pong = net_adm:ping(Cp2),
+ pong = net_adm:ping(Cp3),
+ pong = net_adm:ping(Cp4),
+ pong = net_adm:ping(Cp5),
+
+ pong = net_adm:ping(Cp0),
+ pong = net_adm:ping(Cp1),
+ pong = net_adm:ping(Cp2),
+ pong = net_adm:ping(Cp3),
+ pong = net_adm:ping(Cp4),
+ pong = net_adm:ping(Cp5),
+
+ wait_for_ready_net(Nodes, Config),
+
+ %% Just make sure that all nodes have the same picture of all names
+ check_everywhere(Nodes, single_name, Config),
+ assert_pid(global:whereis_name(single_name)),
+
?UNTIL(begin
{Ns2, []} = rpc:multicall(Nodes, erlang, whereis,
[single_name]),
@@ -1686,10 +1656,10 @@ simple_line(Config) when is_list(Config) ->
end,
0, Ns2)
end),
-
- ?line assert_pid(global:send(single_name, die)),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ assert_pid(global:send(single_name, die)),
+
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_node(Cp0),
@@ -1698,82 +1668,74 @@ simple_line(Config) when is_list(Config) ->
stop_node(Cp3),
stop_node(Cp4),
stop_node(Cp5),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-
-otp_1849(suite) -> [];
-otp_1849(doc) ->
- ["Test ticket: Global should keep track of all pids that set the same lock."];
+
+%% Test ticket: Global should keep track of all pids that set the same lock.
otp_1849(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line {ok, Cp1} = start_node(cp1, Config),
- ?line {ok, Cp2} = start_node(cp2, Config),
- ?line {ok, Cp3} = start_node(cp3, Config),
-
- ?line wait_for_ready_net(Config),
-
- % start procs on each node
- ?line Pid1 = rpc:call(Cp1, ?MODULE, start_proc, []),
- ?line assert_pid(Pid1),
- ?line Pid2 = rpc:call(Cp2, ?MODULE, start_proc, []),
- ?line assert_pid(Pid2),
- ?line Pid3 = rpc:call(Cp3, ?MODULE, start_proc, []),
- ?line assert_pid(Pid3),
-
- % set a lock on every node
- ?line true = req(Pid1, {set_lock2, {test_lock, ?MODULE}, self()}),
- ?line true = req(Pid2, {set_lock2, {test_lock, ?MODULE}, self()}),
- ?line true = req(Pid3, {set_lock2, {test_lock, ?MODULE}, self()}),
-
- ?line
- ?UNTIL(begin
+ init_condition(Config),
+ {ok, Cp1} = start_node(cp1, Config),
+ {ok, Cp2} = start_node(cp2, Config),
+ {ok, Cp3} = start_node(cp3, Config),
+
+ wait_for_ready_net(Config),
+
+ %% start procs on each node
+ Pid1 = rpc:call(Cp1, ?MODULE, start_proc, []),
+ assert_pid(Pid1),
+ Pid2 = rpc:call(Cp2, ?MODULE, start_proc, []),
+ assert_pid(Pid2),
+ Pid3 = rpc:call(Cp3, ?MODULE, start_proc, []),
+ assert_pid(Pid3),
+
+ %% set a lock on every node
+ true = req(Pid1, {set_lock2, {test_lock, ?MODULE}, self()}),
+ true = req(Pid2, {set_lock2, {test_lock, ?MODULE}, self()}),
+ true = req(Pid3, {set_lock2, {test_lock, ?MODULE}, self()}),
+
+ ?UNTIL(begin
[{test_lock, ?MODULE, Lock1}] =
rpc:call(Cp1, ets, tab2list, [global_locks]),
3 =:= length(Lock1)
end),
- ?line true = req(Pid3, {del_lock2, {test_lock, ?MODULE}, self()}),
- ?line
+ true = req(Pid3, {del_lock2, {test_lock, ?MODULE}, self()}),
?UNTIL(begin
[{test_lock, ?MODULE, Lock2}] =
rpc:call(Cp1, ets, tab2list, [global_locks]),
2 =:= length(Lock2)
end),
- ?line true = req(Pid2, {del_lock2, {test_lock, ?MODULE}, self()}),
- ?line
+ true = req(Pid2, {del_lock2, {test_lock, ?MODULE}, self()}),
?UNTIL(begin
[{test_lock, ?MODULE, Lock3}] =
rpc:call(Cp1, ets, tab2list, [global_locks]),
1 =:= length(Lock3)
end),
- ?line true = req(Pid1, {del_lock2, {test_lock, ?MODULE}, self()}),
- ?line ?UNTIL([] =:= rpc:call(Cp1, ets, tab2list, [global_locks])),
+ true = req(Pid1, {del_lock2, {test_lock, ?MODULE}, self()}),
+ ?UNTIL([] =:= rpc:call(Cp1, ets, tab2list, [global_locks])),
- ?line true = req(Pid1, {set_lock2, {test_lock, ?MODULE}, self()}),
- ?line true = req(Pid2, {set_lock2, {test_lock, ?MODULE}, self()}),
- ?line true = req(Pid3, {set_lock2, {test_lock, ?MODULE}, self()}),
- ?line false = req(Pid2, {set_lock2, {test_lock, not_valid}, self()}),
+ true = req(Pid1, {set_lock2, {test_lock, ?MODULE}, self()}),
+ true = req(Pid2, {set_lock2, {test_lock, ?MODULE}, self()}),
+ true = req(Pid3, {set_lock2, {test_lock, ?MODULE}, self()}),
+ false = req(Pid2, {set_lock2, {test_lock, not_valid}, self()}),
exit_p(Pid1),
- ?line
?UNTIL(begin
[{test_lock, ?MODULE, Lock10}] =
rpc:call(Cp1, ets, tab2list, [global_locks]),
2 =:= length(Lock10)
end),
- ?line
?UNTIL(begin
[{test_lock, ?MODULE, Lock11}] =
rpc:call(Cp2, ets, tab2list, [global_locks]),
2 =:= length(Lock11)
end),
- ?line
?UNTIL(begin
[{test_lock, ?MODULE, Lock12}] =
rpc:call(Cp3, ets, tab2list, [global_locks]),
@@ -1784,13 +1746,11 @@ otp_1849(Config) when is_list(Config) ->
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
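
Not part of the patch, but a minimal sketch of the lock sharing that otp_1849 verifies, assuming only the documented global:set_lock/2 and global:del_lock/2 semantics (shared_lock_demo, my_resource and shared_owner are made-up names):

%% Two processes take the same lock id. Because the LockRequesterId
%% (shared_owner) is the same, the second set_lock/2 does not block;
%% global simply records one more holder of the lock.
shared_lock_demo() ->
    Id = {my_resource, shared_owner},
    Nodes = [node()],
    true = global:set_lock(Id, Nodes),
    Parent = self(),
    _Helper = spawn(fun() ->
                            true = global:set_lock(Id, Nodes),
                            Parent ! locked,
                            true = global:del_lock(Id, Nodes)
                    end),
    receive locked -> ok end,
    %% The lock is still held here until this process deletes it too.
    true = global:del_lock(Id, Nodes),
    ok.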
-
-otp_3162(suite) -> [];
-otp_3162(doc) ->
- ["Test ticket: Deadlock in global"];
+
+%% Test ticket: Deadlock in global.
otp_3162(Config) when is_list(Config) ->
StartFun = fun() ->
{ok, Cp1} = start_node(cp1, Config),
@@ -1804,77 +1764,74 @@ do_otp_3162(StartFun, Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line [Cp1, Cp2, Cp3] = StartFun(),
+ init_condition(Config),
+ [Cp1, Cp2, Cp3] = StartFun(),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
- % start procs on each node
- ?line Pid1 = rpc:call(Cp1, ?MODULE, start_proc4, [kalle]),
- ?line assert_pid(Pid1),
- ?line Pid2 = rpc:call(Cp2, ?MODULE, start_proc4, [stina]),
- ?line assert_pid(Pid2),
- ?line Pid3 = rpc:call(Cp3, ?MODULE, start_proc4, [vera]),
- ?line assert_pid(Pid3),
+ %% start procs on each node
+ Pid1 = rpc:call(Cp1, ?MODULE, start_proc4, [kalle]),
+ assert_pid(Pid1),
+ Pid2 = rpc:call(Cp2, ?MODULE, start_proc4, [stina]),
+ assert_pid(Pid2),
+ Pid3 = rpc:call(Cp3, ?MODULE, start_proc4, [vera]),
+ assert_pid(Pid3),
- ?line rpc_disconnect_node(Cp1, Cp2, Config),
+ rpc_disconnect_node(Cp1, Cp2, Config),
- ?line ?UNTIL
+ ?UNTIL
([Cp3] =:= lists:sort(rpc:call(Cp1, erlang, nodes, [])) -- [node()]),
?UNTIL([kalle, vera] =:=
- lists:sort(rpc:call(Cp1, global, registered_names, []))),
- ?line ?UNTIL
+ lists:sort(rpc:call(Cp1, global, registered_names, []))),
+ ?UNTIL
([Cp3] =:= lists:sort(rpc:call(Cp2, erlang, nodes, [])) -- [node()]),
?UNTIL([stina, vera] =:=
- lists:sort(rpc:call(Cp2, global, registered_names, []))),
- ?line ?UNTIL
+ lists:sort(rpc:call(Cp2, global, registered_names, []))),
+ ?UNTIL
([Cp1, Cp2] =:=
- lists:sort(rpc:call(Cp3, erlang, nodes, [])) -- [node()]),
+ lists:sort(rpc:call(Cp3, erlang, nodes, [])) -- [node()]),
?UNTIL([kalle, stina, vera] =:=
- lists:sort(rpc:call(Cp3, global, registered_names, []))),
+ lists:sort(rpc:call(Cp3, global, registered_names, []))),
- ?line pong = rpc:call(Cp2, net_adm, ping, [Cp1]),
+ pong = rpc:call(Cp2, net_adm, ping, [Cp1]),
- ?line ?UNTIL
+ ?UNTIL
([Cp2, Cp3] =:=
- lists:sort(rpc:call(Cp1, erlang, nodes, [])) -- [node()]),
- ?line
+ lists:sort(rpc:call(Cp1, erlang, nodes, [])) -- [node()]),
?UNTIL(begin
NN = lists:sort(rpc:call(Cp1, global, registered_names, [])),
[kalle, stina, vera] =:= NN
end),
- ?line ?UNTIL
+ ?UNTIL
([Cp1, Cp3] =:=
- lists:sort(rpc:call(Cp2, erlang, nodes, [])) -- [node()]),
+ lists:sort(rpc:call(Cp2, erlang, nodes, [])) -- [node()]),
?UNTIL([kalle, stina, vera] =:=
- lists:sort(rpc:call(Cp2, global, registered_names, []))),
- ?line ?UNTIL
+ lists:sort(rpc:call(Cp2, global, registered_names, []))),
+ ?UNTIL
([Cp1, Cp2] =:=
- lists:sort(rpc:call(Cp3, erlang, nodes, [])) -- [node()]),
+ lists:sort(rpc:call(Cp3, erlang, nodes, [])) -- [node()]),
?UNTIL([kalle, stina, vera] =:=
- lists:sort(rpc:call(Cp3, global, registered_names, []))),
+ lists:sort(rpc:call(Cp3, global, registered_names, []))),
write_high_level_trace(Config),
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-
-otp_5640(suite) -> [];
-otp_5640(doc) ->
- ["OTP-5640. 'allow' multiple names for registered processes."];
+
+%% OTP-5640. 'allow' multiple names for registered processes.
otp_5640(Config) when is_list(Config) ->
Timeout = 25,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
init_condition(Config),
- ?line {ok, B} = start_node(b, Config),
+ {ok, B} = start_node(b, Config),
- ?line Nodes = lists:sort([node(), B]),
- ?line wait_for_ready_net(Nodes, Config),
+ Nodes = lists:sort([node(), B]),
+ wait_for_ready_net(Nodes, Config),
Server = whereis(global_name_server),
ServerB = rpc:call(B, erlang, whereis, [global_name_server]),
@@ -1882,50 +1839,50 @@ otp_5640(Config) when is_list(Config) ->
Me = self(),
Proc = spawn(fun() -> otp_5640_proc(Me) end),
- ?line yes = global:register_name(name1, Proc),
- ?line no = global:register_name(name2, Proc),
+ yes = global:register_name(name1, Proc),
+ no = global:register_name(name2, Proc),
- ?line ok = application:set_env(kernel, global_multi_name_action, allow),
- ?line yes = global:register_name(name2, Proc),
+ ok = application:set_env(kernel, global_multi_name_action, allow),
+ yes = global:register_name(name2, Proc),
- test_server:sleep(100),
- ?line Proc = global:whereis_name(name1),
- ?line Proc = global:whereis_name(name2),
- ?line check_everywhere(Nodes, name1, Config),
- ?line check_everywhere(Nodes, name2, Config),
+ ct:sleep(100),
+ Proc = global:whereis_name(name1),
+ Proc = global:whereis_name(name2),
+ check_everywhere(Nodes, name1, Config),
+ check_everywhere(Nodes, name2, Config),
- ?line {monitors_2levels, MonBy1} = mon_by_servers(Proc),
- ?line [] = ([Server,Server,ServerB,ServerB] -- MonBy1),
- ?line {links,[]} = process_info(Proc, links),
- ?line _ = global:unregister_name(name1),
+ {monitors_2levels, MonBy1} = mon_by_servers(Proc),
+ [] = ([Server,Server,ServerB,ServerB] -- MonBy1),
+ {links,[]} = process_info(Proc, links),
+ _ = global:unregister_name(name1),
- test_server:sleep(100),
- ?line undefined = global:whereis_name(name1),
- ?line Proc = global:whereis_name(name2),
- ?line check_everywhere(Nodes, name1, Config),
- ?line check_everywhere(Nodes, name2, Config),
+ ct:sleep(100),
+ undefined = global:whereis_name(name1),
+ Proc = global:whereis_name(name2),
+ check_everywhere(Nodes, name1, Config),
+ check_everywhere(Nodes, name2, Config),
- ?line {monitors_2levels, MonBy2} = mon_by_servers(Proc),
- ?line [] = ([Server,ServerB] -- MonBy2),
+ {monitors_2levels, MonBy2} = mon_by_servers(Proc),
+ [] = ([Server,ServerB] -- MonBy2),
TmpMonBy2 = MonBy2 -- [Server,ServerB],
- ?line TmpMonBy2 = TmpMonBy2 -- [Server,ServerB],
- ?line {links,[]} = process_info(Proc, links),
+ TmpMonBy2 = TmpMonBy2 -- [Server,ServerB],
+ {links,[]} = process_info(Proc, links),
- ?line yes = global:register_name(name1, Proc),
+ yes = global:register_name(name1, Proc),
Proc ! die,
- test_server:sleep(100),
- ?line undefined = global:whereis_name(name1),
- ?line undefined = global:whereis_name(name2),
- ?line check_everywhere(Nodes, name1, Config),
- ?line check_everywhere(Nodes, name2, Config),
- ?line {monitors, GMonitors} = process_info(Server, monitors),
- ?line false = lists:member({process, Proc}, GMonitors),
+ ct:sleep(100),
+ undefined = global:whereis_name(name1),
+ undefined = global:whereis_name(name2),
+ check_everywhere(Nodes, name1, Config),
+ check_everywhere(Nodes, name2, Config),
+ {monitors, GMonitors} = process_info(Server, monitors),
+ false = lists:member({process, Proc}, GMonitors),
write_high_level_trace(Config),
stop_node(B),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
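
A small sketch (not from the patch) of the behaviour otp_5640 pins down; multi_name_demo is an illustrative name, and the snippet assumes an otherwise default kernel environment:

%% By default a process may hold one global name; setting the kernel
%% variable global_multi_name_action to 'allow' permits more. When the
%% process dies, global drops all of its names again.
multi_name_demo() ->
    Pid = spawn(fun() -> receive stop -> ok end end),
    yes = global:register_name(alpha, Pid),
    no = global:register_name(beta, Pid),   % refused: Pid is already named
    ok = application:set_env(kernel, global_multi_name_action, allow),
    yes = global:register_name(beta, Pid),  % accepted now
    Pid = global:whereis_name(alpha),
    Pid = global:whereis_name(beta),
    Pid ! stop,
    ok.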
otp_5640_proc(_Parent) ->
@@ -1934,45 +1891,42 @@ otp_5640_proc(_Parent) ->
exit(normal)
end.
-otp_5737(suite) -> [];
-otp_5737(doc) ->
- ["OTP-5737. set_lock/3 and trans/4 accept Retries = 0."];
+%% OTP-5737. set_lock/3 and trans/4 accept Retries = 0.
otp_5737(Config) when is_list(Config) ->
Timeout = 25,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
+ init_condition(Config),
LockId = {?MODULE,self()},
Nodes = [node()],
- ?line {'EXIT', _} = (catch global:set_lock(LockId, Nodes, -1)),
- ?line {'EXIT', _} = (catch global:set_lock(LockId, Nodes, a)),
- ?line true = global:set_lock(LockId, Nodes, 0),
+ {'EXIT', _} = (catch global:set_lock(LockId, Nodes, -1)),
+ {'EXIT', _} = (catch global:set_lock(LockId, Nodes, a)),
+ true = global:set_lock(LockId, Nodes, 0),
Time1 = now(),
- ?line false = global:set_lock({?MODULE,not_me}, Nodes, 0),
- ?line true = timer:now_diff(now(), Time1) < 5000,
- ?line _ = global:del_lock(LockId, Nodes),
+ false = global:set_lock({?MODULE,not_me}, Nodes, 0),
+ true = timer:now_diff(now(), Time1) < 5000,
+ _ = global:del_lock(LockId, Nodes),
Fun = fun() -> ok end,
- ?line {'EXIT', _} = (catch global:trans(LockId, Fun, Nodes, -1)),
- ?line {'EXIT', _} = (catch global:trans(LockId, Fun, Nodes, a)),
- ?line ok = global:trans(LockId, Fun, Nodes, 0),
+ {'EXIT', _} = (catch global:trans(LockId, Fun, Nodes, -1)),
+ {'EXIT', _} = (catch global:trans(LockId, Fun, Nodes, a)),
+ ok = global:trans(LockId, Fun, Nodes, 0),
write_high_level_trace(Config),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
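
Again not part of the patch: a sketch of what Retries = 0 buys a caller (zero_retries_demo, my_resource and other_requester are made-up names):

%% With Retries = 0, set_lock/3 makes exactly one attempt and returns
%% false at once when the resource is locked by another requester;
%% trans/4 returns 'aborted' in the same situation instead of waiting.
zero_retries_demo() ->
    Nodes = [node()],
    LockId = {my_resource, self()},
    true = global:set_lock(LockId, Nodes, 0),
    false = global:set_lock({my_resource, other_requester}, Nodes, 0),
    aborted = global:trans({my_resource, other_requester},
                           fun() -> never_runs end, Nodes, 0),
    true = global:del_lock(LockId, Nodes),
    ok = global:trans(LockId, fun() -> ok end, Nodes, 0),
    ok.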
-otp_6931(suite) -> [];
-otp_6931(doc) -> ["OTP-6931. Ignore nodeup when connect_all=false."];
+%% OTP-6931. Ignore nodeup when connect_all=false.
otp_6931(Config) when is_list(Config) ->
Me = self(),
- ?line {ok, CAf} = start_non_connecting_node(ca_false, Config),
- ?line ok = rpc:call(CAf, error_logger, add_report_handler, [?MODULE, Me]),
- ?line info = rpc:call(CAf, error_logger, warning_map, []),
- ?line {global_name_server,CAf} ! {nodeup, fake_node},
+ {ok, CAf} = start_non_connecting_node(ca_false, Config),
+ ok = rpc:call(CAf, error_logger, add_report_handler, [?MODULE, Me]),
+ info = rpc:call(CAf, error_logger, warning_map, []),
+ {global_name_server,CAf} ! {nodeup, fake_node},
timer:sleep(100),
stop_node(CAf),
- receive {nodeup,fake_node} -> test_server:fail({info_report, was, sent})
+ receive {nodeup,fake_node} -> ct:fail({info_report, was, sent})
after 1000 -> ok
end,
ok.
@@ -1980,18 +1934,17 @@ otp_6931(Config) when is_list(Config) ->
%%%-----------------------------------------------------------------
%%% Testing a disconnected node. Not two partitions.
%%%-----------------------------------------------------------------
-simple_disconnect(suite) -> [];
-simple_disconnect(doc) -> ["OTP-5563. Disconnected nodes (not partitions)"];
+%% OTP-5563. Disconnected nodes (not partitions).
simple_disconnect(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
%% Three nodes (test_server, n_1, n_2).
- ?line [Cp1, Cp2] = Cps = start_nodes([n_1, n_2], peer, Config),
- ?line wait_for_ready_net(Config),
+ [Cp1, Cp2] = Cps = start_nodes([n_1, n_2], peer, Config),
+ wait_for_ready_net(Config),
Nodes = lists:sort([node() | Cps]),
@@ -2001,39 +1954,39 @@ simple_disconnect(Config) when is_list(Config) ->
Resolver = {no_module, resolve_none}, % will never be called
PingNode = Cp2,
- ?line {_Pid1, yes} =
+ {_Pid1, yes} =
rpc:call(Cp1, ?MODULE, start_resolver, [Name, Resolver]),
- test_server:sleep(100),
+ ct:sleep(100),
%% Disconnect test_server and Cp2.
- ?line true = erlang:disconnect_node(Cp2),
- test_server:sleep(500),
+ true = erlang:disconnect_node(Cp2),
+ ct:sleep(500),
%% _Pid is registered on Cp1. The exchange of names between Cp2 and
%% test_server sees two identical pids.
- ?line pong = net_adm:ping(PingNode),
- ?line ?UNTIL(Cps =:= lists:sort(nodes())),
+ pong = net_adm:ping(PingNode),
+ ?UNTIL(Cps =:= lists:sort(nodes())),
- ?line {_, Trace0} = collect_tracers(Nodes),
- ?line Resolvers = [P || {_Node,new_resolver,{pid,P}} <- Trace0],
- ?line lists:foreach(fun(P) -> P ! die end, Resolvers),
- ?line lists:foreach(fun(P) -> wait_for_exit(P) end, Resolvers),
- ?line check_everywhere(Nodes, Name, Config),
- ?line undefined = global:whereis_name(Name),
+ {_, Trace0} = collect_tracers(Nodes),
+ Resolvers = [P || {_Node,new_resolver,{pid,P}} <- Trace0],
+ lists:foreach(fun(P) -> P ! die end, Resolvers),
+ lists:foreach(fun(P) -> wait_for_exit(P) end, Resolvers),
+ check_everywhere(Nodes, Name, Config),
+ undefined = global:whereis_name(Name),
- ?line {_, Trace1} = collect_tracers(Nodes),
+ {_, Trace1} = collect_tracers(Nodes),
Trace = Trace0 ++ Trace1,
- ?line [] = [foo || {_, resolve_none, _, _} <- Trace],
+ [] = [foo || {_, resolve_none, _, _} <- Trace],
- ?line Gs = name_servers(Nodes),
- ?line [_, _, _] = monitored_by_node(Trace, Gs),
+ Gs = name_servers(Nodes),
+ [_, _, _] = monitored_by_node(Trace, Gs),
lists:foreach(fun(N) -> rpc:call(N, ?MODULE, stop_tracer, []) end, Nodes),
- ?line OrigNames = global:registered_names(),
+ OrigNames = global:registered_names(),
write_high_level_trace(Config),
stop_nodes(Cps),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
%% Not used right now.
@@ -2052,7 +2005,7 @@ simple_dis(Nodes0, Name, Resolver, Config) ->
simple_dis_node(_Node, DisNodes, _Name, _Resolver, Config) ->
lists:foreach(
fun(OtherNode) -> _ = erlang:disconnect_node(OtherNode) end, DisNodes),
- ?line ?UNTIL(DisNodes -- nodes() =:= DisNodes),
+ ?UNTIL(DisNodes -- nodes() =:= DisNodes),
ok.
@@ -2072,19 +2025,18 @@ simple_dis_node(_Node, DisNodes, _Name, _Resolver, Config) ->
-define(RES(F), {F, fun ?MODULE:F/3}).
-simple_resolve(suite) -> [];
-simple_resolve(doc) -> ["OTP-5563. Partitions and names."];
+%% OTP-5563. Partitions and names.
simple_resolve(Config) when is_list(Config) ->
Timeout = 360,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line [N1, A2, Z2] = Cps = start_nodes([n_1, a_2, z_2], peer, Config),
+ [N1, A2, Z2] = Cps = start_nodes([n_1, a_2, z_2], peer, Config),
Nodes = lists:sort([node() | Cps]),
- ?line wait_for_ready_net(Config),
-
+ wait_for_ready_net(Config),
+
lists:foreach(fun(N) ->
rpc:call(N, ?MODULE, start_tracer, [])
end, Nodes),
@@ -2193,20 +2145,19 @@ simple_resolve(Config) when is_list(Config) ->
%% then a new attempt (nodeup etc.) is made. This time the
%% resolver does not disconnect any node.
res(?RES(disconnect_first), Cps, Cf#cf{link = Z2, n2 = Z2,
- nodes = [node(), N1, A2, Z2]}),
+ nodes = [node(), N1, A2, Z2]}),
- ?line lists:foreach(fun(N) ->
- rpc:call(N, ?MODULE, stop_tracer, [])
- end, Nodes),
+ lists:foreach(fun(N) ->
+ rpc:call(N, ?MODULE, stop_tracer, [])
+ end, Nodes),
- ?line OrigNames = global:registered_names(),
+ OrigNames = global:registered_names(),
write_high_level_trace(Config),
stop_nodes(Cps),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-simple_resolve2(suite) -> [];
-simple_resolve2(doc) -> ["OTP-5563. Partitions and names."];
+%% OTP-5563. Partitions and names.
simple_resolve2(Config) when is_list(Config) ->
    %% Continuation of simple_resolve. For some reason it did not
%% always work to re-start z_2. "Cannot be a global bug."
@@ -2214,13 +2165,13 @@ simple_resolve2(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line [N1, A2, Z2] = Cps = start_nodes([n_1, a_2, z_2], peer, Config),
- ?line wait_for_ready_net(Config),
+ [N1, A2, Z2] = Cps = start_nodes([n_1, a_2, z_2], peer, Config),
+ wait_for_ready_net(Config),
Nodes = lists:sort([node() | Cps]),
-
+
lists:foreach(fun(N) ->
rpc:call(N, ?MODULE, start_tracer, [])
end, Nodes),
@@ -2230,33 +2181,32 @@ simple_resolve2(Config) when is_list(Config) ->
%% Halt z_2.
res(?RES(halt_second), Cps, Cf#cf{link = N1, n1 = N1, n2 = Z2, ping = A2,
- nodes = [node(), N1, A2], n_res = 1}),
+ nodes = [node(), N1, A2], n_res = 1}),
- ?line lists:foreach(fun(N) ->
- rpc:call(N, ?MODULE, stop_tracer, [])
- end, Nodes),
+ lists:foreach(fun(N) ->
+ rpc:call(N, ?MODULE, stop_tracer, [])
+ end, Nodes),
- ?line OrigNames = global:registered_names(),
+ OrigNames = global:registered_names(),
write_high_level_trace(Config),
stop_nodes(Cps), % Not all nodes may be present, but it works anyway.
- ?line init_condition(Config),
+ init_condition(Config),
ok.
-simple_resolve3(suite) -> [];
-simple_resolve3(doc) -> ["OTP-5563. Partitions and names."];
+%% OTP-5563. Partitions and names.
simple_resolve3(Config) when is_list(Config) ->
%% Continuation of simple_resolve.
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line [N1, A2, Z2] = Cps = start_nodes([n_1, a_2, z_2], peer, Config),
- ?line wait_for_ready_net(Config),
+ [N1, A2, Z2] = Cps = start_nodes([n_1, a_2, z_2], peer, Config),
+ wait_for_ready_net(Config),
Nodes = lists:sort([node() | Cps]),
-
+
lists:foreach(fun(N) ->
rpc:call(N, ?MODULE, start_tracer, [])
end, Nodes),
@@ -2267,27 +2217,27 @@ simple_resolve3(Config) when is_list(Config) ->
%% Halt a_2.
res(?RES(halt_second), Cps, Cf#cf{link = node(), n2 = A2,
nodes = [node(), N1], n_res = 1}),
-
- ?line lists:foreach(fun(N) ->
- rpc:call(N, ?MODULE, stop_tracer, [])
- end, Nodes),
- ?line OrigNames = global:registered_names(),
+ lists:foreach(fun(N) ->
+ rpc:call(N, ?MODULE, stop_tracer, [])
+ end, Nodes),
+
+ OrigNames = global:registered_names(),
write_high_level_trace(Config),
stop_nodes(Cps), % Not all nodes may be present, but it works anyway.
- ?line init_condition(Config),
+ init_condition(Config),
ok.
res({Res,Resolver}, [N1, A2, Z2], Cf) ->
%% Note: there are no links anymore, but monitors.
#cf{link = LinkedNode, ping = PingNode, n1 = Res1, n2 = OtherNode,
nodes = Nodes0, n_res = NRes, config = Config} = Cf,
- ?t:format("~n~nResolver: ~p", [Res]),
- ?t:format(" Registered on partition 1: ~p", [Res1]),
- ?t:format(" Registered on partition 2: ~p", [OtherNode]),
- ?t:format(" Pinged node: ~p", [PingNode]),
- ?t:format(" Linked node: ~p", [LinkedNode]),
- ?t:format(" Expected # resolvers: ~p", [NRes]),
+ io:format("~n~nResolver: ~p", [Res]),
+ io:format(" Registered on partition 1: ~p", [Res1]),
+ io:format(" Registered on partition 2: ~p", [OtherNode]),
+ io:format(" Pinged node: ~p", [PingNode]),
+ io:format(" Linked node: ~p", [LinkedNode]),
+ io:format(" Expected # resolvers: ~p", [NRes]),
Nodes = lists:sort(Nodes0),
T1 = node(),
Part1 = [T1, N1],
@@ -2299,67 +2249,67 @@ res({Res,Resolver}, [N1, A2, Z2], Cf) ->
%% expected monitors remain between registered processes and the
%% global_name_server.
- ?line rpc_cast(OtherNode,
- ?MODULE,
- part_2_2,
- [Config, Part1, Part2, [{Name, Resolver}]]),
- ?line ?UNTIL(is_ready_partition(Config)),
- ?line {_Pid1, yes} =
+ rpc_cast(OtherNode,
+ ?MODULE,
+ part_2_2,
+ [Config, Part1, Part2, [{Name, Resolver}]]),
+ ?UNTIL(is_ready_partition(Config)),
+ {_Pid1, yes} =
rpc:call(Res1, ?MODULE, start_resolver, [Name, Resolver]),
- ?line pong = net_adm:ping(PingNode),
- ?line wait_for_ready_net(Nodes, Config),
+ pong = net_adm:ping(PingNode),
+ wait_for_ready_net(Nodes, Config),
- ?line check_everywhere(Nodes, Name, Config),
- ?line case global:whereis_name(Name) of
- undefined when LinkedNode =:= none -> ok;
- Pid -> assert_pid(Pid)
- end,
+ check_everywhere(Nodes, Name, Config),
+ case global:whereis_name(Name) of
+ undefined when LinkedNode =:= none -> ok;
+ Pid -> assert_pid(Pid)
+ end,
- ?line {_, Trace0} = collect_tracers(Nodes),
- ?line Resolvers = [P || {_Node,new_resolver,{pid,P}} <- Trace0],
+ {_, Trace0} = collect_tracers(Nodes),
+ Resolvers = [P || {_Node,new_resolver,{pid,P}} <- Trace0],
- ?line NRes = length(Resolvers),
+ NRes = length(Resolvers),
%% Wait for extra monitor processes to be created.
%% This applies as long as global:do_monitor/1 spawns processes.
%% (Some day monitor() will be truly synchronous.)
- test_server:sleep(100),
+ ct:sleep(100),
- ?line lists:foreach(fun(P) -> P ! die end, Resolvers),
- ?line lists:foreach(fun(P) -> wait_for_exit(P) end, Resolvers),
+ lists:foreach(fun(P) -> P ! die end, Resolvers),
+ lists:foreach(fun(P) -> wait_for_exit(P) end, Resolvers),
- ?line check_everywhere(Nodes, Name, Config),
- ?line undefined = global:whereis_name(Name),
+ check_everywhere(Nodes, Name, Config),
+ undefined = global:whereis_name(Name),
%% Wait for monitors to remove names.
- test_server:sleep(100),
+ ct:sleep(100),
- ?line {_, Trace1} = collect_tracers(Nodes),
+ {_, Trace1} = collect_tracers(Nodes),
Trace = Trace0 ++ Trace1,
- ?line Gs = name_servers([T1, N1, A2, Z2]),
- ?line MonitoredByNode = monitored_by_node(Trace, Gs),
- ?line MonitoredBy = [M || {_N,M} <- MonitoredByNode],
-
+ Gs = name_servers([T1, N1, A2, Z2]),
+ MonitoredByNode = monitored_by_node(Trace, Gs),
+ MonitoredBy = [M || {_N,M} <- MonitoredByNode],
+
X = MonitoredBy -- Gs,
LengthGs = length(Gs),
- ?line case MonitoredBy of
- [] when LinkedNode =:= none -> ok;
- Gs -> ok;
- _ when LengthGs < 4, X =:= [] -> ok;
- _ -> ?t:format("ERROR:~nMonitoredBy ~p~n"
- "global_name_servers ~p~n",
- [MonitoredByNode, Gs]),
- ?t:fail(monitor_mismatch)
- end,
+ case MonitoredBy of
+ [] when LinkedNode =:= none -> ok;
+ Gs -> ok;
+ _ when LengthGs < 4, X =:= [] -> ok;
+ _ -> io:format("ERROR:~nMonitoredBy ~p~n"
+ "global_name_servers ~p~n",
+ [MonitoredByNode, Gs]),
+ ct:fail(monitor_mismatch)
+ end,
ok.
name_servers(Nodes) ->
lists:sort([rpc:call(N, erlang, whereis, [global_name_server]) ||
N <- Nodes,
pong =:= net_adm:ping(N)]).
-
+
monitored_by_node(Trace, Servers) ->
lists:sort([{node(M),M} ||
{_Node,_P,died,{monitors_2levels,ML}} <- Trace,
@@ -2371,7 +2321,7 @@ part_2_2(Config, Part1, Part2, NameResolvers) ->
make_partition(Config, Part1, Part2),
lists:foreach
(fun({Name, Resolver}) ->
- ?line {Pid2, yes} = start_resolver(Name, Resolver),
+ {Pid2, yes} = start_resolver(Name, Resolver),
trace_message({node(), part_2_2, nodes(), {pid2,Pid2}})
end, NameResolvers).
@@ -2396,7 +2346,7 @@ exit_resolver(name, _Pid1, _Pid2) ->
lock_resolver(name, Pid1, _Pid2) ->
Id = {?MODULE, self()},
Nodes = [node()],
- ?line true = global:set_lock(Id, Nodes),
+ true = global:set_lock(Id, Nodes),
_ = global:del_lock(Id, Nodes),
Pid1.
@@ -2426,7 +2376,7 @@ start_resolver(Name, Resolver) ->
receive
{Pid, Res} -> {Pid, Res}
end.
-
+
init_resolver(Parent, Name, Resolver) ->
X = global:register_name(Name, self(), Resolver),
Parent ! {self(), X},
@@ -2455,18 +2405,17 @@ mon_by_servers(Proc) ->
-define(REGNAME, contact_a_2).
-leftover_name(suite) -> [];
-leftover_name(doc) -> ["OTP-5563. Bug: nodedown while synching."];
+%% OTP-5563. Bug: nodedown while synching.
leftover_name(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
- ?line [N1, A2, Z2] = Cps = start_nodes([n_1, a_2, z_2], peer, Config),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
+ [N1, A2, Z2] = Cps = start_nodes([n_1, a_2, z_2], peer, Config),
Nodes = lists:sort([node() | Cps]),
- ?line wait_for_ready_net(Config),
-
+ wait_for_ready_net(Config),
+
lists:foreach(fun(N) ->
rpc:call(N, ?MODULE, start_tracer, [])
end, Nodes),
@@ -2474,20 +2423,20 @@ leftover_name(Config) when is_list(Config) ->
Name = name, % registered on a_2
ResName = resolved_name, % registered on n_1 and a_2
%%
- ?line _Pid = ping_a_2_fun(?REGNAME, N1, A2),
+ _Pid = ping_a_2_fun(?REGNAME, N1, A2),
T1 = node(),
Part1 = [T1, N1],
Part2 = [A2, Z2],
NoResolver = {no_module, resolve_none},
Resolver = fun contact_a_2/3,
- ?line rpc_cast(A2,
- ?MODULE, part_2_2, [Config,
- Part1,
- Part2,
- [{Name, NoResolver},
- {ResName, Resolver}]]),
- ?line ?UNTIL(is_ready_partition(Config)),
+ rpc_cast(A2,
+ ?MODULE, part_2_2, [Config,
+ Part1,
+ Part2,
+ [{Name, NoResolver},
+ {ResName, Resolver}]]),
+ ?UNTIL(is_ready_partition(Config)),
%% resolved_name is resolved to run on a_2, an insert operation is
%% sent to n_1. The resolver function halts a_2, but the nodedown
@@ -2496,36 +2445,36 @@ leftover_name(Config) when is_list(Config) ->
%% delayed). Unless "artificial" nodedown messages are sent the
%% name would linger on indefinitely. [There is no test case for
%% the situation that no nodedown message at all is sent.]
- ?line {_Pid1, yes} =
+ {_Pid1, yes} =
rpc:call(N1, ?MODULE, start_resolver,
[ResName, fun contact_a_2/3]),
- test_server:sleep(1000),
+ ct:sleep(1000),
- ?line trace_message({node(), pinging, z_2}),
- ?line pong = net_adm:ping(Z2),
- ?line ?UNTIL((Nodes -- [A2]) =:= lists:sort(?NODES)),
- ?t:sleep(1000),
+ trace_message({node(), pinging, z_2}),
+ pong = net_adm:ping(Z2),
+ ?UNTIL((Nodes -- [A2]) =:= lists:sort(?NODES)),
+ ct:sleep(1000),
- ?line {_,Trace0} = collect_tracers(Nodes),
+ {_,Trace0} = collect_tracers(Nodes),
- ?line Resolvers = [P || {_Node,new_resolver,{pid,P}} <- Trace0],
- ?line lists:foreach(fun(P) -> P ! die end, Resolvers),
- ?line lists:foreach(fun(P) -> wait_for_exit(P) end, Resolvers),
+ Resolvers = [P || {_Node,new_resolver,{pid,P}} <- Trace0],
+ lists:foreach(fun(P) -> P ! die end, Resolvers),
+ lists:foreach(fun(P) -> wait_for_exit(P) end, Resolvers),
- ?line lists:foreach(fun(N) ->
- rpc:call(N, ?MODULE, stop_tracer, [])
- end, Nodes),
+ lists:foreach(fun(N) ->
+ rpc:call(N, ?MODULE, stop_tracer, [])
+ end, Nodes),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
stop_nodes(Cps),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
%% Runs on n_1
contact_a_2(resolved_name, Pid1, Pid2) ->
trace_message({node(), ?REGNAME, {pid1,Pid1}, {pid2,Pid2},
- {node1,node(Pid1)}, {node2,node(Pid2)}}),
+ {node1,node(Pid1)}, {node2,node(Pid2)}}),
?REGNAME ! doit,
Pid2.
@@ -2543,15 +2492,14 @@ ping_a_2(RegName, N1, A2) ->
{nodedown, A2} -> ok
end
end.
-
+
halt_node(Node) ->
rpc:call(Node, erlang, halt, []).
%%%-----------------------------------------------------------------
%%% Testing re-registration of a name.
%%%-----------------------------------------------------------------
-re_register_name(suite) -> [];
-re_register_name(doc) -> ["OTP-5563. Name is re-registered."];
+%% OTP-5563. Name is re-registered.
re_register_name(Config) when is_list(Config) ->
%% When re-registering a name the link to the old pid used to
    %% linger on. Don't think it was a serious bug though--some memory
@@ -2560,18 +2508,18 @@ re_register_name(Config) when is_list(Config) ->
Timeout = 15,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
+ init_condition(Config),
Me = self(),
Pid1 = spawn(fun() -> proc(Me) end),
- ?line yes = global:register_name(name, Pid1),
+ yes = global:register_name(name, Pid1),
Pid2 = spawn(fun() -> proc(Me) end),
- ?line _ = global:re_register_name(name, Pid2),
+ _ = global:re_register_name(name, Pid2),
Pid2 ! die,
Pid1 ! die,
receive {Pid1, MonitoredBy1} -> [] = MonitoredBy1 end,
receive {Pid2, MonitoredBy2} -> [_] = MonitoredBy2 end,
- ?line _ = global:unregister_name(name),
- ?line init_condition(Config),
+ _ = global:unregister_name(name),
+ init_condition(Config),
ok.
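
A short usage sketch (not from the patch) of the re_register_name/2 call this case covers; re_register_demo and the_name are illustrative names:

%% re_register_name/2 moves an existing name to a new pid. The old pid
%% keeps running but no longer owns the name, and the global name
%% server stops monitoring it.
re_register_demo() ->
    Old = spawn(fun() -> receive stop -> ok end end),
    New = spawn(fun() -> receive stop -> ok end end),
    yes = global:register_name(the_name, Old),
    _ = global:re_register_name(the_name, New),
    New = global:whereis_name(the_name),
    _ = global:unregister_name(the_name),
    Old ! stop,
    New ! stop,
    ok.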
proc(Parent) ->
@@ -2583,15 +2531,14 @@ proc(Parent) ->
%%%-----------------------------------------------------------------
%%%
%%%-----------------------------------------------------------------
-name_exit(suite) -> [];
-name_exit(doc) -> ["OTP-5563. Registered process dies."];
+%% OTP-5563. Registered process dies.
name_exit(Config) when is_list(Config) ->
StartFun = fun() ->
{ok, N1} = start_node_rel(n_1, this, Config),
{ok, N2} = start_node_rel(n_2, this, Config),
[N1, N2]
end,
- ?t:format("Test of current release~n"),
+ io:format("Test of current release~n"),
do_name_exit(StartFun, current, Config).
do_name_exit(StartFun, Version, Config) ->
@@ -2607,17 +2554,17 @@ do_name_exit(StartFun, Version, Config) ->
Timeout = 60,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
%% Three nodes (test_server, n_1, n_2).
- ?line Cps = StartFun(),
+ Cps = StartFun(),
Nodes = lists:sort([node() | Cps]),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
lists:foreach(fun(N) -> rpc:call(N, ?MODULE, start_tracer, []) end,Nodes),
Name = name,
- ?line {Pid, yes} = start_proc(Name),
+ {Pid, yes} = start_proc(Name),
Me = self(),
LL = spawn(fun() -> long_lock(Me) end),
@@ -2628,23 +2575,23 @@ do_name_exit(StartFun, Version, Config) ->
Pid ! die,
wait_for_exit_fast(Pid),
- ?t:sleep(100),
+ ct:sleep(100),
%% Name has been removed from node()'s table, but nowhere else
%% since there is a lock on 'global'.
{R1,[]} = rpc:multicall(Nodes, global, whereis_name, [Name]),
- ?line case Version of
- old -> [_,_] = lists:usort(R1);
- current -> [undefined, undefined, undefined] = R1
- end,
- ?t:sleep(3000),
- ?line check_everywhere(Nodes, Name, Config),
+ case Version of
+ old -> [_,_] = lists:usort(R1);
+ current -> [undefined, undefined, undefined] = R1
+ end,
+ ct:sleep(3000),
+ check_everywhere(Nodes, Name, Config),
lists:foreach(fun(N) -> rpc:call(N, ?MODULE, stop_tracer, []) end, Nodes),
- ?line OrigNames = global:registered_names(),
+ OrigNames = global:registered_names(),
exit(LL, kill),
write_high_level_trace(Config),
stop_nodes(Cps),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
long_lock(Parent) ->
@@ -2657,17 +2604,16 @@ long_lock(Parent) ->
%%%-----------------------------------------------------------------
%%% Testing the support for external nodes (cnodes)
%%%-----------------------------------------------------------------
-external_nodes(suite) -> [];
-external_nodes(doc) -> ["OTP-5563. External nodes (cnodes)."];
+%% OTP-5563. External nodes (cnodes).
external_nodes(Config) when is_list(Config) ->
Timeout = 30,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
- ?line [NodeB, NodeC] = start_nodes([b, c], peer, Config),
- ?line wait_for_ready_net(Config),
+ [NodeB, NodeC] = start_nodes([b, c], peer, Config),
+ wait_for_ready_net(Config),
%% Nodes = ?NODES,
%% lists:foreach(fun(N) -> rpc:call(N, ?MODULE, start_tracer, []) end,
@@ -2676,75 +2622,75 @@ external_nodes(Config) when is_list(Config) ->
%% Two partitions: [test_server] and [b, c].
%% c registers an external name on b
- ?line rpc_cast(NodeB, ?MODULE, part_ext,
- [Config, node(), NodeC, Name]),
- ?line ?UNTIL(is_ready_partition(Config)),
+ rpc_cast(NodeB, ?MODULE, part_ext,
+ [Config, node(), NodeC, Name]),
+ ?UNTIL(is_ready_partition(Config)),
- ?line pong = net_adm:ping(NodeB),
- ?line ?UNTIL([NodeB, NodeC] =:= lists:sort(nodes())),
- ?line wait_for_ready_net(Config),
+ pong = net_adm:ping(NodeB),
+ ?UNTIL([NodeB, NodeC] =:= lists:sort(nodes())),
+ wait_for_ready_net(Config),
- ?line Cpid = rpc:call(NodeC, erlang, whereis, [Name]),
+ Cpid = rpc:call(NodeC, erlang, whereis, [Name]),
ExternalName = [{name,Cpid,NodeB}],
- ?line ExternalName = get_ext_names(),
- ?line ExternalName = rpc:call(NodeB, gen_server, call,
- [global_name_server, get_names_ext]),
- ?line ExternalName = rpc:call(NodeC, gen_server, call,
- [global_name_server, get_names_ext]),
-
- ?line [_] = cnode_links(Cpid),
- ?line [_,_,_] = cnode_monitored_by(Cpid),
- ?line no = global:register_name(Name, self()),
- ?line yes = global:re_register_name(Name, self()),
- ?line ?UNTIL([] =:= cnode_monitored_by(Cpid)),
- ?line ?UNTIL([] =:= cnode_links(Cpid)),
- ?line [] = gen_server:call(global_name_server, get_names_ext, infinity),
-
- ?line Cpid ! {register, self(), Name},
- ?line receive {Cpid, Reply1} -> no = Reply1 end,
- ?line _ = global:unregister_name(Name),
- test_server:sleep(1000),
- ?line Cpid ! {register, self(), Name},
- ?line ?UNTIL(length(get_ext_names()) =:= 1),
- ?line receive {Cpid, Reply2} -> yes = Reply2 end,
-
- ?line Cpid ! {unregister, self(), Name},
- ?line ?UNTIL(length(get_ext_names()) =:= 0),
- ?line receive {Cpid, Reply3} -> ok = Reply3 end,
+ ExternalName = get_ext_names(),
+ ExternalName = rpc:call(NodeB, gen_server, call,
+ [global_name_server, get_names_ext]),
+ ExternalName = rpc:call(NodeC, gen_server, call,
+ [global_name_server, get_names_ext]),
+
+ [_] = cnode_links(Cpid),
+ [_,_,_] = cnode_monitored_by(Cpid),
+ no = global:register_name(Name, self()),
+ yes = global:re_register_name(Name, self()),
+ ?UNTIL([] =:= cnode_monitored_by(Cpid)),
+ ?UNTIL([] =:= cnode_links(Cpid)),
+ [] = gen_server:call(global_name_server, get_names_ext, infinity),
+
+ Cpid ! {register, self(), Name},
+ receive {Cpid, Reply1} -> no = Reply1 end,
+ _ = global:unregister_name(Name),
+ ct:sleep(1000),
+ Cpid ! {register, self(), Name},
+ ?UNTIL(length(get_ext_names()) =:= 1),
+ receive {Cpid, Reply2} -> yes = Reply2 end,
+
+ Cpid ! {unregister, self(), Name},
+ ?UNTIL(length(get_ext_names()) =:= 0),
+ receive {Cpid, Reply3} -> ok = Reply3 end,
Cpid ! die,
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
- ?line [] = get_ext_names(),
- ?line [] = rpc:call(NodeB, gen_server, call,
- [global_name_server, get_names_ext]),
- ?line [] = rpc:call(NodeC, gen_server, call,
- [global_name_server, get_names_ext]),
+ ?UNTIL(OrigNames =:= global:registered_names()),
+ [] = get_ext_names(),
+ [] = rpc:call(NodeB, gen_server, call,
+ [global_name_server, get_names_ext]),
+ [] = rpc:call(NodeC, gen_server, call,
+ [global_name_server, get_names_ext]),
- ?line Cpid2 = erlang:spawn(NodeC, fun() -> cnode_proc(NodeB) end),
- ?line Cpid2 ! {register, self(), Name},
- ?line receive {Cpid2, Reply4} -> yes = Reply4 end,
+ Cpid2 = erlang:spawn(NodeC, fun() -> cnode_proc(NodeB) end),
+ Cpid2 ! {register, self(), Name},
+ receive {Cpid2, Reply4} -> yes = Reply4 end,
%% It could be a bug that Cpid2 is linked to 'global_name_server'
%% at node 'b'. The effect: Cpid2 dies when node 'b' crashes.
stop_node(NodeB),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
- ?line [] = get_ext_names(),
- ?line [] = rpc:call(NodeC, gen_server, call,
- [global_name_server, get_names_ext]),
+ ?UNTIL(OrigNames =:= global:registered_names()),
+ [] = get_ext_names(),
+ [] = rpc:call(NodeC, gen_server, call,
+ [global_name_server, get_names_ext]),
- %% ?line {_, Trace} = collect_tracers(Nodes),
+ %% {_, Trace} = collect_tracers(Nodes),
%% lists:foreach(fun(M) -> erlang:display(M) end, Trace),
ThisNode = node(),
- ?line Cpid3 = erlang:spawn(NodeC, fun() -> cnode_proc(ThisNode) end),
- ?line Cpid3 ! {register, self(), Name},
- ?line receive {Cpid3, Reply5} -> yes = Reply5 end,
+ Cpid3 = erlang:spawn(NodeC, fun() -> cnode_proc(ThisNode) end),
+ Cpid3 ! {register, self(), Name},
+ receive {Cpid3, Reply5} -> yes = Reply5 end,
- ?line ?UNTIL(length(get_ext_names()) =:= 1),
+ ?UNTIL(length(get_ext_names()) =:= 1),
stop_node(NodeC),
- ?line ?UNTIL(length(get_ext_names()) =:= 0),
+ ?UNTIL(length(get_ext_names()) =:= 0),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
get_ext_names() ->
@@ -2791,19 +2737,16 @@ cnode_proc(E) ->
cnode_proc(E).
-many_nodes(suite) ->
- [];
-many_nodes(doc) ->
- ["OTP-5770. Start many nodes. Make them connect at the same time."];
+%% OTP-5770. Start many nodes. Make them connect at the same time.
many_nodes(Config) when is_list(Config) ->
Timeout = 240,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
{Rels, N_cps} =
- case ?t:os_type() of
+ case test_server:os_type() of
{unix, Osname} when Osname =:= linux;
Osname =:= openbsd;
Osname =:= darwin ->
@@ -2814,12 +2757,12 @@ many_nodes(Config) when is_list(Config) ->
_ ->
{node_rel(1, 32, this), 32}
end,
- ?line Cps = [begin {ok, Cp} = start_node_rel(Name, Rel, Config), Cp end ||
- {Name,Rel} <- Rels],
+ Cps = [begin {ok, Cp} = start_node_rel(Name, Rel, Config), Cp end ||
+ {Name,Rel} <- Rels],
Nodes = lists:sort(?NODES),
- ?line wait_for_ready_net(Nodes, Config),
+ wait_for_ready_net(Nodes, Config),
- ?line Dir = ?config(priv_dir, Config),
+ Dir = proplists:get_value(priv_dir, Config),
GoFile = filename:join([Dir, "go.txt"]),
file:delete(GoFile),
@@ -2830,34 +2773,34 @@ many_nodes(Config) when is_list(Config) ->
file:delete(File),
rpc_cast(N, ?MODULE, isolated_node, [File, GoFile, Cps, Config])
end,
- ?line lists:foreach(IsoFun, CpsFiles),
-
- ?line all_nodes_files(CpsFiles, "isolated", Config),
- ?line Time = msec(),
- ?line sync_until(),
+ lists:foreach(IsoFun, CpsFiles),
+
+ all_nodes_files(CpsFiles, "isolated", Config),
+ Time = msec(),
+ sync_until(),
erlang:display(ready_to_go),
- ?line touch(GoFile, "go"),
- ?line all_nodes_files(CpsFiles, "done", Config),
- ?line Time2 = msec(),
+ touch(GoFile, "go"),
+ all_nodes_files(CpsFiles, "done", Config),
+ Time2 = msec(),
- ?line lists:foreach(fun(N) -> pong = net_adm:ping(N) end, Cps),
+ lists:foreach(fun(N) -> pong = net_adm:ping(N) end, Cps),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
write_high_level_trace(Config), % The test succeeded, but was it slow?
- ?line lists:foreach(fun({_N, File}) -> file:delete(File) end, CpsFiles),
- ?line file:delete(GoFile),
+ lists:foreach(fun({_N, File}) -> file:delete(File) end, CpsFiles),
+ file:delete(GoFile),
- ?line ?UNTIL(OrigNames =:= global:registered_names()),
+ ?UNTIL(OrigNames =:= global:registered_names()),
write_high_level_trace(Config),
- ?line stop_nodes(Cps),
- ?line init_condition(Config),
+ stop_nodes(Cps),
+ init_condition(Config),
Diff = Time2 - Time,
Return = lists:flatten(io_lib:format("~w nodes took ~w ms",
[N_cps, Diff])),
erlang:display({{nodes,N_cps},{time,Diff}}),
- ?t:format("~s~n", [Return]),
+ io:format("~s~n", [Return]),
{comment, Return}.
node_rel(From, To, Rel) ->
@@ -2883,7 +2826,7 @@ isolated_node(File, GoFile, Nodes, Config) ->
touch(File, "got_go"),
lists:foreach(fun(N) -> _ = net_adm:ping(N) end, shuffle(Nodes)),
touch(File, "pinged"),
- ?line ?UNTIL((Ns -- get_known(node())) =:= []),
+ ?UNTIL((Ns -- get_known(node())) =:= []),
touch(File, "done").
touch(File, List) ->
@@ -2931,19 +2874,17 @@ sync_until(LogFile) ->
timer:sleep(Time).
shuffle(L) ->
- [E || {_, E} <- lists:keysort(1, [{random:uniform(), E} || E <- L])].
+ [E || {_, E} <- lists:keysort(1, [{rand:uniform(), E} || E <- L])].
-sync_0(suite) -> [];
-sync_0(doc) ->
- ["OTP-5770. sync/0."];
+%% OTP-5770. sync/0.
sync_0(Config) when is_list(Config) ->
Timeout = 180,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
+ init_condition(Config),
N_cps =
- case ?t:os_type() of
+ case test_server:os_type() of
{unix, Osname} when Osname =:= linux;
Osname =:= openbsd;
Osname =:= darwin ->
@@ -2958,82 +2899,80 @@ sync_0(Config) when is_list(Config) ->
Names = [lists:concat([cp,N]) || N <- lists:seq(1, N_cps)],
Cps = start_and_sync(Names),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
write_high_level_trace(Config),
stop_nodes(Cps),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
start_and_sync([]) ->
[];
start_and_sync([Name | Names]) ->
- ?line {ok, N} = start_node(Name, slave, []),
- ?line {Time, _Void} = rpc:call(N, timer, tc, [global, sync, []]),
- ?t:format("~p: ~p~n", [Name, Time]),
+ {ok, N} = start_node(Name, slave, []),
+ {Time, _Void} = rpc:call(N, timer, tc, [global, sync, []]),
+ io:format("~p: ~p~n", [Name, Time]),
[N | start_and_sync(Names)].
%%%-----------------------------------------------------------------
%%% Testing of change of global_groups parameter.
%%%-----------------------------------------------------------------
-global_groups_change(suite) -> [];
-global_groups_change(doc) -> ["Test change of global_groups parameter."];
+%% Test change of global_groups parameter.
global_groups_change(Config) ->
Timeout = 90,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line M = from($@, atom_to_list(node())),
-
- % Create the .app files and the boot script
- ?line {KernelVer, StdlibVer} = create_script_dc("dc"),
- ?line case is_real_system(KernelVer, StdlibVer) of
- true ->
- Options = [];
- false ->
- Options = [local]
- end,
+ init_condition(Config),
+ M = from($@, atom_to_list(node())),
+
+ %% Create the .app files and the boot script
+ {KernelVer, StdlibVer} = create_script_dc("dc"),
+ case is_real_system(KernelVer, StdlibVer) of
+ true ->
+ Options = [];
+ false ->
+ Options = [local]
+ end,
+
+ ok = systools:make_script("dc", Options),
- ?line ok = systools:make_script("dc", Options),
-
[Ncp1,Ncp2,Ncp3,Ncp4,Ncp5,NcpA,NcpB,NcpC,NcpD,NcpE] =
node_names([cp1,cp2,cp3,cp4,cp5,cpA,cpB,cpC,cpD,cpE], Config),
- % Write config files
- ?line Dir = ?config(priv_dir,Config),
- ?line {ok, Fd_dc} = file:open(filename:join(Dir, "sys.config"), [write]),
- ?line config_dc1(Fd_dc, Ncp1, Ncp2, Ncp3, NcpA, NcpB, NcpC, NcpD, NcpE),
- ?line file:close(Fd_dc),
- ?line Config1 = filename:join(Dir, "sys"),
-
- % Test [cp1, cp2, cp3]
- ?line {ok, Cp1} = start_node_boot(Ncp1, Config1, dc),
- ?line {ok, Cp2} = start_node_boot(Ncp2, Config1, dc),
- ?line {ok, Cp3} = start_node_boot(Ncp3, Config1, dc),
- ?line {ok, CpA} = start_node_boot(NcpA, Config1, dc),
- ?line {ok, CpB} = start_node_boot(NcpB, Config1, dc),
- ?line {ok, CpC} = start_node_boot(NcpC, Config1, dc),
- ?line {ok, CpD} = start_node_boot(NcpD, Config1, dc),
- ?line {ok, CpE} = start_node_boot(NcpE, Config1, dc),
-
- ?line pong = rpc:call(Cp1, net_adm, ping, [Cp2]),
- ?line pong = rpc:call(Cp1, net_adm, ping, [Cp3]),
- ?line pang = rpc:call(Cp1, net_adm, ping,
- [list_to_atom(lists:concat(["cp5@", M]))]),
- ?line pong = rpc:call(Cp2, net_adm, ping, [Cp3]),
- ?line pang = rpc:call(Cp2, net_adm, ping,
- [list_to_atom(lists:concat(["cp5@", M]))]),
-
- ?line {TestGG4, yes} = rpc:call(CpB, ?MODULE, start_proc, [test]),
- ?line {TestGG5, yes} = rpc:call(CpE, ?MODULE, start_proc, [test]),
-
-
- ?line pong = rpc:call(CpA, net_adm, ping, [CpC]),
- ?line pong = rpc:call(CpC, net_adm, ping, [CpB]),
- ?line pong = rpc:call(CpD, net_adm, ping, [CpC]),
- ?line pong = rpc:call(CpE, net_adm, ping, [CpD]),
-
- ?line
+ %% Write config files
+ Dir = proplists:get_value(priv_dir,Config),
+ {ok, Fd_dc} = file:open(filename:join(Dir, "sys.config"), [write]),
+ config_dc1(Fd_dc, Ncp1, Ncp2, Ncp3, NcpA, NcpB, NcpC, NcpD, NcpE),
+ file:close(Fd_dc),
+ Config1 = filename:join(Dir, "sys"),
+
+ %% Test [cp1, cp2, cp3]
+ {ok, Cp1} = start_node_boot(Ncp1, Config1, dc),
+ {ok, Cp2} = start_node_boot(Ncp2, Config1, dc),
+ {ok, Cp3} = start_node_boot(Ncp3, Config1, dc),
+ {ok, CpA} = start_node_boot(NcpA, Config1, dc),
+ {ok, CpB} = start_node_boot(NcpB, Config1, dc),
+ {ok, CpC} = start_node_boot(NcpC, Config1, dc),
+ {ok, CpD} = start_node_boot(NcpD, Config1, dc),
+ {ok, CpE} = start_node_boot(NcpE, Config1, dc),
+
+ pong = rpc:call(Cp1, net_adm, ping, [Cp2]),
+ pong = rpc:call(Cp1, net_adm, ping, [Cp3]),
+ pang = rpc:call(Cp1, net_adm, ping,
+ [list_to_atom(lists:concat(["cp5@", M]))]),
+ pong = rpc:call(Cp2, net_adm, ping, [Cp3]),
+ pang = rpc:call(Cp2, net_adm, ping,
+ [list_to_atom(lists:concat(["cp5@", M]))]),
+
+ {TestGG4, yes} = rpc:call(CpB, ?MODULE, start_proc, [test]),
+ {TestGG5, yes} = rpc:call(CpE, ?MODULE, start_proc, [test]),
+
+
+ pong = rpc:call(CpA, net_adm, ping, [CpC]),
+ pong = rpc:call(CpC, net_adm, ping, [CpB]),
+ pong = rpc:call(CpD, net_adm, ping, [CpC]),
+ pong = rpc:call(CpE, net_adm, ping, [CpD]),
+
?UNTIL(begin
TestGG4_1 = rpc:call(CpA, global, whereis_name, [test]),
TestGG4_2 = rpc:call(CpB, global, whereis_name, [test]),
@@ -3050,88 +2989,87 @@ global_groups_change(Config) ->
(TestGG5_2 =:= TestGG5)
end),
- ?line ?t:format( "#### nodes() ~p~n",[nodes()]),
+ io:format( "#### nodes() ~p~n",[nodes()]),
- ?line XDcWa1 = rpc:call(Cp1, global_group, info, []),
- ?line XDcWa2 = rpc:call(Cp2, global_group, info, []),
- ?line XDcWa3 = rpc:call(Cp3, global_group, info, []),
- ?line ?t:format( "#### XDcWa1 ~p~n",[XDcWa1]),
- ?line ?t:format( "#### XDcWa2 ~p~n",[XDcWa2]),
- ?line ?t:format( "#### XDcWa3 ~p~n",[XDcWa3]),
+ XDcWa1 = rpc:call(Cp1, global_group, info, []),
+ XDcWa2 = rpc:call(Cp2, global_group, info, []),
+ XDcWa3 = rpc:call(Cp3, global_group, info, []),
+ io:format( "#### XDcWa1 ~p~n",[XDcWa1]),
+ io:format( "#### XDcWa2 ~p~n",[XDcWa2]),
+ io:format( "#### XDcWa3 ~p~n",[XDcWa3]),
+
+ stop_node(CpC),
- ?line stop_node(CpC),
-
%% Read the current configuration parameters, and change them
- ?line OldEnv =
+ OldEnv =
rpc:call(Cp1, application_controller, prep_config_change, []),
- ?line {value, {kernel, OldKernel}} = lists:keysearch(kernel, 1, OldEnv),
+ {value, {kernel, OldKernel}} = lists:keysearch(kernel, 1, OldEnv),
- ?line GG1 =
+ GG1 =
lists:sort([mk_node(Ncp1, M), mk_node(Ncp2, M), mk_node(Ncp5, M)]),
- ?line GG2 = lists:sort([mk_node(Ncp3, M)]),
- ?line GG3 = lists:sort([mk_node(Ncp4, M)]),
- ?line GG4 = lists:sort([mk_node(NcpA, M), mk_node(NcpB, M)]),
- ?line GG5 =
+ GG2 = lists:sort([mk_node(Ncp3, M)]),
+ GG3 = lists:sort([mk_node(Ncp4, M)]),
+ GG4 = lists:sort([mk_node(NcpA, M), mk_node(NcpB, M)]),
+ GG5 =
lists:sort([mk_node(NcpC, M), mk_node(NcpD, M), mk_node(NcpE, M)]),
- ?line NewNG = {global_groups,[{gg1, normal, GG1},
- {gg2, normal, GG2},
- {gg3, normal, GG3},
- {gg4, normal, GG4},
- {gg5, hidden, GG5}]},
-
- ?line NewKernel =
+ NewNG = {global_groups,[{gg1, normal, GG1},
+ {gg2, normal, GG2},
+ {gg3, normal, GG3},
+ {gg4, normal, GG4},
+ {gg5, hidden, GG5}]},
+
+ NewKernel =
[{kernel, lists:keyreplace(global_groups, 1, OldKernel, NewNG)}],
- ?line ok = rpc:call(Cp1, application_controller, test_change_apps,
- [[kernel], [NewKernel]]),
- ?line ok = rpc:call(Cp2, application_controller, test_change_apps,
- [[kernel], [NewKernel]]),
- ?line ok = rpc:call(Cp3, application_controller, test_change_apps,
- [[kernel], [NewKernel]]),
- ?line ok = rpc:call(CpA, application_controller, test_change_apps,
- [[kernel], [NewKernel]]),
- ?line ok = rpc:call(CpB, application_controller, test_change_apps,
- [[kernel], [NewKernel]]),
- ?line ok = rpc:call(CpD, application_controller, test_change_apps,
- [[kernel], [NewKernel]]),
- ?line ok = rpc:call(CpE, application_controller, test_change_apps,
- [[kernel], [NewKernel]]),
-
- ?line ?t:format("#### ~p~n",[multicall]),
- ?line ?t:format( "#### ~p~n",[multicall]),
+ ok = rpc:call(Cp1, application_controller, test_change_apps,
+ [[kernel], [NewKernel]]),
+ ok = rpc:call(Cp2, application_controller, test_change_apps,
+ [[kernel], [NewKernel]]),
+ ok = rpc:call(Cp3, application_controller, test_change_apps,
+ [[kernel], [NewKernel]]),
+ ok = rpc:call(CpA, application_controller, test_change_apps,
+ [[kernel], [NewKernel]]),
+ ok = rpc:call(CpB, application_controller, test_change_apps,
+ [[kernel], [NewKernel]]),
+ ok = rpc:call(CpD, application_controller, test_change_apps,
+ [[kernel], [NewKernel]]),
+ ok = rpc:call(CpE, application_controller, test_change_apps,
+ [[kernel], [NewKernel]]),
+
+ io:format("#### ~p~n",[multicall]),
+ io:format( "#### ~p~n",[multicall]),
    %% There is no point in checking the result of the rpc because the other
    %% nodes will disconnect from the test server, and thus the result will
    %% always be {badrpc, nodedown}
- ?line rpc:multicall([Cp1, Cp2, Cp3, CpA, CpB, CpD, CpE],
- application_controller, config_change, [OldEnv]),
+ rpc:multicall([Cp1, Cp2, Cp3, CpA, CpB, CpD, CpE],
+ application_controller, config_change, [OldEnv]),
- ?line {ok, Fd_dc2} = file:open(filename:join(Dir, "sys2.config"), [write]),
- ?line config_dc2(Fd_dc2, NewNG, Ncp1, Ncp2, Ncp3),
- ?line file:close(Fd_dc2),
- ?line Config2 = filename:join(Dir, "sys2"),
- ?line {ok, CpC} = start_node_boot(NcpC, Config2, dc),
+ {ok, Fd_dc2} = file:open(filename:join(Dir, "sys2.config"), [write]),
+ config_dc2(Fd_dc2, NewNG, Ncp1, Ncp2, Ncp3),
+ file:close(Fd_dc2),
+ Config2 = filename:join(Dir, "sys2"),
+ {ok, CpC} = start_node_boot(NcpC, Config2, dc),
- ?line sync_and_wait(CpA),
- ?line sync_and_wait(CpD),
-
- ?line pong = rpc:call(CpA, net_adm, ping, [CpC]),
- ?line pong = rpc:call(CpC, net_adm, ping, [CpB]),
- ?line pong = rpc:call(CpD, net_adm, ping, [CpC]),
- ?line pong = rpc:call(CpE, net_adm, ping, [CpD]),
-
- ?line GG5 =
+ sync_and_wait(CpA),
+ sync_and_wait(CpD),
+
+ pong = rpc:call(CpA, net_adm, ping, [CpC]),
+ pong = rpc:call(CpC, net_adm, ping, [CpB]),
+ pong = rpc:call(CpD, net_adm, ping, [CpC]),
+ pong = rpc:call(CpE, net_adm, ping, [CpD]),
+
+ GG5 =
lists:sort([mk_node(NcpC, M)|rpc:call(CpC, erlang, nodes, [])]),
- ?line GG5 =
+ GG5 =
lists:sort([mk_node(NcpD, M)|rpc:call(CpD, erlang, nodes, [])]),
- ?line GG5 =
+ GG5 =
lists:sort([mk_node(NcpE, M)|rpc:call(CpE, erlang, nodes, [])]),
- ?line false =
+ false =
lists:member(mk_node(NcpC, M), rpc:call(CpA, erlang, nodes, [])),
- ?line false =
+ false =
lists:member(mk_node(NcpC, M), rpc:call(CpB, erlang, nodes, [])),
- ?line
?UNTIL(begin
TestGG4a = rpc:call(CpA, global, whereis_name, [test]),
TestGG4b = rpc:call(CpB, global, whereis_name, [test]),
@@ -3148,171 +3086,171 @@ global_groups_change(Config) ->
(TestGG5 =:= TestGG5e)
end),
- ?line Info1 = rpc:call(Cp1, global_group, info, []),
- ?line Info2 = rpc:call(Cp2, global_group, info, []),
- ?line Info3 = rpc:call(Cp3, global_group, info, []),
- ?line InfoA = rpc:call(CpA, global_group, info, []),
- ?line InfoB = rpc:call(CpB, global_group, info, []),
- ?line InfoC = rpc:call(CpC, global_group, info, []),
- ?line InfoD = rpc:call(CpD, global_group, info, []),
- ?line InfoE = rpc:call(CpE, global_group, info, []),
- ?line ?t:format( "#### Info1 ~p~n",[Info1]),
- ?line ?t:format( "#### Info2 ~p~n",[Info2]),
- ?line ?t:format( "#### Info3 ~p~n",[Info3]),
- ?line ?t:format( "#### InfoA ~p~n",[InfoA]),
- ?line ?t:format( "#### InfoB ~p~n",[InfoB]),
- ?line ?t:format( "#### InfoC ~p~n",[InfoC]),
- ?line ?t:format( "#### InfoD ~p~n",[InfoD]),
- ?line ?t:format( "#### InfoE ~p~n",[InfoE]),
-
- ?line {global_groups, GGNodes} = NewNG,
-
- ?line Info1ok = [{state, synced},
- {own_group_name, gg1},
- {own_group_nodes, GG1},
- {synced_nodes, [mk_node(Ncp2, M)]},
- {sync_error, []},
- {no_contact, [mk_node(Ncp5, M)]},
- {other_groups, remove_gg_pub_type(lists:keydelete
- (gg1, 1, GGNodes))},
- {monitoring, []}],
-
-
- ?line Info2ok = [{state, synced},
- {own_group_name, gg1},
- {own_group_nodes, GG1},
- {synced_nodes, [mk_node(Ncp1, M)]},
- {sync_error, []},
- {no_contact, [mk_node(Ncp5, M)]},
- {other_groups, remove_gg_pub_type(lists:keydelete
- (gg1, 1, GGNodes))},
- {monitoring, []}],
-
- ?line Info3ok = [{state, synced},
- {own_group_name, gg2},
- {own_group_nodes, GG2},
- {synced_nodes, []},
- {sync_error, []},
- {no_contact, []},
- {other_groups, remove_gg_pub_type(lists:keydelete
- (gg2, 1, GGNodes))},
- {monitoring, []}],
-
- ?line InfoAok = [{state, synced},
- {own_group_name, gg4},
- {own_group_nodes, GG4},
- {synced_nodes, lists:delete(mk_node(NcpA, M), GG4)},
- {sync_error, []},
- {no_contact, []},
- {other_groups, remove_gg_pub_type(lists:keydelete
- (gg4, 1, GGNodes))},
- {monitoring, []}],
-
- ?line InfoBok = [{state, synced},
- {own_group_name, gg4},
- {own_group_nodes, GG4},
- {synced_nodes, lists:delete(mk_node(NcpB, M), GG4)},
- {sync_error, []},
- {no_contact, []},
- {other_groups, remove_gg_pub_type(lists:keydelete
- (gg4, 1, GGNodes))},
- {monitoring, []}],
-
- ?line InfoCok = [{state, synced},
- {own_group_name, gg5},
- {own_group_nodes, GG5},
- {synced_nodes, lists:delete(mk_node(NcpC, M), GG5)},
- {sync_error, []},
- {no_contact, []},
- {other_groups, remove_gg_pub_type(lists:keydelete
- (gg5, 1, GGNodes))},
- {monitoring, []}],
-
- ?line InfoDok = [{state, synced},
- {own_group_name, gg5},
- {own_group_nodes, GG5},
- {synced_nodes, lists:delete(mk_node(NcpD, M), GG5)},
- {sync_error, []},
- {no_contact, []},
- {other_groups, remove_gg_pub_type(lists:keydelete
- (gg5, 1, GGNodes))},
- {monitoring, []}],
-
- ?line InfoEok = [{state, synced},
- {own_group_name, gg5},
- {own_group_nodes, GG5},
- {synced_nodes, lists:delete(mk_node(NcpE, M), GG5)},
- {sync_error, []},
- {no_contact, []},
- {other_groups, remove_gg_pub_type(lists:keydelete
- (gg5, 1, GGNodes))},
- {monitoring, []}],
-
-
- ?line case Info1 of
- Info1ok ->
- ok;
- _ ->
- test_server:fail({{"could not change the global groups"
- " in node", Cp1}, {Info1, Info1ok}})
- end,
+ Info1 = rpc:call(Cp1, global_group, info, []),
+ Info2 = rpc:call(Cp2, global_group, info, []),
+ Info3 = rpc:call(Cp3, global_group, info, []),
+ InfoA = rpc:call(CpA, global_group, info, []),
+ InfoB = rpc:call(CpB, global_group, info, []),
+ InfoC = rpc:call(CpC, global_group, info, []),
+ InfoD = rpc:call(CpD, global_group, info, []),
+ InfoE = rpc:call(CpE, global_group, info, []),
+ io:format( "#### Info1 ~p~n",[Info1]),
+ io:format( "#### Info2 ~p~n",[Info2]),
+ io:format( "#### Info3 ~p~n",[Info3]),
+ io:format( "#### InfoA ~p~n",[InfoA]),
+ io:format( "#### InfoB ~p~n",[InfoB]),
+ io:format( "#### InfoC ~p~n",[InfoC]),
+ io:format( "#### InfoD ~p~n",[InfoD]),
+ io:format( "#### InfoE ~p~n",[InfoE]),
+
+ {global_groups, GGNodes} = NewNG,
+
+ Info1ok = [{state, synced},
+ {own_group_name, gg1},
+ {own_group_nodes, GG1},
+ {synced_nodes, [mk_node(Ncp2, M)]},
+ {sync_error, []},
+ {no_contact, [mk_node(Ncp5, M)]},
+ {other_groups, remove_gg_pub_type(lists:keydelete
+ (gg1, 1, GGNodes))},
+ {monitoring, []}],
+
+
+ Info2ok = [{state, synced},
+ {own_group_name, gg1},
+ {own_group_nodes, GG1},
+ {synced_nodes, [mk_node(Ncp1, M)]},
+ {sync_error, []},
+ {no_contact, [mk_node(Ncp5, M)]},
+ {other_groups, remove_gg_pub_type(lists:keydelete
+ (gg1, 1, GGNodes))},
+ {monitoring, []}],
+
+ Info3ok = [{state, synced},
+ {own_group_name, gg2},
+ {own_group_nodes, GG2},
+ {synced_nodes, []},
+ {sync_error, []},
+ {no_contact, []},
+ {other_groups, remove_gg_pub_type(lists:keydelete
+ (gg2, 1, GGNodes))},
+ {monitoring, []}],
+
+ InfoAok = [{state, synced},
+ {own_group_name, gg4},
+ {own_group_nodes, GG4},
+ {synced_nodes, lists:delete(mk_node(NcpA, M), GG4)},
+ {sync_error, []},
+ {no_contact, []},
+ {other_groups, remove_gg_pub_type(lists:keydelete
+ (gg4, 1, GGNodes))},
+ {monitoring, []}],
+
+ InfoBok = [{state, synced},
+ {own_group_name, gg4},
+ {own_group_nodes, GG4},
+ {synced_nodes, lists:delete(mk_node(NcpB, M), GG4)},
+ {sync_error, []},
+ {no_contact, []},
+ {other_groups, remove_gg_pub_type(lists:keydelete
+ (gg4, 1, GGNodes))},
+ {monitoring, []}],
+
+ InfoCok = [{state, synced},
+ {own_group_name, gg5},
+ {own_group_nodes, GG5},
+ {synced_nodes, lists:delete(mk_node(NcpC, M), GG5)},
+ {sync_error, []},
+ {no_contact, []},
+ {other_groups, remove_gg_pub_type(lists:keydelete
+ (gg5, 1, GGNodes))},
+ {monitoring, []}],
+
+ InfoDok = [{state, synced},
+ {own_group_name, gg5},
+ {own_group_nodes, GG5},
+ {synced_nodes, lists:delete(mk_node(NcpD, M), GG5)},
+ {sync_error, []},
+ {no_contact, []},
+ {other_groups, remove_gg_pub_type(lists:keydelete
+ (gg5, 1, GGNodes))},
+ {monitoring, []}],
+
+ InfoEok = [{state, synced},
+ {own_group_name, gg5},
+ {own_group_nodes, GG5},
+ {synced_nodes, lists:delete(mk_node(NcpE, M), GG5)},
+ {sync_error, []},
+ {no_contact, []},
+ {other_groups, remove_gg_pub_type(lists:keydelete
+ (gg5, 1, GGNodes))},
+ {monitoring, []}],
+
+
+ case Info1 of
+ Info1ok ->
+ ok;
+ _ ->
+ ct:fail({{"could not change the global groups"
+ " in node", Cp1}, {Info1, Info1ok}})
+ end,
- ?line case Info2 of
- Info2ok ->
- ok;
- _ ->
- test_server:fail({{"could not change the global groups"
- " in node", Cp2}, {Info2, Info2ok}})
- end,
+ case Info2 of
+ Info2ok ->
+ ok;
+ _ ->
+ ct:fail({{"could not change the global groups"
+ " in node", Cp2}, {Info2, Info2ok}})
+ end,
- ?line case Info3 of
- Info3ok ->
- ok;
- _ ->
- test_server:fail({{"could not change the global groups"
- " in node", Cp3}, {Info3, Info3ok}})
- end,
+ case Info3 of
+ Info3ok ->
+ ok;
+ _ ->
+ ct:fail({{"could not change the global groups"
+ " in node", Cp3}, {Info3, Info3ok}})
+ end,
- ?line case InfoA of
- InfoAok ->
- ok;
- _ ->
- test_server:fail({{"could not change the global groups"
- " in node", CpA}, {InfoA, InfoAok}})
- end,
+ case InfoA of
+ InfoAok ->
+ ok;
+ _ ->
+ ct:fail({{"could not change the global groups"
+ " in node", CpA}, {InfoA, InfoAok}})
+ end,
- ?line case InfoB of
- InfoBok ->
- ok;
- _ ->
- test_server:fail({{"could not change the global groups"
- " in node", CpB}, {InfoB, InfoBok}})
- end,
+ case InfoB of
+ InfoBok ->
+ ok;
+ _ ->
+ ct:fail({{"could not change the global groups"
+ " in node", CpB}, {InfoB, InfoBok}})
+ end,
- ?line case InfoC of
- InfoCok ->
- ok;
- _ ->
- test_server:fail({{"could not change the global groups"
- " in node", CpC}, {InfoC, InfoCok}})
- end,
+ case InfoC of
+ InfoCok ->
+ ok;
+ _ ->
+ ct:fail({{"could not change the global groups"
+ " in node", CpC}, {InfoC, InfoCok}})
+ end,
- ?line case InfoD of
- InfoDok ->
- ok;
- _ ->
- test_server:fail({{"could not change the global groups"
- " in node", CpD}, {InfoD, InfoDok}})
- end,
+ case InfoD of
+ InfoDok ->
+ ok;
+ _ ->
+ ct:fail({{"could not change the global groups"
+ " in node", CpD}, {InfoD, InfoDok}})
+ end,
- ?line case InfoE of
- InfoEok ->
- ok;
- _ ->
- test_server:fail({{"could not change the global groups"
- " in node", CpE}, {InfoE, InfoEok}})
- end,
+ case InfoE of
+ InfoEok ->
+ ok;
+ _ ->
+ ct:fail({{"could not change the global groups"
+ " in node", CpE}, {InfoE, InfoEok}})
+ end,
write_high_level_trace(Config), % no good since CpC was restarted
stop_node(Cp1),
@@ -3324,7 +3262,7 @@ global_groups_change(Config) ->
stop_node(CpD),
stop_node(CpE),
- ?line init_condition(Config),
+ init_condition(Config),
ok.
sync_and_wait(Node) ->
@@ -3354,43 +3292,43 @@ sync_and_wait(Node) ->
is_real_system(KernelVsn, StdlibVsn) ->
LibDir = code:lib_dir(),
filelib:is_dir(filename:join(LibDir, "kernel-" ++ KernelVsn))
- andalso
- filelib:is_dir(filename:join(LibDir, "stdlib-" ++ StdlibVsn)).
+ andalso
+ filelib:is_dir(filename:join(LibDir, "stdlib-" ++ StdlibVsn)).
create_script_dc(ScriptName) ->
- ?line Name = filename:join(".", ScriptName),
- ?line Apps = application_controller:which_applications(),
- ?line {value,{_,_,KernelVer}} = lists:keysearch(kernel,1,Apps),
- ?line {value,{_,_,StdlibVer}} = lists:keysearch(stdlib,1,Apps),
- ?line {ok,Fd} = file:open(Name ++ ".rel", [write]),
- ?line {_, Version} = init:script_id(),
- ?line io:format(Fd,
- "{release, {\"Test release 3\", \"~s\"}, \n"
- " {erts, \"4.4\"}, \n"
- " [{kernel, \"~s\"}, {stdlib, \"~s\"}]}.\n",
- [Version, KernelVer, StdlibVer]),
- ?line file:close(Fd),
+ Name = filename:join(".", ScriptName),
+ Apps = application_controller:which_applications(),
+ {value,{_,_,KernelVer}} = lists:keysearch(kernel,1,Apps),
+ {value,{_,_,StdlibVer}} = lists:keysearch(stdlib,1,Apps),
+ {ok,Fd} = file:open(Name ++ ".rel", [write]),
+ {_, Version} = init:script_id(),
+ io:format(Fd,
+ "{release, {\"Test release 3\", \"~s\"}, \n"
+ " {erts, \"4.4\"}, \n"
+ " [{kernel, \"~s\"}, {stdlib, \"~s\"}]}.\n",
+ [Version, KernelVer, StdlibVer]),
+ file:close(Fd),
{KernelVer, StdlibVer}.
%% Not used?
config_dc(Fd, Ncp1, Ncp2, Ncp3) ->
M = from($@, atom_to_list(node())),
io:format(Fd, "[{kernel, [{sync_nodes_optional, ['~s@~s','~s@~s','~s@~s']},"
- "{sync_nodes_timeout, 1000},"
- "{global_groups, [{gg1, ['~s@~s', '~s@~s']},"
- " {gg2, ['~s@~s']}]}"
- " ]}].~n",
+ "{sync_nodes_timeout, 1000},"
+ "{global_groups, [{gg1, ['~s@~s', '~s@~s']},"
+ " {gg2, ['~s@~s']}]}"
+ " ]}].~n",
[Ncp1, M, Ncp2, M, Ncp3, M, Ncp1, M, Ncp2, M, Ncp3, M]).
config_dc1(Fd, Ncp1, Ncp2, Ncp3, NcpA, NcpB, NcpC, NcpD, NcpE) ->
M = from($@, atom_to_list(node())),
io:format(Fd, "[{kernel, [{sync_nodes_optional, ['~s@~s','~s@~s','~s@~s','~s@~s','~s@~s','~s@~s','~s@~s','~s@~s']},"
- "{sync_nodes_timeout, 1000},"
- "{global_groups, [{gg1, ['~s@~s', '~s@~s']},"
- " {gg2, ['~s@~s']},"
- " {gg4, normal, ['~s@~s','~s@~s','~s@~s']},"
- " {gg5, hidden, ['~s@~s','~s@~s']}]}]}].~n",
+ "{sync_nodes_timeout, 1000},"
+ "{global_groups, [{gg1, ['~s@~s', '~s@~s']},"
+ " {gg2, ['~s@~s']},"
+ " {gg4, normal, ['~s@~s','~s@~s','~s@~s']},"
+ " {gg5, hidden, ['~s@~s','~s@~s']}]}]}].~n",
[Ncp1, M, Ncp2, M, Ncp3, M,
NcpA, M, NcpB, M, NcpC, M, NcpD, M, NcpE, M,
Ncp1, M, Ncp2, M,
@@ -3401,8 +3339,8 @@ config_dc1(Fd, Ncp1, Ncp2, Ncp3, NcpA, NcpB, NcpC, NcpD, NcpE) ->
config_dc2(Fd, NewGG, Ncp1, Ncp2, Ncp3) ->
M = from($@, atom_to_list(node())),
io:format(Fd, "[{kernel, [{sync_nodes_optional, ['~s@~s','~s@~s','~s@~s']},"
- "{sync_nodes_timeout, 1000},"
- "~p]}].~n",
+ "{sync_nodes_timeout, 1000},"
+ "~p]}].~n",
[Ncp1, M, Ncp2, M, Ncp3, M, NewGG]).
@@ -3414,33 +3352,33 @@ from(_H, []) -> [].
other(A, [A, _B]) -> A;
other(_, [_A, B]) -> B.
-
+
%% this one runs at cp2
part1(Config, Main, Cp1, Cp3) ->
case catch begin
make_partition(Config, [Main, Cp1], [node(), Cp3]),
- ?line {_Pid, yes} = start_proc(test2),
- ?line {_Pid2, yes} = start_proc(test4)
+ {_Pid, yes} = start_proc(test2),
+ {_Pid2, yes} = start_proc(test4)
end of
{_, yes} -> ok; % w("ok", []);
{'EXIT', _R} ->
ok
- % w("global_SUITE line:~w: ~p", [?LINE, _R])
+ %% w("global_SUITE line:~w: ~p", [?LINE, _R])
end.
%% Runs at Cp2
part1_5(Config, Main, Cp1, Cp3) ->
case catch begin
make_partition(Config, [Main, Cp1], [node(), Cp3]),
- ?line {_Pid1, yes} = start_proc_basic(name12),
- ?line {_Pid2, yes} =
+ {_Pid1, yes} = start_proc_basic(name12),
+ {_Pid2, yes} =
rpc:call(Cp3, ?MODULE, start_proc_basic, [name03])
end of
{_, yes} -> ok; % w("ok", []);
{'EXIT', _R} ->
ok
- % w("global_SUITE line:~w: ~p", [?LINE, _R])
+ %% w("global_SUITE line:~w: ~p", [?LINE, _R])
end.
w(X,Y) ->
@@ -3451,7 +3389,7 @@ w(X,Y) ->
%% this one runs on one node in Part2
%% The partition is ready when is_ready_partition(Config) returns (true).
make_partition(Config, Part1, Part2) ->
- Dir = ?config(priv_dir, Config),
+ Dir = proplists:get_value(priv_dir, Config),
Ns = [begin
Name = lists:concat([atom_to_list(N),"_",msec(),".part"]),
File = filename:join([Dir, Name]),
@@ -3503,7 +3441,7 @@ is_ready_partition(Config) ->
true.
make_partition_file(Config) ->
- Dir = ?config(priv_dir, Config),
+ Dir = proplists:get_value(priv_dir, Config),
filename:join([Dir, atom_to_list(make_partition_done)]).
%% this one runs at cp3
@@ -3514,37 +3452,36 @@ part2(Config, Parent, Main, Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6) ->
part3(Config, Parent, Main, Cp0, Cp1, Cp2, Cp3, Cp4, Cp5, Cp6) ->
make_partition(Config, [Main, Cp0, Cp1, Cp2], [Cp3, Cp4, Cp5, Cp6]),
start_procs(Parent, Cp4, Cp5, Cp6, Config),
- % Make Cp6 alone
- ?line rpc_cast(Cp5, ?MODULE, crash, [12000]),
- ?line rpc_cast(Cp6, ?MODULE, alone, [Cp0, Cp3]).
+ %% Make Cp6 alone
+ rpc_cast(Cp5, ?MODULE, crash, [12000]),
+ rpc_cast(Cp6, ?MODULE, alone, [Cp0, Cp3]).
start_procs(Parent, N1, N2, N3, Config) ->
S1 = lists:sort([N1, N2, N3]),
- ?line
?UNTIL(begin
NN = lists:sort(nodes()),
S1 =:= NN
end),
- ?line Pid3 = start_proc3(test1),
- ?line Pid4 = rpc:call(N1, ?MODULE, start_proc3, [test2]),
- ?line assert_pid(Pid4),
- ?line Pid5 = rpc:call(N2, ?MODULE, start_proc3, [test3]),
- ?line assert_pid(Pid5),
- ?line Pid6 = rpc:call(N3, ?MODULE, start_proc3, [test4]),
- ?line assert_pid(Pid6),
- ?line yes = global:register_name(test1, Pid3),
- ?line yes = global:register_name(test2, Pid4, {global, notify_all_name}),
- ?line yes = global:register_name(test3, Pid5, {global, random_notify_name}),
+ Pid3 = start_proc3(test1),
+ Pid4 = rpc:call(N1, ?MODULE, start_proc3, [test2]),
+ assert_pid(Pid4),
+ Pid5 = rpc:call(N2, ?MODULE, start_proc3, [test3]),
+ assert_pid(Pid5),
+ Pid6 = rpc:call(N3, ?MODULE, start_proc3, [test4]),
+ assert_pid(Pid6),
+ yes = global:register_name(test1, Pid3),
+ yes = global:register_name(test2, Pid4, fun global:notify_all_name/3),
+ yes = global:register_name(test3, Pid5, fun global:random_notify_name/3),
Resolve = fun(Name, Pid1, Pid2) ->
Parent ! {resolve_called, Name, node()},
{Min, Max} = minmax(Pid1, Pid2),
exit(Min, kill),
Max
end,
- ?line yes = global:register_name(test4, Pid6, Resolve).
+ yes = global:register_name(test4, Pid6, Resolve).
+
-
collect_resolves() -> cr(0).
cr(Res) ->
receive
@@ -3574,7 +3511,7 @@ start_proc() ->
receive
Pid -> Pid
end.
-
+
start_proc(Name) ->
Pid = spawn(?MODULE, p_init, [self(), Name]),
@@ -3609,7 +3546,7 @@ start_proc_basic(Name) ->
end.
init_proc_basic(Parent, Name) ->
- X = global:register_name(Name, self(), {?MODULE, fix_basic_name}),
+ X = global:register_name(Name, self(), fun ?MODULE:fix_basic_name/3),
Parent ! {self(),X},
loop().
@@ -3618,7 +3555,7 @@ single_node(Time, Node, Config) ->
lists:foreach(fun(N) -> _ = erlang:disconnect_node(N) end, nodes()),
?UNTIL(get_known(node()) =:= [node()]),
spawn(?MODULE, init_2, []),
- test_server:sleep(Time - msec()),
+ sleep(Time - msec()),
net_adm:ping(Node).
init_2() ->
@@ -3630,12 +3567,12 @@ loop_2() ->
receive
die -> ok
end.
-
+
msec() ->
msec(now()).
msec(T) ->
- element(1,T)*1000000000 + element(2,T)*1000 + element(3,T) div 1000.
+ element(1,T)*1000000000 + element(2,T)*1000 + element(3,T) div 1000.
assert_pid(Pid) ->
if
@@ -3680,13 +3617,15 @@ sreq(Pid, Msg) ->
alone(N1, N2) ->
lists:foreach(fun(Node) -> true = erlang:disconnect_node(Node) end,
nodes()),
- test_server:sleep(12000),
+ ct:sleep(12000),
net_adm:ping(N1),
net_adm:ping(N2),
yes = global:register_name(test5, self()).
crash(Time) ->
- test_server:sleep(Time),
+ %% ct:sleep/1 will not work because it calls a server process
+ %% that does not run on other nodes.
+ timer:sleep(Time),
erlang:halt().
loop() ->
@@ -3748,23 +3687,23 @@ pr_diff(Str, T0, T1) ->
{_, {H,M,S}} = calendar:time_difference(T0, T1),
((H*60+M)*60)+S
end,
- test_server:format(1,"~13s: ~w (diff: ~w)",[Str, T1, Diff]),
+ ct:pal(?HI_VERBOSITY,"~13s: ~w (diff: ~w)",[Str, T1, Diff]),
if
Diff > 100 ->
- test_server:format(1,"~s: ** LARGE DIFF ~w~n", [Str, Diff]);
+	    io:format("~s: ** LARGE DIFF ~w~n", [Str, Diff]);
true ->
ok
end.
-endif.
now_diff({A1,B1,C1},{A2,B2,C2}) ->
- C1-C2 + 1000000*((B1-B2) + 1000000*(A1-A2)).
+ C1-C2 + 1000000*((B1-B2) + 1000000*(A1-A2)).
start_node_boot(Name, Config, Boot) ->
Pa = filename:dirname(code:which(?MODULE)),
Res = test_server:start_node(Name, peer, [{args, " -pa " ++ Pa ++
- " -config " ++ Config ++
- " -boot " ++ atom_to_list(Boot)}]),
+ " -config " ++ Config ++
+ " -boot " ++ atom_to_list(Boot)}]),
record_started_node(Res).
%% Increase the timeout for when an upcoming connection is torn down
@@ -3790,13 +3729,13 @@ start_node(Name0, How, Args, Config) ->
Pa = filename:dirname(code:which(?MODULE)),
R = test_server:start_node(Name, How, [{args,
Args ++ " " ++
- "-kernel net_setuptime 100 "
-% "-noshell "
+ "-kernel net_setuptime 100 "
+ %% "-noshell "
"-pa " ++ Pa},
{linked, false}
-]),
+ ]),
%% {linked,false} only seems to work for slave nodes.
-% test_server:sleep(1000),
+ %% ct:sleep(1000),
record_started_node(R).
start_node_rel(Name0, Rel, Config) ->
@@ -3807,14 +3746,14 @@ start_node_rel(Name0, Rel, Config) ->
Rel when is_atom(Rel) ->
{[{release, atom_to_list(Rel)}], ""};
RelList ->
- {RelList, ""}
- end,
+ {RelList, ""}
+ end,
Env = [],
Pa = filename:dirname(code:which(?MODULE)),
Res = test_server:start_node(Name, peer,
[{args,
Compat ++
- " -kernel net_setuptime 100 "
+ " -kernel net_setuptime 100 "
" -pa " ++ Pa},
{erl, Release}] ++ Env),
record_started_node(Res).
@@ -3844,57 +3783,45 @@ stop_nodes(Nodes) ->
lists:foreach(fun(Node) -> stop_node(Node) end, Nodes).
stop_node(Node) ->
- ?line ?t:stop_node(Node).
+ test_server:stop_node(Node).
stop() ->
lists:foreach(fun(Node) ->
- ?t:stop_node(Node)
+ test_server:stop_node(Node)
end, nodes()).
-dbg_logs(Name) -> dbg_logs(Name, ?NODES).
-
-dbg_logs(Name, Nodes) ->
- lists:foreach(fun(N) ->
- F = lists:concat([Name, ".log.", N, ".txt"]),
- ?line ok = sys:log_to_file({global_name_server, N}, F)
- end, Nodes).
-
-
-global_lost_nodes(suite) ->
- [];
-global_lost_nodes(doc) ->
- ["Tests that locally loaded nodes do not loose contact with other nodes."];
+%% Tests that locally loaded nodes do not lose contact with other nodes.
global_lost_nodes(Config) when is_list(Config) ->
Timeout = 60,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
-
- ?line {ok, Node1} = start_node(node1, Config),
- ?line {ok, Node2} = start_node(node2, Config),
+ init_condition(Config),
+
+ {ok, Node1} = start_node(node1, Config),
+ {ok, Node2} = start_node(node2, Config),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
- ?line io:format("Nodes: ~p", [nodes()]),
- ?line io:format("Nodes at node1: ~p",
- [rpc:call(Node1, erlang, nodes, [])]),
- ?line io:format("Nodes at node2: ~p",
- [rpc:call(Node2, erlang, nodes, [])]),
+ io:format("Nodes: ~p", [nodes()]),
+ io:format("Nodes at node1: ~p",
+ [rpc:call(Node1, erlang, nodes, [])]),
+ io:format("Nodes at node2: ~p",
+ [rpc:call(Node2, erlang, nodes, [])]),
- ?line rpc_cast(Node1, ?MODULE, global_load, [node_1,Node2,node_2]),
- ?line rpc_cast(Node2, ?MODULE, global_load, [node_2,Node1,node_1]),
+ rpc_cast(Node1, ?MODULE, global_load, [node_1,Node2,node_2]),
+ rpc_cast(Node2, ?MODULE, global_load, [node_2,Node1,node_1]),
lost_nodes_waiter(Node1, Node2),
write_high_level_trace(Config),
- ?line stop_node(Node1),
- ?line stop_node(Node2),
- ?line init_condition(Config),
+ stop_node(Node1),
+ stop_node(Node2),
+ init_condition(Config),
ok.
global_load(MyName, OtherNode, OtherName) ->
- ?line yes = global:register_name(MyName, self()),
+ yes = global:register_name(MyName, self()),
io:format("Registered ~p",[MyName]),
global_load1(OtherNode, OtherName, 0).
@@ -3902,32 +3829,32 @@ global_load1(_OtherNode, _OtherName, 2) ->
io:format("*** ~p giving up. No use.", [node()]),
init:stop();
global_load1(OtherNode, OtherName, Fails) ->
- test_server:sleep(1000),
- ?line case catch global:whereis_name(OtherName) of
- Pid when is_pid(Pid) ->
- io:format("~p says: ~p is still there.",
- [node(),OtherName]),
- global_load1(OtherNode, OtherName, Fails);
- Other ->
- io:format("~p says: ~p is lost (~p) Pinging.",
- [ node(), OtherName, Other]),
- case net_adm:ping(OtherNode) of
- pong ->
- io:format("Re-established contact to ~p",
- [OtherName]);
- pang ->
- io:format("PANIC! Other node is DEAD.", []),
- init:stop()
- end,
- global_load1(OtherNode, OtherName, Fails+1)
- end.
+ ct:sleep(1000),
+ case catch global:whereis_name(OtherName) of
+ Pid when is_pid(Pid) ->
+ io:format("~p says: ~p is still there.",
+ [node(),OtherName]),
+ global_load1(OtherNode, OtherName, Fails);
+ Other ->
+ io:format("~p says: ~p is lost (~p) Pinging.",
+ [ node(), OtherName, Other]),
+ case net_adm:ping(OtherNode) of
+ pong ->
+ io:format("Re-established contact to ~p",
+ [OtherName]);
+ pang ->
+ io:format("PANIC! Other node is DEAD.", []),
+ init:stop()
+ end,
+ global_load1(OtherNode, OtherName, Fails+1)
+ end.
lost_nodes_waiter(N1, N2) ->
- ?line net_kernel:monitor_nodes(true),
+ net_kernel:monitor_nodes(true),
receive
{nodedown, Node} when Node =:= N1 ; Node =:= N2 ->
io:format("~p went down!",[Node]),
- ?line ?t:fail("Node went down.")
+ ct:fail("Node went down.")
after 10000 ->
ok
end,
@@ -3935,36 +3862,33 @@ lost_nodes_waiter(N1, N2) ->
-mass_death(suite) ->
- [];
-mass_death(doc) ->
- ["Tests the simultaneous death of many processes with registered names"];
+%% Tests the simultaneous death of many processes with registered names.
mass_death(Config) when is_list(Config) ->
Timeout = 90,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line OrigNames = global:registered_names(),
+ init_condition(Config),
+ OrigNames = global:registered_names(),
%% Start nodes
- ?line Cps = [cp1,cp2,cp3,cp4,cp5],
- ?line Nodes = [begin {ok, Node} = start_node(Cp, Config), Node end ||
- Cp <- Cps],
- ?line io:format("Nodes: ~p~n", [Nodes]),
- ?line Ns = lists:seq(1, 40),
+ Cps = [cp1,cp2,cp3,cp4,cp5],
+ Nodes = [begin {ok, Node} = start_node(Cp, Config), Node end ||
+ Cp <- Cps],
+ io:format("Nodes: ~p~n", [Nodes]),
+ Ns = lists:seq(1, 40),
%% Start processes with globally registered names on the nodes
- ?line {Pids,[]} = rpc:multicall(Nodes, ?MODULE, mass_spawn, [Ns]),
- ?line io:format("Pids: ~p~n", [Pids]),
+ {Pids,[]} = rpc:multicall(Nodes, ?MODULE, mass_spawn, [Ns]),
+ io:format("Pids: ~p~n", [Pids]),
%% Wait...
- ?line test_server:sleep(10000),
+ ct:sleep(10000),
%% Check the globally registered names
- ?line NewNames = global:registered_names(),
- ?line io:format("NewNames: ~p~n", [NewNames]),
- ?line Ndiff = lists:sort(NewNames--OrigNames),
- ?line io:format("Ndiff: ~p~n", [Ndiff]),
- ?line Ndiff = lists:sort(mass_names(Nodes, Ns)),
+ NewNames = global:registered_names(),
+ io:format("NewNames: ~p~n", [NewNames]),
+ Ndiff = lists:sort(NewNames--OrigNames),
+ io:format("Ndiff: ~p~n", [Ndiff]),
+ Ndiff = lists:sort(mass_names(Nodes, Ns)),
%%
%% Kill the root pids
- ?line lists:foreach(fun (Pid) -> Pid ! drop_dead end, Pids),
+ lists:foreach(fun (Pid) -> Pid ! drop_dead end, Pids),
%% Start probing and wait for all registered names to disappear
{YYYY,MM,DD} = date(),
{H,M,S} = time(),
@@ -3973,22 +3897,21 @@ mass_death(Config) when is_list(Config) ->
wait_mass_death(Nodes, OrigNames, erlang:now(), Config).
wait_mass_death(Nodes, OrigNames, Then, Config) ->
- ?line Names = global:registered_names(),
- ?line
- case Names--OrigNames of
- [] ->
- ?line T = now_diff(erlang:now(), Then) div 1000,
- ?line lists:foreach(
- fun (Node) ->
- stop_node(Node)
- end, Nodes),
- ?line init_condition(Config),
- {comment,lists:flatten(io_lib:format("~.3f s~n", [T/1000.0]))};
- Ndiff ->
- ?line io:format("Ndiff: ~p~n", [Ndiff]),
- ?line test_server:sleep(1000),
- ?line wait_mass_death(Nodes, OrigNames, Then, Config)
- end.
+ Names = global:registered_names(),
+ case Names--OrigNames of
+ [] ->
+ T = now_diff(erlang:now(), Then) div 1000,
+ lists:foreach(
+ fun (Node) ->
+ stop_node(Node)
+ end, Nodes),
+ init_condition(Config),
+ {comment,lists:flatten(io_lib:format("~.3f s~n", [T/1000.0]))};
+ Ndiff ->
+ io:format("Ndiff: ~p~n", [Ndiff]),
+ ct:sleep(1000),
+ wait_mass_death(Nodes, OrigNames, Then, Config)
+ end.
mass_spawn([]) ->
ok;
@@ -4018,7 +3941,7 @@ mass_name(Node, N) ->
start_nodes(L, How, Config) ->
start_nodes2(L, How, 0, Config),
Nodes = collect_nodes(0, length(L)),
- ?line ?UNTIL([] =:= Nodes -- nodes()),
+ ?UNTIL([] =:= Nodes -- nodes()),
put(?nodes_tag, Nodes),
%% Pinging doesn't help, we have to wait too, for nodes() to become
%% correct on the other node.
@@ -4042,7 +3965,7 @@ verify_nodes(Nodes, Config) ->
verify_nodes([], _N, _Config) ->
[];
verify_nodes([Node | Rest], N, Config) ->
- ?line ?UNTIL(
+ ?UNTIL(
case rpc:call(Node, erlang, nodes, []) of
Nodes when is_list(Nodes) ->
case N =:= lists:sort([Node | Nodes]) of
@@ -4074,7 +3997,7 @@ start_nodes2([Name | Rest], How, N, Config) ->
Self ! {N, R},
%% sleeping is necessary, or with peer nodes, they will
%% go down again, despite {linked, false}.
- test_server:sleep(100000)
+ ct:sleep(100000)
end),
start_nodes2(Rest, How, N+1, Config).
@@ -4086,13 +4009,6 @@ collect_nodes(N, Max) ->
[Node | collect_nodes(N+1, Max)]
end.
-only_element(_E, []) ->
- true;
-only_element(E, [E|R]) ->
- only_element(E, R);
-only_element(_E, _) ->
- false.
-
exit_p(Pid) ->
Ref = erlang:monitor(process, Pid),
Pid ! die,
@@ -4115,6 +4031,11 @@ wait_for_exit_fast(Pid) ->
ok
end.
+sleep(Time) when Time > 0 ->
+ ct:sleep(Time);
+sleep(_Time) ->
+ ok.
+
check_everywhere(Nodes, Name, Config) ->
?UNTIL(begin
case rpc:multicall(Nodes, global, whereis_name, [Name]) of
@@ -4155,14 +4076,12 @@ remove_gg_pub_type([{GG, _, Nodes}|Rest]) ->
%% Better do this in a slave node.
%% (The transition from links to monitors does not affect this case.)
-garbage_messages(suite) ->
- [];
garbage_messages(Config) when is_list(Config) ->
Timeout = 25,
ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
- ?line init_condition(Config),
- ?line [Slave] = start_nodes([garbage_messages], slave, Config),
+ init_condition(Config),
+ [Slave] = start_nodes([garbage_messages], slave, Config),
Fun = fun() ->
{links,L} = process_info(whereis(global_name_server), links),
lists:foreach(fun(Pid) -> Pid ! {garbage,to,you} end, L),
@@ -4170,15 +4089,15 @@ garbage_messages(Config) when is_list(Config) ->
_Any -> ok
end
end,
- ?line Pid = spawn_link(Slave, erlang, apply, [Fun,[]]),
- ?t:sleep(2000),
- ?line Global = rpc:call(Slave, erlang, whereis, [global_name_server]),
- ?line {registered_name,global_name_server} =
+ Pid = spawn_link(Slave, erlang, apply, [Fun,[]]),
+ ct:sleep(2000),
+ Global = rpc:call(Slave, erlang, whereis, [global_name_server]),
+ {registered_name,global_name_server} =
rpc:call(Slave, erlang, process_info, [Global,registered_name]),
- ?line true = unlink(Pid),
+ true = unlink(Pid),
write_high_level_trace(Config),
- ?line stop_node(Slave),
- ?line init_condition(Config),
+ stop_node(Slave),
+ init_condition(Config),
ok.
wait_for_ready_net(Config) ->
@@ -4186,13 +4105,13 @@ wait_for_ready_net(Config) ->
wait_for_ready_net(Nodes0, Config) ->
Nodes = lists:sort(Nodes0),
- ?t:format("wait_for_ready_net ~p~n", [Nodes]),
+ io:format("wait_for_ready_net ~p~n", [Nodes]),
?UNTIL(begin
lists:all(fun(N) -> Nodes =:= get_known(N) end, Nodes) and
- lists:all(fun(N) ->
- LNs = rpc:call(N, erlang, nodes, []),
- Nodes =:= lists:sort([N | LNs])
- end, Nodes)
+ lists:all(fun(N) ->
+ LNs = rpc:call(N, erlang, nodes, []),
+ Nodes =:= lists:sort([N | LNs])
+ end, Nodes)
end).
get_known(Node) ->
@@ -4207,7 +4126,7 @@ quite_a_few_nodes(Max) ->
N = try
ulimit("ulimit -u")
catch _:_ ->
- ulimit("ulimit -p") % can fail...
+ ulimit("ulimit -p") % can fail...
end,
lists:min([(N - 40) div 3, Max]).
@@ -4241,10 +4160,10 @@ rpc_cast(Node, Module, Function, Args, File) ->
%% The emulator now ensures that the node has been removed from
%% nodes().
-rpc_disconnect_node(Node, DisconnectedNode, _Config) ->
- True = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]),
- False = lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, [])),
- {true, false} = {True, False}.
+rpc_disconnect_node(Node, DisconnectedNode, Config) ->
+ true = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]),
+ ?UNTIL
+ (not lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, []))).
%%%
%%% Utility
@@ -4264,15 +4183,15 @@ start_tracer() ->
Pid = spawn(fun() -> tracer([]) end),
case catch register(my_tracer, Pid) of
{'EXIT', _} ->
- ?t:fail(re_register_my_tracer);
+ ct:fail(re_register_my_tracer);
_ ->
ok
end.
tracer(L) ->
receive
- % {save, Term} ->
- % tracer([{now(),Term} | L]);
+ %% {save, Term} ->
+ %% tracer([{now(),Term} | L]);
{get, From} ->
From ! {trace, lists:reverse(L)},
tracer([]);
@@ -4305,7 +4224,7 @@ collect_tracers(Nodes) ->
trace_message(M) ->
case catch my_tracer ! M of
{'EXIT', _} ->
- ?t:fail(my_tracer_not_registered);
+ ct:fail(my_tracer_not_registered);
_ ->
ok
end.
diff --git a/lib/kernel/test/global_SUITE_data/global_trace.erl b/lib/kernel/test/global_SUITE_data/global_trace.erl
index 69d95f610d..b4af4ed76e 100644
--- a/lib/kernel/test/global_SUITE_data/global_trace.erl
+++ b/lib/kernel/test/global_SUITE_data/global_trace.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/global_group_SUITE.erl b/lib/kernel/test/global_group_SUITE.erl
index 0a994c3bf0..594ee6b537 100644
--- a/lib/kernel/test/global_group_SUITE.erl
+++ b/lib/kernel/test/global_group_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -28,15 +28,17 @@
-export([init_per_testcase/2, end_per_testcase/2]).
-%-compile(export_all).
+%%-compile(export_all).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-define(NODES, [node()|nodes()]).
-define(UNTIL(Seq), loop_until_true(fun() -> Seq end)).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,5}}].
all() ->
[start_gg_proc, no_gg_proc, no_gg_proc_sync, compatible,
@@ -46,10 +48,10 @@ groups() ->
[].
init_per_group(_GroupName, Config) ->
- Config.
+ Config.
end_per_group(_GroupName, Config) ->
- Config.
+ Config.
init_per_suite(Config) ->
@@ -77,15 +79,13 @@ end_per_suite(_Config) ->
ok.
-define(TESTCASE, testcase_name).
--define(testcase, ?config(?TESTCASE, Config)).
+-define(testcase, proplists:get_value(?TESTCASE, Config)).
-init_per_testcase(Case, Config) when is_atom(Case), is_list(Config) ->
- Dog=?t:timetrap(?t:minutes(5)),
- [{?TESTCASE, Case}, {watchdog, Dog}|Config].
+init_per_testcase(Case, Config) ->
+    [{?TESTCASE, Case} | Config].
-end_per_testcase(_Func, Config) ->
- Dog=?config(watchdog, Config),
- ?t:timetrap_cancel(Dog).
+end_per_testcase(_Func, _Config) ->
+ ok.
%%-----------------------------------------------------------------
%% Test suites for global groups.
@@ -94,198 +94,191 @@ end_per_testcase(_Func, Config) ->
%%-----------------------------------------------------------------
-start_gg_proc(suite) -> [];
-start_gg_proc(doc) -> ["Check that the global_group processes are started automatically. "];
+%% Check that the global_group processes are started automatically.
start_gg_proc(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(120)),
-
- ?line Dir = ?config(priv_dir, Config),
- ?line File = filename:join(Dir, "global_group.config"),
- ?line {ok, Fd}=file:open(File, [write]),
+ Dir = proplists:get_value(priv_dir, Config),
+ File = filename:join(Dir, "global_group.config"),
+ {ok, Fd}=file:open(File, [write]),
[Ncp1,Ncp2,Ncp3] = node_names([cp1, cp2, cp3], Config),
- ?line config(Fd, Ncp1, Ncp2, Ncp3, "cpx", "cpy", "cpz", "cpq"),
+ config(Fd, Ncp1, Ncp2, Ncp3, "cpx", "cpy", "cpz", "cpq"),
- ?line Cp1nn = node_at(Ncp1),
- ?line Cp2nn = node_at(Ncp2),
- ?line Cp3nn = node_at(Ncp3),
+ Cp1nn = node_at(Ncp1),
+ Cp2nn = node_at(Ncp2),
+ Cp3nn = node_at(Ncp3),
- ?line {ok, Cp1} = start_node(Ncp1, Config),
- ?line {ok, Cp2} = start_node(Ncp2, Config),
- ?line {ok, Cp3} = start_node(Ncp3, Config),
+ {ok, Cp1} = start_node(Ncp1, Config),
+ {ok, Cp2} = start_node(Ncp2, Config),
+ {ok, Cp3} = start_node(Ncp3, Config),
- ?line [] = rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}]),
- ?line [] = rpc:call(Cp2, global_group, registered_names, [{node, Cp2nn}]),
- ?line [] = rpc:call(Cp3, global_group, registered_names, [{node, Cp3nn}]),
+ [] = rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}]),
+ [] = rpc:call(Cp2, global_group, registered_names, [{node, Cp2nn}]),
+ [] = rpc:call(Cp3, global_group, registered_names, [{node, Cp3nn}]),
- % stop the nodes, and make sure names are released.
+ %% stop the nodes, and make sure names are released.
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
- ?line ?UNTIL(undefined =:= global:whereis_name(test)),
- ?line test_server:timetrap_cancel(Dog),
+ ?UNTIL(undefined =:= global:whereis_name(test)),
ok.
-
-no_gg_proc(suite) -> [];
-no_gg_proc(doc) -> ["Start a system without global groups. Nodes are not "
- "synced at start (sync_nodes_optional is not defined)"];
+
+%% Start a system without global groups. Nodes are not
+%% synced at start (sync_nodes_optional is not defined).
no_gg_proc(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(200)),
-
- ?line Dir = ?config(priv_dir, Config),
- ?line File = filename:join(Dir, "no_global_group.config"),
- ?line {ok, Fd} = file:open(File, [write]),
- ?line config_no(Fd),
-
- ?line NN = node_name(atom_to_list(node())),
- ?line Cp1nn = list_to_atom("cp1@" ++ NN),
- ?line Cp2nn = list_to_atom("cp2@" ++ NN),
- ?line Cp3nn = list_to_atom("cp3@" ++ NN),
- ?line Cpxnn = list_to_atom("cpx@" ++ NN),
- ?line Cpynn = list_to_atom("cpy@" ++ NN),
- ?line Cpznn = list_to_atom("cpz@" ++ NN),
-
- ?line {ok, Cp1} = start_node_no(cp1, Config),
- ?line {ok, Cp2} = start_node_no(cp2, Config),
- ?line {ok, Cp3} = start_node_no(cp3, Config),
- ?line {ok, Cpx} = start_node_no(cpx, Config),
- ?line {ok, Cpy} = start_node_no(cpy, Config),
- ?line {ok, Cpz} = start_node_no(cpz, Config),
+ Dir = proplists:get_value(priv_dir, Config),
+ File = filename:join(Dir, "no_global_group.config"),
+ {ok, Fd} = file:open(File, [write]),
+ config_no(Fd),
+
+ NN = node_name(atom_to_list(node())),
+ Cp1nn = list_to_atom("cp1@" ++ NN),
+ Cp2nn = list_to_atom("cp2@" ++ NN),
+ Cp3nn = list_to_atom("cp3@" ++ NN),
+ Cpxnn = list_to_atom("cpx@" ++ NN),
+ Cpynn = list_to_atom("cpy@" ++ NN),
+ Cpznn = list_to_atom("cpz@" ++ NN),
+
+ {ok, Cp1} = start_node_no(cp1, Config),
+ {ok, Cp2} = start_node_no(cp2, Config),
+ {ok, Cp3} = start_node_no(cp3, Config),
+ {ok, Cpx} = start_node_no(cpx, Config),
+ {ok, Cpy} = start_node_no(cpy, Config),
+ {ok, Cpz} = start_node_no(cpz, Config),
%% let the nodes know of each other
- ?line pong = rpc:call(Cp1, net_adm, ping, [Cp2nn]),
- ?line pong = rpc:call(Cp2, net_adm, ping, [Cp3nn]),
- ?line pong = rpc:call(Cp3, net_adm, ping, [Cpxnn]),
- ?line pong = rpc:call(Cpx, net_adm, ping, [Cpynn]),
- ?line pong = rpc:call(Cpy, net_adm, ping, [Cpznn]),
+ pong = rpc:call(Cp1, net_adm, ping, [Cp2nn]),
+ pong = rpc:call(Cp2, net_adm, ping, [Cp3nn]),
+ pong = rpc:call(Cp3, net_adm, ping, [Cpxnn]),
+ pong = rpc:call(Cpx, net_adm, ping, [Cpynn]),
+ pong = rpc:call(Cpy, net_adm, ping, [Cpznn]),
- ?line wait_for_ready_net(),
+ wait_for_ready_net(),
- ?line [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}]),
- ?line [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cp2nn}]),
- ?line [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cp3nn}]),
- ?line [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cpxnn}]),
- ?line [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cpynn}]),
- ?line [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cpznn}]),
+ [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}]),
+ [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cp2nn}]),
+ [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cp3nn}]),
+ [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cpxnn}]),
+ [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cpynn}]),
+ [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cpznn}]),
- % start a proc and register it
- ?line {Pid2, yes} = rpc:call(Cp2, ?MODULE, start_proc, [test2]),
+ %% start a proc and register it
+ {Pid2, yes} = rpc:call(Cp2, ?MODULE, start_proc, [test2]),
- ?line RegNames = lists:sort([test2,test_server]),
+ RegNames = lists:sort([test2,test_server]),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp2, global_group, registered_names, [{node, Cp2nn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp3, global_group, registered_names, [{node, Cp3nn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp1, global_group, registered_names, [{node, Cpxnn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp2, global_group, registered_names, [{node, Cpynn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp3, global_group, registered_names, [{node, Cpznn}])),
- ?line undefined = rpc:call(Cp3, global_group, global_groups, []),
+ undefined = rpc:call(Cp3, global_group, global_groups, []),
+
+ Own_nodes_should = [node(), Cp1nn, Cp2nn, Cp3nn,
+ Cpxnn, Cpynn, Cpznn],
+ Own_nodes = rpc:call(Cp3, global_group, own_nodes, []),
+ [] = (Own_nodes -- Own_nodes_should),
+ [] = (Own_nodes_should -- Own_nodes),
+
+ Pid2 = rpc:call(Cp1, global_group, send, [test2, {ping, self()}]),
+ receive
+ {pong, Cp2} -> ok
+ after
+ 2000 -> ct:fail(timeout2)
+ end,
+ Pid2 = rpc:call(Cp2, global_group, send, [test2, {ping, self()}]),
+ receive
+ {pong, Cp2} -> ok
+ after
+ 2000 -> ct:fail(timeout3)
+ end,
+ Pid2 = rpc:call(Cpz, global_group, send, [test2, {ping, self()}]),
+ receive
+ {pong, Cp2} -> ok
+ after
+ 2000 -> ct:fail(timeout4)
+ end,
- ?line Own_nodes_should = [node(), Cp1nn, Cp2nn, Cp3nn,
- Cpxnn, Cpynn, Cpznn],
- ?line Own_nodes = rpc:call(Cp3, global_group, own_nodes, []),
- ?line [] = (Own_nodes -- Own_nodes_should),
- ?line [] = (Own_nodes_should -- Own_nodes),
-
- ?line Pid2 = rpc:call(Cp1, global_group, send, [test2, {ping, self()}]),
- ?line receive
- {pong, Cp2} -> ok
- after
- 2000 -> test_server:fail(timeout2)
- end,
- ?line Pid2 = rpc:call(Cp2, global_group, send, [test2, {ping, self()}]),
- ?line receive
- {pong, Cp2} -> ok
- after
- 2000 -> test_server:fail(timeout3)
- end,
- ?line Pid2 = rpc:call(Cpz, global_group, send, [test2, {ping, self()}]),
- ?line receive
- {pong, Cp2} -> ok
- after
- 2000 -> test_server:fail(timeout4)
- end,
-
-
- % start a proc and register it
- ?line {PidX, yes} = rpc:call(Cpx, ?MODULE, start_proc, [test]),
+
+ %% start a proc and register it
+ {PidX, yes} = rpc:call(Cpx, ?MODULE, start_proc, [test]),
%%------------------------------------
%% Test monitor nodes
%%------------------------------------
- ?line Pid2 = rpc:call(Cp1, global_group, send, [{node, Cp2nn}, test2, monitor]),
- ?line PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, monitor]),
+ Pid2 = rpc:call(Cp1, global_group, send, [{node, Cp2nn}, test2, monitor]),
+ PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, monitor]),
- % Kill node Cp1
- ?line Pid2 =
+ %% Kill node Cp1
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodedown, Cp1}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodedown, Cp1}]),
- ?line test_server:sleep(100),
- ?line stop_node(Cp1),
- ?line test_server:sleep(1000),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ ct:sleep(100),
+ stop_node(Cp1),
+ ct:sleep(1000),
+
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
- % Kill node Cpz
- ?line Pid2 =
+ %% Kill node Cpz
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodedown, Cpz}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodedown, Cpz}]),
- ?line test_server:sleep(100),
- ?line stop_node(Cpz),
- ?line test_server:sleep(1000),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ ct:sleep(100),
+ stop_node(Cpz),
+ ct:sleep(1000),
- % Restart node Cp1
- ?line Pid2 =
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+
+ %% Restart node Cp1
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodeup, Cp1}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodeup, Cp1}]),
- ?line {ok, Cp1} = start_node_no(cp1, Config),
- ?line pong = rpc:call(Cp2, net_adm, ping, [Cp1nn]),
- ?line pong = rpc:call(Cpx, net_adm, ping, [Cp1nn]),
- ?line wait_for_ready_net(),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ {ok, Cp1} = start_node_no(cp1, Config),
+ pong = rpc:call(Cp2, net_adm, ping, [Cp1nn]),
+ pong = rpc:call(Cpx, net_adm, ping, [Cp1nn]),
+ wait_for_ready_net(),
- % Restart node Cpz
- ?line Pid2 =
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+
+ %% Restart node Cpz
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodeup, Cpz}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodeup, Cpz}]),
- ?line {ok, Cpz} = start_node_no(cpz, Config),
- ?line pong = rpc:call(Cp2, net_adm, ping, [Cpznn]),
- ?line pong = rpc:call(Cpx, net_adm, ping, [Cpznn]),
- ?line wait_for_ready_net(),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ {ok, Cpz} = start_node_no(cpz, Config),
+ pong = rpc:call(Cp2, net_adm, ping, [Cpznn]),
+ pong = rpc:call(Cpx, net_adm, ping, [Cpznn]),
+ wait_for_ready_net(),
- % stop the nodes, and make sure names are released.
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+
+ %% stop the nodes, and make sure names are released.
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
@@ -293,174 +286,169 @@ no_gg_proc(Config) when is_list(Config) ->
stop_node(Cpy),
stop_node(Cpz),
- ?line ?UNTIL(undefined =:= global:whereis_name(test)),
- ?line test_server:timetrap_cancel(Dog),
+ ?UNTIL(undefined =:= global:whereis_name(test)),
ok.
-
-no_gg_proc_sync(suite) -> [];
-no_gg_proc_sync(doc) ->
- ["Start a system without global groups, but syncing the nodes by using "
- "sync_nodes_optional."];
-no_gg_proc_sync(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(200)),
- ?line Dir = ?config(priv_dir, Config),
- ?line File = filename:join(Dir, "no_global_group_sync.config"),
- ?line {ok, Fd} = file:open(File, [write]),
+%% Start a system without global groups, but syncing the nodes by using
+%% sync_nodes_optional.
+no_gg_proc_sync(Config) when is_list(Config) ->
+ Dir = proplists:get_value(priv_dir, Config),
+ File = filename:join(Dir, "no_global_group_sync.config"),
+ {ok, Fd} = file:open(File, [write]),
[Ncp1,Ncp2,Ncp3,Ncpx,Ncpy,Ncpz] =
node_names([cp1,cp2,cp3,cpx,cpy,cpz], Config),
- ?line config_sync(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz),
-
- ?line Cp1nn = node_at(Ncp1),
- ?line Cp2nn = node_at(Ncp2),
- ?line Cp3nn = node_at(Ncp3),
- ?line Cpxnn = node_at(Ncpx),
- ?line Cpynn = node_at(Ncpy),
- ?line Cpznn = node_at(Ncpz),
-
- ?line {ok, Cp1} = start_node_no2(Ncp1, Config),
- ?line {ok, Cp2} = start_node_no2(Ncp2, Config),
- ?line {ok, Cp3} = start_node_no2(Ncp3, Config),
- ?line {ok, Cpx} = start_node_no2(Ncpx, Config),
- ?line {ok, Cpy} = start_node_no2(Ncpy, Config),
- ?line {ok, Cpz} = start_node_no2(Ncpz, Config),
+ config_sync(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz),
+
+ Cp1nn = node_at(Ncp1),
+ Cp2nn = node_at(Ncp2),
+ Cp3nn = node_at(Ncp3),
+ Cpxnn = node_at(Ncpx),
+ Cpynn = node_at(Ncpy),
+ Cpznn = node_at(Ncpz),
+
+ {ok, Cp1} = start_node_no2(Ncp1, Config),
+ {ok, Cp2} = start_node_no2(Ncp2, Config),
+ {ok, Cp3} = start_node_no2(Ncp3, Config),
+ {ok, Cpx} = start_node_no2(Ncpx, Config),
+ {ok, Cpy} = start_node_no2(Ncpy, Config),
+ {ok, Cpz} = start_node_no2(Ncpz, Config),
%% let the nodes know of each other
- ?line pong = rpc:call(Cp1, net_adm, ping, [Cp2nn]),
- ?line pong = rpc:call(Cp2, net_adm, ping, [Cp3nn]),
- ?line pong = rpc:call(Cp3, net_adm, ping, [Cpxnn]),
- ?line pong = rpc:call(Cpx, net_adm, ping, [Cpynn]),
- ?line pong = rpc:call(Cpy, net_adm, ping, [Cpznn]),
+ pong = rpc:call(Cp1, net_adm, ping, [Cp2nn]),
+ pong = rpc:call(Cp2, net_adm, ping, [Cp3nn]),
+ pong = rpc:call(Cp3, net_adm, ping, [Cpxnn]),
+ pong = rpc:call(Cpx, net_adm, ping, [Cpynn]),
+ pong = rpc:call(Cpy, net_adm, ping, [Cpznn]),
- ?line wait_for_ready_net(),
+ wait_for_ready_net(),
- ?line [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}]),
- ?line [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cp2nn}]),
- ?line [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cp3nn}]),
- ?line [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cpxnn}]),
- ?line [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cpynn}]),
- ?line [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cpznn}]),
+ [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}]),
+ [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cp2nn}]),
+ [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cp3nn}]),
+ [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cpxnn}]),
+ [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cpynn}]),
+ [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cpznn}]),
- % start a proc and register it
- ?line {Pid2, yes} = rpc:call(Cp2, ?MODULE, start_proc, [test2]),
+ %% start a proc and register it
+ {Pid2, yes} = rpc:call(Cp2, ?MODULE, start_proc, [test2]),
- ?line RegNames = lists:sort([test2,test_server]),
+ RegNames = lists:sort([test2,test_server]),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp2, global_group, registered_names, [{node, Cp2nn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp3, global_group, registered_names, [{node, Cp3nn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp1, global_group, registered_names, [{node, Cpxnn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp2, global_group, registered_names, [{node, Cpynn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp3, global_group, registered_names, [{node, Cpznn}])),
- ?line undefined = rpc:call(Cp3, global_group, global_groups, []),
+ undefined = rpc:call(Cp3, global_group, global_groups, []),
- ?line Own_nodes_should = [node(), Cp1nn, Cp2nn, Cp3nn,
- Cpxnn, Cpynn, Cpznn],
- ?line Own_nodes = rpc:call(Cp3, global_group, own_nodes, []),
- ?line [] = (Own_nodes -- Own_nodes_should),
- ?line [] = (Own_nodes_should -- Own_nodes),
-
- ?line Pid2 = rpc:call(Cp1, global_group, send, [test2, {ping, self()}]),
- ?line receive
- {pong, Cp2} -> ok
- after
- 2000 -> test_server:fail(timeout2)
- end,
- ?line Pid2 = rpc:call(Cp2, global_group, send, [test2, {ping, self()}]),
- ?line receive
- {pong, Cp2} -> ok
- after
- 2000 -> test_server:fail(timeout3)
- end,
- ?line Pid2 = rpc:call(Cpz, global_group, send, [test2, {ping, self()}]),
- ?line receive
- {pong, Cp2} -> ok
- after
- 2000 -> test_server:fail(timeout4)
- end,
-
-
- % start a proc and register it
- ?line {PidX, yes} = rpc:call(Cpx, ?MODULE, start_proc, [test]),
+ Own_nodes_should = [node(), Cp1nn, Cp2nn, Cp3nn,
+ Cpxnn, Cpynn, Cpznn],
+ Own_nodes = rpc:call(Cp3, global_group, own_nodes, []),
+ [] = (Own_nodes -- Own_nodes_should),
+ [] = (Own_nodes_should -- Own_nodes),
+
+ Pid2 = rpc:call(Cp1, global_group, send, [test2, {ping, self()}]),
+ receive
+ {pong, Cp2} -> ok
+ after
+ 2000 -> ct:fail(timeout2)
+ end,
+ Pid2 = rpc:call(Cp2, global_group, send, [test2, {ping, self()}]),
+ receive
+ {pong, Cp2} -> ok
+ after
+ 2000 -> ct:fail(timeout3)
+ end,
+ Pid2 = rpc:call(Cpz, global_group, send, [test2, {ping, self()}]),
+ receive
+ {pong, Cp2} -> ok
+ after
+ 2000 -> ct:fail(timeout4)
+ end,
+
+
+ %% start a proc and register it
+ {PidX, yes} = rpc:call(Cpx, ?MODULE, start_proc, [test]),
%%------------------------------------
%% Test monitor nodes
%%------------------------------------
- ?line Pid2 = rpc:call(Cp1, global_group, send, [{node, Cp2nn}, test2, monitor]),
- ?line PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, monitor]),
+ Pid2 = rpc:call(Cp1, global_group, send, [{node, Cp2nn}, test2, monitor]),
+ PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, monitor]),
- % Kill node Cp1
- ?line Pid2 =
+ %% Kill node Cp1
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodedown, Cp1}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodedown, Cp1}]),
- ?line test_server:sleep(100),
- ?line stop_node(Cp1),
- ?line test_server:sleep(1000),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ ct:sleep(100),
+ stop_node(Cp1),
+ ct:sleep(1000),
- % Kill node Cpz
- ?line Pid2 =
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+
+ %% Kill node Cpz
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodedown, Cpz}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodedown, Cpz}]),
- ?line test_server:sleep(100),
- ?line stop_node(Cpz),
- ?line test_server:sleep(1000),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ ct:sleep(100),
+ stop_node(Cpz),
+ ct:sleep(1000),
+
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
- % Restart node Cp1
- ?line Pid2 =
+ %% Restart node Cp1
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodeup, Cp1}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodeup, Cp1}]),
- ?line {ok, Cp1} = start_node_no2(Ncp1, Config),
- ?line pong = rpc:call(Cp2, net_adm, ping, [Cp1nn]),
- ?line pong = rpc:call(Cpx, net_adm, ping, [Cp1nn]),
- ?line wait_for_ready_net(),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ {ok, Cp1} = start_node_no2(Ncp1, Config),
+ pong = rpc:call(Cp2, net_adm, ping, [Cp1nn]),
+ pong = rpc:call(Cpx, net_adm, ping, [Cp1nn]),
+ wait_for_ready_net(),
+
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
- % Restart node Cpz
- ?line Pid2 =
+ %% Restart node Cpz
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodeup, Cpz}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodeup, Cpz}]),
- ?line {ok, Cpz} = start_node_no2(Ncpz, Config),
- ?line pong = rpc:call(Cp2, net_adm, ping, [Cpznn]),
- ?line pong = rpc:call(Cpx, net_adm, ping, [Cpznn]),
- ?line wait_for_ready_net(),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ {ok, Cpz} = start_node_no2(Ncpz, Config),
+ pong = rpc:call(Cp2, net_adm, ping, [Cpznn]),
+ pong = rpc:call(Cpx, net_adm, ping, [Cpznn]),
+ wait_for_ready_net(),
+
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
- % stop the nodes, and make sure names are released.
+ %% stop the nodes, and make sure names are released.
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
@@ -468,173 +456,168 @@ no_gg_proc_sync(Config) when is_list(Config) ->
stop_node(Cpy),
stop_node(Cpz),
- ?line ?UNTIL(undefined =:= global:whereis_name(test)),
- ?line test_server:timetrap_cancel(Dog),
+ ?UNTIL(undefined =:= global:whereis_name(test)),
ok.
-
-compatible(suite) -> [];
-compatible(doc) ->
- ["Check that a system without global groups is compatible with the old R4 system."];
-compatible(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(200)),
- ?line Dir = ?config(priv_dir, Config),
- ?line File = filename:join(Dir, "global_group_comp.config"),
- ?line {ok, Fd} = file:open(File, [write]),
+%% Check that a system without global groups is compatible with the old R4 system.
+compatible(Config) when is_list(Config) ->
+ Dir = proplists:get_value(priv_dir, Config),
+ File = filename:join(Dir, "global_group_comp.config"),
+ {ok, Fd} = file:open(File, [write]),
[Ncp1,Ncp2,Ncp3,Ncpx,Ncpy,Ncpz] =
node_names([cp1,cp2,cp3,cpx,cpy,cpz], Config),
- ?line config_comp(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz),
-
- ?line Cp1nn = node_at(Ncp1),
- ?line Cp2nn = node_at(Ncp2),
- ?line Cp3nn = node_at(Ncp3),
- ?line Cpxnn = node_at(Ncpx),
- ?line Cpynn = node_at(Ncpy),
- ?line Cpznn = node_at(Ncpz),
-
- ?line {ok, Cp1} = start_node_comp(Ncp1, Config),
- ?line {ok, Cp2} = start_node_comp(Ncp2, Config),
- ?line {ok, Cp3} = start_node_comp(Ncp3, Config),
- ?line {ok, Cpx} = start_node_comp(Ncpx, Config),
- ?line {ok, Cpy} = start_node_comp(Ncpy, Config),
- ?line {ok, Cpz} = start_node_comp(Ncpz, Config),
+ config_comp(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz),
+
+ Cp1nn = node_at(Ncp1),
+ Cp2nn = node_at(Ncp2),
+ Cp3nn = node_at(Ncp3),
+ Cpxnn = node_at(Ncpx),
+ Cpynn = node_at(Ncpy),
+ Cpznn = node_at(Ncpz),
+
+ {ok, Cp1} = start_node_comp(Ncp1, Config),
+ {ok, Cp2} = start_node_comp(Ncp2, Config),
+ {ok, Cp3} = start_node_comp(Ncp3, Config),
+ {ok, Cpx} = start_node_comp(Ncpx, Config),
+ {ok, Cpy} = start_node_comp(Ncpy, Config),
+ {ok, Cpz} = start_node_comp(Ncpz, Config),
%% let the nodes know of each other
- ?line pong = rpc:call(Cp1, net_adm, ping, [Cp2nn]),
- ?line pong = rpc:call(Cp2, net_adm, ping, [Cp3nn]),
- ?line pong = rpc:call(Cp3, net_adm, ping, [Cpxnn]),
- ?line pong = rpc:call(Cpx, net_adm, ping, [Cpynn]),
- ?line pong = rpc:call(Cpy, net_adm, ping, [Cpznn]),
+ pong = rpc:call(Cp1, net_adm, ping, [Cp2nn]),
+ pong = rpc:call(Cp2, net_adm, ping, [Cp3nn]),
+ pong = rpc:call(Cp3, net_adm, ping, [Cpxnn]),
+ pong = rpc:call(Cpx, net_adm, ping, [Cpynn]),
+ pong = rpc:call(Cpy, net_adm, ping, [Cpznn]),
- ?line wait_for_ready_net(),
+ wait_for_ready_net(),
- ?line [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}]),
- ?line [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cp2nn}]),
- ?line [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cp3nn}]),
- ?line [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cpxnn}]),
- ?line [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cpynn}]),
- ?line [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cpznn}]),
+ [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}]),
+ [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cp2nn}]),
+ [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cp3nn}]),
+ [test_server] = rpc:call(Cp1, global_group, registered_names, [{node, Cpxnn}]),
+ [test_server] = rpc:call(Cp2, global_group, registered_names, [{node, Cpynn}]),
+ [test_server] = rpc:call(Cp3, global_group, registered_names, [{node, Cpznn}]),
- % start a proc and register it
- ?line {Pid2, yes} = rpc:call(Cp2, ?MODULE, start_proc, [test2]),
+ %% start a proc and register it
+ {Pid2, yes} = rpc:call(Cp2, ?MODULE, start_proc, [test2]),
- ?line RegNames = lists:sort([test2,test_server]),
+ RegNames = lists:sort([test2,test_server]),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp2, global_group, registered_names, [{node, Cp2nn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp3, global_group, registered_names, [{node, Cp3nn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp1, global_group, registered_names, [{node, Cpxnn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp2, global_group, registered_names, [{node, Cpynn}])),
- ?line RegNames =
+ RegNames =
lists:sort(
rpc:call(Cp3, global_group, registered_names, [{node, Cpznn}])),
- ?line undefined = rpc:call(Cp3, global_group, global_groups, []),
+ undefined = rpc:call(Cp3, global_group, global_groups, []),
- ?line Own_nodes_should = [node(), Cp1nn, Cp2nn, Cp3nn,
- Cpxnn, Cpynn, Cpznn],
- ?line Own_nodes = rpc:call(Cp3, global_group, own_nodes, []),
- ?line [] = (Own_nodes -- Own_nodes_should),
- ?line [] = (Own_nodes_should -- Own_nodes),
-
- ?line Pid2 = rpc:call(Cp1, global_group, send, [test2, {ping, self()}]),
- ?line receive
- {pong, Cp2} -> ok
- after
- 2000 -> test_server:fail(timeout2)
- end,
- ?line Pid2 = rpc:call(Cp2, global_group, send, [test2, {ping, self()}]),
- ?line receive
- {pong, Cp2} -> ok
- after
- 2000 -> test_server:fail(timeout3)
- end,
- ?line Pid2 = rpc:call(Cpz, global_group, send, [test2, {ping, self()}]),
- ?line receive
- {pong, Cp2} -> ok
- after
- 2000 -> test_server:fail(timeout4)
- end,
-
-
- % start a proc and register it
- ?line {PidX, yes} = rpc:call(Cpx, ?MODULE, start_proc, [test]),
+ Own_nodes_should = [node(), Cp1nn, Cp2nn, Cp3nn,
+ Cpxnn, Cpynn, Cpznn],
+ Own_nodes = rpc:call(Cp3, global_group, own_nodes, []),
+ [] = (Own_nodes -- Own_nodes_should),
+ [] = (Own_nodes_should -- Own_nodes),
+
+ Pid2 = rpc:call(Cp1, global_group, send, [test2, {ping, self()}]),
+ receive
+ {pong, Cp2} -> ok
+ after
+ 2000 -> ct:fail(timeout2)
+ end,
+ Pid2 = rpc:call(Cp2, global_group, send, [test2, {ping, self()}]),
+ receive
+ {pong, Cp2} -> ok
+ after
+ 2000 -> ct:fail(timeout3)
+ end,
+ Pid2 = rpc:call(Cpz, global_group, send, [test2, {ping, self()}]),
+ receive
+ {pong, Cp2} -> ok
+ after
+ 2000 -> ct:fail(timeout4)
+ end,
+
+
+ %% start a proc and register it
+ {PidX, yes} = rpc:call(Cpx, ?MODULE, start_proc, [test]),
%%------------------------------------
%% Test monitor nodes
%%------------------------------------
- ?line Pid2 = rpc:call(Cp1, global_group, send, [{node, Cp2nn}, test2, monitor]),
- ?line PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, monitor]),
+ Pid2 = rpc:call(Cp1, global_group, send, [{node, Cp2nn}, test2, monitor]),
+ PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, monitor]),
- % Kill node Cp1
- ?line Pid2 =
+ %% Kill node Cp1
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodedown, Cp1}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodedown, Cp1}]),
- ?line test_server:sleep(100),
- ?line stop_node(Cp1),
- ?line test_server:sleep(1000),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ ct:sleep(100),
+ stop_node(Cp1),
+ ct:sleep(1000),
- % Kill node Cpz
- ?line Pid2 =
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+
+ %% Kill node Cpz
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodedown, Cpz}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodedown, Cpz}]),
- ?line test_server:sleep(100),
- ?line stop_node(Cpz),
- ?line test_server:sleep(1000),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ ct:sleep(100),
+ stop_node(Cpz),
+ ct:sleep(1000),
- % Restart node Cp1
- ?line Pid2 =
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+
+ %% Restart node Cp1
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodeup, Cp1}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodeup, Cp1}]),
- ?line {ok, Cp1} = start_node_comp(Ncp1, Config),
- ?line pong = rpc:call(Cp2, net_adm, ping, [Cp1nn]),
- ?line pong = rpc:call(Cpx, net_adm, ping, [Cp1nn]),
- ?line wait_for_ready_net(),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ {ok, Cp1} = start_node_comp(Ncp1, Config),
+ pong = rpc:call(Cp2, net_adm, ping, [Cp1nn]),
+ pong = rpc:call(Cpx, net_adm, ping, [Cp1nn]),
+ wait_for_ready_net(),
- % Restart node Cpz
- ?line Pid2 =
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+
+ %% Restart node Cpz
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, {wait_nodeup, Cpz}]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, {wait_nodeup, Cpz}]),
- ?line {ok, Cpz} = start_node_comp(Ncpz, Config),
- ?line pong = rpc:call(Cp2, net_adm, ping, [Cpznn]),
- ?line pong = rpc:call(Cpx, net_adm, ping, [Cpznn]),
- ?line wait_for_ready_net(),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ {ok, Cpz} = start_node_comp(Ncpz, Config),
+ pong = rpc:call(Cp2, net_adm, ping, [Cpznn]),
+ pong = rpc:call(Cpx, net_adm, ping, [Cpznn]),
+ wait_for_ready_net(),
- % stop the nodes, and make sure names are released.
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+
+ %% stop the nodes, and make sure names are released.
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
@@ -642,147 +625,137 @@ compatible(Config) when is_list(Config) ->
stop_node(Cpy),
stop_node(Cpz),
- ?line ?UNTIL(undefined =:= global:whereis_name(test)),
- ?line test_server:timetrap_cancel(Dog),
+ ?UNTIL(undefined =:= global:whereis_name(test)),
ok.
-
-one_grp(suite) -> [];
-one_grp(doc) -> ["Test a system with only one global group. "];
-one_grp(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(120)),
- ?line Dir = ?config(priv_dir, Config),
- ?line File = filename:join(Dir, "global_group.config"),
- ?line {ok, Fd} = file:open(File, [write]),
+%% Test a system with only one global group.
+one_grp(Config) when is_list(Config) ->
+ Dir = proplists:get_value(priv_dir, Config),
+ File = filename:join(Dir, "global_group.config"),
+ {ok, Fd} = file:open(File, [write]),
[Ncp1,Ncp2,Ncp3] = node_names([cp1, cp2, cp3], Config),
- ?line config(Fd, Ncp1, Ncp2, Ncp3, "cpx", "cpy", "cpz", "cpq"),
+ config(Fd, Ncp1, Ncp2, Ncp3, "cpx", "cpy", "cpz", "cpq"),
- ?line {ok, Cp1} = start_node(Ncp1, Config),
- ?line {ok, Cp2} = start_node(Ncp2, Config),
- ?line {ok, Cp3} = start_node(Ncp3, Config),
+ {ok, Cp1} = start_node(Ncp1, Config),
+ {ok, Cp2} = start_node(Ncp2, Config),
+ {ok, Cp3} = start_node(Ncp3, Config),
- % sleep a while to make the global_group to sync...
- test_server:sleep(1000),
+    %% sleep a while to let the global_group sync...
+ ct:sleep(1000),
- % start a proc and register it
- ?line {Pid, yes} = rpc:call(Cp1, ?MODULE, start_proc, [test]),
+ %% start a proc and register it
+ {Pid, yes} = rpc:call(Cp1, ?MODULE, start_proc, [test]),
- % test that it is registered at all nodes
- ?line Pid = rpc:call(Cp1, global, whereis_name, [test]),
- ?line Pid = rpc:call(Cp2, global, whereis_name, [test]),
- ?line Pid = rpc:call(Cp3, global, whereis_name, [test]),
+ %% test that it is registered at all nodes
+ Pid = rpc:call(Cp1, global, whereis_name, [test]),
+ Pid = rpc:call(Cp2, global, whereis_name, [test]),
+ Pid = rpc:call(Cp3, global, whereis_name, [test]),
- % try to register the same name
- ?line no = rpc:call(Cp1, global, register_name, [test, self()]),
+ %% try to register the same name
+ no = rpc:call(Cp1, global, register_name, [test, self()]),
- % let process exit, check that it is unregistered automatically
+ %% let process exit, check that it is unregistered automatically
Pid ! die,
- ?line
- ?UNTIL(begin
+ ?UNTIL(begin
(undefined =:= rpc:call(Cp1, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp2, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp3, global, whereis_name, [test]))
end),
- % test re_register
- ?line {Pid2, yes} = rpc:call(Cp1, ?MODULE, start_proc, [test]),
- ?line Pid2 = rpc:call(Cp3, global, whereis_name, [test]),
+ %% test re_register
+ {Pid2, yes} = rpc:call(Cp1, ?MODULE, start_proc, [test]),
+ Pid2 = rpc:call(Cp3, global, whereis_name, [test]),
Pid3 = rpc:call(Cp3, ?MODULE, start_proc_rereg, [test]),
- ?line Pid3 = rpc:call(Cp3, global, whereis_name, [test]),
+ Pid3 = rpc:call(Cp3, global, whereis_name, [test]),
- % test sending
+ %% test sending
rpc:call(Cp1, global, send, [test, {ping, self()}]),
receive
{pong, Cp3} -> ok
after
- 2000 -> test_server:fail(timeout1)
+ 2000 -> ct:fail(timeout1)
end,
rpc:call(Cp3, global, send, [test, {ping, self()}]),
receive
{pong, Cp3} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line rpc:call(Cp3, global, unregister_name, [test]),
- ?line undefined = rpc:call(Cp1, global, whereis_name, [test]),
- ?line undefined = rpc:call(Cp2, global, whereis_name, [test]),
- ?line undefined = rpc:call(Cp3, global, whereis_name, [test]),
+ rpc:call(Cp3, global, unregister_name, [test]),
+ undefined = rpc:call(Cp1, global, whereis_name, [test]),
+ undefined = rpc:call(Cp2, global, whereis_name, [test]),
+ undefined = rpc:call(Cp3, global, whereis_name, [test]),
Pid3 ! die,
- ?line ?UNTIL(undefined =:= rpc:call(Cp3, global, whereis_name, [test])),
+ ?UNTIL(undefined =:= rpc:call(Cp3, global, whereis_name, [test])),
- % register a proc
- ?line {_, yes} = rpc:call(Cp3, ?MODULE, start_proc, [test]),
+ %% register a proc
+ {_, yes} = rpc:call(Cp3, ?MODULE, start_proc, [test]),
- % stop the nodes, and make sure names are released.
+ %% stop the nodes, and make sure names are released.
stop_node(Cp3),
- ?line ?UNTIL(undefined =:= rpc:call(Cp1, global, whereis_name, [test])),
+ ?UNTIL(undefined =:= rpc:call(Cp1, global, whereis_name, [test])),
Pid2 ! die,
stop_node(Cp1),
stop_node(Cp2),
- ?line test_server:timetrap_cancel(Dog),
ok.
-
-one_grp_x(suite) -> [];
-one_grp_x(doc) -> ["Check a system with only one global group. "
- "Start the nodes with different time intervals. "];
-one_grp_x(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(120)),
- ?line Dir = ?config(priv_dir, Config),
- ?line File = filename:join(Dir, "global_group.config"),
- ?line {ok, Fd} = file:open(File, [write]),
+%% Check a system with only one global group.
+%% Start the nodes with different time intervals.
+one_grp_x(Config) when is_list(Config) ->
+ Dir = proplists:get_value(priv_dir, Config),
+ File = filename:join(Dir, "global_group.config"),
+ {ok, Fd} = file:open(File, [write]),
[Ncp1,Ncp2,Ncp3] = node_names([cp1, cp2, cp3], Config),
- ?line config(Fd, Ncp1, Ncp2, Ncp3, "cpx", "cpy", "cpz", "cpq"),
+ config(Fd, Ncp1, Ncp2, Ncp3, "cpx", "cpy", "cpz", "cpq"),
- ?line {ok, Cp1} = start_node(Ncp1, Config),
- % sleep a while to make the global_group to sync...
- test_server:sleep(1000),
+ {ok, Cp1} = start_node(Ncp1, Config),
+    %% sleep a while to let the global_group sync...
+ ct:sleep(1000),
- % start a proc and register it
- ?line {Pid, yes} = rpc:call(Cp1, ?MODULE, start_proc, [test]),
+ %% start a proc and register it
+ {Pid, yes} = rpc:call(Cp1, ?MODULE, start_proc, [test]),
- ?line {ok, Cp2} = start_node(Ncp2, Config),
- % sleep a while to make the global_group to sync...
- test_server:sleep(1000),
+ {ok, Cp2} = start_node(Ncp2, Config),
+    %% sleep a while to let the global_group sync...
+ ct:sleep(1000),
- % test that it is registered at all nodes
- ?line Pid = rpc:call(Cp1, global, whereis_name, [test]),
- ?line Pid = rpc:call(Cp2, global, whereis_name, [test]),
+ %% test that it is registered at all nodes
+ Pid = rpc:call(Cp1, global, whereis_name, [test]),
+ Pid = rpc:call(Cp2, global, whereis_name, [test]),
- ?line {ok, Cp3} = start_node(Ncp3, Config),
- % sleep a while to make the global_group to sync...
- test_server:sleep(1000),
+ {ok, Cp3} = start_node(Ncp3, Config),
+    %% sleep a while to let the global_group sync...
+ ct:sleep(1000),
- ?line Pid = rpc:call(Cp3, global, whereis_name, [test]),
+ Pid = rpc:call(Cp3, global, whereis_name, [test]),
- % try to register the same name
- ?line no = rpc:call(Cp1, global, register_name, [test, self()]),
+ %% try to register the same name
+ no = rpc:call(Cp1, global, register_name, [test, self()]),
- % let process exit, check that it is unregistered automatically
+ %% let process exit, check that it is unregistered automatically
Pid ! die,
- ?line
- ?UNTIL(begin
+ ?UNTIL(begin
(undefined =:= rpc:call(Cp1, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp2, global, whereis_name, [test])) and
(undefined =:= rpc:call(Cp3, global, whereis_name, [test]))
end),
- % test re_register
- ?line {Pid2, yes} = rpc:call(Cp1, ?MODULE, start_proc, [test]),
- ?line Pid2 = rpc:call(Cp3, global, whereis_name, [test]),
+ %% test re_register
+ {Pid2, yes} = rpc:call(Cp1, ?MODULE, start_proc, [test]),
+ Pid2 = rpc:call(Cp3, global, whereis_name, [test]),
Pid2 ! die,
@@ -790,296 +763,291 @@ one_grp_x(Config) when is_list(Config) ->
stop_node(Cp2),
stop_node(Cp3),
- ?line test_server:timetrap_cancel(Dog),
ok.
-
-two_grp(suite) -> [];
-two_grp(doc) -> ["Test a two global group system. "];
-two_grp(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(200)),
- ?line Dir = ?config(priv_dir, Config),
- ?line File = filename:join(Dir, "global_group.config"),
- ?line {ok, Fd} = file:open(File, [write]),
+%% Test a two global group system.
+two_grp(Config) when is_list(Config) ->
+ Dir = proplists:get_value(priv_dir, Config),
+ File = filename:join(Dir, "global_group.config"),
+ {ok, Fd} = file:open(File, [write]),
[Ncp1,Ncp2,Ncp3,Ncpx,Ncpy,Ncpz,Ncpq] =
node_names([cp1,cp2,cp3,cpx,cpy,cpz,cpq], Config),
- ?line config(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz, Ncpq),
-
- ?line Cp1nn = node_at(Ncp1),
- ?line Cp2nn = node_at(Ncp2),
- ?line Cp3nn = node_at(Ncp3),
- ?line Cpxnn = node_at(Ncpx),
- ?line Cpynn = node_at(Ncpy),
- ?line Cpznn = node_at(Ncpz),
-
- ?line {ok, Cp1} = start_node(Ncp1, Config),
- ?line {ok, Cp2} = start_node(Ncp2, Config),
- ?line {ok, Cp3} = start_node(Ncp3, Config),
- ?line {ok, Cpx} = start_node(Ncpx, Config),
- ?line {ok, Cpy} = start_node(Ncpy, Config),
- ?line {ok, Cpz} = start_node(Ncpz, Config),
+ config(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz, Ncpq),
+
+ Cp1nn = node_at(Ncp1),
+ Cp2nn = node_at(Ncp2),
+ Cp3nn = node_at(Ncp3),
+ Cpxnn = node_at(Ncpx),
+ Cpynn = node_at(Ncpy),
+ Cpznn = node_at(Ncpz),
+
+ {ok, Cp1} = start_node(Ncp1, Config),
+ {ok, Cp2} = start_node(Ncp2, Config),
+ {ok, Cp3} = start_node(Ncp3, Config),
+ {ok, Cpx} = start_node(Ncpx, Config),
+ {ok, Cpy} = start_node(Ncpy, Config),
+ {ok, Cpz} = start_node(Ncpz, Config),
%% The groups (cpq not started):
%% [{nc1, [cp1,cp2,cp3]}, {nc2, [cpx,cpy,cpz]}, {nc3, [cpq]}]
- % sleep a while to make the global_groups to sync...
- test_server:sleep(1000),
-
- % check the global group names
- ?line {nc1, [nc2, nc3]} = rpc:call(Cp1, global_group, global_groups, []),
- ?line {nc1, [nc2, nc3]} = rpc:call(Cp2, global_group, global_groups, []),
- ?line {nc1, [nc2, nc3]} = rpc:call(Cp3, global_group, global_groups, []),
- ?line {nc2, [nc1, nc3]} = rpc:call(Cpx, global_group, global_groups, []),
- ?line {nc2, [nc1, nc3]} = rpc:call(Cpy, global_group, global_groups, []),
- ?line {nc2, [nc1, nc3]} = rpc:call(Cpz, global_group, global_groups, []),
-
- % check the global group nodes
- ?line [Cp1nn, Cp2nn, Cp3nn] = rpc:call(Cp1, global_group, own_nodes, []),
- ?line [Cp1nn, Cp2nn, Cp3nn] = rpc:call(Cp2, global_group, own_nodes, []),
- ?line [Cp1nn, Cp2nn, Cp3nn] = rpc:call(Cp3, global_group, own_nodes, []),
- ?line [Cpxnn, Cpynn, Cpznn] = rpc:call(Cpx, global_group, own_nodes, []),
- ?line [Cpxnn, Cpynn, Cpznn] = rpc:call(Cpy, global_group, own_nodes, []),
- ?line [Cpxnn, Cpynn, Cpznn] = rpc:call(Cpz, global_group, own_nodes, []),
-
-
- % start a proc and register it
- ?line {Pid1, yes} = rpc:call(Cp1, ?MODULE, start_proc, [test]),
-
- ?line Pid1 = rpc:call(Cp1, global_group, send, [test, {io, from_cp1}]),
- ?line Pid1 = rpc:call(Cpx, global_group, send, [test, {io, from_cpx}]),
- ?line Pid1 = rpc:call(Cp1, global_group, send, [{group,nc1}, test,
- {io, from_cp1}]),
- ?line [test] =
+    %% sleep a while to let the global_groups sync...
+ ct:sleep(1000),
+
+ %% check the global group names
+ {nc1, [nc2, nc3]} = rpc:call(Cp1, global_group, global_groups, []),
+ {nc1, [nc2, nc3]} = rpc:call(Cp2, global_group, global_groups, []),
+ {nc1, [nc2, nc3]} = rpc:call(Cp3, global_group, global_groups, []),
+ {nc2, [nc1, nc3]} = rpc:call(Cpx, global_group, global_groups, []),
+ {nc2, [nc1, nc3]} = rpc:call(Cpy, global_group, global_groups, []),
+ {nc2, [nc1, nc3]} = rpc:call(Cpz, global_group, global_groups, []),
+
+ %% check the global group nodes
+ [Cp1nn, Cp2nn, Cp3nn] = rpc:call(Cp1, global_group, own_nodes, []),
+ [Cp1nn, Cp2nn, Cp3nn] = rpc:call(Cp2, global_group, own_nodes, []),
+ [Cp1nn, Cp2nn, Cp3nn] = rpc:call(Cp3, global_group, own_nodes, []),
+ [Cpxnn, Cpynn, Cpznn] = rpc:call(Cpx, global_group, own_nodes, []),
+ [Cpxnn, Cpynn, Cpznn] = rpc:call(Cpy, global_group, own_nodes, []),
+ [Cpxnn, Cpynn, Cpznn] = rpc:call(Cpz, global_group, own_nodes, []),
+
+
+ %% start a proc and register it
+ {Pid1, yes} = rpc:call(Cp1, ?MODULE, start_proc, [test]),
+
+ Pid1 = rpc:call(Cp1, global_group, send, [test, {io, from_cp1}]),
+ Pid1 = rpc:call(Cpx, global_group, send, [test, {io, from_cpx}]),
+ Pid1 = rpc:call(Cp1, global_group, send, [{group,nc1}, test,
+ {io, from_cp1}]),
+ [test] =
rpc:call(Cpx, global_group, registered_names, [{node, Cp1nn}]),
- ?line [test] =
+ [test] =
rpc:call(Cpx, global_group, registered_names, [{group, nc1}]),
- ?line [] = rpc:call(Cpx, global_group, registered_names, [{node, Cpxnn}]),
- ?line [] = rpc:call(Cpx, global_group, registered_names, [{group, nc2}]),
- ?line Pid1 = rpc:call(Cpx, global_group, send, [{group,nc1}, test,
- {io, from_cp1}]),
- ?line {badarg,{test,{io,from_cpx}}} =
+ [] = rpc:call(Cpx, global_group, registered_names, [{node, Cpxnn}]),
+ [] = rpc:call(Cpx, global_group, registered_names, [{group, nc2}]),
+ Pid1 = rpc:call(Cpx, global_group, send, [{group,nc1}, test,
+ {io, from_cp1}]),
+ {badarg,{test,{io,from_cpx}}} =
rpc:call(Cp1, global_group, send, [{group,nc2}, test, {io, from_cpx}]),
- ?line {badarg,{test,{io,from_cpx}}} =
+ {badarg,{test,{io,from_cpx}}} =
rpc:call(Cpx, global_group, send, [{group,nc2}, test, {io, from_cpx}]),
- % test that it is registered at all nodes
- ?line Pid1 = rpc:call(Cp1, global, whereis_name, [test]),
- ?line Pid1 = rpc:call(Cp2, global, whereis_name, [test]),
- ?line Pid1 = rpc:call(Cp3, global, whereis_name, [test]),
- ?line undefined = rpc:call(Cpx, global, whereis_name, [test]),
- ?line undefined = rpc:call(Cpy, global, whereis_name, [test]),
- ?line undefined = rpc:call(Cpz, global, whereis_name, [test]),
-
- % start a proc and register it
- ?line {PidX, yes} = rpc:call(Cpx, ?MODULE, start_proc, [test]),
-
- % test that it is registered at all nodes
- ?line Pid1 = rpc:call(Cp1, global, whereis_name, [test]),
- ?line Pid1 = rpc:call(Cp2, global, whereis_name, [test]),
- ?line Pid1 = rpc:call(Cp3, global, whereis_name, [test]),
- ?line PidX = rpc:call(Cpx, global, whereis_name, [test]),
- ?line PidX = rpc:call(Cpy, global, whereis_name, [test]),
- ?line PidX = rpc:call(Cpz, global, whereis_name, [test]),
-
+ %% test that it is registered at all nodes
+ Pid1 = rpc:call(Cp1, global, whereis_name, [test]),
+ Pid1 = rpc:call(Cp2, global, whereis_name, [test]),
+ Pid1 = rpc:call(Cp3, global, whereis_name, [test]),
+ undefined = rpc:call(Cpx, global, whereis_name, [test]),
+ undefined = rpc:call(Cpy, global, whereis_name, [test]),
+ undefined = rpc:call(Cpz, global, whereis_name, [test]),
+
+ %% start a proc and register it
+ {PidX, yes} = rpc:call(Cpx, ?MODULE, start_proc, [test]),
+
+ %% test that it is registered at all nodes
+ Pid1 = rpc:call(Cp1, global, whereis_name, [test]),
+ Pid1 = rpc:call(Cp2, global, whereis_name, [test]),
+ Pid1 = rpc:call(Cp3, global, whereis_name, [test]),
+ PidX = rpc:call(Cpx, global, whereis_name, [test]),
+ PidX = rpc:call(Cpy, global, whereis_name, [test]),
+ PidX = rpc:call(Cpz, global, whereis_name, [test]),
+
Pid1 ! die,
%% If we don't wait for global on other nodes to have updated its
%% tables, 'test' may still be defined at the point when it is
%% tested a few lines below.
- ?line
- ?UNTIL(begin
+ ?UNTIL(begin
Pid = rpc:call(Cp2, global, whereis_name, [test]),
undefined =:= Pid
end),
- % start a proc and register it
- ?line {Pid2, yes} = rpc:call(Cp2, ?MODULE, start_proc, [test2]),
+ %% start a proc and register it
+ {Pid2, yes} = rpc:call(Cp2, ?MODULE, start_proc, [test2]),
- % test that it is registered at all nodes
- ?line Pid2 = rpc:call(Cp1, global, whereis_name, [test2]),
- ?line Pid2 = rpc:call(Cp2, global, whereis_name, [test2]),
- ?line Pid2 = rpc:call(Cp3, global, whereis_name, [test2]),
- ?line PidX = rpc:call(Cpx, global, whereis_name, [test]),
- ?line PidX = rpc:call(Cpy, global, whereis_name, [test]),
- ?line PidX = rpc:call(Cpz, global, whereis_name, [test]),
-
- ?line undefined = rpc:call(Cp1, global, whereis_name, [test]),
- ?line undefined = rpc:call(Cp2, global, whereis_name, [test]),
- ?line undefined = rpc:call(Cp3, global, whereis_name, [test]),
- ?line undefined = rpc:call(Cpx, global, whereis_name, [test2]),
- ?line undefined = rpc:call(Cpy, global, whereis_name, [test2]),
- ?line undefined = rpc:call(Cpz, global, whereis_name, [test2]),
-
+ %% test that it is registered at all nodes
+ Pid2 = rpc:call(Cp1, global, whereis_name, [test2]),
+ Pid2 = rpc:call(Cp2, global, whereis_name, [test2]),
+ Pid2 = rpc:call(Cp3, global, whereis_name, [test2]),
+ PidX = rpc:call(Cpx, global, whereis_name, [test]),
+ PidX = rpc:call(Cpy, global, whereis_name, [test]),
+ PidX = rpc:call(Cpz, global, whereis_name, [test]),
+
+ undefined = rpc:call(Cp1, global, whereis_name, [test]),
+ undefined = rpc:call(Cp2, global, whereis_name, [test]),
+ undefined = rpc:call(Cp3, global, whereis_name, [test]),
+ undefined = rpc:call(Cpx, global, whereis_name, [test2]),
+ undefined = rpc:call(Cpy, global, whereis_name, [test2]),
+ undefined = rpc:call(Cpz, global, whereis_name, [test2]),
- ?line Pid2 = rpc:call(Cp1, global_group, send, [test2, {ping, self()}]),
+
+ Pid2 = rpc:call(Cp1, global_group, send, [test2, {ping, self()}]),
receive
{pong, Cp2} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line Pid2 = rpc:call(Cp2, global_group, send, [test2, {ping, self()}]),
+ Pid2 = rpc:call(Cp2, global_group, send, [test2, {ping, self()}]),
receive
{pong, Cp2} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line Pid2 = rpc:call(Cp3, global_group, send, [test2, {ping, self()}]),
+ Pid2 = rpc:call(Cp3, global_group, send, [test2, {ping, self()}]),
receive
{pong, Cp2} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line PidX = rpc:call(Cpx, global_group, send, [test, {ping, self()}]),
+ PidX = rpc:call(Cpx, global_group, send, [test, {ping, self()}]),
receive
{pong, Cpx} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line PidX = rpc:call(Cpy, global_group, send, [test, {ping, self()}]),
+ PidX = rpc:call(Cpy, global_group, send, [test, {ping, self()}]),
receive
{pong, Cpx} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line PidX = rpc:call(Cpz, global_group, send, [test, {ping, self()}]),
+ PidX = rpc:call(Cpz, global_group, send, [test, {ping, self()}]),
receive
{pong, Cpx} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line Pid2 = rpc:call(Cpx, global_group, send, [{node, Cp1nn}, test2,
- {ping, self()}]),
+ Pid2 = rpc:call(Cpx, global_group, send, [{node, Cp1nn}, test2,
+ {ping, self()}]),
receive
{pong, Cp2} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line Pid2 = rpc:call(Cpy, global_group, send, [{node, Cp2nn}, test2,
- {ping, self()}]),
+ Pid2 = rpc:call(Cpy, global_group, send, [{node, Cp2nn}, test2,
+ {ping, self()}]),
receive
{pong, Cp2} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line Pid2 = rpc:call(Cpz, global_group, send, [{node, Cp3nn}, test2,
- {ping, self()}]),
+ Pid2 = rpc:call(Cpz, global_group, send, [{node, Cp3nn}, test2,
+ {ping, self()}]),
receive
{pong, Cp2} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line PidX = rpc:call(Cpx, global_group, send, [{node, Cpznn}, test,
- {ping, self()}]),
+ PidX = rpc:call(Cpx, global_group, send, [{node, Cpznn}, test,
+ {ping, self()}]),
receive
{pong, Cpx} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line PidX = rpc:call(Cpy, global_group, send, [{node, Cpxnn}, test,
- {ping, self()}]),
+ PidX = rpc:call(Cpy, global_group, send, [{node, Cpxnn}, test,
+ {ping, self()}]),
receive
{pong, Cpx} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line PidX = rpc:call(Cpz, global_group, send, [{node, Cpynn}, test,
- {ping, self()}]),
+ PidX = rpc:call(Cpz, global_group, send, [{node, Cpynn}, test,
+ {ping, self()}]),
receive
{pong, Cpx} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line Pid2 = rpc:call(Cpx, global_group, send, [{group, nc1}, test2,
- {ping, self()}]),
+ Pid2 = rpc:call(Cpx, global_group, send, [{group, nc1}, test2,
+ {ping, self()}]),
receive
{pong, Cp2} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
- ?line PidX = rpc:call(Cpy, global_group, send, [{group, nc2}, test,
- {ping, self()}]),
+ PidX = rpc:call(Cpy, global_group, send, [{group, nc2}, test,
+ {ping, self()}]),
receive
{pong, Cpx} -> ok
after
- 2000 -> test_server:fail(timeout2)
+ 2000 -> ct:fail(timeout2)
end,
%%------------------------------------
%% Test monitor nodes
%%------------------------------------
- ?line Pid2 =
+ Pid2 =
rpc:call(Cp1, global_group, send, [{node, Cp2nn}, test2, monitor]),
- ?line PidX =
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, monitor]),
- % Kill node Cp1
- ?line Pid2 = rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2,
- {wait_nodedown, Cp1}]),
- ?line PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test,
- {wait_nodedown, Cp1}]),
- ?line test_server:sleep(100),
- ?line stop_node(Cp1),
- ?line test_server:sleep(1000),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop_nodedown),
- ?line PidX =
+ %% Kill node Cp1
+ Pid2 = rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2,
+ {wait_nodedown, Cp1}]),
+ PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test,
+ {wait_nodedown, Cp1}]),
+ ct:sleep(100),
+ stop_node(Cp1),
+ ct:sleep(1000),
+
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop_nodedown),
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, to_loop]),
- % Kill node Cpz
- ?line Pid2 = rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2,
- {wait_nodedown, Cpz}]),
- ?line PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test,
- {wait_nodedown, Cpz}]),
- ?line test_server:sleep(100),
- ?line stop_node(Cpz),
- ?line test_server:sleep(1000),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop_nodedown),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
- ?line Pid2 =
+ %% Kill node Cpz
+ Pid2 = rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2,
+ {wait_nodedown, Cpz}]),
+ PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test,
+ {wait_nodedown, Cpz}]),
+ ct:sleep(100),
+ stop_node(Cpz),
+ ct:sleep(1000),
+
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop_nodedown),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, to_loop]),
- % Restart node Cp1
- ?line [Cp1nn, Cp2nn, Cp3nn] = rpc:call(Cp2, global_group, own_nodes, []),
- ?line Pid2 = rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2,
- {wait_nodeup, Cp1}]),
- ?line PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test,
- {wait_nodeup, Cp1}]),
- ?line test_server:sleep(100),
- ?line {ok, Cp1} = start_node(Ncp1, Config),
- ?line test_server:sleep(5000),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop_nodeup),
- ?line PidX =
+ %% Restart node Cp1
+ [Cp1nn, Cp2nn, Cp3nn] = rpc:call(Cp2, global_group, own_nodes, []),
+ Pid2 = rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2,
+ {wait_nodeup, Cp1}]),
+ PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test,
+ {wait_nodeup, Cp1}]),
+ ct:sleep(100),
+ {ok, Cp1} = start_node(Ncp1, Config),
+ ct:sleep(5000),
+
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop_nodeup),
+ PidX =
rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test, to_loop]),
- % Restart node Cpz
- ?line Pid2 = rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2,
- {wait_nodeup, Cpz}]),
- ?line PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test,
- {wait_nodeup, Cpz}]),
- ?line test_server:sleep(100),
- ?line {ok, Cpz} = start_node(Ncpz, Config),
- ?line test_server:sleep(5000),
-
- ?line ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop_nodeup),
- ?line ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
- ?line Pid2 =
+ %% Restart node Cpz
+ Pid2 = rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2,
+ {wait_nodeup, Cpz}]),
+ PidX = rpc:call(Cpx, global_group, send, [{node, Cpxnn}, test,
+ {wait_nodeup, Cpz}]),
+ ct:sleep(100),
+ {ok, Cpz} = start_node(Ncpz, Config),
+ ct:sleep(5000),
+
+ ok = assert_loop(Cp2, Cp2nn, test2, Pid2, loop_nodeup),
+ ok = assert_loop(Cpx, Cpxnn, test, PidX, loop),
+ Pid2 =
rpc:call(Cp2, global_group, send, [{node, Cp2nn}, test2, to_loop]),
@@ -1093,72 +1061,68 @@ two_grp(Config) when is_list(Config) ->
stop_node(Cpy),
stop_node(Cpz),
- ?line test_server:timetrap_cancel(Dog),
ok.
-
-hidden_groups(suite) -> [];
-hidden_groups(doc) -> ["Test hidden global groups."];
-hidden_groups(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(200)),
- ?line Dir = ?config(priv_dir, Config),
- ?line File = filename:join(Dir, "global_group.config"),
- ?line {ok, Fd} = file:open(File, [write]),
+%% Test hidden global groups.
+hidden_groups(Config) when is_list(Config) ->
+ Dir = proplists:get_value(priv_dir, Config),
+ File = filename:join(Dir, "global_group.config"),
+ {ok, Fd} = file:open(File, [write]),
[Ncp1,Ncp2,Ncp3,Ncpx,Ncpy,Ncpz,Ncpq] =
node_names([cp1,cp2,cp3,cpx,cpy,cpz,cpq], Config),
- ?line config_hidden(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz, Ncpq),
-
- ?line {ok, Cp1} = start_node(Ncp1, Config),
- ?line {ok, Cp2} = start_node(Ncp2, Config),
- ?line {ok, Cp3} = start_node(Ncp3, Config),
- ?line {ok, Cpx} = start_node(Ncpx, Config),
- ?line {ok, Cpy} = start_node(Ncpy, Config),
- ?line {ok, Cpz} = start_node(Ncpz, Config),
- ?line {ok, Cpq} = start_node(Ncpq, Config),
-
- % sleep a while to make the global_groups to sync...
- test_server:sleep(1000),
-
- % check the global group names
- ?line {nc1, [nc2, nc3]} = rpc:call(Cp1, global_group, global_groups, []),
- ?line {nc1, [nc2, nc3]} = rpc:call(Cp2, global_group, global_groups, []),
- ?line {nc1, [nc2, nc3]} = rpc:call(Cp3, global_group, global_groups, []),
- ?line {nc2, [nc1, nc3]} = rpc:call(Cpx, global_group, global_groups, []),
- ?line {nc2, [nc1, nc3]} = rpc:call(Cpy, global_group, global_groups, []),
- ?line {nc2, [nc1, nc3]} = rpc:call(Cpz, global_group, global_groups, []),
-
- % check the global group nodes
- ?line [Cp1, Cp2, Cp3] = rpc:call(Cp1, global_group, own_nodes, []),
- ?line [Cp1, Cp2, Cp3] = rpc:call(Cp2, global_group, own_nodes, []),
- ?line [Cp1, Cp2, Cp3] = rpc:call(Cp3, global_group, own_nodes, []),
- ?line [Cpx, Cpy, Cpz] = rpc:call(Cpx, global_group, own_nodes, []),
- ?line [Cpx, Cpy, Cpz] = rpc:call(Cpy, global_group, own_nodes, []),
- ?line [Cpx, Cpy, Cpz] = rpc:call(Cpz, global_group, own_nodes, []),
- ?line [Cpq] = rpc:call(Cpq, global_group, own_nodes, []),
-
- % Make some inter group connections
- ?line pong = rpc:call(Cp1, net_adm, ping, [Cpx]),
- ?line pong = rpc:call(Cpy, net_adm, ping, [Cp2]),
- ?line pong = rpc:call(Cp3, net_adm, ping, [Cpx]),
- ?line pong = rpc:call(Cpz, net_adm, ping, [Cp3]),
- ?line pong = rpc:call(Cpq, net_adm, ping, [Cp1]),
- ?line pong = rpc:call(Cpz, net_adm, ping, [Cpq]),
-
- % Check that no inter group connections are visible
+ config_hidden(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz, Ncpq),
+
+ {ok, Cp1} = start_node(Ncp1, Config),
+ {ok, Cp2} = start_node(Ncp2, Config),
+ {ok, Cp3} = start_node(Ncp3, Config),
+ {ok, Cpx} = start_node(Ncpx, Config),
+ {ok, Cpy} = start_node(Ncpy, Config),
+ {ok, Cpz} = start_node(Ncpz, Config),
+ {ok, Cpq} = start_node(Ncpq, Config),
+
+    %% sleep a while to let the global_groups sync...
+ ct:sleep(1000),
+
+ %% check the global group names
+ {nc1, [nc2, nc3]} = rpc:call(Cp1, global_group, global_groups, []),
+ {nc1, [nc2, nc3]} = rpc:call(Cp2, global_group, global_groups, []),
+ {nc1, [nc2, nc3]} = rpc:call(Cp3, global_group, global_groups, []),
+ {nc2, [nc1, nc3]} = rpc:call(Cpx, global_group, global_groups, []),
+ {nc2, [nc1, nc3]} = rpc:call(Cpy, global_group, global_groups, []),
+ {nc2, [nc1, nc3]} = rpc:call(Cpz, global_group, global_groups, []),
+
+ %% check the global group nodes
+ [Cp1, Cp2, Cp3] = rpc:call(Cp1, global_group, own_nodes, []),
+ [Cp1, Cp2, Cp3] = rpc:call(Cp2, global_group, own_nodes, []),
+ [Cp1, Cp2, Cp3] = rpc:call(Cp3, global_group, own_nodes, []),
+ [Cpx, Cpy, Cpz] = rpc:call(Cpx, global_group, own_nodes, []),
+ [Cpx, Cpy, Cpz] = rpc:call(Cpy, global_group, own_nodes, []),
+ [Cpx, Cpy, Cpz] = rpc:call(Cpz, global_group, own_nodes, []),
+ [Cpq] = rpc:call(Cpq, global_group, own_nodes, []),
+
+ %% Make some inter group connections
+ pong = rpc:call(Cp1, net_adm, ping, [Cpx]),
+ pong = rpc:call(Cpy, net_adm, ping, [Cp2]),
+ pong = rpc:call(Cp3, net_adm, ping, [Cpx]),
+ pong = rpc:call(Cpz, net_adm, ping, [Cp3]),
+ pong = rpc:call(Cpq, net_adm, ping, [Cp1]),
+ pong = rpc:call(Cpz, net_adm, ping, [Cpq]),
+
+ %% Check that no inter group connections are visible
NC1Nodes = lists:sort([Cp1, Cp2, Cp3]),
NC2Nodes = lists:sort([Cpx, Cpy, Cpz]),
- ?line NC1Nodes = lists:sort([Cp1|rpc:call(Cp1, erlang, nodes, [])]),
- ?line NC1Nodes = lists:sort([Cp2|rpc:call(Cp2, erlang, nodes, [])]),
- ?line NC1Nodes = lists:sort([Cp3|rpc:call(Cp3, erlang, nodes, [])]),
- ?line NC2Nodes = lists:sort([Cpx|rpc:call(Cpx, erlang, nodes, [])]),
- ?line NC2Nodes = lists:sort([Cpy|rpc:call(Cpy, erlang, nodes, [])]),
- ?line NC2Nodes = lists:sort([Cpz|rpc:call(Cpz, erlang, nodes, [])]),
+ NC1Nodes = lists:sort([Cp1|rpc:call(Cp1, erlang, nodes, [])]),
+ NC1Nodes = lists:sort([Cp2|rpc:call(Cp2, erlang, nodes, [])]),
+ NC1Nodes = lists:sort([Cp3|rpc:call(Cp3, erlang, nodes, [])]),
+ NC2Nodes = lists:sort([Cpx|rpc:call(Cpx, erlang, nodes, [])]),
+ NC2Nodes = lists:sort([Cpy|rpc:call(Cpy, erlang, nodes, [])]),
+ NC2Nodes = lists:sort([Cpz|rpc:call(Cpz, erlang, nodes, [])]),
NC12Nodes = lists:append(NC1Nodes, NC2Nodes),
- ?line false = lists:any(fun(N) -> lists:member(N, NC12Nodes) end,
- rpc:call(Cpq, erlang, nodes, [])),
+ false = lists:any(fun(N) -> lists:member(N, NC12Nodes) end,
+ rpc:call(Cpq, erlang, nodes, [])),
stop_node(Cp1),
@@ -1169,63 +1133,68 @@ hidden_groups(Config) when is_list(Config) ->
stop_node(Cpz),
stop_node(Cpq),
- ?line test_server:timetrap_cancel(Dog),
ok.
-
-test_exit(suite) -> [];
-test_exit(doc) -> ["Checks when the search process exits. "];
-test_exit(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(120)),
- ?line NN = node_name(atom_to_list(node())),
- ?line Cp1nn = list_to_atom("cp1@" ++ NN),
+%% Checks when the search process exits.
+test_exit(Config) when is_list(Config) ->
+ NN = node_name(atom_to_list(node())),
+ Cp1nn = list_to_atom("cp1@" ++ NN),
- ?line {ok, Cp1} = start_node(cp1, Config),
- ?line {ok, Cp2} = start_node(cp2, Config),
- ?line {ok, Cp3} = start_node(cp3, Config),
+ {ok, Cp1} = start_node(cp1, Config),
+ {ok, Cp2} = start_node(cp2, Config),
+ {ok, Cp3} = start_node(cp3, Config),
- test_server:sleep(1000),
+ ct:sleep(1000),
- ?line {error, illegal_function_call} =
+ {error, illegal_function_call} =
rpc:call(Cp1, global_group, registered_names_test, [{node, Cp1nn}]),
- ?line {badarg,_} =
+ {badarg,_} =
rpc:call(Cp1, global_group, send, [king, "The message"]),
- ?line undefined = rpc:call(Cp1, global_group, whereis_name, [king]),
-
- % stop the nodes, and make sure names are released.
+ undefined = rpc:call(Cp1, global_group, whereis_name, [king]),
+
+    %% make sure the search process really exits after every global_group operation
+ ProcessCount0 = rpc:call(Cp1, erlang, system_info, [process_count]),
+ _ = rpc:call(Cp1, global_group, whereis_name, [{node, Cp1nn}, whatever_pid_name]),
+ ProcessCount1 = rpc:call(Cp1, erlang, system_info, [process_count]),
+ _ = rpc:call(Cp1, global_group, registered_names, [{node, Cp1nn}]),
+ ProcessCount2 = rpc:call(Cp1, erlang, system_info, [process_count]),
+ _ = rpc:call(Cp1, global_group, send, [{node, Cp1nn}, whatever_pid_name, msg]),
+ ProcessCount3 = rpc:call(Cp1, erlang, system_info, [process_count]),
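+    %% none of the calls above may leave a lingering search process behind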
+ ProcessCount0 = ProcessCount1 = ProcessCount2 = ProcessCount3,
+
+ %% stop the nodes, and make sure names are released.
stop_node(Cp1),
stop_node(Cp2),
stop_node(Cp3),
- % sleep to let the nodes die
- test_server:sleep(1000),
+ %% sleep to let the nodes die
+ ct:sleep(1000),
- ?line test_server:timetrap_cancel(Dog),
ok.
-
+
start_node(Name, Config) ->
Pa=filename:dirname(code:which(?MODULE)),
- Dir=?config(priv_dir, Config),
+ Dir=proplists:get_value(priv_dir, Config),
ConfFile = " -config " ++ filename:join(Dir, "global_group"),
test_server:start_node(Name, slave, [{args, "-pa " ++ Pa ++ ConfFile}]).
start_node_no(Name, Config) ->
Pa=filename:dirname(code:which(?MODULE)),
- Dir=?config(priv_dir, Config),
+ Dir=proplists:get_value(priv_dir, Config),
ConfFile = " -config " ++ filename:join(Dir, "no_global_group"),
test_server:start_node(Name, slave, [{args, "-pa " ++ Pa ++ ConfFile}]).
start_node_no2(Name, Config) ->
Pa=filename:dirname(code:which(?MODULE)),
- Dir=?config(priv_dir, Config),
+ Dir=proplists:get_value(priv_dir, Config),
ConfFile = " -config " ++ filename:join(Dir, "no_global_group_sync"),
test_server:start_node(Name, slave, [{args, "-pa " ++ Pa ++ ConfFile}]).
start_node_comp(Name, Config) ->
Pa=filename:dirname(code:which(?MODULE)),
- Dir=?config(priv_dir, Config),
+ Dir=proplists:get_value(priv_dir, Config),
ConfFile = " -config " ++ filename:join(Dir, "global_group_comp"),
test_server:start_node(Name, slave, [{args, "-pa " ++ Pa ++ ConfFile}]).
@@ -1242,17 +1211,17 @@ node_name(Name, Config) ->
lists:concat([Name,U,?testcase,U,Pid,U,U,L]).
stop_node(Node) ->
- ?t:stop_node(Node).
+ test_server:stop_node(Node).
wait_for_ready_net() ->
Nodes = lists:sort(?NODES),
?UNTIL(begin
lists:all(fun(N) -> Nodes =:= get_known(N) end, Nodes) and
- lists:all(fun(N) ->
- LNs = rpc:call(N, erlang, nodes, []),
- Nodes =:= lists:sort([N | LNs])
- end, Nodes)
+ lists:all(fun(N) ->
+ LNs = rpc:call(N, erlang, nodes, []),
+ Nodes =:= lists:sort([N | LNs])
+ end, Nodes)
end).
get_known(Node) ->
@@ -1262,11 +1231,11 @@ get_known(Node) ->
config_hidden(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz, Ncpq) ->
M = from($@, atom_to_list(node())),
io:format(Fd, "[{kernel, [{sync_nodes_optional, ['~s@~s','~s@~s','~s@~s', "
- " '~s@~s','~s@~s','~s@~s']},"
- "{sync_nodes_timeout, 1000},"
- "{global_groups, [{nc1, hidden, ['~s@~s','~s@~s','~s@~s']}, "
- "{nc2, hidden, ['~s@~s','~s@~s','~s@~s']}, "
- "{nc3, normal, ['~s@~s']}]} ] }]. ~n",
+ " '~s@~s','~s@~s','~s@~s']},"
+ "{sync_nodes_timeout, 1000},"
+ "{global_groups, [{nc1, hidden, ['~s@~s','~s@~s','~s@~s']}, "
+ "{nc2, hidden, ['~s@~s','~s@~s','~s@~s']}, "
+ "{nc3, normal, ['~s@~s']}]} ] }]. ~n",
[Ncp1, M, Ncp2, M, Ncp3, M,
Ncpx, M, Ncpy, M, Ncpz, M,
Ncp1, M, Ncp2, M, Ncp3, M,
@@ -1276,11 +1245,11 @@ config_hidden(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz, Ncpq) ->
config(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz, Ncpq) ->
M = from($@, atom_to_list(node())),
io:format(Fd, "[{kernel, [{sync_nodes_optional, ['~s@~s','~s@~s','~s@~s', "
- " '~s@~s','~s@~s','~s@~s']},"
- "{sync_nodes_timeout, 1000},"
- "{global_groups, [{nc1, ['~s@~s','~s@~s','~s@~s']}, "
- " {nc2, ['~s@~s','~s@~s','~s@~s']}, "
- "{nc3, ['~s@~s']}]} ] }]. ~n",
+ " '~s@~s','~s@~s','~s@~s']},"
+ "{sync_nodes_timeout, 1000},"
+ "{global_groups, [{nc1, ['~s@~s','~s@~s','~s@~s']}, "
+ " {nc2, ['~s@~s','~s@~s','~s@~s']}, "
+ "{nc3, ['~s@~s']}]} ] }]. ~n",
[Ncp1, M, Ncp2, M, Ncp3, M,
Ncpx, M, Ncpy, M, Ncpz, M,
Ncp1, M, Ncp2, M, Ncp3, M,
@@ -1293,9 +1262,9 @@ config_no(Fd) ->
config_sync(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz) ->
M = from($@, atom_to_list(node())),
io:format(Fd, "[{kernel, [{sync_nodes_optional, ['~s@~s','~s@~s','~s@~s', "
- " '~s@~s','~s@~s','~s@~s']},"
- "{sync_nodes_timeout, 1000},"
- "{global_groups, []} ] }] .~n",
+ " '~s@~s','~s@~s','~s@~s']},"
+ "{sync_nodes_timeout, 1000},"
+ "{global_groups, []} ] }] .~n",
[Ncp1, M, Ncp2, M, Ncp3, M,
Ncpx, M, Ncpy, M, Ncpz, M]).
@@ -1303,8 +1272,8 @@ config_sync(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz) ->
config_comp(Fd, Ncp1, Ncp2, Ncp3, Ncpx, Ncpy, Ncpz) ->
M = from($@, atom_to_list(node())),
io:format(Fd, "[{kernel, [{sync_nodes_optional, ['~s@~s','~s@~s','~s@~s', "
- " '~s@~s','~s@~s','~s@~s']},"
- "{sync_nodes_timeout, 1000} ] }] .~n",
+ " '~s@~s','~s@~s','~s@~s']},"
+ "{sync_nodes_timeout, 1000} ] }] .~n",
[Ncp1, M, Ncp2, M, Ncp3, M,
Ncpx, M, Ncpy, M, Ncpz, M]).
@@ -1325,7 +1294,7 @@ start_proc(Name) ->
receive
{Pid, Res} -> {Pid, Res}
end.
-
+
start_proc_rereg(Name) ->
Pid = spawn(?MODULE, init2, [self(), Name]),
receive
@@ -1437,9 +1406,9 @@ assert_loop(Cp, CpName, Name, NamePid, Loop) ->
Loop ->
ok;
Other1 ->
- test_server:fail(Other1)
+ ct:fail(Other1)
after 5000 ->
- test_server:fail(timeout)
+ ct:fail(timeout)
end.
loop_until_true(Fun) ->
diff --git a/lib/kernel/test/heart_SUITE.erl b/lib/kernel/test/heart_SUITE.erl
index 39cd29cea0..f5ca6d0e1d 100644
--- a/lib/kernel/test/heart_SUITE.erl
+++ b/lib/kernel/test/heart_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%%
-module(heart_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2, start/1, restart/1,
@@ -29,39 +29,43 @@
set_cmd/1, clear_cmd/1, get_cmd/1,
callback_api/1,
options_api/1,
- dont_drop/1, kill_pid/1]).
+ dont_drop/1, kill_pid/1, heart_no_kill/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
--export([start_heart_stress/1, mangle/1, suicide_by_heart/0]).
+-export([start_heart_stress/1, mangle/1, suicide_by_heart/0, non_suicide_by_heart/0]).
-define(DEFAULT_TIMEOUT_SECS, 120).
+-define(UNIQ_NODE_NAME,
+ list_to_atom(?MODULE_STRING ++ "__" ++
+ atom_to_list(?FUNCTION_NAME) ++ "_" ++
+ integer_to_list(erlang:unique_integer([positive])))).
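+%% A minimal usage sketch (the node name shown is only an example of what
+%% the macro might expand to at run time):
+%%
+%%     Name = ?UNIQ_NODE_NAME,              %% e.g. 'heart_SUITE__start_17'
+%%     {ok, Node} = start_check(slave, Name),
+%%
+%% so each test case gets a fresh slave node name that cannot collide with
+%% one left over from an earlier case.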
+
init_per_testcase(_Func, Config) ->
- Dog=test_server:timetrap(test_server:seconds(?DEFAULT_TIMEOUT_SECS)),
- [{watchdog, Dog}|Config].
+ Config.
-end_per_testcase(_Func, Config) ->
+end_per_testcase(_Func, _Config) ->
Nodes = nodes(),
lists:foreach(fun(X) ->
NNam = list_to_atom(hd(string:tokens(atom_to_list(X),"@"))),
case NNam of
heart_test ->
- ?t:format(1, "WARNING: Killed ~p~n", [X]),
+ ct:pal(?HI_VERBOSITY, "WARNING: Killed ~p~n", [X]),
rpc:cast(X, erlang, halt, []);
_ ->
ok
end
- end, Nodes),
- Dog=?config(watchdog, Config),
- test_server:timetrap_cancel(Dog).
+ end, Nodes).
%%-----------------------------------------------------------------
%% Test suite for heart.
%% Should be started in a CC view with:
%% erl -sname master -rsh ctrsh
%%-----------------------------------------------------------------
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,2}}].
all() -> [
start, restart, reboot,
@@ -70,7 +74,8 @@ all() -> [
set_cmd, clear_cmd, get_cmd,
callback_api,
options_api,
- kill_pid
+ kill_pid,
+ heart_no_kill
].
groups() ->
@@ -97,7 +102,7 @@ end_per_suite(Config) when is_list(Config) ->
start_check(Type, Name) ->
start_check(Type, Name, []).
start_check(Type, Name, Envs) ->
- Args = case ?t:os_type() of
+ Args = case test_server:os_type() of
{win32,_} ->
"+t50000 -heart " ++ env_encode([{"HEART_COMMAND", no_reboot}|Envs]);
_ ->
@@ -107,32 +112,30 @@ start_check(Type, Name, Envs) ->
loose ->
loose_node:start(Name, Args, ?DEFAULT_TIMEOUT_SECS);
_ ->
- ?t:start_node(Name, Type, [{args, Args}])
+ test_server:start_node(Name, Type, [{args, Args}])
end,
erlang:monitor_node(Node, true),
case rpc:call(Node, erlang, whereis, [heart]) of
Pid when is_pid(Pid) ->
ok;
_ ->
- test_server:fail(heart_not_started)
+ ct:fail(heart_not_started)
end,
{ok, Node}.
-start(doc) -> [];
-start(suite) -> {req, [{time, 10}]};
start(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
rpc:call(Node, init, reboot, []),
receive
{nodedown, Node} -> ok
- after 2000 -> test_server:fail(node_not_closed)
+ after 2000 -> ct:fail(node_not_closed)
end,
- test_server:sleep(5000),
+ timer:sleep(5000),
case net_adm:ping(Node) of
pang ->
ok;
_ ->
- test_server:fail(node_rebooted)
+ ct:fail(node_rebooted)
end,
test_server:stop_node(Node).
@@ -145,46 +148,36 @@ start(Config) when is_list(Config) ->
%% restart
%% Purpose:
%% Check that a node is up and running after a init:restart/0
-restart(doc) -> [];
-restart(suite) ->
- case ?t:os_type() of
- {Fam, _} when Fam == unix; Fam == win32 ->
- {req, [{time,10}]};
- _ ->
- {skip, "Only run on unix and win32"}
- end;
restart(Config) when is_list(Config) ->
- {ok, Node} = start_check(loose, heart_test),
+ {ok, Node} = start_check(loose, ?UNIQ_NODE_NAME),
rpc:call(Node, init, restart, []),
receive
{nodedown, Node} ->
ok
- after 2000 ->
- test_server:fail(node_not_closed)
+ after 5000 ->
+ ct:fail(node_not_closed)
end,
- test_server:sleep(5000),
+ timer:sleep(5000),
node_check_up_down(Node, 2000),
loose_node:stop(Node).
%% reboot
%% Purpose:
%% Check that a node is up and running after a init:reboot/0
-reboot(doc) -> [];
-reboot(suite) -> {req, [{time, 10}]};
reboot(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
rpc:call(Node, init, reboot, []),
receive
{nodedown, Node} ->
ok
after 2000 ->
- test_server:fail(node_not_closed)
+ ct:fail(node_not_closed)
end,
- test_server:sleep(5000),
+ timer:sleep(5000),
node_check_up_down(Node, 2000),
ok.
@@ -196,7 +189,6 @@ reboot(Config) when is_list(Config) ->
%% May currently dump core in beam debug build due to lock-order violation
%% This should be removed when a non-locking information retriever is implemented
%% for crash dumps
-node_start_immediately_after_crash(suite) -> {req, [{time, 10}]};
node_start_immediately_after_crash(Config) when is_list(Config) ->
Config2 = ignore_cores:setup(?MODULE, node_start_immediately_after_crash, Config, true),
try
@@ -207,10 +199,11 @@ node_start_immediately_after_crash(Config) when is_list(Config) ->
node_start_immediately_after_crash_test(Config) when is_list(Config) ->
- {ok, Node} = start_check(loose, heart_test_imm, [{"ERL_CRASH_DUMP_SECONDS", "0"}]),
+ {ok, Node} = start_check(loose, ?UNIQ_NODE_NAME,
+ [{"ERL_CRASH_DUMP_SECONDS", "0"}]),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
Mod = exhaust_atoms,
@@ -229,13 +222,13 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) ->
T0 = now(),
receive {nodedown, Node} ->
- test_server:format("Took ~.2f s. for node to go down~n", [timer:now_diff(now(), T0)/1000000]),
+ io:format("Took ~.2f s. for node to go down~n", [timer:now_diff(now(), T0)/1000000]),
ok
%% timeout is very liberal here. nodedown is received in about 1 s. on linux (palantir)
%% and in about 10 s. on solaris (carcharoth)
- after (15000*test_server:timetrap_scale_factor()) -> test_server:fail(node_not_closed)
+ after (15000*test_server:timetrap_scale_factor()) -> ct:fail(node_not_closed)
end,
- test_server:sleep(3000),
+ timer:sleep(3000),
node_check_up_down(Node, 2000),
loose_node:stop(Node).
@@ -248,7 +241,6 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) ->
%% May currently dump core in beam debug build due to lock-order violation
%% This should be removed when a non-locking information retriever is implemented
%% for crash dumps
-node_start_soon_after_crash(suite) -> {req, [{time, 10}]};
node_start_soon_after_crash(Config) when is_list(Config) ->
Config2 = ignore_cores:setup(?MODULE, node_start_soon_after_crash, Config, true),
try
@@ -258,10 +250,11 @@ node_start_soon_after_crash(Config) when is_list(Config) ->
end.
node_start_soon_after_crash_test(Config) when is_list(Config) ->
- {ok, Node} = start_check(loose, heart_test_soon, [{"ERL_CRASH_DUMP_SECONDS", "10"}]),
+ {ok, Node} = start_check(loose, ?UNIQ_NODE_NAME,
+ [{"ERL_CRASH_DUMP_SECONDS", "10"}]),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
Mod = exhaust_atoms,
@@ -278,9 +271,9 @@ node_start_soon_after_crash_test(Config) when is_list(Config) ->
rpc:cast(Node, Mod, do, []),
receive {nodedown, Node} -> ok
- after (15000*test_server:timetrap_scale_factor()) -> test_server:fail(node_not_closed)
+ after (15000*test_server:timetrap_scale_factor()) -> ct:fail(node_not_closed)
end,
- test_server:sleep(20000),
+ timer:sleep(20000),
node_check_up_down(Node, 15000),
loose_node:stop(Node).
@@ -293,16 +286,15 @@ node_check_up_down(Node, Tmo) ->
receive
{nodedown, Node} -> ok
after Tmo ->
- test_server:fail(node_not_closed2)
+ ct:fail(node_not_closed2)
end;
_ ->
- test_server:fail(node_not_rebooted)
+ ct:fail(node_not_rebooted)
end.
%% Only tests bad command, correct behaviour is tested in reboot/1.
-set_cmd(suite) -> [];
set_cmd(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
Cmd = wrong_atom,
{error, {bad_cmd, Cmd}} = rpc:call(Node, heart, set_cmd, [Cmd]),
Cmd1 = lists:duplicate(2047, $a),
@@ -314,25 +306,24 @@ set_cmd(Config) when is_list(Config) ->
stop_node(Node),
ok.
-clear_cmd(suite) -> {req,[{time,15}]};
clear_cmd(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
rpc:call(Node, init, reboot, []),
receive
{nodedown, Node} ->
ok
after 2000 ->
- test_server:fail(node_not_closed)
+ ct:fail(node_not_closed)
end,
- test_server:sleep(5000),
+ timer:sleep(5000),
case net_adm:ping(Node) of
pong ->
erlang:monitor_node(Node, true);
_ ->
- test_server:fail(node_not_rebooted)
+ ct:fail(node_not_rebooted)
end,
ok = rpc:call(Node, heart, set_cmd,
["erl -noshell -heart " ++ name(Node) ++ "&"]),
@@ -342,28 +333,34 @@ clear_cmd(Config) when is_list(Config) ->
{nodedown, Node} ->
ok
after 2000 ->
- test_server:fail(node_not_closed)
+ ct:fail(node_not_closed)
end,
- test_server:sleep(5000),
+ timer:sleep(5000),
case net_adm:ping(Node) of
pang ->
ok;
_ ->
- test_server:fail(node_rebooted)
+ ct:fail(node_rebooted)
end,
ok.
-get_cmd(suite) -> [];
get_cmd(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
- Cmd = "test",
- ok = rpc:call(Node, heart, set_cmd, [Cmd]),
- {ok, Cmd} = rpc:call(Node, heart, get_cmd, []),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
+
+ ShortCmd = "test",
+ ok = rpc:call(Node, heart, set_cmd, [ShortCmd]),
+ {ok, ShortCmd} = rpc:call(Node, heart, get_cmd, []),
+
+ %% This would hang prior to OTP-15024 being fixed.
+ LongCmd = [$a || _ <- lists:seq(1, 160)],
+ ok = rpc:call(Node, heart, set_cmd, [LongCmd]),
+ {ok, LongCmd} = rpc:call(Node, heart, get_cmd, []),
+
stop_node(Node),
ok.
callback_api(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
none = rpc:call(Node, heart, get_callback, []),
M0 = self(),
F0 = ok,
@@ -391,13 +388,13 @@ callback_api(Config) when is_list(Config) ->
{ok, {M2,F2}} = rpc:call(Node, heart, get_callback, []),
ok = rpc:call(Node, heart, set_callback, [M2,F3]),
receive {nodedown, Node} -> ok
- after 5000 -> test_server:fail(node_not_killed)
+ after 5000 -> ct:fail(node_not_killed)
end,
stop_node(Node),
ok.
options_api(Config) when is_list(Config) ->
- {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
none = rpc:call(Node, heart, get_options, []),
M0 = self(),
F0 = ok,
@@ -425,15 +422,13 @@ options_api(Config) when is_list(Config) ->
ok.
-dont_drop(suite) ->
%%% Removed as it may crash epmd/distribution in colourful
%%% ways. While we ARE finding out WHY, it would
%%% be nice for others to be able to run the kernel test suite
-%%% without "exploding machines", so thats why I removed it for now.
- [];
-dont_drop(doc) ->
- ["Tests that the heart command does not get dropped when ",
- "set just before halt on very high I/O load."];
+%%% without "exploding machines", so that's why I removed it for now.
+
+%% Tests that the heart command does not get dropped when
+%% set just before halt on very high I/O load.
dont_drop(Config) when is_list(Config) ->
%%% Have to do it some times to make it happen...
[ok,ok,ok,ok,ok,ok,ok,ok,ok,ok] = do_dont_drop(Config,10),
@@ -455,7 +450,7 @@ do_dont_drop(Config,N) ->
Env = [{"HEART_COMMAND", FirstCmd}],
Func = "start_heart_stress",
Arg = NN3 ++ "@" ++ Host ++ " " ++
- filename:join(?config(data_dir, Config), "simple_echo"),
+ filename:join(proplists:get_value(data_dir, Config), "simple_echo"),
start_node_run(Name,Env,Func,Arg),
case wait_for_any_of(list_to_atom(NN2 ++ "@" ++ Host),
list_to_atom(NN3 ++ "@" ++ Host)) of
@@ -488,16 +483,13 @@ wait_for_any_of(N1,N2,Times) ->
end.
-kill_pid(suite) ->
- [];
-kill_pid(doc) ->
- ["Tests that heart kills the old erlang node before executing ",
- "heart command."];
+%% Tests that heart kills the old erlang node before executing
+%% heart command.
kill_pid(Config) when is_list(Config) ->
ok = do_kill_pid(Config).
do_kill_pid(_Config) ->
- Name = heart_test,
+ Name = ?UNIQ_NODE_NAME,
Env = [{"HEART_COMMAND", "nickeNyfikenFarEttJobb"}],
{ok,Node} = start_node_run(Name,Env,suicide_by_heart,[]),
ok = wait_for_node(Node,15),
@@ -507,6 +499,30 @@ do_kill_pid(_Config) ->
false
end.
+
+%% Tests that heart doesn't kill the old erlang node when
+%% HEART_NO_KILL is set.
+heart_no_kill(Config) when is_list(Config) ->
+ ok = do_no_kill(Config).
+
+do_no_kill(_Config) ->
+ Name = heart_test,
+ {ok,Node} = start_node_run(Name,[],non_suicide_by_heart,[]),
+ io:format("Node is ~p~n", [Node]),
+ ok = wait_for_node(Node,15),
+ io:format("wait_for_node is ~p~n", [ok]),
+ erlang:monitor_node(Node, true),
+ receive {nodedown,Node} -> false
+ after 30000 ->
+ io:format("Node didn't die..\n"),
+ rpc:call(Node,init,stop,[]),
+ io:format("done init:stop..\n"),
+ ok
+ end.
+
wait_for_node(_,0) ->
false;
wait_for_node(Node,N) ->
@@ -625,6 +641,20 @@ suicide_by_heart() ->
sallad
end.
+non_suicide_by_heart() ->
+ P = open_port({spawn,"heart -ht 11 -pid "++os:getpid()},
+ [exit_status, {env, [{"HEART_NO_KILL", "TRUE"}]},
+ {packet,2}]),
+ receive X -> X end,
+ %% Just hang and wait for heart to timeout
+ receive
+ {P,{exit_status,_}} ->
+ ok
+ after
+ 20000 ->
+ exit(timeout)
+ end.
+
%% generate a module from binary
generate(Module, Attributes, FunStrings) ->
diff --git a/lib/kernel/test/ignore_cores.erl b/lib/kernel/test/ignore_cores.erl
index db61c4003b..fde65bf5c4 100644
--- a/lib/kernel/test/ignore_cores.erl
+++ b/lib/kernel/test/ignore_cores.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -28,7 +28,7 @@
-module(ignore_cores).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-export([init/1, fini/1, setup/3, setup/4, restore/1, dir/1]).
@@ -53,7 +53,7 @@ init(Config) ->
fini(Config) ->
#ignore_cores{org_cwd = OrgCWD,
org_path = OrgPath,
- org_pwd_env = OrgPWD} = ?config(ignore_cores, Config),
+ org_pwd_env = OrgPWD} = proplists:get_value(ignore_cores, Config),
ok = file:set_cwd(OrgCWD),
true = code:set_path(OrgPath),
case OrgPWD of
@@ -70,10 +70,10 @@ setup(Suite, Testcase, Config, SetCwd) when is_atom(Suite),
is_list(Config) ->
#ignore_cores{org_cwd = OrgCWD,
org_path = OrgPath,
- org_pwd_env = OrgPWD} = ?config(ignore_cores, Config),
+ org_pwd_env = OrgPWD} = proplists:get_value(ignore_cores, Config),
Path = lists:map(fun (".") -> OrgCWD; (Dir) -> Dir end, OrgPath),
true = code:set_path(Path),
- PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
IgnDir = filename:join([PrivDir,
atom_to_list(Suite)
++ "_"
@@ -94,7 +94,7 @@ setup(Suite, Testcase, Config, SetCwd) when is_atom(Suite),
end,
ok = file:write_file(filename:join([IgnDir, "ignore_core_files"]), <<>>),
%% cores are dumped in /cores on MacOS X
- CoresDir = case {?t:os_type(), filelib:is_dir("/cores")} of
+ CoresDir = case {test_server:os_type(), filelib:is_dir("/cores")} of
{{unix,darwin}, true} ->
filelib:fold_files("/cores",
"^core.*$",
@@ -119,7 +119,7 @@ restore(Config) ->
org_path = OrgPath,
org_pwd_env = OrgPWD,
ign_dir = IgnDir,
- cores_dir = CoresDir} = ?config(ignore_cores, Config),
+ cores_dir = CoresDir} = proplists:get_value(ignore_cores, Config),
try
case CoresDir of
false ->
@@ -155,5 +155,5 @@ restore(Config) ->
dir(Config) ->
- #ignore_cores{ign_dir = Dir} = ?config(ignore_cores, Config),
+ #ignore_cores{ign_dir = Dir} = proplists:get_value(ignore_cores, Config),
Dir.
diff --git a/lib/kernel/test/inet_SUITE.erl b/lib/kernel/test/inet_SUITE.erl
index 5ba06bb032..f436eafad3 100644
--- a/lib/kernel/test/inet_SUITE.erl
+++ b/lib/kernel/test/inet_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2015. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%%
-module(inet_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/inet.hrl").
-include_lib("kernel/src/inet_dns.hrl").
@@ -40,13 +40,17 @@
lookup_bad_search_option/1,
getif/1,
getif_ifr_name_overflow/1,getservbyname_overflow/1, getifaddrs/1,
- parse_strict_address/1, simple_netns/1, simple_netns_open/1]).
+ parse_strict_address/1, ipv4_mapped_ipv6_address/1,
+ simple_netns/1, simple_netns_open/1,
+ simple_bind_to_device/1, simple_bind_to_device_open/1]).
-export([get_hosts/1, get_ipv6_hosts/1, parse_hosts/1, parse_address/1,
kill_gethost/0, parallell_gethost/0, test_netns/0]).
-export([init_per_testcase/2, end_per_testcase/2]).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[t_gethostbyaddr, t_gethostbyname, t_getaddr,
@@ -56,7 +60,8 @@ all() ->
gethostnative_debug_level, gethostnative_soft_restart,
lookup_bad_search_option,
getif, getif_ifr_name_overflow, getservbyname_overflow,
- getifaddrs, parse_strict_address, simple_netns, simple_netns_open].
+ getifaddrs, parse_strict_address, simple_netns, simple_netns_open,
+ simple_bind_to_device, simple_bind_to_device_open].
groups() ->
[{parse, [], [parse_hosts, parse_address]}].
@@ -97,35 +102,30 @@ init_per_testcase(lookup_bad_search_option, Config) ->
Prev = ets:lookup(Db, Key),
ets:delete(Db, Key),
ets:insert(Db, {Key,[lookup_bad_search_option]}),
- ?t:format("Misconfigured resolver lookup order", []),
- Dog = test_server:timetrap(test_server:seconds(60)),
- [{Key,Prev},{watchdog,Dog}|Config];
+ io:format("Misconfigured resolver lookup order", []),
+ [{Key,Prev}|Config];
init_per_testcase(_Func, Config) ->
- Dog = test_server:timetrap(test_server:seconds(60)),
- [{watchdog,Dog}|Config].
+ Config.
end_per_testcase(lookup_bad_search_option, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog),
Db = inet_db,
Key = res_lookup,
- Prev = ?config(Key, Config),
+ Prev = proplists:get_value(Key, Config),
ets:delete(Db, Key),
ets:insert(Db, Prev),
- ?t:format("Restored resolver lookup order", []);
-end_per_testcase(_Func, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog).
+ io:format("Restored resolver lookup order", []);
+end_per_testcase(_Func, _Config) ->
+ ok.
t_gethostbyaddr() ->
required(v4).
-t_gethostbyaddr(doc) -> "Test the inet:gethostbyaddr/1 function.";
+%% Test the inet:gethostbyaddr/1 function.
t_gethostbyaddr(Config) when is_list(Config) ->
{Name,FullName,IPStr,{A,B,C,D}=IP,Aliases,_,_} = ct:get_config(test_host_ipv4_only),
Rname = integer_to_list(D) ++ "." ++
- integer_to_list(C) ++ "." ++
- integer_to_list(B) ++ "." ++
- integer_to_list(A) ++ ".in-addr.arpa",
+ integer_to_list(C) ++ "." ++
+ integer_to_list(B) ++ "." ++
+ integer_to_list(A) ++ ".in-addr.arpa",
{ok,HEnt} = inet:gethostbyaddr(IPStr),
{ok,HEnt} = inet:gethostbyaddr(IP),
{error,Error} = inet:gethostbyaddr(Name),
@@ -142,11 +142,13 @@ t_gethostbyaddr(Config) when is_list(Config) ->
ok;
_ ->
io:format("alias list: ~p", [HEnt#hostent.h_aliases]),
- io:format("check alias list: ~p", [[Aliases,[Rname]]]),
+ io:format(
+ "check alias list: ~p", [[Aliases,tl(Aliases),[Rname]]]),
io:format("name: ~p", [HEnt#hostent.h_name]),
io:format("check name: ~p", [[Name,FullName]]),
- check_elems([{HEnt#hostent.h_name,[Name,FullName]},
- {HEnt#hostent.h_aliases,[[],Aliases,[Rname]]}])
+ check_elems(
+ [{HEnt#hostent.h_name,[Name,FullName]},
+ {HEnt#hostent.h_aliases,[[],Aliases,tl(Aliases),[Rname]]}])
end,
{_DName, _DFullName, DIPStr, DIP, _, _, _} = ct:get_config(test_dummy_host),
@@ -155,79 +157,78 @@ t_gethostbyaddr(Config) when is_list(Config) ->
ok.
t_gethostbyaddr_v6() -> required(v6).
-t_gethostbyaddr_v6(doc) -> "Test the inet:gethostbyaddr/1 inet6 function.";
+%% Test the inet:gethostbyaddr/1 inet6 function.
t_gethostbyaddr_v6(Config) when is_list(Config) ->
- ?line {Name6, FullName6, IPStr6, IP6, Aliases6} =
+ {Name6, FullName6, IPStr6, IP6, Aliases6} =
ct:get_config(test_host_ipv6_only),
- ?line case inet:gethostbyaddr(IPStr6) of
+ case inet:gethostbyaddr(IPStr6) of
%% Even if IPv6 is not supported, the native resolver may succeed
%% looking up the host. DNS lookup will probably fail.
{error,nxdomain} ->
{skip, "IPv6 test fails! IPv6 not supported on this host!?"};
{ok,HEnt6} ->
- ?line {ok,HEnt6} = inet:gethostbyaddr(IP6),
- ?line {error,Error6} = inet:gethostbyaddr(Name6),
- ?line ok = io:format("Failure reason: ~p: ~s",
- [Error6, inet:format_error(Error6)]),
- ?line HEnt6_ = HEnt6#hostent{h_addrtype = inet6,
- h_length = 16,
- h_addr_list = [IP6]},
- ?line HEnt6_ = HEnt6,
- ?line check_elems([{HEnt6#hostent.h_name,[Name6,FullName6]},
- {HEnt6#hostent.h_aliases,[[],Aliases6]}]),
-
- ?line {_DName6, _DFullName6, DIPStr6, DIP6, _} =
- ct:get_config(test_dummy_ipv6_host),
- ?line {error,nxdomain} = inet:gethostbyaddr(DIPStr6),
- ?line {error,nxdomain} = inet:gethostbyaddr(DIP6),
+ {ok,HEnt6} = inet:gethostbyaddr(IP6),
+ {error,Error6} = inet:gethostbyaddr(Name6),
+ ok = io:format("Failure reason: ~p: ~s",
+ [Error6, inet:format_error(Error6)]),
+ HEnt6_ = HEnt6#hostent{h_addrtype = inet6,
+ h_length = 16,
+ h_addr_list = [IP6]},
+ HEnt6_ = HEnt6,
+ check_elems(
+ [{HEnt6#hostent.h_name,[Name6,FullName6]},
+ {HEnt6#hostent.h_aliases,[[],Aliases6,tl(Aliases6)]}]),
+
+ {_DName6, _DFullName6, DIPStr6, DIP6, _} =
+ ct:get_config(test_dummy_ipv6_host),
+ {error,nxdomain} = inet:gethostbyaddr(DIPStr6),
+ {error,nxdomain} = inet:gethostbyaddr(DIP6),
ok
end.
t_gethostbyname() -> required(v4).
-t_gethostbyname(doc) -> "Test the inet:gethostbyname/1 function.";
-t_gethostbyname(suite) -> [];
+%% Test the inet:gethostbyname/1 function.
t_gethostbyname(Config) when is_list(Config) ->
- ?line {Name,FullName,IPStr,IP,Aliases,IP_46_Str,_} =
+ {Name,FullName,IPStr,IP,Aliases,IP_46_Str,_} =
ct:get_config(test_host_ipv4_only),
- ?line {ok,_} = inet:gethostbyname(IPStr),
- ?line {ok,HEnt} = inet:gethostbyname(Name),
- ?line {ok,HEnt} = inet:gethostbyname(list_to_atom(Name)),
- ?line HEnt_ = HEnt#hostent{h_addrtype = inet,
- h_length = 4,
- h_addr_list = [IP]},
-
- ?line HEnt_ = HEnt,
- ?line check_elems([{HEnt#hostent.h_name,[Name,FullName]},
- {HEnt#hostent.h_aliases,[[],Aliases]}]),
- ?line {ok,HEntF} = inet:gethostbyname(FullName),
- ?line HEntF_ = HEntF#hostent{h_name = FullName,
- h_addrtype = inet,
- h_length = 4,
- h_addr_list = [IP]},
- ?line HEntF_ = HEntF,
- ?line check_elems([{HEnt#hostent.h_aliases,[[],Aliases]}]),
+ {ok,_} = inet:gethostbyname(IPStr),
+ {ok,HEnt} = inet:gethostbyname(Name),
+ {ok,HEnt} = inet:gethostbyname(list_to_atom(Name)),
+ HEnt_ = HEnt#hostent{h_addrtype = inet,
+ h_length = 4,
+ h_addr_list = [IP]},
+
+ HEnt_ = HEnt,
+ check_elems([{HEnt#hostent.h_name,[Name,FullName]},
+ {HEnt#hostent.h_aliases,[[],Aliases,tl(Aliases)]}]),
+ {ok,HEntF} = inet:gethostbyname(FullName),
+ HEntF_ = HEntF#hostent{h_name = FullName,
+ h_addrtype = inet,
+ h_length = 4,
+ h_addr_list = [IP]},
+ HEntF_ = HEntF,
+ check_elems([{HEnt#hostent.h_aliases,[[],Aliases,tl(Aliases)]}]),
%%
- ?line FullNameU = toupper(FullName),
- ?line {ok,HEntU} = inet:gethostbyname(FullNameU),
- ?line FullNameU = toupper(HEntU#hostent.h_name),
- ?line #hostent{
+ FullNameU = toupper(FullName),
+ {ok,HEntU} = inet:gethostbyname(FullNameU),
+ FullNameU = toupper(HEntU#hostent.h_name),
+ #hostent{
h_addrtype = inet,
h_length = 4,
h_addr_list = [IP]} = HEntU,
- ?line check_elems(
- [{[toupper(H) || H <- HEntU#hostent.h_aliases],
- [[],[toupper(A) || A <- Aliases]]}]),
+ check_elems(
+ [{[toupper(H) || H <- HEntU#hostent.h_aliases],
+ [[],[toupper(A) || A <- Aliases]]}]),
- ?line {DName, _DFullName, _DIPStr, _DIP, _, _, _} =
+ {DName, _DFullName, _DIPStr, _DIP, _, _, _} =
ct:get_config(test_dummy_host),
- ?line {error,nxdomain} = inet:gethostbyname(DName),
- ?line {error,nxdomain} = inet:gethostbyname(IP_46_Str),
+ {error,nxdomain} = inet:gethostbyname(DName),
+ {error,nxdomain} = inet:gethostbyname(IP_46_Str),
ok.
t_gethostbyname_v6() -> required(v6).
-t_gethostbyname_v6(doc) -> "Test the inet:gethostbyname/1 inet6 function.";
-t_gethostbyname_v6(suite) -> [];
+%% Test the inet:gethostbyname/1 inet6 function.
t_gethostbyname_v6(Config) when is_list(Config) ->
{Name, FullName, IPStr, IP, Aliases} =
ct:get_config(test_host_ipv6_only),
@@ -242,7 +243,7 @@ t_gethostbyname_v6(Config) when is_list(Config) ->
h_length = 16} = HEnt,
check_elems(
[{HEnt#hostent.h_name,[Name,FullName]},
- {HEnt#hostent.h_aliases,[[],Aliases]}]);
+ {HEnt#hostent.h_aliases,[[],Aliases,tl(Aliases)]}]);
[IP46] -> % IPv4 compatible address
{ok,HEnt4} = inet:gethostbyname(Name, inet),
#hostent{h_addrtype = inet,
@@ -262,7 +263,7 @@ t_gethostbyname_v6(Config) when is_list(Config) ->
h_addrtype = inet6,
h_length = 16} = HEntF,
check_elems(
- [{HEnt#hostent.h_aliases,[[],Aliases]}]);
+ [{HEnt#hostent.h_aliases,[[],Aliases,tl(Aliases)]}]);
[IP46F] -> % IPv4 compatible address
{ok,HEnt4F} = inet:gethostbyname(FullName, inet),
#hostent{h_addrtype = inet,
@@ -287,33 +288,31 @@ check_elem(Val, [Val|_], _) -> ok;
check_elem(Val, [_|Tests], Tests0) ->
check_elem(Val, Tests, Tests0);
check_elem(Val, [], Tests0) ->
- ?t:fail({no_match,Val,Tests0}).
+ ct:fail({no_match,Val,Tests0}).
t_getaddr() -> required(v4).
-t_getaddr(doc) -> "Test the inet:getaddr/2 function.";
-t_getaddr(suite) -> [];
+%% Test the inet:getaddr/2 function.
t_getaddr(Config) when is_list(Config) ->
- ?line {Name,FullName,IPStr,IP,_,IP_46_Str,IP46} =
+ {Name,FullName,IPStr,IP,_,IP_46_Str,IP46} =
ct:get_config(test_host_ipv4_only),
- ?line {ok,IP} = inet:getaddr(list_to_atom(Name), inet),
- ?line {ok,IP} = inet:getaddr(Name, inet),
- ?line {ok,IP} = inet:getaddr(FullName, inet),
- ?line {ok,IP} = inet:getaddr(IP, inet),
- ?line {ok,IP} = inet:getaddr(IPStr, inet),
- ?line {error,nxdomain} = inet:getaddr(IP_46_Str, inet),
- ?line {error,eafnosupport} = inet:getaddr(IP46, inet),
-
- ?line {DName, DFullName, DIPStr, DIP, _, _, _} = ct:get_config(test_dummy_host),
- ?line {error,nxdomain} = inet:getaddr(DName, inet),
- ?line {error,nxdomain} = inet:getaddr(DFullName, inet),
- ?line {ok,DIP} = inet:getaddr(DIPStr, inet),
- ?line {ok,DIP} = inet:getaddr(DIP, inet),
+ {ok,IP} = inet:getaddr(list_to_atom(Name), inet),
+ {ok,IP} = inet:getaddr(Name, inet),
+ {ok,IP} = inet:getaddr(FullName, inet),
+ {ok,IP} = inet:getaddr(IP, inet),
+ {ok,IP} = inet:getaddr(IPStr, inet),
+ {error,nxdomain} = inet:getaddr(IP_46_Str, inet),
+ {error,eafnosupport} = inet:getaddr(IP46, inet),
+
+ {DName, DFullName, DIPStr, DIP, _, _, _} = ct:get_config(test_dummy_host),
+ {error,nxdomain} = inet:getaddr(DName, inet),
+ {error,nxdomain} = inet:getaddr(DFullName, inet),
+ {ok,DIP} = inet:getaddr(DIPStr, inet),
+ {ok,DIP} = inet:getaddr(DIP, inet),
ok.
t_getaddr_v6() -> required(v4) ++ required(v6).
-t_getaddr_v6(doc) -> "Test the inet:getaddr/2 function.";
-t_getaddr_v6(suite) -> [];
+%% Test the inet:getaddr/2 function.
t_getaddr_v6(Config) when is_list(Config) ->
{Name,FullName,IPStr,IP,_} =
ct:get_config(test_host_ipv6_only),
@@ -341,16 +340,15 @@ t_getaddr_v6(Config) when is_list(Config) ->
end.
ipv4_to_ipv6() -> required(v4).
-ipv4_to_ipv6(doc) -> "Test if IPv4 address is converted to IPv6 address.";
-ipv4_to_ipv6(suite) -> [];
+%% Test if IPv4 address is converted to IPv6 address.
ipv4_to_ipv6(Config) when is_list(Config) ->
%% Test what happens if an IPv4 address is looked up in an IPv6 context.
%% If the native resolver succeeds in looking it up, an IPv4-compatible
%% address should be returned. If there is no IPv6 support on this host,
%% an error should be returned.
- ?line {_Name,_FullName,IPStr,_IP,Aliases,IP_46_Str,IP_46} =
+ {_Name,_FullName,IPStr,_IP,Aliases,IP_46_Str,IP_46} =
ct:get_config(test_host_ipv4_only),
- ?line IP4to6Res =
+ IP4to6Res =
case inet:getaddr(IPStr, inet6) of
{ok,IP_46} ->
io:format("IPv4->IPv6: success~n"),
@@ -362,36 +360,34 @@ ipv4_to_ipv6(Config) when is_list(Config) ->
io:format("IPv6->IPv4: eafnosupport~n"),
E;
Other ->
- ?line ?t:fail({ipv4_to_ipv6_lookup_failed,Other})
+ ct:fail({ipv4_to_ipv6_lookup_failed,Other})
end,
- ?line case {IP4to6Res,inet:gethostbyname(IPStr, inet6)} of
- {true,{ok,HEnt}} ->
- ?line HEnt_ = HEnt#hostent{h_addrtype = inet6,
- h_length = 16,
- h_addr_list = [IP_46]},
- ?line HEnt_ = HEnt,
- ?line check_elems([{HEnt#hostent.h_name,[IP_46_Str,IPStr]},
- {HEnt#hostent.h_aliases,[[],Aliases]}]);
- {_,IP4to6Res} -> ok
- end,
+ case {IP4to6Res,inet:gethostbyname(IPStr, inet6)} of
+ {true,{ok,HEnt}} ->
+ HEnt_ = HEnt#hostent{h_addrtype = inet6,
+ h_length = 16,
+ h_addr_list = [IP_46]},
+ HEnt_ = HEnt,
+ check_elems([{HEnt#hostent.h_name,[IP_46_Str,IPStr]},
+ {HEnt#hostent.h_aliases,[[],Aliases,tl(Aliases)]}]);
+ {_,IP4to6Res} -> ok
+ end,
ok.
-host_and_addr() -> required(hosts).
-host_and_addr(doc) -> ["Test looking up hosts and addresses. Use 'ypcat hosts' ",
- "or the local eqivalent to find all hosts."];
-host_and_addr(suite) -> [];
-host_and_addr(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:minutes(5)),
+host_and_addr() ->
+ [{timetrap,{minutes,5}}|required(hosts)].
- ?line lists:foreach(fun try_host/1, get_hosts(Config)),
- ?line test_server:timetrap_cancel(Dog),
+%% Test looking up hosts and addresses. Use 'ypcat hosts'
+%% or the local equivalent to find all hosts.
+host_and_addr(Config) when is_list(Config) ->
+ lists:foreach(fun try_host/1, get_hosts(Config)),
ok.
try_host({Ip0, Host}) ->
- ?line {ok,Ip} = inet:getaddr(Ip0, inet),
- ?line {ok,{hostent, _, _, inet, _, Ips1}} = inet:gethostbyaddr(Ip),
- ?line {ok,{hostent, _, _, inet, _, _Ips2}} = inet:gethostbyname(Host),
- ?line true = lists:member(Ip, Ips1),
+ {ok,Ip} = inet:getaddr(Ip0, inet),
+ {ok,{hostent, _, _, inet, _, Ips1}} = inet:gethostbyaddr(Ip),
+ {ok,{hostent, _, _, inet, _, _Ips2}} = inet:gethostbyname(Host),
+ true = lists:member(Ip, Ips1),
ok.
%% Get all hosts from the system using 'ypcat hosts' or the local
@@ -438,105 +434,139 @@ get_hosts([C|Rest], Cur, Ip, Result) ->
get_hosts(Rest, [C|Cur], Ip, Result);
get_hosts([], _, _, Result) ->
Result.
-
+
parse_hosts(Config) when is_list(Config) ->
- ?line DataDir = ?config(data_dir,Config),
- ?line HostFile = filename:join(DataDir, "hosts"),
- ?line inet_parse:hosts(HostFile),
- ?line HostFileErr1 = filename:join(DataDir, "hosts_err1"),
- ?line inet_parse:hosts(HostFileErr1),
- ?line Resolv = filename:join(DataDir,"resolv.conf"),
- ?line inet_parse:resolv(Resolv),
- ?line ResolvErr1 = filename:join(DataDir,"resolv.conf.err1"),
- ?line inet_parse:resolv(ResolvErr1).
+ DataDir = proplists:get_value(data_dir,Config),
+ HostFile = filename:join(DataDir, "hosts"),
+ inet_parse:hosts(HostFile),
+ HostFileErr1 = filename:join(DataDir, "hosts_err1"),
+ inet_parse:hosts(HostFileErr1),
+ Resolv = filename:join(DataDir,"resolv.conf"),
+ inet_parse:resolv(Resolv),
+ ResolvErr1 = filename:join(DataDir,"resolv.conf.err1"),
+ inet_parse:resolv(ResolvErr1).
parse_address(Config) when is_list(Config) ->
- V4Strict =
+ V4Reversable =
[{{0,0,0,0},"0.0.0.0"},
- {{1,2,3,4},"1.2.3.4"},
+ {{1,2,3,4},"1.2.3.4"},
{{253,252,251,250},"253.252.251.250"},
{{1,2,255,254},"1.2.255.254"}],
- V6Strict =
+ V6Reversable =
[{{0,0,0,0,0,0,0,0},"::"},
+ {{0,0,0,0,0,0,0,1},"::1"},
+ {{0,0,0,0,0,0,0,2},"::0.0.0.2"},
{{15,0,0,0,0,0,0,2},"f::2"},
- {{15,16#f11,0,0,0,0,256,2},"f:f11::0100:2"},
- {{0,0,0,0,0,0,0,16#17},"::17"},
- {{16#700,0,0,0,0,0,0,0},"0700::"},
- {{0,0,0,0,0,0,2,1},"::2:1"},
+ {{15,16#f11,0,0,0,0,256,2},"f:f11::100:2"},
+ {{16#700,0,0,0,0,0,0,0},"700::"},
+ {{0,0,0,0,0,0,2,1},"::0.2.0.1"},
{{0,0,0,0,0,3,2,1},"::3:2:1"},
{{0,0,0,0,4,3,2,1},"::4:3:2:1"},
{{0,0,0,5,4,3,2,1},"::5:4:3:2:1"},
{{0,0,6,5,4,3,2,1},"::6:5:4:3:2:1"},
- {{0,7,6,5,4,3,2,1},"::7:6:5:4:3:2:1"},
+ {{0,7,6,5,4,3,2,1},"0:7:6:5:4:3:2:1"},
{{7,0,0,0,0,0,0,0},"7::"},
{{7,6,0,0,0,0,0,0},"7:6::"},
{{7,6,5,0,0,0,0,0},"7:6:5::"},
{{7,6,5,4,0,0,0,0},"7:6:5:4::"},
{{7,6,5,4,3,0,0,0},"7:6:5:4:3::"},
{{7,6,5,4,3,2,0,0},"7:6:5:4:3:2::"},
- {{7,6,5,4,3,2,1,0},"7:6:5:4:3:2:1::"},
+ {{7,6,5,4,3,2,1,0},"7:6:5:4:3:2:1:0"},
+ {{0,0,6,5,4,3,0,0},"::6:5:4:3:0:0"},
+ {{0,0,6,5,4,0,0,0},"0:0:6:5:4::"},
+ {{8,0,0,5,4,0,0,1},"8::5:4:0:0:1"},
+ {{8,0,0,5,0,0,0,1},"8:0:0:5::1"},
+ {{0,7,6,5,4,3,2,0},"0:7:6:5:4:3:2:0"},
+ {{0,0,6,5,4,3,0,0},"::6:5:4:3:0:0"},
+ {{0,0,0,5,4,0,0,0},"::5:4:0:0:0"},
+ {{0,0,0,0,4,0,0,0},"::4:0:0:0"},
+ {{0,0,0,5,0,0,0,0},"0:0:0:5::"},
{{16#c11,16#c22,16#5c33,16#c440,16#55c0,16#c66c,16#77,16#88},
- "c11:0c22:5c33:c440:55c0:c66c:77:0088"},
+ "c11:c22:5c33:c440:55c0:c66c:77:88"},
+ {{0,16#c22,16#5c33,16#c440,16#55c0,16#c66c,16#77,16#88},
+ "0:c22:5c33:c440:55c0:c66c:77:88"},
{{16#c11,0,16#5c33,16#c440,16#55c0,16#c66c,16#77,16#88},
- "c11::5c33:c440:55c0:c66c:77:0088"},
+ "c11:0:5c33:c440:55c0:c66c:77:88"},
{{16#c11,16#c22,0,16#c440,16#55c0,16#c66c,16#77,16#88},
- "c11:0c22::c440:55c0:c66c:77:0088"},
+ "c11:c22:0:c440:55c0:c66c:77:88"},
{{16#c11,16#c22,16#5c33,0,16#55c0,16#c66c,16#77,16#88},
- "c11:0c22:5c33::55c0:c66c:77:0088"},
+ "c11:c22:5c33:0:55c0:c66c:77:88"},
{{16#c11,16#c22,16#5c33,16#c440,0,16#c66c,16#77,16#88},
- "c11:0c22:5c33:c440::c66c:77:0088"},
+ "c11:c22:5c33:c440:0:c66c:77:88"},
{{16#c11,16#c22,16#5c33,16#c440,16#55c0,0,16#77,16#88},
- "c11:0c22:5c33:c440:55c0::77:0088"},
+ "c11:c22:5c33:c440:55c0:0:77:88"},
{{16#c11,16#c22,16#5c33,16#c440,16#55c0,16#c66c,0,16#88},
- "c11:0c22:5c33:c440:55c0:c66c::0088"},
+ "c11:c22:5c33:c440:55c0:c66c:0:88"},
+ {{16#c11,16#c22,16#5c33,16#c440,16#55c0,16#c66c,16#77,0},
+ "c11:c22:5c33:c440:55c0:c66c:77:0"},
+ {{0,0,16#5c33,16#c440,16#55c0,16#c66c,16#77,16#88},
+ "::5c33:c440:55c0:c66c:77:88"},
{{16#c11,0,0,16#c440,16#55c0,16#c66c,16#77,16#88},
- "c11::c440:55c0:c66c:77:0088"},
+ "c11::c440:55c0:c66c:77:88"},
{{16#c11,16#c22,0,0,16#55c0,16#c66c,16#77,16#88},
- "c11:0c22::55c0:c66c:77:0088"},
+ "c11:c22::55c0:c66c:77:88"},
{{16#c11,16#c22,16#5c33,0,0,16#c66c,16#77,16#88},
- "c11:0c22:5c33::c66c:77:0088"},
+ "c11:c22:5c33::c66c:77:88"},
{{16#c11,16#c22,16#5c33,16#c440,0,0,16#77,16#88},
- "c11:0c22:5c33:c440::77:0088"},
+ "c11:c22:5c33:c440::77:88"},
{{16#c11,16#c22,16#5c33,16#c440,16#55c0,0,0,16#88},
- "c11:0c22:5c33:c440:55c0::0088"},
+ "c11:c22:5c33:c440:55c0::88"},
+ {{16#c11,16#c22,16#5c33,16#c440,16#55c0,16#c66c,0,0},
+ "c11:c22:5c33:c440:55c0:c66c::"},
+ {{0,0,0,16#c440,16#55c0,16#c66c,16#77,16#88},
+ "::c440:55c0:c66c:77:88"},
{{16#c11,0,0,0,16#55c0,16#c66c,16#77,16#88},
- "c11::55c0:c66c:77:0088"},
+ "c11::55c0:c66c:77:88"},
{{16#c11,16#c22,0,0,0,16#c66c,16#77,16#88},
- "c11:0c22::c66c:77:0088"},
+ "c11:c22::c66c:77:88"},
{{16#c11,16#c22,16#5c33,0,0,0,16#77,16#88},
- "c11:0c22:5c33::77:0088"},
+ "c11:c22:5c33::77:88"},
{{16#c11,16#c22,16#5c33,16#c440,0,0,0,16#88},
- "c11:0c22:5c33:c440::0088"},
+ "c11:c22:5c33:c440::88"},
+ {{16#c11,16#c22,16#5c33,16#c440,16#55c0,0,0,0},
+ "c11:c22:5c33:c440:55c0::"},
+ {{0,0,0,0,16#55c0,16#c66c,16#77,16#88},
+ "::55c0:c66c:77:88"},
{{16#c11,0,0,0,0,16#c66c,16#77,16#88},
- "c11::c66c:77:0088"},
+ "c11::c66c:77:88"},
{{16#c11,16#c22,0,0,0,0,16#77,16#88},
- "c11:0c22::77:0088"},
+ "c11:c22::77:88"},
{{16#c11,16#c22,16#5c33,0,0,0,0,16#88},
- "c11:0c22:5c33::0088"},
+ "c11:c22:5c33::88"},
+ {{16#c11,16#c22,16#5c33,16#c440,0,0,0,0},
+ "c11:c22:5c33:c440::"},
+ {{0,0,0,0,0,16#c66c,16#77,16#88},
+ "::c66c:77:88"},
{{16#c11,0,0,0,0,0,16#77,16#88},
- "c11::77:0088"},
+ "c11::77:88"},
{{16#c11,16#c22,0,0,0,0,0,16#88},
- "c11:0c22::0088"},
- {{0,0,0,0,0,65535,258,65534},"::FFFF:1.2.255.254"},
+ "c11:c22::88"},
+ {{16#c11,16#c22,16#5c33,0,0,0,0,0},
+ "c11:c22:5c33::"},
+ {{0,0,0,0,0,65535,258,65534},"::ffff:1.2.255.254"},
+ {{16#fe80,12345,0,0,0,0,0,16#12},"fe80::12%012345"},
{{16#ffff,16#ffff,16#ffff,16#ffff,16#ffff,16#ffff,16#ffff,16#ffff},
"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"}
- |[{{D2,0,0,0,0,P,(D1 bsl 8) bor D2,(D3 bsl 8) bor D4},
- erlang:integer_to_list(D2, 16)++"::"++Q++S}
- || {{D1,D2,D3,D4},S} <- V4Strict,
- {P,Q} <- [{0,""},{16#17,"17:"},{16#ff0,"0ff0:"}]]],
+ |[{list_to_tuple(P++[(D1 bsl 8) bor D2,(D3 bsl 8) bor D4]),
+ Q++S}
+ || {{D1,D2,D3,D4},S} <-
+ tl(V4Reversable),
+ {P,Q} <-
+ [{[0,0,0,0,0,16#ffff],"::ffff:"},
+ {[0,0,0,0,0,0],"::"}]]],
V4Sloppy =
[{{10,1,16#98,16#76},"10.0x019876"},
{{8#12,1,8#130,8#321},"012.01.054321"},
- {{255,255,255,255},"255.255.255.0377"},
- {{255,255,255,255},"0Xff.000000000377.0x0000ff.255"},
- {{255,255,255,255},"255.255.65535"},
- {{255,255,255,255},"255.0xFF.0177777"},
- {{255,255,255,255},"255.16777215"},
- {{255,255,255,255},"00377.0XFFFFFF"},
- {{255,255,255,255},"4294967295"},
- {{255,255,255,255},"0xffffffff"},
- {{255,255,255,255},"00000000000037777777777"},
+ {{252,253,254,255},"252.253.254.0377"},
+ {{252,253,254,255},"0Xfc.000000000375.0x0000fe.255"},
+ {{252,253,254,255},"252.253.65279"},
+ {{252,253,254,255},"252.0xFD.0177377"},
+ {{252,253,254,255},"252.16645887"},
+ {{252,253,254,255},"00374.0XFDFEFF"},
+ {{252,253,254,255},"4244504319"},
+ {{252,253,254,255},"0xfcfdfeff"},
+ {{252,253,254,255},"00000000000037477377377"},
{{16#12,16#34,16#56,16#78},"0x12345678"},
{{16#12,16#34,16#56,16#78},"0x12.0x345678"},
{{16#12,16#34,16#56,16#78},"0x12.0X34.0x5678"},
@@ -548,8 +578,14 @@ parse_address(Config) when is_list(Config) ->
{{0,0,0,0},"0.00.0.0"},
{{0,0,0,0},"0.0.000000000000.0"}],
V6Sloppy =
- [{{0,0,0,0,0,65535,(D1 bsl 8) bor D2,(D3 bsl 8) bor D4},S}
- || {{D1,D2,D3,D4},S} <- V4Strict++V4Sloppy],
+ [{{16#a,16#b,16#c,16#0,16#0,16#d,16#e,16#f},"A:B:C::d:e:f"},
+ {{16#fe80,0,0,0,0,0,0,16#12},"fe80::12%XXXXXXX"}]
+ ++
+ [{{P,0,0,0,0,D2,(D1 bsl 8) bor D2,(D3 bsl 8) bor D4},
+ Q++erlang:integer_to_list(D2, 16)++":"++S}
+ || {{D1,D2,D3,D4},S} <- V4Reversable,
+ {P,Q} <-
+ [{16#2001,"2001::"},{16#177,"177::"},{16#ff0,"Ff0::"}]],
V4Err =
["0.256.0.1",
"1.2.3.4.5",
@@ -593,28 +629,36 @@ parse_address(Config) when is_list(Config) ->
"fec0::fFfF:127.0.0.1."],
t_parse_address
(parse_ipv6_address,
- V6Strict++V6Sloppy++V6Err++V4Err),
+ false,
+ V6Reversable++V6Sloppy++V6Err++V4Err),
t_parse_address
(parse_ipv6strict_address,
- V6Strict++V6Err++V4Err++[S || {_,S} <- V6Sloppy]),
+ true,
+ V6Reversable++V6Err++V4Err),
t_parse_address
(parse_ipv4_address,
- V4Strict++V4Sloppy++V4Err++V6Err++[S || {_,S} <- V6Strict]),
+ false,
+ V4Reversable++V4Sloppy++V4Err++V6Err++[S || {_,S} <- V6Reversable]),
t_parse_address
(parse_ipv4strict_address,
- V4Strict++V4Err++V6Err++[S || {_,S} <- V4Sloppy++V6Strict]).
+ true,
+ V4Reversable++V4Err++V6Err++[S || {_,S} <- V4Sloppy++V6Reversable]).
-t_parse_address(Func, []) ->
+t_parse_address(Func, _Reversable, []) ->
io:format("~p done.~n", [Func]),
ok;
-t_parse_address(Func, [{Addr,String}|L]) ->
+t_parse_address(Func, Reversable, [{Addr,String}|L]) ->
io:format("~p = ~p.~n", [Addr,String]),
{ok,Addr} = inet:Func(String),
- t_parse_address(Func, L);
-t_parse_address(Func, [String|L]) ->
+ case Reversable of
+ true -> String = inet:ntoa(Addr);
+ false -> ok
+ end,
+ t_parse_address(Func, Reversable, L);
+t_parse_address(Func, Reversable, [String|L]) ->
io:format("~p.~n", [String]),
{error,einval} = inet:Func(String),
- t_parse_address(Func, L).
+ t_parse_address(Func, Reversable, L).
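+
+%% For example, a "reversable" entry is one whose string is exactly the
+%% canonical form produced by inet:ntoa/1 (values taken from V6Reversable
+%% above):
+%%
+%%     {ok,{0,0,0,0,0,65535,258,65534}} =
+%%         inet:parse_ipv6strict_address("::ffff:1.2.255.254"),
+%%     "::ffff:1.2.255.254" = inet:ntoa({0,0,0,0,0,65535,258,65534}),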
parse_strict_address(Config) when is_list(Config) ->
{ok, {127,0,0,1}} =
@@ -624,42 +668,57 @@ parse_strict_address(Config) when is_list(Config) ->
{ok, {3089,3106,23603,50240,0,0,119,136}} =
inet:parse_strict_address("c11:0c22:5c33:c440::077:0088").
-t_gethostnative(suite) ->[];
-t_gethostnative(doc) ->[];
+ipv4_mapped_ipv6_address(Config) when is_list(Config) ->
+ {D1,D2,D3,D4} = IPv4Address =
+ {rand:uniform(256) - 1,
+ rand:uniform(256) - 1,
+ rand:uniform(256) - 1,
+ rand:uniform(256) - 1},
+ E7 = (D1 bsl 8) bor D2,
+ E8 = (D3 bsl 8) bor D4,
+ io:format("IPv4Address: ~p.~n", [IPv4Address]),
+ {0,0,0,0,0,65535,E7,E8} = inet:ipv4_mapped_ipv6_address(IPv4Address),
+ IPv6Address =
+ {rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1, E7, E8},
+ IPv4Address = inet:ipv4_mapped_ipv6_address(IPv6Address),
+ ok.
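+
+%% Concretely, with the loopback address (a hand-picked value rather than
+%% the random data used above):
+%%
+%%     {0,0,0,0,0,16#ffff,16#7f00,16#0001} =
+%%         inet:ipv4_mapped_ipv6_address({127,0,0,1}),
+%%     {127,0,0,1} =
+%%         inet:ipv4_mapped_ipv6_address({0,0,0,0,0,16#ffff,16#7f00,16#0001}),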
+
t_gethostnative(Config) when is_list(Config) ->
-%% this will result in 26 bytes sent which causes problem in Windows
-%% if the port-program has not assured stdin to be read in BINARY mode
-%% OTP-2555
- ?line case inet_gethost_native:gethostbyname(
- "a23456789012345678901234") of
+ %% this will result in 26 bytes being sent, which causes problems on Windows
+ %% if the port program has not ensured that stdin is read in BINARY mode
+ %% OTP-2555
+ case inet_gethost_native:gethostbyname(
+ "a23456789012345678901234") of
{error,notfound} ->
- ?line ok;
+ ok;
{error,no_data} ->
- ?line ok
+ ok
end.
-gethostnative_parallell(suite) ->
- [];
-gethostnative_parallell(doc) ->
- ["Check that the emulator survives crashes in gethost_native"];
+%% Check that the emulator survives crashes in gethost_native.
gethostnative_parallell(Config) when is_list(Config) ->
- ?line {ok,Hostname} = inet:gethostname(),
- ?line {ok,_} = inet:gethostbyname(Hostname),
+ {ok,Hostname} = inet:gethostname(),
+ {ok,_} = inet:gethostbyname(Hostname),
case whereis(inet_gethost_native) of
Pid when is_pid(Pid) ->
- ?line do_gethostnative_parallell();
+ do_gethostnative_parallell();
_ ->
- ?line {skipped, "Not running native gethostbyname"}
+ {skipped, "Not running native gethostbyname"}
end.
do_gethostnative_parallell() ->
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line {ok,Node} = ?t:start_node(gethost_parallell, slave,
- [{args, "-pa " ++ PA}]),
- ?line ok = rpc:call(Node, ?MODULE, parallell_gethost, []),
- ?line receive after 10000 -> ok end,
- ?line pong = net_adm:ping(Node),
- ?line ?t:stop_node(Node),
+ PA = filename:dirname(code:which(?MODULE)),
+ {ok,Node} = test_server:start_node(gethost_parallell, slave,
+ [{args, "-pa " ++ PA}]),
+ ok = rpc:call(Node, ?MODULE, parallell_gethost, []),
+ receive after 10000 -> ok end,
+ pong = net_adm:ping(Node),
+ test_server:stop_node(Node),
ok.
parallell_gethost() ->
@@ -752,7 +811,7 @@ wait_for_gethost(N) ->
%% This is what I call an exit tuple :)
exit({inet,gethostbyname, returned, Otherwise, 'when',
'N','=',N,'and','hostname','=',Hostname,'and',
- kill_gethost_n,'=',get(kill_gethost_n)})
+ kill_gethost_n,'=',get(kill_gethost_n)})
end,
case whereis(inet_gethost_native) of
Pid when is_pid(Pid) ->
@@ -764,23 +823,20 @@ wait_for_gethost(N) ->
end,
wait_for_gethost(N-1)
end.
-
-cname_loop(suite) ->
- [];
-cname_loop(doc) ->
- ["Check that the resolver handles a CNAME loop"];
+
+%% Check that the resolver handles a CNAME loop.
cname_loop(Config) when is_list(Config) ->
%% getbyname (hostent_by_domain)
- ?line ok = inet_db:add_rr("mydomain.com", in, ?S_CNAME, ttl, "mydomain.com"),
- ?line {error,nxdomain} = inet_db:getbyname("mydomain.com", ?S_A),
- ?line ok = inet_db:del_rr("mydomain.com", in, ?S_CNAME, "mydomain.com"),
+ ok = inet_db:add_rr("mydomain.com", in, ?S_CNAME, ttl, "mydomain.com"),
+ {error,nxdomain} = inet_db:getbyname("mydomain.com", ?S_A),
+ ok = inet_db:del_rr("mydomain.com", in, ?S_CNAME, "mydomain.com"),
%% res_hostent_by_domain
RR = #dns_rr{domain = "mydomain.com",
class = in,
type = ?S_CNAME,
data = "mydomain.com"},
Rec = #dns_rec{anlist = [RR]},
- ?line {error,nxdomain} = inet_db:res_hostent_by_domain("mydomain.com", ?S_A, Rec),
+ {error,nxdomain} = inet_db:res_hostent_by_domain("mydomain.com", ?S_A, Rec),
ok.
@@ -795,80 +851,75 @@ cname_loop(Config) when is_list(Config) ->
lookup_processes=20}).
gethostnative_soft_restart() -> required(hosts).
-gethostnative_soft_restart(suite) ->
- [];
-gethostnative_soft_restart(doc) ->
- ["Check that no name lookups fails during soft restart "
- "of inet_gethost_native"];
+
+%% Check that no name lookups fail during soft restart
+%% of inet_gethost_native.
gethostnative_soft_restart(Config) when is_list(Config) ->
- ?line gethostnative_control(Config,
- #gethostnative_control{
- control_seq=[soft_restart]}).
+ gethostnative_control(Config,
+ #gethostnative_control{
+ control_seq=[soft_restart]}).
gethostnative_debug_level() -> required(hosts).
-gethostnative_debug_level(suite) ->
- [];
-gethostnative_debug_level(doc) ->
- ["Check that no name lookups fails during debug level change "
- "of inet_gethost_native"];
+
+%% Check that no name lookups fail during debug level change
+%% of inet_gethost_native.
gethostnative_debug_level(Config) when is_list(Config) ->
- ?line gethostnative_control(Config,
- #gethostnative_control{
- control_seq=[{debug_level,1},
- {debug_level,0}]}).
+ gethostnative_control(Config,
+ #gethostnative_control{
+ control_seq=[{debug_level,1},
+ {debug_level,0}]}).
gethostnative_control(Config, Optrec) ->
- ?line case inet_db:res_option(lookup) of
- [native] ->
- case whereis(inet_gethost_native) of
- Pid when is_pid(Pid) ->
- ?line gethostnative_control_1(Config, Optrec);
- _ ->
- ?line {skipped, "Not running native gethostbyname"}
- end;
- _ ->
- ?line {skipped, "Native not only lookup metod"}
- end.
+ case inet_db:res_option(lookup) of
+ [native] ->
+ case whereis(inet_gethost_native) of
+ Pid when is_pid(Pid) ->
+ gethostnative_control_1(Config, Optrec);
+ _ ->
+ {skipped, "Not running native gethostbyname"}
+ end;
+ _ ->
+ {skipped, "Native not only lookup metod"}
+ end.
gethostnative_control_1(Config,
#gethostnative_control{
- control_seq=Seq,
- control_interval=Interval,
- lookup_delay=Delay,
- lookup_count=Cnt,
- lookup_processes=N}) ->
- ?line {ok, Hostname} = inet:gethostname(),
- ?line {ok, _} = inet:gethostbyname(Hostname),
- ?line Hosts =
+ control_seq=Seq,
+ control_interval=Interval,
+ lookup_delay=Delay,
+ lookup_count=Cnt,
+ lookup_processes=N}) ->
+ {ok, Hostname} = inet:gethostname(),
+ {ok, _} = inet:gethostbyname(Hostname),
+ Hosts =
[Hostname|[H || {_,H} <- get_hosts(Config)]
++[H++D || H <- ["www.","www1.","www2.",""],
D <- ["erlang.org","erlang.se"]]
++[H++"cslab.ericsson.net" || H <- ["morgoth.","hades.","styx."]]],
%% Spawn some processes to do parallel lookups while
%% I repeatedly do inet_gethost_native:control/1.
- ?line TrapExit = process_flag(trap_exit, true),
- ?line gethostnative_control_2([undefined], Interval, Delay, Cnt, N, Hosts),
- ?line test_server:format(
- "First intermission: now starting control sequence ~w\n",
- [Seq]),
- ?line erlang:display(first_intermission),
- ?line gethostnative_control_2(Seq, Interval, Delay, Cnt, N, Hosts),
- ?line erlang:display(second_intermission),
- ?line test_server:format(
- "Second intermission: now stopping control sequence ~w\n",
- [Seq]),
- ?line gethostnative_control_2([undefined], Interval, Delay, Cnt, N, Hosts),
- ?line true = process_flag(trap_exit, TrapExit),
- ?line ok.
+ TrapExit = process_flag(trap_exit, true),
+ gethostnative_control_2([undefined], Interval, Delay, Cnt, N, Hosts),
+ io:format(
+ "First intermission: now starting control sequence ~w\n",
+ [Seq]),
+ erlang:display(first_intermission),
+ gethostnative_control_2(Seq, Interval, Delay, Cnt, N, Hosts),
+ erlang:display(second_intermission),
+ io:format(
+ "Second intermission: now stopping control sequence ~w\n",
+ [Seq]),
+ gethostnative_control_2([undefined], Interval, Delay, Cnt, N, Hosts),
+ true = process_flag(trap_exit, TrapExit),
+ ok.
gethostnative_control_2(Seq, Interval, Delay, Cnt, N, Hosts) ->
- ?line Tag = make_ref(),
- ?line Parent = self(),
- ?line Lookupers =
+ Tag = make_ref(),
+ Parent = self(),
+ Lookupers =
[spawn_link(
fun () ->
- random:seed(),
lookup_loop(Hosts, Delay, Tag, Parent, Cnt, Hosts)
end)
|| _ <- lists:seq(1, N)],
@@ -878,7 +929,7 @@ gethostnative_control_2(Seq, Interval, Delay, Cnt, N, Hosts) ->
gethostnative_control_3(Tag, Reason) ->
receive
{Tag,Error} ->
- ?line gethostnative_control_3(Tag, Error)
+ gethostnative_control_3(Tag, Error)
after 0 ->
Reason
end.
@@ -893,27 +944,26 @@ control_loop([Op|Ops], Interval, Tag, Lookupers, Seq) ->
Seq).
control_loop_1(Op, Interval, Tag, Lookupers) ->
- ?line
- receive
- {'EXIT',Pid,Reason} ->
- ?line case Reason of
- Tag -> % Done
- ?line control_loop_1
- (Op, Interval, Tag,
- lists:delete(Pid, Lookupers));
- _ ->
- ?line io:format("Lookuper ~p died: ~p",
- [Pid,Reason]),
- ?line test_server:fail("Lookuper died")
- end
- after Interval ->
- ?line if Op =/= undefined ->
- ?line ok = inet_gethost_native:control(Op);
- true ->
- ?line ok
- end,
- ?line Lookupers
- end.
+ receive
+ {'EXIT',Pid,Reason} ->
+ case Reason of
+ Tag -> % Done
+ control_loop_1
+ (Op, Interval, Tag,
+ lists:delete(Pid, Lookupers));
+ _ ->
+ io:format("Lookuper ~p died: ~p",
+ [Pid,Reason]),
+ ct:fail("Lookuper died")
+ end
+ after Interval ->
+ if Op =/= undefined ->
+ ok = inet_gethost_native:control(Op);
+ true ->
+ ok
+ end,
+ Lookupers
+ end.
lookup_loop(_, _Delay, Tag, _Parent, 0, _Hosts) ->
exit(Tag);
@@ -924,21 +974,18 @@ lookup_loop([H|Hs], Delay, Tag, Parent, Cnt, Hosts) ->
{ok,_Hent} -> ok;
{error,nxdomain} -> ok;
Error ->
- ?line io:format("Name lookup error for ~p for ~p: ~p",
- [self(),H,Error]),
+ io:format("Name lookup error for ~p for ~p: ~p",
+ [self(),H,Error]),
Parent ! {Tag,Error}
end,
receive
- after random:uniform(Delay) ->
+ after rand:uniform(Delay) ->
lookup_loop(Hs, Delay, Tag, Parent, Cnt-1, Hosts)
end.
-lookup_bad_search_option(suite) ->
- [];
-lookup_bad_search_option(doc) ->
- ["Test lookup with erroneously configured lookup option (OTP-12133)"];
+%% Test lookup with erroneously configured lookup option (OTP-12133).
lookup_bad_search_option(Config) when is_list(Config) ->
%% Manipulation of resolver config is done in init_per_testcase
%% and end_per_testcase to ensure cleanup.
@@ -948,24 +995,21 @@ lookup_bad_search_option(Config) when is_list(Config) ->
-getif(suite) ->
- [];
-getif(doc) ->
- ["Tests basic functionality of getiflist, getif, and ifget"];
+%% Tests basic functionality of getiflist, getif, and ifget.
getif(Config) when is_list(Config) ->
- ?line case os:type() of
- {unix,Osname} ->
- ?line do_getif(Osname);
- {_,_} ->
- {skip,"inet:getif/0 probably not supported"}
- end.
+ case os:type() of
+ {unix,Osname} ->
+ do_getif(Osname);
+ {_,_} ->
+ {skip,"inet:getif/0 probably not supported"}
+ end.
do_getif(Osname) ->
- ?line {ok,Hostname} = inet:gethostname(),
- ?line {ok,Address} = inet:getaddr(Hostname, inet),
- ?line {ok,Loopback} = inet:getaddr("localhost", inet),
- ?line {ok,Interfaces} = inet:getiflist(),
- ?line HWAs =
+ {ok,Hostname} = inet:gethostname(),
+ {ok,Address} = inet:getaddr(Hostname, inet),
+ {ok,Loopback} = inet:getaddr("localhost", inet),
+ {ok,Interfaces} = inet:getiflist(),
+ HWAs =
lists:sort(
lists:foldl(
fun (I, Acc) ->
@@ -974,10 +1018,10 @@ do_getif(Osname) ->
{ok,[]} -> Acc
end
end, [], Interfaces)),
- ?line io:format("HWAs = ~p~n", [HWAs]),
- ?line (Osname =/= sunos)
- andalso ((length(HWAs) > 0) orelse (?t:fail(no_HWAs))),
- ?line Addresses =
+ io:format("HWAs = ~p~n", [HWAs]),
+ (Osname =/= sunos)
+ andalso ((length(HWAs) > 0) orelse (ct:fail(no_HWAs))),
+ Addresses =
lists:sort(
lists:foldl(
fun (I, Acc) ->
@@ -986,139 +1030,151 @@ do_getif(Osname) ->
{ok,[]} -> Acc
end
end, [], Interfaces)),
- ?line {ok,Getif} = inet:getif(),
- ?line Addresses = lists:sort([A || {A,_,_} <- Getif]),
- ?line true = ip_member(Address, Addresses),
- ?line true = ip_member(Loopback, Addresses),
- ?line ok.
-
-getif_ifr_name_overflow(doc) ->
- "Test long interface names do not overrun buffer";
+ {ok,Getif} = inet:getif(),
+ Addresses = lists:sort([A || {A,_,_} <- Getif]),
+ true = ip_member(Address, Addresses),
+ true = ip_member(Loopback, Addresses),
+ ok.
+
+%% Test long interface names do not overrun buffer.
getif_ifr_name_overflow(Config) when is_list(Config) ->
- ?line case os:type() of
- {unix,Osname} ->
- ?line do_getif_ifr_name_overflow(Osname);
- {_,_} ->
- {skip,"inet:ifget/2 probably not supported"}
- end.
+ case os:type() of
+ {unix,Osname} ->
+ do_getif_ifr_name_overflow(Osname);
+ {_,_} ->
+ {skip,"inet:ifget/2 probably not supported"}
+ end.
do_getif_ifr_name_overflow(_) ->
%% emulator should not crash
- ?line {ok,[]} = inet:ifget(lists:duplicate(128, "x"), [addr]),
+ {ok,[]} = inet:ifget(lists:duplicate(128, "x"), [addr]),
ok.
-getservbyname_overflow(doc) ->
- "Test long service names do not overrun buffer";
+%% Test long service names do not overrun buffer.
getservbyname_overflow(Config) when is_list(Config) ->
%% emulator should not crash
- ?line {error,einval} = inet:getservbyname(list_to_atom(lists:flatten(lists:duplicate(128, "x"))), tcp),
+ {error,einval} = inet:getservbyname(list_to_atom(lists:flatten(lists:duplicate(128, "x"))), tcp),
ok.
-getifaddrs(doc) ->
- "Test inet:gifaddrs/0";
+%% Test inet:getifaddrs/0.
getifaddrs(Config) when is_list (Config) ->
- ?line {ok,IfAddrs} = inet:getifaddrs(),
- ?line ?t:format("IfAddrs = ~p.~n", [IfAddrs]),
- ?line
- case
- {os:type(),
- [If ||
- {If,Opts} <- IfAddrs,
- lists:keymember(hwaddr, 1, Opts)]} of
- {{unix,sunos},[]} -> ok;
- {OT,[]} ->
- ?t:fail({should_have_hwaddr,OT});
- _ -> ok
- end,
- ?line Addrs =
- [element(1, A) || A <- ifaddrs(IfAddrs)],
- ?line ?t:format("Addrs = ~p.~n", [Addrs]),
- ?line [check_addr(Addr) || Addr <- Addrs],
+ {ok,IfAddrs} = inet:getifaddrs(),
+ io:format("IfAddrs = ~p.~n", [IfAddrs]),
+ case [If || {If,Opts} <- IfAddrs, lists:keymember(hwaddr, 1, Opts)] of
+ [] ->
+ case os:type() of
+ {unix,sunos} -> ok;
+ OT ->
+ ct:fail({should_have_hwaddr,OT})
+ end;
+ [_|_] -> ok
+ end,
+ Addrs = ifaddrs(IfAddrs),
+ io:format("Addrs = ~p.~n", [Addrs]),
+ [check_addr(Addr) || Addr <- Addrs],
ok.
check_addr(Addr)
when tuple_size(Addr) =:= 8,
element(1, Addr) band 16#FFC0 =:= 16#FE80 ->
- ?line ?t:format("Addr: ~p link local; SKIPPED!~n", [Addr]),
+ io:format("Addr: ~p link local; SKIPPED!~n", [Addr]),
ok;
check_addr(Addr) ->
- ?line ?t:format("Addr: ~p.~n", [Addr]),
- ?line Ping = "ping",
- ?line Pong = "pong",
- ?line {ok,L} = gen_tcp:listen(0, [{ip,Addr},{active,false}]),
- ?line {ok,P} = inet:port(L),
- ?line {ok,S1} = gen_tcp:connect(Addr, P, [{active,false}]),
- ?line {ok,S2} = gen_tcp:accept(L),
- ?line ok = gen_tcp:send(S2, Ping),
- ?line {ok,Ping} = gen_tcp:recv(S1, length(Ping)),
- ?line ok = gen_tcp:send(S1, Pong),
- ?line ok = gen_tcp:close(S1),
- ?line {ok,Pong} = gen_tcp:recv(S2, length(Pong)),
- ?line ok = gen_tcp:close(S2),
- ?line ok = gen_tcp:close(L),
- ok.
+ io:format("Addr: ~p.~n", [Addr]),
+ Ping = "ping",
+ Pong = "pong",
+ {ok,L} = gen_tcp:listen(0, [{ip,Addr},{active,false}]),
+ {ok,P} = inet:port(L),
+ {ok,S1} = gen_tcp:connect(Addr, P, [{active,false}]),
+ {ok,S2} = gen_tcp:accept(L),
+ ok = gen_tcp:send(S2, Ping),
+ {ok,Ping} = gen_tcp:recv(S1, length(Ping)),
+ ok = gen_tcp:send(S1, Pong),
+ ok = gen_tcp:close(S1),
+ {ok,Pong} = gen_tcp:recv(S2, length(Pong)),
+ ok = gen_tcp:close(S2),
+ ok = gen_tcp:close(L).
+
+ifaddrs(IfOpts) ->
+ IfMap = collect_ifopts(IfOpts),
+ ChkFun =
+ fun Self({{_,Flags} = Key, Opts}, ok) ->
+ Broadcast = lists:member(broadcast, Flags),
+ P2P = lists:member(pointtopoint, Flags),
+ case Opts of
+ [{addr,_},{netmask,_},{broadaddr,_}|Os]
+ when Broadcast ->
+ Self({Key, Os}, ok);
+ [{addr,_},{netmask,_},{dstaddr,_}|Os]
+ when P2P ->
+ Self({Key, Os}, ok);
+ [{addr,_},{netmask,_}|Os] ->
+ Self({Key, Os}, ok);
+ [{hwaddr,_}|Os] ->
+ Self({Key, Os}, ok);
+ [] ->
+ ok
+ end
+ end,
+ fold_ifopts(ChkFun, ok, IfMap),
+ AddrsFun =
+ fun ({{_,Flags}, Opts}, Acc) ->
+ case
+ lists:member(running, Flags)
+ andalso (not lists:member(pointtopoint, Flags))
+ of
+ true ->
+ lists:reverse(
+ [Addr || {addr,Addr} <- Opts],
+ Acc);
+ false ->
+ Acc
+ end
+ end,
+ fold_ifopts(AddrsFun, [], IfMap).
+
+collect_ifopts(IfOpts) ->
+ collect_ifopts(IfOpts, #{}).
+%%
+collect_ifopts(IfOpts, IfMap) ->
+ case IfOpts of
+ [{If,[{flags,Flags}|Opts]}|IfOs] ->
+ Key = {If,Flags},
+ case maps:is_key(Key, IfMap) of
+ true ->
+ ct:fail({unexpected_ifopts,IfOpts,IfMap});
+ false ->
+ collect_ifopts(IfOs, IfMap, Opts, Key, [])
+ end;
+ [] ->
+ IfMap;
+ _ ->
+ ct:fail({unexpected_ifopts,IfOpts,IfMap})
+ end.
+%%
+collect_ifopts(IfOpts, IfMap, Opts, Key, R) ->
+ case Opts of
+ [{flags,_}|_] ->
+ {If,_} = Key,
+ collect_ifopts(
+ [{If,Opts}|IfOpts], maps:put(Key, lists:reverse(R), IfMap));
+ [OptVal|Os] ->
+ collect_ifopts(IfOpts, IfMap, Os, Key, [OptVal|R]);
+ [] ->
+ collect_ifopts(IfOpts, maps:put(Key, lists:reverse(R), IfMap))
+ end.
--record(ifopts, {name,flags,addrs=[],hwaddr}).
-
-ifaddrs([]) -> [];
-ifaddrs([{If,Opts}|IOs]) ->
- ?line #ifopts{flags=Flags} = Ifopts =
- check_ifopts(Opts, #ifopts{name=If}),
- ?line case Flags =/= undefined andalso lists:member(up, Flags) of
- true ->
- Ifopts#ifopts.addrs;
- false ->
- []
- end++ifaddrs(IOs).
-
-check_ifopts([], #ifopts{name=If,flags=Flags,addrs=Raddrs}=Ifopts) ->
- Addrs = lists:reverse(Raddrs),
- R = Ifopts#ifopts{addrs=Addrs},
- ?t:format("~p.~n", [R]),
- %% See how we did...
- if is_list(Flags) -> ok;
- true ->
- ?t:fail({flags_undefined,If})
- end,
- case lists:member(broadcast, Flags) of
- true ->
- [case A of
- {_,_,_} -> A;
- {T,_} when tuple_size(T) =:= 8 -> A;
- _ ->
- ?t:fail({broaddr_missing,If,A})
- end || A <- Addrs];
- false ->
- [case A of {_,_} -> A;
- _ ->
- ?t:fail({should_have_netmask,If,A})
- end || A <- Addrs]
- end,
- R;
-check_ifopts([{flags,Flags}|Opts], #ifopts{flags=undefined}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{flags=Flags});
-check_ifopts([{flags,Fs}|Opts], #ifopts{flags=Flags}=Ifopts) ->
- case Fs of
- Flags ->
- check_ifopts(Opts, Ifopts#ifopts{});
- _ ->
- ?t:fail({multiple_flags,Fs,Ifopts})
- end;
-check_ifopts(
- [{addr,Addr},{netmask,Netmask},{broadaddr,Broadaddr}|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{Addr,Netmask,Broadaddr}|Addrs]});
-check_ifopts(
- [{addr,Addr},{netmask,Netmask}|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{Addr,Netmask}|Addrs]});
-check_ifopts([{addr,Addr}|Opts], #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{Addr}|Addrs]});
-check_ifopts([{hwaddr,Hwaddr}|Opts], #ifopts{hwaddr=undefined}=Ifopts)
- when is_list(Hwaddr) ->
- check_ifopts(Opts, Ifopts#ifopts{hwaddr=Hwaddr});
-check_ifopts([{hwaddr,HwAddr}|_], #ifopts{}=Ifopts) ->
- ?t:fail({multiple_hwaddrs,HwAddr,Ifopts}).
+fold_ifopts(Fun, Acc, IfMap) ->
+ fold_ifopts(Fun, Acc, IfMap, maps:keys(IfMap)).
+%%
+fold_ifopts(Fun, Acc, IfMap, Keys) ->
+ case Keys of
+ [Key|Ks] ->
+ Opts = maps:get(Key, IfMap),
+ fold_ifopts(Fun, Fun({Key,Opts}, Acc), IfMap, Ks);
+ [] ->
+ Acc
+ end.
%% Works just like lists:member/2, except that any {127,_,_,_} tuple
%% matches any other {127,_,_,_}. We do this to handle Linux systems
@@ -1152,9 +1208,13 @@ simple_netns(Config) when is_list(Config) ->
jog_netns_opt(L),
ok = gen_tcp:close(L),
%%
- {ok,S} = gen_sctp:open(),
- jog_netns_opt(S),
- ok = gen_sctp:close(S);
+ case gen_sctp:open() of
+ {ok,S} ->
+ jog_netns_opt(S),
+ ok = gen_sctp:close(S);
+ {error,eprotonosupport} ->
+ ok
+ end;
{error,einval} ->
{skip,"setns() not supported"}
end.
@@ -1168,24 +1228,28 @@ jog_netns_opt(S) ->
ok.
+%% Smoke test netns support.
simple_netns_open(Config) when is_list(Config) ->
+ %% Note: {error,enoent} will be returned if the run-time executable
+ %% has support for netns, but /proc/self/ns/net is missing.
case gen_udp:open(0, [binary,{netns,"/"},inet]) of
{ok,U} ->
ok = gen_udp:close(U);
- {error,E1} when E1 =:= einval; E1 =:= eperm ->
+ {error,E1} when E1 =:= einval; E1 =:= eperm; E1 =:= enoent ->
ok
end,
case gen_tcp:listen(0, [binary,{netns,"/"},inet]) of
{ok,T} ->
ok = gen_tcp:close(T);
- {error,E2} when E2 =:= einval; E2 =:= eperm ->
+ {error,E2} when E2 =:= einval; E2 =:= eperm; E2 =:= enoent ->
ok
end,
try gen_sctp:open(0, [binary,{netns,"/"},inet]) of
{ok,S} ->
ok = gen_sctp:close(S);
{error,E3}
- when E3 =:= einval; E3 =:= eperm; E3 =:= eprotonosupport ->
+ when E3 =:= einval; E3 =:= eperm;
+ E3 =:= enoent; E3 =:= eprotonosupport ->
ok
catch
error:badarg ->
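%% The enoent clauses added above reflect that a runtime built with netns
%% support can still land on a host where /proc/self/ns/net is missing. A
%% minimal probe in the same spirit as the smoke test (hypothetical helper
%% name, not taken from the suite) could look like:

netns_usable(Path) ->
    case gen_udp:open(0, [binary, {netns, Path}, inet]) of
        {ok, U} ->
            ok = gen_udp:close(U),
            true;
        %% einval: no netns support; eperm: setns() not permitted;
        %% enoent: /proc/self/ns/net (or Path) does not exist
        {error, E} when E =:= einval; E =:= eperm; E =:= enoent ->
            false
    end.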
@@ -1258,3 +1322,67 @@ cmd(CmdString) ->
io:put_chars(["# ",CmdString,io_lib:nl()]),
io:put_chars([os:cmd(CmdString++" ; echo ' =>' $?")]),
ok.
+
+-define(CAP_NET_RAW, 13). %% from /usr/include/linux/capability.h
+
+can_bind_to_device({unix, linux}, {Major, _, _})
+ when Major > 2 ->
+ Status = os:cmd("cat /proc/self/status | grep CapEff"),
+ [_, CapEffStr] = string:tokens(Status, [$\n, $\t]),
+ CapEff = list_to_integer(CapEffStr, 16),
+ if CapEff band (1 bsl ?CAP_NET_RAW) =/= 0 ->
+ ok;
+ true ->
+ {skip,"insufficient capabilities, CAP_NET_RAW not granted"}
+ end;
+can_bind_to_device(_OS, _Version) ->
+ {skip,"socket option bind_to_device not supported on this OS or version"}.
+
+simple_bind_to_device(Config) when is_list(Config) ->
+ case can_bind_to_device(os:type(), os:version()) of
+ ok ->
+ {ok,U} = gen_udp:open(0),
+ jog_bind_to_device_opt(U),
+ ok = gen_udp:close(U),
+ %%
+ {ok,L} = gen_tcp:listen(0, []),
+ jog_bind_to_device_opt(L),
+ ok = gen_tcp:close(L),
+ %%
+ case gen_sctp:open() of
+ {ok,S} ->
+ jog_bind_to_device_opt(S),
+ ok = gen_sctp:close(S);
+ {error,eprotonosupport} ->
+ ok
+ end;
+ Other ->
+ Other
+ end.
+
+%% Smoke test bind_to_device support.
+simple_bind_to_device_open(Config) when is_list(Config) ->
+ case can_bind_to_device(os:type(), os:version()) of
+ ok ->
+ {ok,U} = gen_udp:open(0, [binary,{bind_to_device,<<"lo">>},inet]),
+ ok = gen_udp:close(U),
+ {ok,T} = gen_tcp:listen(0, [binary,{bind_to_device,<<"lo">>},inet]),
+ ok = gen_tcp:close(T),
+
+ case gen_sctp:open(0, [binary,{bind_to_device,<<"lo">>},inet]) of
+ {ok,S} ->
+ ok = gen_sctp:close(S);
+ {error,eprotonosupport} ->
+ ok
+ end;
+ Other ->
+ Other
+ end.
+
+jog_bind_to_device_opt(S) ->
+ %% This is just jogging the option mechanics
+ ok = inet:setopts(S, [{bind_to_device,<<>>}]),
+ {ok,[{bind_to_device,<<>>}]} = inet:getopts(S, [bind_to_device]),
+ ok = inet:setopts(S, [{bind_to_device,<<"lo">>}]),
+ {ok,[{bind_to_device,<<"lo">>}]} = inet:getopts(S, [bind_to_device]),
+ ok.
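%% A stand-alone sketch of the CapEff check performed by can_bind_to_device/2
%% above (assumes a Linux /proc layout; the function name is illustrative only
%% and reuses the ?CAP_NET_RAW define from the patch):

has_cap_net_raw() ->
    %% /proc/self/status holds "CapEff:\t<hex mask>"; CAP_NET_RAW is bit 13.
    Status = os:cmd("grep CapEff /proc/self/status"),
    [_, CapEffHex] = string:tokens(Status, [$\n, $\t]),
    CapEff = list_to_integer(CapEffHex, 16),
    CapEff band (1 bsl ?CAP_NET_RAW) =/= 0.

Only when that bit is set can an unprivileged test run expect
{bind_to_device,<<"lo">>} to succeed.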
diff --git a/lib/kernel/test/inet_res_SUITE.erl b/lib/kernel/test/inet_res_SUITE.erl
index 69dcb5a936..df6e48abae 100644
--- a/lib/kernel/test/inet_res_SUITE.erl
+++ b/lib/kernel/test/inet_res_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2009-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2009-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -42,7 +42,24 @@
-define(RUN_NAMED, "run-named").
-suite() -> [{ct_hooks,[ts_install_cth]}].
+%% This test suite uses a script ?RUN_NAMED that tries to start
+%% a temporary local nameserver BIND 8 or 9 that must be installed
+%% on your machine.
+%%
+%% For example, on Ubuntu 14.04, as root:
+%% apt-get install bind9
+%% Now, that is not enough since Apparmor will not allow
+%% the nameserver daemon /usr/sbin/named to read from the test directory.
+%% Assuming that you run tests in /ldisk/daily_build, and still on
+%% Ubuntu 14.04, make /etc/apparmor.d/local/usr.sbin.named contain:
+%% /ldisk/daily_build/** r,
+%% And yes, the trailing comma must be there...
+
+
+
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[basic, resolve, edns0, txt_record, files_monitor,
@@ -77,8 +94,8 @@ zone_dir(TC) ->
end.
init_per_testcase(Func, Config) ->
- PrivDir = ?config(priv_dir, Config),
- DataDir = ?config(data_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ DataDir = proplists:get_value(data_dir, Config),
try ns_init(zone_dir(Func), PrivDir, DataDir) of
NsSpec ->
Lookup = inet_db:res_option(lookup),
@@ -88,29 +105,27 @@ init_per_testcase(Func, Config) ->
inet_db:ins_alt_ns(IP, Port);
_ -> ok
end,
- Dog = test_server:timetrap(test_server:seconds(20)),
- [{nameserver,NsSpec},{res_lookup,Lookup},{watchdog,Dog}|Config]
+ [{nameserver,NsSpec},{res_lookup,Lookup}|Config]
catch
SkipReason ->
{skip,SkipReason}
end.
end_per_testcase(_Func, Config) ->
- test_server:timetrap_cancel(?config(watchdog, Config)),
- inet_db:set_lookup(?config(res_lookup, Config)),
- NsSpec = ?config(nameserver, Config),
+ inet_db:set_lookup(proplists:get_value(res_lookup, Config)),
+ NsSpec = proplists:get_value(nameserver, Config),
case NsSpec of
{_,{IP,Port},_} ->
inet_db:del_alt_ns(IP, Port);
_ -> ok
end,
- ns_end(NsSpec, ?config(priv_dir, Config)).
+ ns_end(NsSpec, proplists:get_value(priv_dir, Config)).
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Nameserver control
ns(Config) ->
- {_ZoneDir,NS,_P} = ?config(nameserver, Config),
+ {_ZoneDir,NS,_P} = proplists:get_value(nameserver, Config),
NS.
ns_init(ZoneDir, PrivDir, DataDir) ->
@@ -119,7 +134,7 @@ ns_init(ZoneDir, PrivDir, DataDir) ->
{unix,_} ->
PortNum = case {os:type(),os:version()} of
{{unix,solaris},{M,V,_}} when M =< 5, V < 10 ->
- 11895 + random:uniform(100);
+ 11895 + rand:uniform(100);
_ ->
{ok,S} = gen_udp:open(0, [{reuseaddr,true}]),
{ok,PNum} = inet:port(S),
@@ -202,10 +217,10 @@ proxy_start(TC, {NS,P}) ->
spawn_link(
fun () ->
try proxy_start(TC, NS, P, Parent, Tag)
- catch C:X ->
+ catch C:X:Stacktrace ->
io:format(
"~w: ~w:~p ~p~n",
- [self(),C,X,erlang:get_stacktrace()])
+ [self(),C,X,Stacktrace])
end
end),
receive {started,Tag,Port} ->
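%% The catch clause above uses the Class:Reason:Stacktrace pattern available
%% since OTP 21, replacing the deprecated erlang:get_stacktrace/0. In
%% isolation the construct looks like this (illustrative snippet):

try
    error(boom)
catch
    Class:Reason:Stacktrace ->
        io:format("~w:~p~n~p~n", [Class, Reason, Stacktrace])
end.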
@@ -277,8 +292,7 @@ proxy_ns({proxy,_,_,ProxyNS}) -> ProxyNS.
%%
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-basic(doc) ->
- ["Lookup an A record with different API functions"];
+%% Lookup an A record with different API functions.
basic(Config) when is_list(Config) ->
NS = ns(Config),
Name = "ns.otptest",
@@ -340,8 +354,7 @@ basic(Config) when is_list(Config) ->
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-resolve(doc) ->
- ["Lookup different records using resolve/2..4"];
+%% Lookup different records using resolve/2..4.
resolve(Config) when is_list(Config) ->
Class = in,
NS = ns(Config),
@@ -471,8 +484,7 @@ check_msg(Class, Type, Msg, AnList, NsList) ->
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-edns0(doc) ->
- ["Test EDNS and truncation"];
+%% Test EDNS and truncation.
edns0(Config) when is_list(Config) ->
NS = ns(Config),
Domain = "otptest",
@@ -533,10 +545,7 @@ inet_res_filter(Anlist, Class, Type) ->
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-txt_record(suite) ->
- [];
-txt_record(doc) ->
- ["Tests TXT records"];
+%% Tests TXT records.
txt_record(Config) when is_list(Config) ->
D1 = "cslab.ericsson.net",
D2 = "mail1.cslab.ericsson.net",
@@ -555,10 +564,7 @@ txt_record(Config) when is_list(Config) ->
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-files_monitor(suite) ->
- [];
-files_monitor(doc) ->
- ["Tests monitoring of /etc/hosts and /etc/resolv.conf, but not them"];
+%% Tests monitoring of /etc/hosts and /etc/resolv.conf, but not those files themselves.
files_monitor(Config) when is_list(Config) ->
Search = inet_db:res_option(search),
HostsFile = inet_db:res_option(hosts_file),
@@ -573,7 +579,7 @@ files_monitor(Config) when is_list(Config) ->
end.
do_files_monitor(Config) ->
- Dir = ?config(priv_dir, Config),
+ Dir = proplists:get_value(priv_dir, Config),
{ok,Hostname} = inet:gethostname(),
io:format("Hostname = ~p.~n", [Hostname]),
FQDN =
@@ -647,8 +653,7 @@ do_files_monitor(Config) ->
%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-last_ms_answer(doc) ->
- ["Answer just when timeout is triggered (OTP-9221)"];
+%% Answer just when timeout is triggered (OTP-9221).
last_ms_answer(Config) when is_list(Config) ->
NS = ns(Config),
Name = "ns.otptest",
diff --git a/lib/kernel/test/inet_res_SUITE_data/run-named b/lib/kernel/test/inet_res_SUITE_data/run-named
index d9befb352d..d67295773a 100755
--- a/lib/kernel/test/inet_res_SUITE_data/run-named
+++ b/lib/kernel/test/inet_res_SUITE_data/run-named
@@ -2,7 +2,7 @@
##
## %CopyrightBegin%
##
-## Copyright Ericsson AB 2009-2012. All Rights Reserved.
+## Copyright Ericsson AB 2009-2016. All Rights Reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/inet_sockopt_SUITE.erl b/lib/kernel/test/inet_sockopt_SUITE.erl
index cb522c8abe..27ff74e309 100644
--- a/lib/kernel/test/inet_sockopt_SUITE.erl
+++ b/lib/kernel/test/inet_sockopt_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2015. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
%%
-module(inet_sockopt_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-define(C_GET_IPPROTO_TCP,1).
@@ -62,7 +62,9 @@
-export([init_per_testcase/2, end_per_testcase/2]).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[simple, loop_all, simple_raw, simple_raw_getbin,
@@ -90,97 +92,96 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(_Func, Config) ->
- Dog = test_server:timetrap(test_server:seconds(60)),
- [{watchdog,Dog}|Config].
+ Config.
-end_per_testcase(_Func, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog).
+end_per_testcase(_Func, _Config) ->
+ ok.
-simple(suite) -> [];
-simple(doc) -> "Test inet:setopt/getopt simple functionality.";
+%% Test inet:setopt/getopt simple functionality.
simple(Config) when is_list(Config) ->
- ?line XOpt = case os:type() of
- {unix,_} -> [{reuseaddr,true}];
- _ -> []
- end,
- ?line Opt = [{nodelay,true},
- {keepalive,true},{packet,4},
- {active,false}|XOpt],
- ?line OptTags = [X || {X,_} <- Opt],
- ?line {S1,S2} = create_socketpair(Opt, Opt),
- ?line {ok,Opt} = inet:getopts(S1,OptTags),
- ?line {ok,Opt} = inet:getopts(S2,OptTags),
- ?line COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- Opt],
- ?line inet:setopts(S1,COpt),
- ?line {ok,COpt} = inet:getopts(S1,OptTags),
- ?line {ok,Opt} = inet:getopts(S2,OptTags),
- ?line gen_tcp:close(S1),
- ?line gen_tcp:close(S2),
+ XOpt = case os:type() of
+ {unix,_} -> [{reuseaddr,true}];
+ _ -> []
+ end,
+ Opt = [{nodelay,true},
+ {keepalive,true},{packet,4},
+ {active,false}|XOpt],
+ OptTags = [X || {X,_} <- Opt],
+ {S1,S2} = create_socketpair(Opt, Opt),
+ {ok,Opt} = inet:getopts(S1,OptTags),
+ {ok,Opt} = inet:getopts(S2,OptTags),
+ NoPushOpt = case os:type() of
+ {unix, Osname} when Osname =:= linux; Osname =:= freebsd -> {nopush, true};
+ {_,_} -> {nopush, false}
+ end,
+ COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- [NoPushOpt|Opt]],
+ COptTags = [X || {X,_} <- COpt],
+ inet:setopts(S1,COpt),
+ {ok,COpt} = inet:getopts(S1,COptTags),
+ {ok,Opt} = inet:getopts(S2,OptTags),
+ gen_tcp:close(S1),
+ gen_tcp:close(S2),
ok.
-loop_all(suite) -> [];
-loop_all(doc) -> "Loop through all socket options and check that they work";
+%% Loop through all socket options and check that they work.
loop_all(Config) when is_list(Config) ->
- ?line ListenFailures =
+ ListenFailures =
lists:foldr(make_check_fun(listen,1),[],all_listen_options()),
- ?line ConnectFailures =
+ ConnectFailures =
lists:foldr(make_check_fun(connect,2),[],all_connect_options()),
- ?line case ListenFailures++ConnectFailures of
- [] ->
- ?line ok;
- Failed ->
- ?line {comment,lists:flatten(
- io_lib:format("Non mandatory failed:~w",
- [Failed]))}
- end.
+ case ListenFailures++ConnectFailures of
+ [] ->
+ ok;
+ Failed ->
+ {comment,lists:flatten(
+ io_lib:format("Non mandatory failed:~w",
+ [Failed]))}
+ end.
-simple_raw(suite) -> [];
-simple_raw(doc) -> "Test simple setopt/getopt of raw options.";
+%% Test simple setopt/getopt of raw options.
simple_raw(Config) when is_list(Config) ->
do_simple_raw(Config,false).
-simple_raw_getbin(suite) -> [];
-simple_raw_getbin(doc) -> "Test simple setopt/getopt of raw options, "
- "with binaries in getopt.";
+
+%% Test simple setopt/getopt of raw options, with binaries in getopt.
simple_raw_getbin(Config) when is_list(Config) ->
do_simple_raw(Config,true).
do_simple_raw(Config,Binary) when is_list(Config) ->
- ?line Port = start_helper(Config),
- ?line SolSocket = ask_helper(Port,?C_GET_SOL_SOCKET),
- ?line SoKeepAlive = ask_helper(Port,?C_GET_SO_KEEPALIVE),
- ?line OptionTrue = {raw,SolSocket,SoKeepAlive,<<1:32/native>>},
- ?line OptionFalse = {raw,SolSocket,SoKeepAlive,<<0:32/native>>},
- ?line {S1,S2} = create_socketpair([OptionTrue],[{keepalive,true}]),
- ?line {ok,[{keepalive,true}]} = inet:getopts(S1,[keepalive]),
- ?line {ok,[{keepalive,true}]} = inet:getopts(S2,[keepalive]),
- ?line {ok,[{raw,SolSocket,SoKeepAlive,X1B}]} =
+ Port = start_helper(Config),
+ SolSocket = ask_helper(Port,?C_GET_SOL_SOCKET),
+ SoKeepAlive = ask_helper(Port,?C_GET_SO_KEEPALIVE),
+ OptionTrue = {raw,SolSocket,SoKeepAlive,<<1:32/native>>},
+ OptionFalse = {raw,SolSocket,SoKeepAlive,<<0:32/native>>},
+ {S1,S2} = create_socketpair([OptionTrue],[{keepalive,true}]),
+ {ok,[{keepalive,true}]} = inet:getopts(S1,[keepalive]),
+ {ok,[{keepalive,true}]} = inet:getopts(S2,[keepalive]),
+ {ok,[{raw,SolSocket,SoKeepAlive,X1B}]} =
inet:getopts(S1,[{raw,SolSocket,SoKeepAlive,binarify(4,Binary)}]),
- ?line X1 = nintbin2int(X1B),
- ?line {ok,[{raw,SolSocket,SoKeepAlive,X2B}]} =
+ X1 = nintbin2int(X1B),
+ {ok,[{raw,SolSocket,SoKeepAlive,X2B}]} =
inet:getopts(S2,[{raw,SolSocket,SoKeepAlive,binarify(4,Binary)}]),
- ?line X2 = nintbin2int(X2B),
- ?line true = X1 > 0,
- ?line true = X2 > 0,
- ?line inet:setopts(S1,[{keepalive,false}]),
- ?line inet:setopts(S2,[OptionFalse]),
- ?line {ok,[{keepalive,false}]} = inet:getopts(S1,[keepalive]),
- ?line {ok,[{keepalive,false}]} = inet:getopts(S2,[keepalive]),
- ?line {ok,[{raw,SolSocket,SoKeepAlive,Y1B}]} =
+ X2 = nintbin2int(X2B),
+ true = X1 > 0,
+ true = X2 > 0,
+ inet:setopts(S1,[{keepalive,false}]),
+ inet:setopts(S2,[OptionFalse]),
+ {ok,[{keepalive,false}]} = inet:getopts(S1,[keepalive]),
+ {ok,[{keepalive,false}]} = inet:getopts(S2,[keepalive]),
+ {ok,[{raw,SolSocket,SoKeepAlive,Y1B}]} =
inet:getopts(S1,[{raw,SolSocket,SoKeepAlive,binarify(4,Binary)}]),
- ?line Y1 = nintbin2int(Y1B),
- ?line {ok,[{raw,SolSocket,SoKeepAlive,Y2B}]} =
+ Y1 = nintbin2int(Y1B),
+ {ok,[{raw,SolSocket,SoKeepAlive,Y2B}]} =
inet:getopts(S2,[{raw,SolSocket,SoKeepAlive,binarify(4,Binary)}]),
- ?line Y2 = nintbin2int(Y2B),
- ?line true = Y1 == 0,
- ?line true = Y2 == 0,
- ?line gen_tcp:close(S1),
- ?line gen_tcp:close(S2),
- ?line stop_helper(Port),
+ Y2 = nintbin2int(Y2B),
+ true = Y1 == 0,
+ true = Y2 == 0,
+ gen_tcp:close(S1),
+ gen_tcp:close(S2),
+ stop_helper(Port),
ok.
-
+
nintbin2int(<<Int:32/native>>) -> Int;
nintbin2int(<<Int:24/native>>) -> Int;
nintbin2int(<<Int:16/native>>) -> Int;
@@ -189,13 +190,12 @@ nintbin2int(<<>>) -> 0.
-multiple_raw(suite) -> [];
-multiple_raw(doc) -> "Test setopt/getopt of multiple raw options.";
+%% Test setopt/getopt of multiple raw options.
multiple_raw(Config) when is_list(Config) ->
do_multiple_raw(Config,false).
-multiple_raw_getbin(suite) -> [];
-multiple_raw_getbin(doc) -> "Test setopt/getopt of multiple raw options, "
- "with binaries in getopt.";
+
+%% Test setopt/getopt of multiple raw options, with binaries in
+%% getopt.
multiple_raw_getbin(Config) when is_list(Config) ->
do_multiple_raw(Config,true).
@@ -265,145 +265,143 @@ do_multiple_raw(Config, Binary) ->
-doc_examples_raw(suite) -> [];
-doc_examples_raw(doc) -> "Test that the example code from the documentation "
- "works";
+%% Test that the example code from the documentation works.
doc_examples_raw(Config) when is_list(Config) ->
do_doc_examples_raw(Config,false).
-doc_examples_raw_getbin(suite) -> [];
-doc_examples_raw_getbin(doc) -> "Test that the example code from the "
- "documentation works when getopt uses "
- "binaries";
+
+%% Test that the example code from the documentation works when getopt
+%% uses binaries.
doc_examples_raw_getbin(Config) when is_list(Config) ->
do_doc_examples_raw(Config,true).
+
do_doc_examples_raw(Config,Binary) when is_list(Config) ->
- ?line Port = start_helper(Config),
- ?line Proto = ask_helper(Port,?C_GET_IPPROTO_TCP),
- ?line TcpInfo = ask_helper(Port,?C_GET_TCP_INFO),
- ?line TcpInfoSize = ask_helper(Port,?C_GET_TCP_INFO_SIZE),
- ?line TcpiSackedOffset = ask_helper(Port,?C_GET_OFF_TCPI_SACKED),
- ?line TcpiOptionsOffset = ask_helper(Port,?C_GET_OFF_TCPI_OPTIONS),
- ?line TcpiSackedSize = ask_helper(Port,?C_GET_SIZ_TCPI_SACKED),
- ?line TcpiOptionsSize = ask_helper(Port,?C_GET_SIZ_TCPI_OPTIONS),
- ?line TcpLinger2 = ask_helper(Port,?C_GET_TCP_LINGER2),
- ?line stop_helper(Port),
+ Port = start_helper(Config),
+ Proto = ask_helper(Port,?C_GET_IPPROTO_TCP),
+ TcpInfo = ask_helper(Port,?C_GET_TCP_INFO),
+ TcpInfoSize = ask_helper(Port,?C_GET_TCP_INFO_SIZE),
+ TcpiSackedOffset = ask_helper(Port,?C_GET_OFF_TCPI_SACKED),
+ TcpiOptionsOffset = ask_helper(Port,?C_GET_OFF_TCPI_OPTIONS),
+ TcpiSackedSize = ask_helper(Port,?C_GET_SIZ_TCPI_SACKED),
+ TcpiOptionsSize = ask_helper(Port,?C_GET_SIZ_TCPI_OPTIONS),
+ TcpLinger2 = ask_helper(Port,?C_GET_TCP_LINGER2),
+ stop_helper(Port),
case all_ok([Proto,TcpInfo,TcpInfoSize,TcpiSackedOffset,
TcpiOptionsOffset,TcpiSackedSize,TcpiOptionsSize,
TcpLinger2]) of
false ->
{skipped,"Does not run on this OS."};
true ->
- ?line {Sock,I} = create_socketpair([],[]),
- ?line {ok,[{raw,Proto,TcpLinger2,<<OrigLinger:32/native>>}]} =
+ {Sock,I} = create_socketpair([],[]),
+ {ok,[{raw,Proto,TcpLinger2,<<OrigLinger:32/native>>}]} =
inet:getopts(Sock,[{raw,Proto,TcpLinger2,binarify(4,Binary)}]),
- ?line NewLinger = OrigLinger div 2,
- ?line ok = inet:setopts(Sock,[{raw,Proto,TcpLinger2,
- <<NewLinger:32/native>>}]),
- ?line {ok,[{raw,Proto,TcpLinger2,<<NewLinger:32/native>>}]} =
+ NewLinger = OrigLinger div 2,
+ ok = inet:setopts(Sock,[{raw,Proto,TcpLinger2,
+ <<NewLinger:32/native>>}]),
+ {ok,[{raw,Proto,TcpLinger2,<<NewLinger:32/native>>}]} =
inet:getopts(Sock,[{raw,Proto,TcpLinger2,binarify(4,Binary)}]),
- ?line ok = inet:setopts(Sock,[{raw,Proto,TcpLinger2,
- <<OrigLinger:32/native>>}]),
- ?line {ok,[{raw,Proto,TcpLinger2,<<OrigLinger:32/native>>}]} =
+ ok = inet:setopts(Sock,[{raw,Proto,TcpLinger2,
+ <<OrigLinger:32/native>>}]),
+ {ok,[{raw,Proto,TcpLinger2,<<OrigLinger:32/native>>}]} =
inet:getopts(Sock,[{raw,Proto,TcpLinger2,binarify(4,Binary)}]),
- ?line {ok,[{raw,_,_,Info}]} =
+ {ok,[{raw,_,_,Info}]} =
inet:getopts(Sock,[{raw,Proto,TcpInfo,
binarify(TcpInfoSize,Binary)}]),
- ?line Bit1 = TcpiSackedSize * 8,
- ?line <<_:TcpiSackedOffset/binary,
- TcpiSacked:Bit1/native,_/binary>> =
+ Bit1 = TcpiSackedSize * 8,
+ <<_:TcpiSackedOffset/binary,
+ TcpiSacked:Bit1/native,_/binary>> =
Info,
- ?line 0 = TcpiSacked,
- ?line Bit2 = TcpiOptionsSize * 8,
- ?line <<_:TcpiOptionsOffset/binary,
- TcpiOptions:Bit2/native,_/binary>> =
+ 0 = TcpiSacked,
+ Bit2 = TcpiOptionsSize * 8,
+ <<_:TcpiOptionsOffset/binary,
+ TcpiOptions:Bit2/native,_/binary>> =
Info,
- ?line true = TcpiOptions =/= 0,
- ?line gen_tcp:close(Sock),
- ?line gen_tcp:close(I),
+ true = TcpiOptions =/= 0,
+ gen_tcp:close(Sock),
+ gen_tcp:close(I),
ok
end.
-
-large_raw(suite) -> [];
-large_raw(doc) -> "Test structs and large/too large buffers when raw";
+
+%% Test structs and large/too large buffers when raw.
large_raw(Config) when is_list(Config) ->
do_large_raw(Config,false).
-large_raw_getbin(suite) -> [];
-large_raw_getbin(doc) -> "Test structs and large/too large buffers when raw"
- "using binaries to getopts";
+
+%% Test structs and large/too large buffers when raw
+%% using binaries to getopts.
large_raw_getbin(Config) when is_list(Config) ->
do_large_raw(Config,true).
+
do_large_raw(Config,Binary) when is_list(Config) ->
- ?line Port = start_helper(Config),
- ?line Proto = ask_helper(Port,?C_GET_SOL_SOCKET),
- ?line Linger = ask_helper(Port,?C_GET_SO_LINGER),
- ?line LingerSize = ask_helper(Port,?C_GET_LINGER_SIZE),
- ?line LingerOnOffOffset = ask_helper(Port,?C_GET_OFF_LINGER_L_ONOFF),
- ?line LingerLingerOffset = ask_helper(Port,?C_GET_OFF_LINGER_L_LINGER),
- ?line LingerOnOffSize = ask_helper(Port,?C_GET_SIZ_LINGER_L_ONOFF),
- ?line LingerLingerSize = ask_helper(Port,?C_GET_SIZ_LINGER_L_LINGER),
- ?line stop_helper(Port),
+ Port = start_helper(Config),
+ Proto = ask_helper(Port,?C_GET_SOL_SOCKET),
+ Linger = ask_helper(Port,?C_GET_SO_LINGER),
+ LingerSize = ask_helper(Port,?C_GET_LINGER_SIZE),
+ LingerOnOffOffset = ask_helper(Port,?C_GET_OFF_LINGER_L_ONOFF),
+ LingerLingerOffset = ask_helper(Port,?C_GET_OFF_LINGER_L_LINGER),
+ LingerOnOffSize = ask_helper(Port,?C_GET_SIZ_LINGER_L_ONOFF),
+ LingerLingerSize = ask_helper(Port,?C_GET_SIZ_LINGER_L_LINGER),
+ stop_helper(Port),
case all_ok([Proto,Linger,LingerSize,LingerOnOffOffset,
LingerLingerOffset,LingerOnOffSize,LingerLingerSize]) of
false ->
{skipped,"Does not run on this OS."};
true ->
- ?line {Sock1,Sock2} = create_socketpair([{linger,{true,10}}],
- [{linger,{false,0}}]),
- ?line LargeSize = 1024, % Solaris can take up to 1024*9,
- % linux 1024*63...
- ?line TooLargeSize = 1024*64,
- ?line {ok,[{raw,Proto,Linger,Linger1}]} =
+ {Sock1,Sock2} = create_socketpair([{linger,{true,10}}],
+ [{linger,{false,0}}]),
+ LargeSize = 1024, % Solaris can take up to 1024*9,
+ % linux 1024*63...
+ TooLargeSize = 1024*64,
+ {ok,[{raw,Proto,Linger,Linger1}]} =
inet:getopts(Sock1,[{raw,Proto,Linger,
binarify(LargeSize,Binary)}]),
- ?line {ok,[{raw,Proto,Linger,Linger2}]} =
+ {ok,[{raw,Proto,Linger,Linger2}]} =
inet:getopts(Sock2,[{raw,Proto,Linger,
binarify(LingerSize,Binary)}]),
- ?line true = byte_size(Linger1) =:= LingerSize,
- ?line LingerLingerBits = LingerLingerSize * 8,
- ?line LingerOnOffBits = LingerOnOffSize * 8,
- ?line <<_:LingerLingerOffset/binary,
- Ling1:LingerLingerBits/native,_/binary>> = Linger1,
- ?line <<_:LingerOnOffOffset/binary,
- Off1:LingerOnOffBits/native,_/binary>> = Linger1,
- ?line <<_:LingerOnOffOffset/binary,
- Off2:LingerOnOffBits/native,_/binary>> = Linger2,
- ?line true = Off1 =/= 0,
- ?line true = Off2 == 0,
- ?line true = Ling1 == 10,
- ?line {error,einval} =
+ true = byte_size(Linger1) =:= LingerSize,
+ LingerLingerBits = LingerLingerSize * 8,
+ LingerOnOffBits = LingerOnOffSize * 8,
+ <<_:LingerLingerOffset/binary,
+ Ling1:LingerLingerBits/native,_/binary>> = Linger1,
+ <<_:LingerOnOffOffset/binary,
+ Off1:LingerOnOffBits/native,_/binary>> = Linger1,
+ <<_:LingerOnOffOffset/binary,
+ Off2:LingerOnOffBits/native,_/binary>> = Linger2,
+ true = Off1 =/= 0,
+ true = Off2 == 0,
+ true = Ling1 == 10,
+ {error,einval} =
inet:getopts(Sock1,[{raw,Proto,Linger,TooLargeSize}]),
- ?line gen_tcp:close(Sock1),
- ?line gen_tcp:close(Sock2),
+ gen_tcp:close(Sock1),
+ gen_tcp:close(Sock2),
ok
end.
-combined(suite) -> [];
-combined(doc) -> "Test raw structs combined w/ other options ";
+%% Test raw structs combined w/ other options.
combined(Config) when is_list(Config) ->
do_combined(Config,false).
-combined_getbin(suite) -> [];
-combined_getbin(doc) -> "Test raw structs combined w/ other options and "
- "binarise in getopts";
+
+%% Test raw structs combined w/ other options and
+%% binarise in getopts.
combined_getbin(Config) when is_list(Config) ->
do_combined(Config,true).
+
do_combined(Config,Binary) when is_list(Config) ->
- ?line Port = start_helper(Config),
- ?line Proto = ask_helper(Port,?C_GET_SOL_SOCKET),
- ?line Linger = ask_helper(Port,?C_GET_SO_LINGER),
- ?line LingerSize = ask_helper(Port,?C_GET_LINGER_SIZE),
- ?line LingerOnOffOffset = ask_helper(Port,?C_GET_OFF_LINGER_L_ONOFF),
- ?line LingerLingerOffset = ask_helper(Port,?C_GET_OFF_LINGER_L_LINGER),
- ?line LingerOnOffSize = ask_helper(Port,?C_GET_SIZ_LINGER_L_ONOFF),
- ?line LingerLingerSize = ask_helper(Port,?C_GET_SIZ_LINGER_L_LINGER),
- ?line stop_helper(Port),
+ Port = start_helper(Config),
+ Proto = ask_helper(Port,?C_GET_SOL_SOCKET),
+ Linger = ask_helper(Port,?C_GET_SO_LINGER),
+ LingerSize = ask_helper(Port,?C_GET_LINGER_SIZE),
+ LingerOnOffOffset = ask_helper(Port,?C_GET_OFF_LINGER_L_ONOFF),
+ LingerLingerOffset = ask_helper(Port,?C_GET_OFF_LINGER_L_LINGER),
+ LingerOnOffSize = ask_helper(Port,?C_GET_SIZ_LINGER_L_ONOFF),
+ LingerLingerSize = ask_helper(Port,?C_GET_SIZ_LINGER_L_LINGER),
+ stop_helper(Port),
case all_ok([Proto,Linger,LingerSize,LingerOnOffOffset,
LingerLingerOffset,LingerOnOffSize,LingerLingerSize]) of
false ->
{skipped,"Does not run on this OS."};
true ->
- ?line LingerLingerBits = LingerLingerSize * 8,
- ?line LingerOnOffBits = LingerOnOffSize * 8,
- ?line {LingerOn,LingerOff} =
+ LingerLingerBits = LingerLingerSize * 8,
+ LingerOnOffBits = LingerOnOffSize * 8,
+ {LingerOn,LingerOff} =
case LingerOnOffOffset < LingerLingerOffset of
true ->
Pad1 =
@@ -423,11 +421,11 @@ do_combined(Config,Binary) when is_list(Config) ->
lists:duplicate(Pad3Siz,
0)),
{<<Pad1/binary,1:LingerOnOffBits/native,
- Pad2/binary,10:LingerLingerBits/native,
- Pad3/binary>>,
+ Pad2/binary,10:LingerLingerBits/native,
+ Pad3/binary>>,
<<Pad1/binary,0:LingerOnOffBits/native,
- Pad2/binary,0:LingerLingerBits/native,
- Pad3/binary>>};
+ Pad2/binary,0:LingerLingerBits/native,
+ Pad3/binary>>};
false ->
Pad1 =
list_to_binary(
@@ -446,177 +444,174 @@ do_combined(Config,Binary) when is_list(Config) ->
lists:duplicate(Pad3Siz,
0)),
{<<Pad1/binary,1:LingerLingerBits/native,
- Pad2/binary,10:LingerOnOffBits/native,
- Pad3/binary>>,
+ Pad2/binary,10:LingerOnOffBits/native,
+ Pad3/binary>>,
<<Pad1/binary,0:LingerLingerBits/native,
- Pad2/binary,0:LingerOnOffBits/native,
- Pad3/binary>>}
+ Pad2/binary,0:LingerOnOffBits/native,
+ Pad3/binary>>}
end,
- ?line RawLingerOn = {raw,Proto,Linger,LingerOn},
- ?line RawLingerOff = {raw,Proto,Linger,LingerOff},
- ?line {Sock1,Sock2} =
+ RawLingerOn = {raw,Proto,Linger,LingerOn},
+ RawLingerOff = {raw,Proto,Linger,LingerOff},
+ {Sock1,Sock2} =
create_socketpair([{keepalive,true},
RawLingerOn],
[{keepalive,false},
RawLingerOff]),
- ?line {ok,[{raw,Proto,Linger,Linger1},{keepalive,Keep1}]} =
+ {ok,[{raw,Proto,Linger,Linger1},{keepalive,Keep1}]} =
inet:getopts(Sock1,[{raw,Proto,Linger,
binarify(LingerSize,Binary)},keepalive]),
- ?line {ok,[{raw,Proto,Linger,Linger2},{keepalive,Keep2}]} =
+ {ok,[{raw,Proto,Linger,Linger2},{keepalive,Keep2}]} =
inet:getopts(Sock2,[{raw,Proto,Linger,
binarify(LingerSize,Binary)},keepalive]),
- ?line true = byte_size(Linger1) =:= LingerSize,
- ?line <<_:LingerLingerOffset/binary,
- Ling1:LingerLingerBits/native,_/binary>> = Linger1,
- ?line <<_:LingerOnOffOffset/binary,
- Off1:LingerOnOffBits/native,_/binary>> = Linger1,
- ?line <<_:LingerOnOffOffset/binary,
- Off2:LingerOnOffBits/native,_/binary>> = Linger2,
- ?line true = Off1 =/= 0,
- ?line true = Off2 == 0,
- ?line true = Ling1 == 10,
- ?line true = Keep1 =:= true,
- ?line true = Keep2 =:= false,
- ?line {Sock3,Sock4} =
+ true = byte_size(Linger1) =:= LingerSize,
+ <<_:LingerLingerOffset/binary,
+ Ling1:LingerLingerBits/native,_/binary>> = Linger1,
+ <<_:LingerOnOffOffset/binary,
+ Off1:LingerOnOffBits/native,_/binary>> = Linger1,
+ <<_:LingerOnOffOffset/binary,
+ Off2:LingerOnOffBits/native,_/binary>> = Linger2,
+ true = Off1 =/= 0,
+ true = Off2 == 0,
+ true = Ling1 == 10,
+ true = Keep1 =:= true,
+ true = Keep2 =:= false,
+ {Sock3,Sock4} =
create_socketpair([RawLingerOn,{keepalive,true}],
[RawLingerOff,{keepalive,false}]),
- ?line {ok,[{raw,Proto,Linger,Linger3},{keepalive,Keep3}]} =
+ {ok,[{raw,Proto,Linger,Linger3},{keepalive,Keep3}]} =
inet:getopts(Sock3,[{raw,Proto,Linger,
binarify(LingerSize,Binary)},keepalive]),
- ?line {ok,[{raw,Proto,Linger,Linger4},{keepalive,Keep4}]} =
+ {ok,[{raw,Proto,Linger,Linger4},{keepalive,Keep4}]} =
inet:getopts(Sock4,[{raw,Proto,Linger,
binarify(LingerSize,Binary)},keepalive]),
- ?line true = byte_size(Linger3) =:= LingerSize,
- ?line <<_:LingerLingerOffset/binary,
- Ling3:LingerLingerBits/native,_/binary>> = Linger3,
- ?line <<_:LingerOnOffOffset/binary,
- Off3:LingerOnOffBits/native,_/binary>> = Linger3,
- ?line <<_:LingerOnOffOffset/binary,
- Off4:LingerOnOffBits/native,_/binary>> = Linger4,
- ?line true = Off3 =/= 0,
- ?line true = Off4 == 0,
- ?line true = Ling3 == 10,
- ?line true = Keep3 =:= true,
- ?line true = Keep4 =:= false,
- ?line {Sock5,Sock6} =
+ true = byte_size(Linger3) =:= LingerSize,
+ <<_:LingerLingerOffset/binary,
+ Ling3:LingerLingerBits/native,_/binary>> = Linger3,
+ <<_:LingerOnOffOffset/binary,
+ Off3:LingerOnOffBits/native,_/binary>> = Linger3,
+ <<_:LingerOnOffOffset/binary,
+ Off4:LingerOnOffBits/native,_/binary>> = Linger4,
+ true = Off3 =/= 0,
+ true = Off4 == 0,
+ true = Ling3 == 10,
+ true = Keep3 =:= true,
+ true = Keep4 =:= false,
+ {Sock5,Sock6} =
create_socketpair([{packet,4},RawLingerOn,{keepalive,true}],
[{packet,2},RawLingerOff,{keepalive,false}]),
- ?line {ok,[{packet,Pack5},{raw,Proto,Linger,Linger5},
- {keepalive,Keep5}]} =
+ {ok,[{packet,Pack5},{raw,Proto,Linger,Linger5},
+ {keepalive,Keep5}]} =
inet:getopts(Sock5,[packet,{raw,Proto,Linger,
binarify(LingerSize,Binary)},
keepalive]),
- ?line {ok,[{packet,Pack6},{raw,Proto,Linger,Linger6},
- {keepalive,Keep6}]} =
+ {ok,[{packet,Pack6},{raw,Proto,Linger,Linger6},
+ {keepalive,Keep6}]} =
inet:getopts(Sock6,[packet,{raw,Proto,Linger,
binarify(LingerSize,Binary)},
keepalive]),
- ?line true = byte_size(Linger5) =:= LingerSize,
- ?line <<_:LingerLingerOffset/binary,
- Ling5:LingerLingerBits/native,_/binary>> = Linger5,
- ?line <<_:LingerOnOffOffset/binary,
- Off5:LingerOnOffBits/native,_/binary>> = Linger5,
- ?line <<_:LingerOnOffOffset/binary,
- Off6:LingerOnOffBits/native,_/binary>> = Linger6,
- ?line true = Off5 =/= 0,
- ?line true = Off6 == 0,
- ?line true = Ling5 == 10,
- ?line true = Keep5 =:= true,
- ?line true = Keep6 =:= false,
- ?line true = Pack5 =:= 4,
- ?line true = Pack6 =:= 2,
- ?line inet:setopts(Sock6,[{packet,4},RawLingerOn,
- {keepalive,true}]),
- ?line {ok,[{packet,Pack7},{raw,Proto,Linger,Linger7},
- {keepalive,Keep7}]} =
+ true = byte_size(Linger5) =:= LingerSize,
+ <<_:LingerLingerOffset/binary,
+ Ling5:LingerLingerBits/native,_/binary>> = Linger5,
+ <<_:LingerOnOffOffset/binary,
+ Off5:LingerOnOffBits/native,_/binary>> = Linger5,
+ <<_:LingerOnOffOffset/binary,
+ Off6:LingerOnOffBits/native,_/binary>> = Linger6,
+ true = Off5 =/= 0,
+ true = Off6 == 0,
+ true = Ling5 == 10,
+ true = Keep5 =:= true,
+ true = Keep6 =:= false,
+ true = Pack5 =:= 4,
+ true = Pack6 =:= 2,
+ inet:setopts(Sock6,[{packet,4},RawLingerOn,
+ {keepalive,true}]),
+ {ok,[{packet,Pack7},{raw,Proto,Linger,Linger7},
+ {keepalive,Keep7}]} =
inet:getopts(Sock6,[packet,{raw,Proto,Linger,
binarify(LingerSize,Binary)},
keepalive]),
- ?line <<_:LingerOnOffOffset/binary,
- Off7:LingerOnOffBits/native,_/binary>> = Linger7,
- ?line true = Off7 =/= 0,
- ?line true = Keep7 =:= true,
- ?line true = Pack7 =:= 4,
- ?line gen_tcp:close(Sock1),
- ?line gen_tcp:close(Sock2),
- ?line gen_tcp:close(Sock3),
- ?line gen_tcp:close(Sock4),
- ?line gen_tcp:close(Sock5),
- ?line gen_tcp:close(Sock6),
+ <<_:LingerOnOffOffset/binary,
+ Off7:LingerOnOffBits/native,_/binary>> = Linger7,
+ true = Off7 =/= 0,
+ true = Keep7 =:= true,
+ true = Pack7 =:= 4,
+ gen_tcp:close(Sock1),
+ gen_tcp:close(Sock2),
+ gen_tcp:close(Sock3),
+ gen_tcp:close(Sock4),
+ gen_tcp:close(Sock5),
+ gen_tcp:close(Sock6),
ok
end.
-ipv6_v6only_udp(suite) -> [];
-ipv6_v6only_udp(doc) -> "Test socket option ipv6_v6only for UDP";
+%% Test socket option ipv6_v6only for UDP.
ipv6_v6only_udp(Config) when is_list(Config) ->
ipv6_v6only(Config, gen_udp).
-ipv6_v6only_tcp(suite) -> [];
-ipv6_v6only_tcp(doc) -> "Test socket option ipv6_v6only for TCP";
+%% Test socket option ipv6_v6only for TCP.
ipv6_v6only_tcp(Config) when is_list(Config) ->
ipv6_v6only(Config, gen_tcp).
-ipv6_v6only_sctp(suite) -> [];
-ipv6_v6only_sctp(doc) -> "Test socket option ipv6_v6only for SCTP";
+%% Test socket option ipv6_v6only for SCTP.
ipv6_v6only_sctp(Config) when is_list(Config) ->
ipv6_v6only(Config, gen_sctp).
ipv6_v6only(Config, Module) when is_list(Config) ->
- ?line case ipv6_v6only_open(Module, []) of
- {ok,S1} ->
- ?line case inet:getopts(S1, [ipv6_v6only]) of
- {ok,[{ipv6_v6only,Default}]}
- when is_boolean(Default) ->
- ?line ok =
- ipv6_v6only_close(Module, S1),
- ?line ipv6_v6only(Config, Module, Default);
- {ok,[]} ->
- ?line io:format("Not implemented.~n", []),
- %% This list of OS:es where the option is
- %% supposed to be not implemented is just
- %% a guess, and may grow with time.
- ?line case {os:type(),os:version()} of
- {{unix,linux},{2,M,_}}
- when M =< 4 -> ok
- end,
- %% At least this should work
- ?line {ok,S2} =
- ipv6_v6only_open(
- Module,
- [{ipv6_v6only,true}]),
- ?line ok =
- ipv6_v6only_close(Module, S2)
- end;
- {error,_} ->
- {skipped,"Socket type not supported"}
- end.
+ case ipv6_v6only_open(Module, []) of
+ {ok,S1} ->
+ case inet:getopts(S1, [ipv6_v6only]) of
+ {ok,[{ipv6_v6only,Default}]}
+ when is_boolean(Default) ->
+ ok =
+ ipv6_v6only_close(Module, S1),
+ ipv6_v6only(Config, Module, Default);
+ {ok,[]} ->
+ io:format("Not implemented.~n", []),
+ %% This list of OS:es where the option is
+ %% supposed to be not implemented is just
+ %% a guess, and may grow with time.
+ case {os:type(),os:version()} of
+ {{unix,linux},{2,M,_}}
+ when M =< 4 -> ok
+ end,
+ %% At least this should work
+ {ok,S2} =
+ ipv6_v6only_open(
+ Module,
+ [{ipv6_v6only,true}]),
+ ok =
+ ipv6_v6only_close(Module, S2)
+ end;
+ {error,_} ->
+ {skipped,"Socket type not supported"}
+ end.
ipv6_v6only(Config, Module, Default) when is_list(Config) ->
- ?line io:format("Default ~w.~n", [Default]),
- ?line {ok,S1} =
+ io:format("Default ~w.~n", [Default]),
+ {ok,S1} =
ipv6_v6only_open(Module, [{ipv6_v6only,Default}]),
- ?line {ok,[{ipv6_v6only,Default}]} =
+ {ok,[{ipv6_v6only,Default}]} =
inet:getopts(S1, [ipv6_v6only]),
- ?line ok =
+ ok =
ipv6_v6only_close(Module, S1),
- ?line NotDefault = not Default,
- ?line case ipv6_v6only_open(Module, [{ipv6_v6only,NotDefault}]) of
- {ok,S2} ->
- ?line io:format("Read-write.~n", []),
- ?line {ok,[{ipv6_v6only,NotDefault}]} =
- inet:getopts(S2, [ipv6_v6only]),
- ok;
- {error,einval} ->
- ?line io:format("Read-only.~n", []),
- %% This option is known to be read-only and true
- %% on Windows and OpenBSD
- ?line case os:type() of
- {unix,openbsd} when Default =:= true -> ok;
- {win32,_} when Default =:= true -> ok
- end
- end.
+ NotDefault = not Default,
+ case ipv6_v6only_open(Module, [{ipv6_v6only,NotDefault}]) of
+ {ok,S2} ->
+ io:format("Read-write.~n", []),
+ {ok,[{ipv6_v6only,NotDefault}]} =
+ inet:getopts(S2, [ipv6_v6only]),
+ ok;
+ {error,einval} ->
+ io:format("Read-only.~n", []),
+ %% This option is known to be read-only and true
+ %% on Windows and OpenBSD
+ case os:type() of
+ {unix,openbsd} when Default =:= true -> ok;
+ {win32,_} when Default =:= true -> ok
+ end
+ end.
ipv6_v6only_open(Module, Opts) ->
Module:case Module of
@@ -628,47 +623,46 @@ ipv6_v6only_close(Module, Socket) ->
Module:close(Socket).
-use_ipv6_v6only_udp(suite) -> [];
-use_ipv6_v6only_udp(doc) -> "Test using socket option ipv6_v6only for UDP";
+%% Test using socket option ipv6_v6only for UDP.
use_ipv6_v6only_udp(Config) when is_list(Config) ->
- ?line case gen_udp:open(0, [inet6,{ipv6_v6only,true}]) of
- {ok,S6} ->
- ?line case inet:getopts(S6, [ipv6_v6only]) of
- {ok,[{ipv6_v6only,true}]} ->
- use_ipv6_v6only_udp(Config, S6);
- {ok,Other} ->
- {skipped,{getopts,Other}}
- end;
- {error,_} ->
- {skipped,"Socket type not supported"}
- end.
+ case gen_udp:open(0, [inet6,{ip,{0,0,0,0,0,0,0,1}}, {ipv6_v6only,true}]) of
+ {ok,S6} ->
+ case inet:getopts(S6, [ipv6_v6only]) of
+ {ok,[{ipv6_v6only,true}]} ->
+ use_ipv6_v6only_udp(Config, S6);
+ {ok,Other} ->
+ {skipped,{getopts,Other}}
+ end;
+ {error,_} ->
+ {skipped,"Socket type not supported"}
+ end.
use_ipv6_v6only_udp(_Config, S6) ->
- ?line {ok,Port} = inet:port(S6),
- ?line {ok,S4} = gen_udp:open(Port, [inet]),
- ?line E6 = " IPv6-echo.",
- ?line E4 = " IPv4-echo.",
- ?line Sender =
+ {ok,Port} = inet:port(S6),
+ {ok,S4} = gen_udp:open(Port, [inet]),
+ E6 = " IPv6-echo.",
+ E4 = " IPv4-echo.",
+ Sender =
spawn_link(fun () -> use_ipv6_v6only_udp_sender(Port, E6, E4) end),
- ?line use_ipv6_v6only_udp_listener(
- S6, S4, E6, E4, monitor(process, Sender)).
+ use_ipv6_v6only_udp_listener(
+ S6, S4, E6, E4, monitor(process, Sender)).
use_ipv6_v6only_udp_listener(S6, S4, E6, E4, Mref) ->
- ?line receive
- {udp,S6,IP,P,Data} ->
- ?line ok = gen_udp:send(S6, IP, P, [Data|E6]),
- ?line use_ipv6_v6only_udp_listener(S6, S4, E6, E4, Mref);
- {udp,S4,IP,P,Data} ->
- ?line ok = gen_udp:send(S4, IP, P, [Data|E4]),
- ?line use_ipv6_v6only_udp_listener(S6, S4, E6, E4, Mref);
- {'DOWN',Mref,_,_,normal} ->
- ok;
- {'DOWN',Mref,_,_,Result} ->
- %% Since we are linked we will never arrive here
- Result;
- Other ->
- ?line exit({failed,{listener_unexpected,Other}})
- end.
+ receive
+ {udp,S6,IP,P,Data} ->
+ ok = gen_udp:send(S6, IP, P, [Data|E6]),
+ use_ipv6_v6only_udp_listener(S6, S4, E6, E4, Mref);
+ {udp,S4,IP,P,Data} ->
+ ok = gen_udp:send(S4, IP, P, [Data|E4]),
+ use_ipv6_v6only_udp_listener(S6, S4, E6, E4, Mref);
+ {'DOWN',Mref,_,_,normal} ->
+ ok;
+ {'DOWN',Mref,_,_,Result} ->
+ %% Since we are linked we will never arrive here
+ Result;
+ Other ->
+ exit({failed,{listener_unexpected,Other}})
+ end.
use_ipv6_v6only_udp_sender(Port, E6, E4) ->
D6 = "IPv6-send.",
@@ -693,12 +687,9 @@ sndrcv(Ip, Port, Opts, Data) ->
-type_errors(suite) ->
- [];
-type_errors(doc) ->
- "Test that raw data requests are not executed for bad types";
+%% Test that raw data requests are not executed for bad types.
type_errors(Config) when is_list(Config) ->
- ?line BadSetOptions =
+ BadSetOptions =
[
{raw,x,3,<<1:32>>},
{raw,1,tre,<<1:32>>},
@@ -716,7 +707,7 @@ type_errors(Config) when is_list(Config) ->
rav,
{linger,banan}
],
- ?line BadGetOptions =
+ BadGetOptions =
[
{raw,x,3,<<1:32>>},
{raw,1,tre,<<1:32>>},
@@ -735,46 +726,46 @@ type_errors(Config) when is_list(Config) ->
rav,
{linger,banan}
],
- ?line lists:foreach(fun(Option) ->
- ?line case
- catch create_socketpair([Option],[]) of
- {'EXIT',badarg} ->
- ?line ok;
- Unexpected1 ->
- ?line exit({unexpected,
- Unexpected1})
- end,
- ?line case
- catch create_socketpair([],[Option]) of
- {'EXIT',badarg} ->
- ?line ok;
- Unexpected2 ->
- ?line exit({unexpected,
- Unexpected2})
- end,
- ?line {Sock1,Sock2} = create_socketpair([],[]),
- ?line case inet:setopts(Sock1, [Option]) of
- {error,einval} ->
- ?line ok;
- Unexpected3 ->
- ?line exit({unexpected,
- Unexpected3})
- end,
- ?line gen_tcp:close(Sock1),
- ?line gen_tcp:close(Sock2)
- end,BadSetOptions),
- ?line {Sock1,Sock2} = create_socketpair([],[]),
- ?line lists:foreach(fun(Option) ->
- ?line case inet:getopts(Sock1, [Option]) of
- {error,einval} ->
- ?line ok;
- Unexpected ->
- ?line exit({unexpected,
- Unexpected})
- end
- end,BadGetOptions),
- ?line gen_tcp:close(Sock1),
- ?line gen_tcp:close(Sock2),
+ lists:foreach(fun(Option) ->
+ case
+ catch create_socketpair([Option],[]) of
+ {'EXIT',badarg} ->
+ ok;
+ Unexpected1 ->
+ exit({unexpected,
+ Unexpected1})
+ end,
+ case
+ catch create_socketpair([],[Option]) of
+ {'EXIT',badarg} ->
+ ok;
+ Unexpected2 ->
+ exit({unexpected,
+ Unexpected2})
+ end,
+ {Sock1,Sock2} = create_socketpair([],[]),
+ case inet:setopts(Sock1, [Option]) of
+ {error,einval} ->
+ ok;
+ Unexpected3 ->
+ exit({unexpected,
+ Unexpected3})
+ end,
+ gen_tcp:close(Sock1),
+ gen_tcp:close(Sock2)
+ end,BadSetOptions),
+ {Sock1,Sock2} = create_socketpair([],[]),
+ lists:foreach(fun(Option) ->
+ case inet:getopts(Sock1, [Option]) of
+ {error,einval} ->
+ ok;
+ Unexpected ->
+ exit({unexpected,
+ Unexpected})
+ end
+ end,BadGetOptions),
+ gen_tcp:close(Sock1),
+ gen_tcp:close(Sock2),
ok.
all_ok([]) ->
@@ -784,59 +775,59 @@ all_ok([H|T]) when H >= 0 ->
all_ok(_) ->
false.
-
+
make_check_fun(Type,Element) ->
fun({Name,V1,V2,Mand,Chang},Acc) ->
- ?line {LO1,CO1} = setelement(Element,{[],[]}, [{Name,V1}]),
- ?line {LO2,CO2} = setelement(Element,{[],[]}, [{Name,V2}]),
- ?line {X1,Y1} = create_socketpair(LO1,CO1),
- ?line {X2,Y2} = create_socketpair(LO2,CO2),
- ?line S1 = element(Element,{X1,Y1}),
- ?line S2 = element(Element,{X2,Y2}),
- ?line {ok,[{Name,R1}]} = inet:getopts(S1,[Name]),
- ?line {ok,[{Name,R2}]} = inet:getopts(S2,[Name]),
+ {LO1,CO1} = setelement(Element,{[],[]}, [{Name,V1}]),
+ {LO2,CO2} = setelement(Element,{[],[]}, [{Name,V2}]),
+ {X1,Y1} = create_socketpair(LO1,CO1),
+ {X2,Y2} = create_socketpair(LO2,CO2),
+ S1 = element(Element,{X1,Y1}),
+ S2 = element(Element,{X2,Y2}),
+ {ok,[{Name,R1}]} = inet:getopts(S1,[Name]),
+ {ok,[{Name,R2}]} = inet:getopts(S2,[Name]),
NewAcc =
case R1 =/= R2 of
true ->
case Chang of
true ->
- ?line inet:setopts(S1,[{Name,V2}]),
- ?line {ok,[{Name,R3}]} =
+ inet:setopts(S1,[{Name,V2}]),
+ {ok,[{Name,R3}]} =
inet:getopts(S1,[Name]),
case {R3 =/= R1, R3 =:= R2} of
{true,true} ->
- ?line Acc;
+ Acc;
_ ->
case Mand of
true ->
- ?line exit
- ({failed_sockopt,
- {change,
- Name}});
+ exit
+ ({failed_sockopt,
+ {change,
+ Name}});
false ->
- ?line [{change,Name}|Acc]
+ [{change,Name}|Acc]
end
end;
false ->
- ?line Acc
+ Acc
end;
false ->
case Mand of
true ->
- ?line exit({failed_sockopt,
- {Type,Name}});
+ exit({failed_sockopt,
+ {Type,Name}});
false ->
- ?line [{Type,Name}|Acc]
+ [{Type,Name}|Acc]
end
end,
- ?line gen_tcp:close(X1),
- ?line gen_tcp:close(Y1),
- ?line gen_tcp:close(X2),
- ?line gen_tcp:close(Y2),
+ gen_tcp:close(X1),
+ gen_tcp:close(Y1),
+ gen_tcp:close(X2),
+ gen_tcp:close(Y2),
NewAcc
- end.
+ end.
-% {OptionName,Value1,Value2,Mandatory,Changeable}
+%% {OptionName,Value1,Value2,Mandatory,Changeable}
all_listen_options() ->
[{tos,0,1,false,true},
{priority,0,1,false,true},
@@ -887,19 +878,19 @@ all_connect_options() ->
{delay_send,false,true,true,true},
{packet_size,0,4,true,true}
].
-
+
create_socketpair(ListenOptions,ConnectOptions) ->
- ?line {ok,LS}=gen_tcp:listen(0,ListenOptions),
- ?line {ok,Port}=inet:port(LS),
- ?line {ok,CS}=gen_tcp:connect(localhost,Port,ConnectOptions),
- ?line {ok,AS}=gen_tcp:accept(LS),
- ?line gen_tcp:close(LS),
+ {ok,LS}=gen_tcp:listen(0,ListenOptions),
+ {ok,Port}=inet:port(LS),
+ {ok,CS}=gen_tcp:connect(localhost,Port,ConnectOptions),
+ {ok,AS}=gen_tcp:accept(LS),
+ gen_tcp:close(LS),
{AS,CS}.
start_helper(Config) ->
- Progname = filename:join(?config(data_dir, Config), "sockopt_helper"),
+ Progname = filename:join(proplists:get_value(data_dir, Config), "sockopt_helper"),
Port = open_port({spawn,Progname},[eof,line]),
Port.
diff --git a/lib/kernel/test/init_SUITE.erl b/lib/kernel/test/init_SUITE.erl
index 54ab5aa566..6a006cdc01 100644
--- a/lib/kernel/test/init_SUITE.erl
+++ b/lib/kernel/test/init_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,15 +19,16 @@
%%
-module(init_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2]).
-export([get_arguments/1, get_argument/1, boot_var/1, restart/1,
- many_restarts/1,
+ many_restarts/0, many_restarts/1,
get_plain_arguments/1,
- reboot/1, stop/1, get_status/1, script_id/1]).
+ reboot/1, stop_status/1, stop/1, get_status/1, script_id/1,
+ find_system_processes/0]).
-export([boot1/1, boot2/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
@@ -41,12 +42,14 @@
%% Should be started in a CC view with:
%% erl -sname master -rsh ctrsh
%%-----------------------------------------------------------------
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,2}}].
all() ->
[get_arguments, get_argument, boot_var,
many_restarts,
- get_plain_arguments, restart, get_status, script_id,
+ get_plain_arguments, restart, stop_status, get_status, script_id,
{group, boot}].
groups() ->
@@ -65,46 +68,35 @@ end_per_group(_GroupName, Config) ->
Config.
-init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
- Dog=?t:timetrap(?t:seconds(?DEFAULT_TIMEOUT_SEC)),
- [{watchdog, Dog}|Config].
+init_per_testcase(_Func, Config) ->
+ Config.
-end_per_testcase(_Func, Config) ->
- Dog=?config(watchdog, Config),
- ?t:timetrap_cancel(Dog).
+end_per_testcase(_Func, _Config) ->
+ ok.
-init(doc) -> [];
-init(suite) -> [];
init(Config) when is_list(Config) ->
Config.
-fini(doc) -> [];
-fini(suite) -> [];
fini(Config) when is_list(Config) ->
Host = list_to_atom(from($@, atom_to_list(node()))),
Node = list_to_atom(lists:concat([init_test, "@", Host])),
stop_node(Node),
Config.
-get_arguments(doc) ->[];
-get_arguments(suite) -> {req, [distribution, {local_slave_nodes, 1}]};
get_arguments(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(10)),
-
Args = args(),
- ?line {ok, Node} = start_node(init_test, Args),
- ?line case rpc:call(Node, init, get_arguments, []) of
- Arguments when is_list(Arguments) ->
- stop_node(Node),
- check_a(Arguments),
- check_b(Arguments),
- check_c(Arguments),
- check_d(Arguments);
- _ ->
- stop_node(Node),
- ?t:fail(get_arguments)
- end,
- ?line ?t:timetrap_cancel(Dog),
+ {ok, Node} = start_node(init_test, Args),
+ case rpc:call(Node, init, get_arguments, []) of
+ Arguments when is_list(Arguments) ->
+ stop_node(Node),
+ check_a(Arguments),
+ check_b(Arguments),
+ check_c(Arguments),
+ check_d(Arguments);
+ _ ->
+ stop_node(Node),
+ ct:fail(get_arguments)
+ end,
ok.
check_a(Args) ->
@@ -115,10 +107,10 @@ check_a(Args) ->
false ->
ok;
_ ->
- ?t:fail(check_a1)
+ ct:fail(check_a1)
end;
_ ->
- ?t:fail(check_a2)
+ ct:fail(check_a2)
end.
check_b(Args) ->
@@ -132,13 +124,13 @@ check_b(Args) ->
false ->
ok;
_ ->
- ?t:fail(check_b1)
+ ct:fail(check_b1)
end;
_ ->
- ?t:fail(check_b2)
+ ct:fail(check_b2)
end;
_ ->
- ?t:fail(check_b3)
+ ct:fail(check_b3)
end.
check_c(Args) ->
@@ -152,13 +144,13 @@ check_c(Args) ->
false ->
ok;
_ ->
- ?t:fail(check_c1)
+ ct:fail(check_c1)
end;
_ ->
- ?t:fail(check_c2)
+ ct:fail(check_c2)
end;
_ ->
- ?t:fail(check_c3)
+ ct:fail(check_c3)
end.
check_d(Args) ->
@@ -169,62 +161,54 @@ check_d(Args) ->
false ->
ok;
_ ->
- ?t:fail(check_d1)
+ ct:fail(check_d1)
end;
_ ->
- ?t:fail(check_d2)
+ ct:fail(check_d2)
end.
-get_argument(doc) ->[];
-get_argument(suite) -> {req, [distribution, {local_slave_nodes, 1}]};
get_argument(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(10)),
-
Args = args(),
- ?line {ok, Node} = start_node(init_test, Args),
- ?line case rpc:call(Node, init, get_argument, [b]) of
- {ok, [["hej", "hopp"],["san", "sa"]]} ->
- ok;
- _ ->
- stop_node(Node),
- ?t:fail({get_argument, b})
- end,
- ?line case rpc:call(Node, init, get_argument, [a]) of
- {ok, [["kalle"]]} ->
- ok;
- _ ->
- stop_node(Node),
- ?t:fail({get_argument, a})
- end,
- ?line case rpc:call(Node, init, get_argument, [c]) of
- {ok, [["4", "5", "6"], ["7", "8", "9"]]} ->
- ok;
- _ ->
- stop_node(Node),
- ?t:fail({get_argument, c})
- end,
- ?line case rpc:call(Node, init, get_argument, [d]) of
- {ok, [[]]} ->
- ok;
- _ ->
- stop_node(Node),
- ?t:fail({get_argument, d})
- end,
- ?line case rpc:call(Node, init, get_argument, [e]) of
- error ->
- ok;
- _ ->
- stop_node(Node),
- ?t:fail({get_argument, e})
- end,
+ {ok, Node} = start_node(init_test, Args),
+ case rpc:call(Node, init, get_argument, [b]) of
+ {ok, [["hej", "hopp"],["san", "sa"]]} ->
+ ok;
+ _ ->
+ stop_node(Node),
+ ct:fail({get_argument, b})
+ end,
+ case rpc:call(Node, init, get_argument, [a]) of
+ {ok, [["kalle"]]} ->
+ ok;
+ _ ->
+ stop_node(Node),
+ ct:fail({get_argument, a})
+ end,
+ case rpc:call(Node, init, get_argument, [c]) of
+ {ok, [["4", "5", "6"], ["7", "8", "9"]]} ->
+ ok;
+ _ ->
+ stop_node(Node),
+ ct:fail({get_argument, c})
+ end,
+ case rpc:call(Node, init, get_argument, [d]) of
+ {ok, [[]]} ->
+ ok;
+ _ ->
+ stop_node(Node),
+ ct:fail({get_argument, d})
+ end,
+ case rpc:call(Node, init, get_argument, [e]) of
+ error ->
+ ok;
+ _ ->
+ stop_node(Node),
+ ct:fail({get_argument, e})
+ end,
stop_node(Node),
- ?line ?t:timetrap_cancel(Dog),
ok.
-get_plain_arguments(doc) ->[];
-get_plain_arguments(suite) -> {req, [distribution, {local_slave_nodes, 1}]};
get_plain_arguments(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(10)),
Longstring =
"fjdkfjdkfjfdaa2fjdkfjdkfjfdaa2fjdkfjdkfjfdaa2"
"fjdkfjdkfjfdaa2fjdkfjdkfjfdaa2fjdkfjdkfjfdaa2"
@@ -235,18 +219,17 @@ get_plain_arguments(Config) when is_list(Config) ->
"fjdkfjdkfjfdaa2fjdkfjdkfjfdaa2fjdkfjdkfjfdaa2"
"fjdkfjdkfjfdaa2fjdkfjdkfjfdaa2fjdkfjdkfjfdaa2"
"fjdkfjdkfjfdaa2fjdkfjdkfjfdaa2fjdkfjdkfjfdaa2",
- ?line true = (length(Longstring) > 255),
+ true = (length(Longstring) > 255),
Args = long_args(Longstring),
- ?line {ok, Node} = start_node(init_test, Args),
- ?line case rpc:call(Node, init, get_plain_arguments, []) of
- ["a", "b", "c", Longstring] ->
- ok;
- As ->
- stop_node(Node),
- ?t:fail({get_argument, As})
- end,
+ {ok, Node} = start_node(init_test, Args),
+ case rpc:call(Node, init, get_plain_arguments, []) of
+ ["a", "b", "c", Longstring] ->
+ ok;
+ As ->
+ stop_node(Node),
+ ct:fail({get_argument, As})
+ end,
stop_node(Node),
- ?line ?t:timetrap_cancel(Dog),
ok.
@@ -254,199 +237,252 @@ get_plain_arguments(Config) when is_list(Config) ->
%% ------------------------------------------------
%% Use -boot_var flag to set $TEST_VAR in boot script.
%% ------------------------------------------------
-boot_var(doc) -> [];
-boot_var(suite) -> {req, [distribution, {local_slave_nodes, 1}]};
boot_var(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(100)),
-
{BootScript, TEST_VAR, KernelVsn, StdlibVsn} = create_boot(Config),
%% Should fail as we have not given -boot_var TEST_VAR
- ?line {error, timeout} =
- start_node(init_test, "-boot " ++ BootScript),
+ {error, timeout} =
+ start_node(init_test, "-boot " ++ BootScript),
case is_real_system(KernelVsn, StdlibVsn) of
true ->
%% Now it should work !!
- ?line {ok, Node} =
- start_node(init_test,
- "-boot " ++ BootScript ++
- " -boot_var TEST_VAR " ++ TEST_VAR),
+ {ok, Node} =
+ start_node(init_test,
+ "-boot " ++ BootScript ++
+ " -boot_var TEST_VAR \"" ++
+ TEST_VAR ++ "\""),
stop_node(Node),
Res = ok;
_ ->
%% What we need is not so much version numbers on the directories, but
%% for the boot var TEST_VAR to appear in the boot script, and it doesn't
%% if we give the 'local' option to systools:make_script.
- ?t:format(
- "Test case not complete as we are not~n"
- "running in a real system!~n"
- "Probably this test is performed in a "
- "clearcase view or source tree.~n"
- "Need version numbers on the kernel and "
- "stdlib directories!~n",
- []),
+ io:format(
+ "Test case not complete as we are not~n"
+ "running in a real system!~n"
+ "Probably this test is performed in a "
+ "clearcase view or source tree.~n"
+ "Need version numbers on the kernel and "
+ "stdlib directories!~n",
+ []),
Res = {skip,
- "Test case only partially run since it is run "
- "in a clearcase view or in a source tree. "
- "Need an installed system to complete this test."}
+ "Test case only partially run since it is run "
+ "in a clearcase view or in a source tree. "
+ "Need an installed system to complete this test."}
end,
- ?line ?t:timetrap_cancel(Dog),
Res.
create_boot(Config) ->
- ?line {ok, OldDir} = file:get_cwd(),
- ?line {LatestDir, LatestName, KernelVsn, StdlibVsn} =
+ {ok, OldDir} = file:get_cwd(),
+ {LatestDir, LatestName, KernelVsn, StdlibVsn} =
create_script(Config),
LibDir = code:lib_dir(),
- ?line ok = file:set_cwd(LatestDir),
- ?line ok = systools:make_script(LatestName,
- [{variables, [{"TEST_VAR", LibDir}]}]),
- ?line ok = file:set_cwd(OldDir),
+ ok = file:set_cwd(LatestDir),
+ ok = systools:make_script(LatestName,
+ [{variables, [{"TEST_VAR", LibDir}]}]),
+ ok = file:set_cwd(OldDir),
{LatestDir ++ "/" ++ LatestName, LibDir, KernelVsn, StdlibVsn}.
is_real_system(KernelVsn, StdlibVsn) ->
LibDir = code:lib_dir(),
- filelib:is_dir(filename:join(LibDir, "kernel"++KernelVsn)) andalso
- filelib:is_dir(filename:join(LibDir, "stdlib"++StdlibVsn)).
-
+ filelib:is_dir(filename:join(LibDir, "kernel-"++KernelVsn)) andalso
+ filelib:is_dir(filename:join(LibDir, "stdlib-"++StdlibVsn)).
+
%% ------------------------------------------------
%% Slave executes erlang:halt() on master nodedown.
%% Therefore the slave process must be killed
%% before restart.
%% ------------------------------------------------
-many_restarts(doc) -> [];
-many_restarts(suite) ->
- case ?t:os_type() of
- {Fam, _} when Fam == unix; Fam == win32 ->
- {req, [distribution, {local_slave_nodes, 1}, {time, 5}]};
- _ ->
- {skip, "Only run on unix and win32"}
- end;
+many_restarts() ->
+ [{timetrap,{minutes,8}}].
many_restarts(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(480)),
- ?line {ok, Node} = loose_node:start(init_test, "", ?DEFAULT_TIMEOUT_SEC),
- ?line loop_restart(30,Node,rpc:call(Node,erlang,whereis,[error_logger])),
- ?line loose_node:stop(Node),
- ?line ?t:timetrap_cancel(Dog),
+ {ok, Node} = loose_node:start(init_test, "", ?DEFAULT_TIMEOUT_SEC),
+ loop_restart(50,Node,rpc:call(Node,erlang,whereis,[logger])),
+ loose_node:stop(Node),
ok.
loop_restart(0,_,_) ->
ok;
loop_restart(N,Node,EHPid) ->
- ?line erlang:monitor_node(Node, true),
- ?line ok = rpc:call(Node, init, restart, []),
- ?line receive
- {nodedown, Node} ->
- ok
- after 10000 ->
- loose_node:stop(Node),
- ?t:fail(not_stopping)
- end,
- ?line ok = wait_for(30, Node, EHPid),
- ?line loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[error_logger])).
+ erlang:monitor_node(Node, true),
+ ok = rpc:call(Node, init, restart, []),
+ receive
+ {nodedown, Node} ->
+ ok
+ after 10000 ->
+ loose_node:stop(Node),
+ ct:fail(not_stopping)
+ end,
+ ok = wait_for(30, Node, EHPid),
+ loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[logger])).
wait_for(0,Node,_) ->
loose_node:stop(Node),
error;
wait_for(N,Node,EHPid) ->
- ?line case rpc:call(Node, erlang, whereis, [error_logger]) of
+ case rpc:call(Node, erlang, whereis, [logger]) of
Pid when is_pid(Pid), Pid =/= EHPid ->
- %% ?line erlang:display(ok),
- ?line ok;
+ %% erlang:display(ok),
+ ok;
_X ->
- %% ?line erlang:display(_X),
- %% ?line Procs = rpc:call(Node, erlang, processes, []),
- %% ?line erlang:display(Procs),
- %% case is_list(Procs) of
- %% true ->
- %% ?line [(catch erlang:display(
- %% rpc:call(Node,
- %% erlang,
- %% process_info,
- %% [Y,registered_name])))
- %% || Y <- Procs];
- %% _ ->
- %% ok
- %% end,
- receive
- after 100 ->
- ok
- end,
- ?line wait_for(N-1,Node,EHPid)
- end.
+ %% erlang:display(_X),
+ %% Procs = rpc:call(Node, erlang, processes, []),
+ %% erlang:display(Procs),
+ %% case is_list(Procs) of
+ %% true ->
+ %% [(catch erlang:display(
+ %% rpc:call(Node,
+ %% erlang,
+ %% process_info,
+ %% [Y,registered_name])))
+ %% || Y <- Procs];
+ %% _ ->
+ %% ok
+ %% end,
+ receive
+ after 100 ->
+ ok
+ end,
+ wait_for(N-1,Node,EHPid)
+ end.
%% ------------------------------------------------
%% Slave executes erlang:halt() on master nodedown.
%% Therefore the slave process must be killed
%% before restart.
%% ------------------------------------------------
-restart(doc) -> [];
-restart(suite) ->
- case ?t:os_type() of
- {Fam, _} when Fam == unix; Fam == win32 ->
- {req, [distribution, {local_slave_nodes, 1}, {time, 5}]};
- _ ->
- {skip, "Only run on unix and win32"}
- end;
restart(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(40)),
- ?line Args = args(),
+ Args = args(),
+
+ Pa = " -pa " ++ filename:dirname(code:which(?MODULE)),
%% Currently test_server:start_node cannot be used. The restarted
%% node immediately halts due to the implementation of
%% test_server:start_node.
- ?line {ok, Node} = loose_node:start(init_test, Args, ?DEFAULT_TIMEOUT_SEC),
+ {ok, Node} = loose_node:start(init_test, Args ++ Pa, ?DEFAULT_TIMEOUT_SEC),
%% Ok, the node is up, now the real test begins.
- ?line erlang:monitor_node(Node, true),
- ?line InitPid = rpc:call(Node, erlang, whereis, [init]),
- ?line Procs = rpc:call(Node, erlang, processes, []),
- ?line MaxPid = lists:last(Procs),
- ?line ok = rpc:call(Node, init, restart, []),
- ?line receive
- {nodedown, Node} ->
- ok
- after 10000 ->
- loose_node:stop(Node),
- ?t:fail(not_stopping)
- end,
- ?line ok = wait_restart(30, Node),
+ erlang:monitor_node(Node, true),
+ SysProcs0 = rpc:call(Node, ?MODULE, find_system_processes, []),
+ io:format("SysProcs0=~p~n", [SysProcs0]),
+ [InitPid, PurgerPid, LitCollectorPid,
+ DirtySigNPid, DirtySigHPid, DirtySigMPid] = SysProcs0,
+ InitPid = rpc:call(Node, erlang, whereis, [init]),
+ PurgerPid = rpc:call(Node, erlang, whereis, [erts_code_purger]),
+ Procs = rpc:call(Node, erlang, processes, []),
+ MaxPid = lists:last(Procs),
+ ok = rpc:call(Node, init, restart, []),
+ receive
+ {nodedown, Node} ->
+ ok
+ after 10000 ->
+ loose_node:stop(Node),
+ ct:fail(not_stopping)
+ end,
+ ok = wait_restart(30, Node),
+
+ SysProcs1 = rpc:call(Node, ?MODULE, find_system_processes, []),
+ io:format("SysProcs1=~p~n", [SysProcs1]),
+ [InitPid1, PurgerPid1, LitCollectorPid1,
+ DirtySigNPid1, DirtySigHPid1, DirtySigMPid1] = SysProcs1,
%% Still the same init process!
- ?line InitPid1 = rpc:call(Node, erlang, whereis, [init]),
+ InitPid1 = rpc:call(Node, erlang, whereis, [init]),
InitP = pid_to_list(InitPid),
- ?line InitP = pid_to_list(InitPid1),
-
- ?line NewProcs0 = rpc:call(Node, erlang, processes, []),
- NewProcs = lists:delete(InitPid1, NewProcs0),
- ?line case check_processes(NewProcs, MaxPid) of
- true ->
- ok;
- _ ->
- loose_node:stop(Node),
- ?t:fail(processes_not_greater)
- end,
+ InitP = pid_to_list(InitPid1),
+
+ %% and same purger process!
+ PurgerPid1 = rpc:call(Node, erlang, whereis, [erts_code_purger]),
+ PurgerP = pid_to_list(PurgerPid),
+ PurgerP = pid_to_list(PurgerPid1),
+
+ %% and same literal area collector process!
+ LitCollectorP = pid_to_list(LitCollectorPid),
+ LitCollectorP = pid_to_list(LitCollectorPid1),
+
+ %% and same normal dirty signal handler process!
+ DirtySigNP = pid_to_list(DirtySigNPid),
+ DirtySigNP = pid_to_list(DirtySigNPid1),
+ %% and same high dirty signal handler process!
+ DirtySigHP = pid_to_list(DirtySigHPid),
+ DirtySigHP = pid_to_list(DirtySigHPid1),
+ %% and same max dirty signal handler process!
+ DirtySigMP = pid_to_list(DirtySigMPid),
+ DirtySigMP = pid_to_list(DirtySigMPid1),
+
+ NewProcs0 = rpc:call(Node, erlang, processes, []),
+ NewProcs = NewProcs0 -- SysProcs1,
+ case check_processes(NewProcs, MaxPid) of
+ true ->
+ ok;
+ _ ->
+ loose_node:stop(Node),
+ ct:fail(processes_not_greater)
+ end,
%% Test that, for instance, the same argument still exists.
- ?line case rpc:call(Node, init, get_argument, [c]) of
- {ok, [["4", "5", "6"], ["7", "8", "9"]]} ->
- ok;
- _ ->
- loose_node:stop(Node),
- ?t:fail({get_argument, restart_fail})
- end,
+ case rpc:call(Node, init, get_argument, [c]) of
+ {ok, [["4", "5", "6"], ["7", "8", "9"]]} ->
+ ok;
+ _ ->
+ loose_node:stop(Node),
+ ct:fail({get_argument, restart_fail})
+ end,
loose_node:stop(Node),
- ?line ?t:timetrap_cancel(Dog),
ok.
+-record(sys_procs, {init,
+ code_purger,
+ literal_collector,
+ dirty_sig_handler_normal,
+ dirty_sig_handler_high,
+ dirty_sig_handler_max}).
+
+find_system_processes() ->
+ find_system_procs(processes(), #sys_procs{}).
+
+find_system_procs([], SysProcs) ->
+ [SysProcs#sys_procs.init,
+ SysProcs#sys_procs.code_purger,
+ SysProcs#sys_procs.literal_collector,
+ SysProcs#sys_procs.dirty_sig_handler_normal,
+ SysProcs#sys_procs.dirty_sig_handler_high,
+ SysProcs#sys_procs.dirty_sig_handler_max];
+find_system_procs([P|Ps], SysProcs) ->
+ case process_info(P, [initial_call, priority]) of
+ [{initial_call,{otp_ring0,start,2}},_] ->
+ undefined = SysProcs#sys_procs.init,
+ find_system_procs(Ps, SysProcs#sys_procs{init = P});
+ [{initial_call,{erts_code_purger,start,0}},_] ->
+ undefined = SysProcs#sys_procs.code_purger,
+ find_system_procs(Ps, SysProcs#sys_procs{code_purger = P});
+ [{initial_call,{erts_literal_area_collector,start,0}},_] ->
+ undefined = SysProcs#sys_procs.literal_collector,
+ find_system_procs(Ps, SysProcs#sys_procs{literal_collector = P});
+ [{initial_call,{erts_dirty_process_signal_handler,start,0}},
+ {priority,normal}] ->
+ undefined = SysProcs#sys_procs.dirty_sig_handler_normal,
+ find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_normal = P});
+ [{initial_call,{erts_dirty_process_signal_handler,start,0}},
+ {priority,high}] ->
+ undefined = SysProcs#sys_procs.dirty_sig_handler_high,
+ find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_high = P});
+ [{initial_call,{erts_dirty_process_signal_handler,start,0}},
+ {priority,max}] ->
+ undefined = SysProcs#sys_procs.dirty_sig_handler_max,
+ find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_max = P});
+ _ ->
+ find_system_procs(Ps, SysProcs)
+ end.
+
wait_restart(0, _Node) ->
- ?t:fail(not_restarted);
+ ct:fail(not_restarted);
wait_restart(N, Node) ->
case net_adm:ping(Node) of
pong -> ok;
_ ->
- ?t:sleep(1000),
+ ct:sleep(1000),
wait_restart(N - 1, Node)
end.
@@ -474,124 +510,112 @@ apid(Pid) ->
%% The reboot facility using heart is tested
%% in the heart_SUITE.
%% ------------------------------------------------
-reboot(doc) -> [];
-reboot(suite) -> {req, [distribution, {local_slave_nodes, 1}]};
reboot(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(40)),
-
Args = args(),
- ?line {ok, Node} = start_node(init_test, Args),
+ {ok, Node} = start_node(init_test, Args),
erlang:monitor_node(Node, true),
- ?line ok = rpc:call(Node, init, reboot, []),
- ?line receive
- {nodedown, Node} ->
- ok
- after 10000 ->
- stop_node(Node),
- ?t:fail(not_stopping)
- end,
- ?t:sleep(5000),
- ?line case net_adm:ping(Node) of
- pang ->
- ok;
- _ ->
- stop_node(Node),
- ?t:fail(system_rebooted)
- end,
- ?line ?t:timetrap_cancel(Dog),
+ ok = rpc:call(Node, init, reboot, []),
+ receive
+ {nodedown, Node} ->
+ ok
+ after 10000 ->
+ stop_node(Node),
+ ct:fail(not_stopping)
+ end,
+ ct:sleep(5000),
+ case net_adm:ping(Node) of
+ pang ->
+ ok;
+ _ ->
+ stop_node(Node),
+ ct:fail(system_rebooted)
+ end,
ok.
%% ------------------------------------------------
%%
%% ------------------------------------------------
-stop(doc) -> [];
-stop(suite) -> [];
+stop_status(Config) when is_list(Config) ->
+ badarg = catch_stop([65,[66],67]), % flat strings only
+ badarg = catch_stop([65, 666, 67]), % only bytes in string
+ badarg = catch_stop(abort), % 'abort' not allowed
+ badarg = catch_stop(true), % other atoms not allowed
+ badarg = catch_stop(-1), % no negative statuses
+ ok.
+
+catch_stop(Status) ->
+ try init:stop(Status) catch error:badarg -> badarg end.
+
+%% ------------------------------------------------
+%%
+%% ------------------------------------------------
stop(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(20)),
Args = args(),
- ?line {ok, Node} = start_node(init_test, Args),
+ {ok, Node} = start_node(init_test, Args),
erlang:monitor_node(Node, true),
- ?line ok = rpc:call(Node, init, reboot, []),
- ?line receive
- {nodedown, Node} ->
- ok
- after 10000 ->
- stop_node(Node),
- ?t:fail(not_stopping)
- end,
- ?t:sleep(5000),
- ?line case net_adm:ping(Node) of
- pang ->
- ok;
- _ ->
- stop_node(Node),
- ?t:fail(system_rebooted)
- end,
- ?line ?t:timetrap_cancel(Dog),
+ ok = rpc:call(Node, init, reboot, []),
+ receive
+ {nodedown, Node} ->
+ ok
+ after 10000 ->
+ stop_node(Node),
+ ct:fail(not_stopping)
+ end,
+ ct:sleep(5000),
+ case net_adm:ping(Node) of
+ pang ->
+ ok;
+ _ ->
+ stop_node(Node),
+ ct:fail(system_rebooted)
+ end,
ok.
%% ------------------------------------------------
%%
%% ------------------------------------------------
-get_status(doc) -> [];
-get_status(suite) -> [];
get_status(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(10)),
- ?line ?t:timetrap_cancel(Dog),
- ?line {Start, _} = init:get_status(),
+ {Start, _} = init:get_status(),
%% Depending on how the test_server is started, Start has
%% different values: 'starting' if test_server was started
%% with the -s flag.
- ?line case lists:member(Start, [started, starting]) of
- true ->
- ok;
- _ ->
- ?t:fail(get_status)
- end.
+ case lists:member(Start, [started, starting]) of
+ true ->
+ ok;
+ _ ->
+ ct:fail(get_status)
+ end.
%% ------------------------------------------------
%%
%% ------------------------------------------------
-script_id(doc) -> [];
-script_id(suite) -> [];
script_id(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(10)),
-
- ?line {Name, Vsn} = init:script_id(),
- ?line if
- is_list(Name), is_list(Vsn) ->
- ok;
- true ->
- ?t:fail(not_standard_script)
- end,
- ?line ?t:timetrap_cancel(Dog),
+ {Name, Vsn} = init:script_id(),
+ if
+ is_list(Name), is_list(Vsn) ->
+ ok;
+ true ->
+ ct:fail(not_standard_script)
+ end,
ok.
%% ------------------------------------------------
%% Start the slave system with -boot flag.
%% ------------------------------------------------
-boot1(doc) -> [];
-boot1(suite) -> {req, [distribution, {local_slave_nodes, 1}, {time, 35}]};
boot1(Config) when is_list(Config) ->
- ?line Dog = ?t:timetrap(?t:seconds(80)),
Args = args() ++ " -boot start_sasl",
- ?line {ok, Node} = start_node(init_test, Args),
- ?line stop_node(Node),
+ {ok, Node} = start_node(init_test, Args),
+ stop_node(Node),
%% Try to start with a non-existing boot file.
Args1 = args() ++ " -boot dummy_script",
- ?line {error, timeout} = start_node(init_test, Args1),
+ {error, timeout} = start_node(init_test, Args1),
- ?line ?t:timetrap_cancel(Dog),
ok.
-boot2(doc) -> [];
-boot2(suite) -> {req, [distribution, {local_slave_nodes, 1}, {time, 35}]};
boot2(Config) when is_list(Config) ->
- Dog = ?t:timetrap(?t:seconds(80)),
-
%% Absolute boot file name
Boot = filename:join([code:root_dir(), "bin", "start_sasl"]),
@@ -604,9 +628,9 @@ boot2(Config) when is_list(Config) ->
%% Absolute boot file name for Windows -- all slashes are
%% converted to backslashes.
Win_boot = lists:map(fun
- ($/) -> $\\;
- (C) -> C
- end, Boot),
+ ($/) -> $\\;
+ (C) -> C
+ end, Boot),
Args2 = args() ++ " -boot \"" ++ Win_boot ++ "\"",
{ok, Node2} = start_node(init_test, Args2),
stop_node(Node2);
@@ -614,16 +638,15 @@ boot2(Config) when is_list(Config) ->
ok
end,
- ?t:timetrap_cancel(Dog),
ok.
%% Misc. functions
start_node(Name, Param) ->
- ?t:start_node(Name, slave, [{args, Param}]).
+ test_server:start_node(Name, slave, [{args, Param}]).
stop_node(Node) ->
- ?t:stop_node(Node).
+ test_server:stop_node(Node).
from(H, [H | T]) -> T;
from(H, [_ | T]) -> from(H, T);
@@ -639,18 +662,18 @@ long_args(A) ->
[A])).
create_script(Config) ->
- ?line PrivDir = ?config(priv_dir,Config),
- ?line Name = PrivDir ++ "boot_var_test",
- ?line Apps = application_controller:which_applications(),
- ?line {value,{_,_,KernelVer}} = lists:keysearch(kernel,1,Apps),
- ?line {value,{_,_,StdlibVer}} = lists:keysearch(stdlib,1,Apps),
- ?line {ok,Fd} = file:open(Name ++ ".rel", [write]),
- ?line io:format(Fd,
- "{release, {\"Test release 3\", \"P2A\"}, \n"
- " {erts, \"4.4\"}, \n"
- " [{kernel, \"~s\"}, {stdlib, \"~s\"}]}.\n",
- [KernelVer,StdlibVer]),
- ?line file:close(Fd),
+ PrivDir = proplists:get_value(priv_dir,Config),
+ Name = PrivDir ++ "boot_var_test",
+ Apps = application_controller:which_applications(),
+ {value,{_,_,KernelVer}} = lists:keysearch(kernel,1,Apps),
+ {value,{_,_,StdlibVer}} = lists:keysearch(stdlib,1,Apps),
+ {ok,Fd} = file:open(Name ++ ".rel", [write]),
+ io:format(Fd,
+ "{release, {\"Test release 3\", \"P2A\"}, \n"
+ " {erts, \"4.4\"}, \n"
+ " [{kernel, \"~s\"}, {stdlib, \"~s\"}]}.\n",
+ [KernelVer,StdlibVer]),
+ file:close(Fd),
{filename:dirname(Name), filename:basename(Name),
- KernelVer, StdlibVer}.
+ KernelVer, StdlibVer}.
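The hunks above follow one conversion pattern: the old test_server idioms (?line annotations, per-case timetrap watchdogs, ?t:fail/1, ?t:sleep/1) are replaced by plain pattern matching, a suite-level timetrap and the ct module. A minimal sketch of the resulting common_test shape, with an invented suite name and test case, could look like this:
-module(example_SUITE).
-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0, sample_case/1]).
%% One suite-level timetrap replaces the Dog = ?t:timetrap(...) /
%% ?t:timetrap_cancel(Dog) pair that used to appear in every test case.
suite() ->
    [{ct_hooks,[ts_install_cth]},
     {timetrap,{minutes,1}}].
all() ->
    [sample_case].
sample_case(Config) when is_list(Config) ->
    %% Failures are reported with ct:fail/1 instead of ?t:fail/1.
    case 1 + 1 of
        2 -> ok;
        _ -> ct:fail(unexpected_result)
    end.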
diff --git a/lib/kernel/test/interactive_shell_SUITE.erl b/lib/kernel/test/interactive_shell_SUITE.erl
index 8adae1f606..298a364a91 100644
--- a/lib/kernel/test/interactive_shell_SUITE.erl
+++ b/lib/kernel/test/interactive_shell_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@
%% %CopyrightEnd%
%%
-module(interactive_shell_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
get_columns_and_rows/1, exit_initial/1, job_control_local/1,
@@ -30,15 +30,14 @@
-export([toerl_server/3]).
init_per_testcase(_Func, Config) ->
- Dog = test_server:timetrap(test_server:minutes(3)),
- [{watchdog,Dog}|Config].
-
-end_per_testcase(_Func, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog).
+ Config.
+end_per_testcase(_Func, _Config) ->
+ ok.
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,3}}].
all() ->
[get_columns_and_rows, exit_initial, job_control_local,
@@ -55,7 +54,7 @@ init_per_suite(Config) ->
[{default_shell,DefShell},{term,Term}|Config].
end_per_suite(Config) ->
- Term = ?config(term,Config),
+ Term = proplists:get_value(term,Config),
os:putenv("TERM",Term),
ok.
@@ -66,68 +65,66 @@ end_per_group(_GroupName, Config) ->
Config.
-%-define(DEBUG,1).
+%%-define(DEBUG,1).
-ifdef(DEBUG).
-define(dbg(Data),erlang:display(Data)).
-else.
-define(dbg(Data),noop).
-endif.
-get_columns_and_rows(suite) -> [];
-get_columns_and_rows(doc) -> ["Test that the shell can access columns and rows"];
+%% Test that the shell can access columns and rows.
get_columns_and_rows(Config) when is_list(Config) ->
case proplists:get_value(default_shell,Config) of
old ->
%% Old shell tests
?dbg(old_shell),
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,"io:columns()."},
- {getline_re,".*{error,enotsup}"},
- {putline,"io:rows()."},
- {getline_re,".*{error,enotsup}"}
-
- ],[]),
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,"io:columns()."},
- {getline_re,".*{ok,90}"},
- {putline,"io:rows()."},
- {getline_re,".*{ok,40}"}],
- [],
- "stty rows 40; stty columns 90; ");
+ rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ {getline_re,".*{error,enotsup}"},
+ {putline,"io:rows()."},
+ {getline_re,".*{error,enotsup}"}
+
+ ],[]),
+ rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ {getline_re,".*{ok,90}"},
+ {putline,"io:rows()."},
+ {getline_re,".*{ok,40}"}],
+ [],
+ "stty rows 40; stty columns 90; ");
new ->
- % New shell tests
+ %% New shell tests
?dbg(new_shell),
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,"io:columns()."},
- %% Behaviour change in R12B-5, returns 80
- %% {getline,"{error,enotsup}"},
- {getline,"{ok,80}"},
- {putline,"io:rows()."},
- %% Behaviour change in R12B-5, returns 24
- %% {getline,"{error,enotsup}"}
- {getline,"{ok,24}"}
- ],[]),
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,"io:columns()."},
- {getline,"{ok,90}"},
- {putline,"io:rows()."},
- {getline,"{ok,40}"}],
- [],
- "stty rows 40; stty columns 90; ")
+ rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ %% Behaviour change in R12B-5, returns 80
+ %% {getline,"{error,enotsup}"},
+ {getline,"{ok,80}"},
+ {putline,"io:rows()."},
+ %% Behaviour change in R12B-5, returns 24
+ %% {getline,"{error,enotsup}"}
+ {getline,"{ok,24}"}
+ ],[]),
+ rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ {getline,"{ok,90}"},
+ {putline,"io:rows()."},
+ {getline,"{ok,40}"}],
+ [],
+ "stty rows 40; stty columns 90; ")
end.
-
-
-exit_initial(suite) -> [];
-exit_initial(doc) -> ["Tests that exit of initial shell restarts shell"];
+
+
+%% Tests that exit of initial shell restarts shell.
exit_initial(Config) when is_list(Config) ->
case proplists:get_value(default_shell,Config) of
old ->
@@ -152,9 +149,7 @@ exit_initial(Config) when is_list(Config) ->
{getline_re,"35"}],[])
end.
-job_control_local(suite) -> [];
-job_control_local(doc) -> [ "Tests that local shell can be "
- "started by means of job control" ];
+%% Tests that local shell can be started by means of job control.
job_control_local(Config) when is_list(Config) ->
case proplists:get_value(default_shell,Config) of
old ->
@@ -162,133 +157,130 @@ job_control_local(Config) when is_list(Config) ->
{skip,"No new shell found"};
new ->
%% New shell tests
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,[7]},
- {sleep,timeout(short)},
- {putline,""},
- {getline," -->"},
- {putline,"s"},
- {putline,"c"},
- {putline_raw,""},
- {getline,"Eshell"},
- {putline_raw,""},
- {getline,"1>"},
- {putline,"35."},
- {getline,"35"}],[])
+ rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,[7]},
+ {sleep,timeout(short)},
+ {putline,""},
+ {getline," -->"},
+ {putline,"s"},
+ {putline,"c"},
+ {putline_raw,""},
+ {getline,"Eshell"},
+ {putline_raw,""},
+ {getline,"1>"},
+ {putline,"35."},
+ {getline,"35"}],[])
end.
-job_control_remote(suite) -> [];
job_control_remote(doc) -> [ "Tests that remote shell can be "
"started by means of job control" ];
job_control_remote(Config) when is_list(Config) ->
case {node(),proplists:get_value(default_shell,Config)} of
{nonode@nohost,_} ->
- ?line exit(not_distributed);
+ exit(not_distributed);
{_,old} ->
{skip,"No new shell found"};
_ ->
- ?line RNode = create_nodename(),
- ?line MyNode = atom2list(node()),
- ?line Pid = spawn_link(fun() ->
- receive die ->
- ok
- end
- end),
- ?line PidStr = pid_to_list(Pid),
- ?line register(kalaskula,Pid),
- ?line CookieString = lists:flatten(
- io_lib:format("~w",
- [erlang:get_cookie()])),
- ?line Res = rtnode([{putline,""},
- {putline, "erlang:get_cookie()."},
- {getline, CookieString},
- {putline,[7]},
- {sleep,timeout(short)},
- {putline,""},
- {getline," -->"},
- {putline,"r '"++MyNode++"'"},
- {putline,"c"},
- {putline_raw,""},
- {getline,"Eshell"},
- {sleep,timeout(short)},
- {putline_raw,""},
- {getline,"("++MyNode++")1>"},
- {putline,"whereis(kalaskula)."},
- {getline,PidStr},
- {sleep,timeout(short)}, % Race, known bug.
- {putline_raw,"exit()."},
- {getline,"***"},
- {putline,[7]},
- {putline,""},
- {getline," -->"},
- {putline,"c 1"},
- {putline,""},
- {sleep,timeout(short)},
- {putline_raw,""},
- {getline,"("++RNode++")"}],RNode),
- ?line Pid ! die,
- ?line Res
+ RNode = create_nodename(),
+ MyNode = atom2list(node()),
+ Pid = spawn_link(fun() ->
+ receive die ->
+ ok
+ end
+ end),
+ PidStr = pid_to_list(Pid),
+ register(kalaskula,Pid),
+ CookieString = lists:flatten(
+ io_lib:format("~w",
+ [erlang:get_cookie()])),
+ Res = rtnode([{putline,""},
+ {putline, "erlang:get_cookie()."},
+ {getline, CookieString},
+ {putline,[7]},
+ {sleep,timeout(short)},
+ {putline,""},
+ {getline," -->"},
+ {putline,"r '"++MyNode++"'"},
+ {putline,"c"},
+ {putline_raw,""},
+ {getline,"Eshell"},
+ {sleep,timeout(short)},
+ {putline_raw,""},
+ {getline,"("++MyNode++")1>"},
+ {putline,"whereis(kalaskula)."},
+ {getline,PidStr},
+ {sleep,timeout(short)}, % Race, known bug.
+ {putline_raw,"exit()."},
+ {getline,"***"},
+ {putline,[7]},
+ {putline,""},
+ {getline," -->"},
+ {putline,"c 1"},
+ {putline,""},
+ {sleep,timeout(short)},
+ {putline_raw,""},
+ {getline,"("++RNode++")"}],RNode),
+ Pid ! die,
+ Res
end.
-job_control_remote_noshell(suite) -> [];
-job_control_remote_noshell(doc) ->
- [ "Tests that remote shell can be "
- "started by means of job control to -noshell node" ];
+
+%% Tests that remote shell can be
+%% started by means of job control to -noshell node.
job_control_remote_noshell(Config) when is_list(Config) ->
case {node(),proplists:get_value(default_shell,Config)} of
{nonode@nohost,_} ->
- ?line exit(not_distributed);
+ exit(not_distributed);
{_,old} ->
{skip,"No new shell found"};
_ ->
- ?line RNode = create_nodename(),
- ?line NSNode = start_noshell_node(interactive_shell_noshell),
- ?line Pid = spawn_link(NSNode, fun() ->
- receive die ->
- ok
- end
- end),
- ?line PidStr = rpc:call(NSNode,erlang,pid_to_list,[Pid]),
- ?line true = rpc:call(NSNode,erlang,register,[kalaskula,Pid]),
- ?line NSNodeStr = atom2list(NSNode),
- ?line CookieString = lists:flatten(
- io_lib:format("~w",
- [erlang:get_cookie()])),
- ?line Res = rtnode([{putline,""},
- {putline, "erlang:get_cookie()."},
- {getline, CookieString},
- {putline,[7]},
- {sleep,timeout(short)},
- {putline,""},
- {getline," -->"},
- {putline,"r '"++NSNodeStr++"'"},
- {putline,"c"},
- {putline_raw,""},
- {getline,"Eshell"},
- {sleep,timeout(short)},
- {putline_raw,""},
- {getline,"("++NSNodeStr++")1>"},
- {putline,"whereis(kalaskula)."},
- {getline,PidStr},
- {sleep,timeout(short)}, % Race, known bug.
- {putline_raw,"exit()."},
- {getline,"***"},
- {putline,[7]},
- {putline,""},
- {getline," -->"},
- {putline,"c 1"},
- {putline,""},
- {sleep,timeout(short)},
- {putline_raw,""},
- {getline,"("++RNode++")"}],RNode),
- ?line Pid ! die,
- ?line stop_noshell_node(NSNode),
- ?line Res
+ RNode = create_nodename(),
+ NSNode = start_noshell_node(interactive_shell_noshell),
+ Pid = spawn_link(NSNode, fun() ->
+ receive die ->
+ ok
+ end
+ end),
+ PidStr = rpc:call(NSNode,erlang,pid_to_list,[Pid]),
+ true = rpc:call(NSNode,erlang,register,[kalaskula,Pid]),
+ NSNodeStr = atom2list(NSNode),
+ CookieString = lists:flatten(
+ io_lib:format("~w",
+ [erlang:get_cookie()])),
+ Res = rtnode([{putline,""},
+ {putline, "erlang:get_cookie()."},
+ {getline, CookieString},
+ {putline,[7]},
+ {sleep,timeout(short)},
+ {putline,""},
+ {getline," -->"},
+ {putline,"r '"++NSNodeStr++"'"},
+ {putline,"c"},
+ {putline_raw,""},
+ {getline,"Eshell"},
+ {sleep,timeout(short)},
+ {putline_raw,""},
+ {getline,"("++NSNodeStr++")1>"},
+ {putline,"whereis(kalaskula)."},
+ {getline,PidStr},
+ {sleep,timeout(short)}, % Race, known bug.
+ {putline_raw,"exit()."},
+ {getline,"***"},
+ {putline,[7]},
+ {putline,""},
+ {getline," -->"},
+ {putline,"c 1"},
+ {putline,""},
+ {sleep,timeout(short)},
+ {putline_raw,""},
+ {getline,"("++RNode++")"}],RNode),
+ Pid ! die,
+ stop_noshell_node(NSNode),
+ Res
end.
-ctrl_keys(suite) -> [];
-ctrl_keys(doc) -> ["Tests various control keys"];
+%% Tests various control keys.
ctrl_keys(_Conf) when is_list(_Conf) ->
Cu=[$\^u],
Cw=[$\^w],
@@ -308,7 +300,7 @@ ctrl_keys(_Conf) when is_list(_Conf) ->
{getline,"\"hello world\""},
{putline,"\"hello world\""++Cu++Cy++"."},
{getline,"\"hello world\""}]
- ++wordLeft()++wordRight(),[]).
+ ++wordLeft()++wordRight(),[]).
wordLeft() ->
@@ -337,46 +329,46 @@ wordRight(Chars) ->
rtnode(C,N) ->
rtnode(C,N,[]).
rtnode(Commands,Nodename,ErlPrefix) ->
- ?line case get_progs() of
- {error,_Reason} ->
- ?line {skip,"No runerl present"};
- {RunErl,ToErl,Erl} ->
- ?line case create_tempdir() of
- {error, Reason2} ->
- ?line {skip, Reason2};
- Tempdir ->
- ?line SPid =
- start_runerl_node(RunErl,ErlPrefix++"\\\""++Erl++"\\\"",
- Tempdir,Nodename),
- ?line CPid = start_toerl_server(ToErl,Tempdir),
- ?line erase(getline_skipped),
- ?line Res =
- (catch get_and_put(CPid, Commands,1)),
- ?line case stop_runerl_node(CPid) of
- {error,_} ->
- ?line CPid2 =
- start_toerl_server
- (ToErl,Tempdir),
- ?line erase(getline_skipped),
- ?line ok = get_and_put
- (CPid2,
- [{putline,[7]},
- {sleep,
- timeout(short)},
- {putline,""},
- {getline," -->"},
- {putline,"s"},
- {putline,"c"},
- {putline,""}],1),
- ?line stop_runerl_node(CPid2);
- _ ->
- ?line ok
- end,
- ?line wait_for_runerl_server(SPid),
- ?line ok = rm_rf(Tempdir),
- ?line ok = Res
- end
- end.
+ case get_progs() of
+ {error,_Reason} ->
+ {skip,"No runerl present"};
+ {RunErl,ToErl,Erl} ->
+ case create_tempdir() of
+ {error, Reason2} ->
+ {skip, Reason2};
+ Tempdir ->
+ SPid =
+ start_runerl_node(RunErl,ErlPrefix++"\\\""++Erl++"\\\"",
+ Tempdir,Nodename),
+ CPid = start_toerl_server(ToErl,Tempdir),
+ erase(getline_skipped),
+ Res =
+ (catch get_and_put(CPid, Commands,1)),
+ case stop_runerl_node(CPid) of
+ {error,_} ->
+ CPid2 =
+ start_toerl_server
+ (ToErl,Tempdir),
+ erase(getline_skipped),
+ ok = get_and_put
+ (CPid2,
+ [{putline,[7]},
+ {sleep,
+ timeout(short)},
+ {putline,""},
+ {getline," -->"},
+ {putline,"s"},
+ {putline,"c"},
+ {putline,""}],1),
+ stop_runerl_node(CPid2);
+ _ ->
+ ok
+ end,
+ wait_for_runerl_server(SPid),
+ ok = rm_rf(Tempdir),
+ ok = Res
+ end
+ end.
timeout(long) ->
2 * timeout(normal);
@@ -389,7 +381,7 @@ timeout(normal) ->
start_noshell_node(Name) ->
PADir = filename:dirname(code:which(?MODULE)),
{ok, Node} = test_server:start_node(Name,slave,[{args," -noshell -pa "++
- PADir++" "}]),
+ PADir++" "}]),
Node.
stop_noshell_node(Node) ->
test_server:stop_node(Node).
@@ -397,20 +389,20 @@ stop_noshell_node(Node) ->
rm_rf(Dir) ->
try
- {ok,List} = file:list_dir(Dir),
- Files = [filename:join([Dir,X]) || X <- List],
- [case file:list_dir(Y) of
- {error, enotdir} ->
- ok = file:delete(Y);
- _ ->
- ok = rm_rf(Y)
- end || Y <- Files],
- ok = file:del_dir(Dir),
- ok
+ {ok,List} = file:list_dir(Dir),
+ Files = [filename:join([Dir,X]) || X <- List],
+ [case file:list_dir(Y) of
+ {error, enotdir} ->
+ ok = file:delete(Y);
+ _ ->
+ ok = rm_rf(Y)
+ end || Y <- Files],
+ ok = file:del_dir(Dir),
+ ok
catch
_:Exception -> {error, {Exception,Dir}}
end.
-
+
get_and_put(_CPid,[],_) ->
ok;
@@ -479,7 +471,7 @@ get_and_put(CPid, [{putline_raw, Line}|T],N) ->
Timeout = timeout(normal),
receive
{send_line, ok} ->
- get_and_put(CPid, T,N+1)
+ get_and_put(CPid, T,N+1)
after Timeout ->
error_logger:error_msg("~p: putline_raw timeout (~p) sending "
"\"~s\" (command number ~p)~n",
@@ -493,7 +485,7 @@ get_and_put(CPid, [{putline, Line}|T],N) ->
Timeout = timeout(normal),
receive
{send_line, ok} ->
- get_and_put(CPid, [{getline, []}|T],N)
+ get_and_put(CPid, [{getline, []}|T],N)
after Timeout ->
error_logger:error_msg("~p: putline timeout (~p) sending "
"\"~s\" (command number ~p)~n[~p]~n",
@@ -510,8 +502,8 @@ wait_for_runerl_server(SPid) ->
after Timeout ->
{error, timeout}
end.
-
-
+
+
stop_runerl_node(CPid) ->
Ref = erlang:monitor(process, CPid),
@@ -562,11 +554,11 @@ create_tempdir(Dir,X) when X > $Z, X < $a ->
create_tempdir(Dir,$a);
create_tempdir(Dir,X) when X > $z ->
Estr = lists:flatten(
- io_lib:format("Unable to create ~s, reason eexist",
- [Dir++[$z]])),
+ io_lib:format("Unable to create ~s, reason eexist",
+ [Dir++[$z]])),
{error, Estr};
create_tempdir(Dir0, Ch) ->
- % Expect fairly standard unix.
+ %% Expect fairly standard unix.
Dir = Dir0++[Ch],
case file:make_dir(Dir) of
{error, eexist} ->
@@ -604,13 +596,13 @@ start_runerl_node(RunErl,Erl,Tempdir,Nodename) ->
[];
_ ->
" -sname "++(if is_atom(Nodename) -> atom_to_list(Nodename);
- true -> Nodename
- end)++
+ true -> Nodename
+ end)++
" -setcookie "++atom_to_list(erlang:get_cookie())
end,
spawn(fun() ->
os:cmd("\""++RunErl++"\" "++Tempdir++"/ "++Tempdir++" \""++
- Erl++XArg++"\"")
+ Erl++XArg++"\"")
end).
start_toerl_server(ToErl,Tempdir) ->
@@ -668,7 +660,7 @@ toerl_loop(Port,Acc) ->
_ ->
toerl_loop(Port,[{Tag0,Data}|Acc])
end;
- {Pid,{get_line,Timeout}} ->
+ {Pid,{get_line,Timeout}} ->
case Acc of
[] ->
case get_data_within(Port,Timeout,[]) of
@@ -717,10 +709,10 @@ toerl_loop(Port,Acc) ->
Other ->
{error, {unexpected, Other}}
end.
-
+
millistamp() ->
- erlang:monotonic_time(milli_seconds).
-
+ erlang:monotonic_time(millisecond).
+
get_data_within(Port, X, Acc) when X =< 0 ->
?dbg({get_data_within, X, Acc, ?LINE}),
receive
@@ -751,7 +743,7 @@ get_data_within(Port, Timeout, Acc) ->
after Timeout ->
timeout
end.
-
+
get_default_shell() ->
try
rtnode([{putline,""},
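One small change above is in millistamp/0, where the deprecated plural time-unit alias milli_seconds is replaced by the current singular form millisecond. As a hedged illustration (the helper name is invented and would live in some module), elapsed time can be measured with the same unit like this:
%% Returns {ElapsedMilliseconds, Result} of running a zero-arity fun.
elapsed_ms(Fun) ->
    T0 = erlang:monotonic_time(millisecond),
    Result = Fun(),
    T1 = erlang:monotonic_time(millisecond),
    {T1 - T0, Result}.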
diff --git a/lib/kernel/test/kernel_SUITE.erl b/lib/kernel/test/kernel_SUITE.erl
index 8ae2e4b23b..3e5ed855b5 100644
--- a/lib/kernel/test/kernel_SUITE.erl
+++ b/lib/kernel/test/kernel_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2014. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,24 +21,23 @@
%%% Kernel application test suite.
%%%-----------------------------------------------------------------
-module(kernel_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-% Test server specific exports
+%% Test server specific exports
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2]).
-export([init_per_testcase/2, end_per_testcase/2]).
-% Test cases must be exported.
--export([app_test/1, appup_test/1]).
+%% Test cases must be exported.
+-export([app_test/1, appup_test/1, refc/1]).
-%%
-%% all/1
-%%
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,2}}].
all() ->
- [app_test, appup_test].
+ [app_test, appup_test, refc].
groups() ->
[].
@@ -61,15 +60,12 @@ init_per_testcase(_Case, Config) ->
end_per_testcase(_Case, _Config) ->
ok.
-%
-% Test cases starts here.
-%
-app_test(doc) ->
- ["Tests the applications consistency."];
-app_test(suite) ->
- [];
+%%
+%% Test cases start here.
+%%
+%% Tests the application's consistency.
app_test(Config) when is_list(Config) ->
- ?line ok=?t:app_test(kernel),
+ ok=test_server:app_test(kernel),
ok.
@@ -167,3 +163,68 @@ check_appup([Vsn|Vsns],Instrs,Expected) ->
end;
check_appup([],_,_) ->
ok.
+
+%%% Check that refc module handles the counters as expected
+refc(_Config) ->
+ Enable = fun(Enable) -> erlang:system_flag(scheduler_wall_time, Enable) end,
+ IsOn = fun() -> undefined /= erlang:statistics(scheduler_wall_time) end,
+ Tester = self(),
+ Loop = fun Loop() ->
+ receive
+ die -> normal;
+ {apply, Bool} ->
+ Res = Enable(Bool),
+ Tester ! {self(), Res},
+ Loop()
+ end
+ end,
+
+ %% Counter should be 0
+ false = Enable(false),
+
+ false = Enable(true),
+ true = Enable(true),
+ true = Enable(false),
+ true = Enable(false),
+
+ %% Counter should be 0
+ false = IsOn(),
+
+ P1 = spawn_link(Loop),
+ P1 ! {apply, true},
+ receive {P1, R1} -> false = R1 end,
+
+ %% P1 has turned it on; the counter should be one
+ true = IsOn(),
+ true = Enable(true),
+ true = Enable(false),
+ true = IsOn(),
+
+ P1 ! {apply, false},
+ receive {P1, R2} -> true = R2 end,
+ false = IsOn(),
+
+ P1 ! {apply, true},
+ receive {P1, R3} -> false = R3 end,
+ true = IsOn(),
+ true = Enable(false),
+
+
+ P1 ! die,
+ timer:sleep(100),
+ false = IsOn(),
+ false = Enable(false),
+
+ P2 = spawn_link(Loop),
+ P2 ! {apply, true},
+ receive {P2, R4} -> false = R4 end,
+ true = IsOn(),
+ P2 ! {apply, true},
+ receive {P2, R5} -> true = R5 end,
+ true = IsOn(),
+
+ P2 ! die,
+ timer:sleep(100),
+ false = IsOn(),
+
+ ok.
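The new refc/1 test above checks that the scheduler_wall_time system flag is reference-counted per process and released when an enabling process dies. As a hedged sketch of typical use (module and function names invented), one can enable the flag, take two samples and compute per-scheduler utilisation:
-module(swt_util).
-export([sample/1]).
%% Returns [{SchedulerId, Utilisation}] measured over SleepMs milliseconds.
sample(SleepMs) ->
    _Was = erlang:system_flag(scheduler_wall_time, true),
    S0 = lists:sort(erlang:statistics(scheduler_wall_time)),
    timer:sleep(SleepMs),
    S1 = lists:sort(erlang:statistics(scheduler_wall_time)),
    erlang:system_flag(scheduler_wall_time, false),
    [{Id, (A1 - A0) / max(1, T1 - T0)}
     || {{Id, A0, T0}, {Id, A1, T1}} <- lists:zip(S0, S1)].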
diff --git a/lib/kernel/test/kernel_bench.spec b/lib/kernel/test/kernel_bench.spec
new file mode 100644
index 0000000000..4de133f21b
--- /dev/null
+++ b/lib/kernel/test/kernel_bench.spec
@@ -0,0 +1,2 @@
+{groups,"../kernel_test",zlib_SUITE,[bench]}.
+{groups,"../kernel_test",file_SUITE,[bench]}.
diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl
index 4be44015c9..9207025a2c 100644
--- a/lib/kernel/test/kernel_config_SUITE.erl
+++ b/lib/kernel/test/kernel_config_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,13 +19,15 @@
%%
-module(kernel_config_SUITE).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2, sync/1]).
-export([init_per_suite/1, end_per_suite/1]).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,2}}].
all() ->
[sync].
@@ -40,13 +42,9 @@ end_per_group(_GroupName, Config) ->
Config.
-init_per_suite(doc) -> [];
-init_per_suite(suite) -> [];
init_per_suite(Config) when is_list(Config) ->
Config.
-end_per_suite(doc) -> [];
-end_per_suite(suite) -> [];
end_per_suite(Config) when is_list(Config) ->
stop_node(init_test),
Config.
@@ -54,7 +52,7 @@ end_per_suite(Config) when is_list(Config) ->
config(Fd) ->
M = from($@, atom_to_list(node())),
io:format(Fd, "[{kernel, [{sync_nodes_optional, ['cp1@~s','cp2@~s']},"
- "{sync_nodes_timeout, 15000}]}].~n",
+ "{sync_nodes_timeout, 15000}]}].~n",
[M, M]).
from(H, [H | T]) -> T;
@@ -67,12 +65,9 @@ from(_, []) -> [].
%% Should be started in a CC view with:
%% erl -sname XXX where XXX not in [cp1, cp2]
%%-----------------------------------------------------------------
-sync(doc) -> [];
-sync(suite) -> [];
sync(Conf) when is_list(Conf) ->
- ?line Dog = ?t:timetrap(?t:seconds(120)),
- % Write a config file
- Dir = ?config(priv_dir,Conf),
+ %% Write a config file
+ Dir = proplists:get_value(priv_dir,Conf),
{ok, Fd} = file:open(Dir ++ "sys.config", [write]),
config(Fd),
file:close(Fd),
@@ -81,34 +76,33 @@ sync(Conf) when is_list(Conf) ->
%% Reset wall_clock
{T1,_} = erlang:statistics(wall_clock),
io:format("~p~n", [{t1, T1}]),
- ?line Command = lists:concat([lib:progname(),
- " -detached -sname cp1 ",
- "-config ", Config,
- " -env ERL_CRASH_DUMP erl_crash_dump.cp1"]),
+ Command = lists:append([ct:get_progname(),
+ " -detached -sname cp1 ",
+ "-config ", Config,
+ " -env ERL_CRASH_DUMP erl_crash_dump.cp1"]),
io:format("Command: ~s", [Command]),
- ?line open_port({spawn, Command}, [stream]),
+ open_port({spawn, Command}, [stream]),
io:format("started~n"),
- ?line ?t:sleep(12000),
+ ct:sleep(12000),
io:format("waited12~n"),
- ?line Host = from($@, atom_to_list(node())),
- ?line Cp1 = list_to_atom("cp1@"++Host),
- ?line wait_for_node(Cp1),
+ Host = from($@, atom_to_list(node())),
+ Cp1 = list_to_atom("cp1@"++Host),
+ wait_for_node(Cp1),
io:format("waitednode~n"),
%% Check time since last call
- ?line {TT, T} = erlang:statistics(wall_clock),
+ {TT, T} = erlang:statistics(wall_clock),
io:format("~p~n", [{t2, {TT, T}}]),
- ?line stop_node(cp1),
+ stop_node(cp1),
if
- TT-T1 < 15000 -> ?line ?t:fail({too_short_time, TT-T1});
+ TT-T1 < 15000 -> ct:fail({too_short_time, TT-T1});
true -> ok
end,
- ?line ?t:timetrap_cancel(Dog),
ok.
wait_for_node(Node) ->
case rpc:call(Node, init, get_status, []) of
{started,_} -> ok;
- {badrpc, R} -> ?line ?t:fail({rpc_failed, R});
+ {badrpc, R} -> ct:fail({rpc_failed, R});
_Other -> wait_for_node(Node)
end.
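wait_for_node/1 above polls the detached cp1 node until init reports {started,_}, failing fast on a badrpc but otherwise retrying without bound. A hedged sketch of a bounded variant (the arity, retry count and sleep interval are invented, not part of the suite):
%% Give up after Retries polls, sleeping 500 ms between attempts.
wait_for_node(_Node, 0) ->
    {error, timeout};
wait_for_node(Node, Retries) when Retries > 0 ->
    case rpc:call(Node, init, get_status, []) of
        {started, _} -> ok;
        _NotYetStarted ->
            timer:sleep(500),
            wait_for_node(Node, Retries - 1)
    end.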
diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover
new file mode 100644
index 0000000000..960bc0abff
--- /dev/null
+++ b/lib/kernel/test/logger.cover
@@ -0,0 +1,14 @@
+%% -*- erlang -*-
+{incl_mods,[error_logger,
+ logger,
+ logger_backend,
+ logger_config,
+ logger_disk_log_h,
+ logger_h_common,
+ logger_filters,
+ logger_formatter,
+ logger_server,
+ logger_simple_h,
+ logger_std_h,
+ logger_sup]}.
+
diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec
new file mode 100644
index 0000000000..1ab90b3e93
--- /dev/null
+++ b/lib/kernel/test/logger.spec
@@ -0,0 +1,11 @@
+%% -*-erlang-*-
+{suites,"../kernel_test", [error_logger_SUITE,
+ error_logger_warn_SUITE,
+ logger_SUITE,
+ logger_disk_log_h_SUITE,
+ logger_env_var_SUITE,
+ logger_filters_SUITE,
+ logger_formatter_SUITE,
+ logger_legacy_SUITE,
+ logger_simple_h_SUITE,
+ logger_std_h_SUITE]}.
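logger_SUITE.erl below installs the suite itself as a handler with logger:add_handler(h1, ?MODULE, #{...}) and checks what gets delivered. As a hedged reminder of the callback shape such a handler module needs (names here are invented; only log/2 is mandatory, adding_handler/1 is optional):
-module(example_h).
-export([adding_handler/1, log/2]).
adding_handler(Config) ->
    {ok, Config}.                       % accept the configuration unchanged
log(#{level := Level, msg := Msg}, _Config) ->
    %% A real handler would format the event and write it to its own
    %% destination; printing to standard output keeps the sketch short.
    io:format("~p: ~p~n", [Level, Msg]).
It would be attached with, for example, logger:add_handler(example, example_h, #{level => notice}).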
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
new file mode 100644
index 0000000000..d831d0d108
--- /dev/null
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -0,0 +1,1330 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+-define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ file=>?FILE, line=>?LINE-N}).
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ case logger:get_handler_config(?STANDARD_HANDLER) of
+ {ok,StdH} ->
+ ok = logger:remove_handler(?STANDARD_HANDLER),
+ [{default_handler,StdH}|Config];
+ _ ->
+ Config
+ end.
+
+end_per_suite(Config) ->
+ case ?config(default_handler,Config) of
+ #{module:=HMod} = HConfig ->
+ ok = logger:add_handler(?STANDARD_HANDLER,HMod,HConfig);
+ _ ->
+ ok
+ end.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ PC = logger:get_primary_config(),
+ [{logger_config,PC}|Config].
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop,
+ add_remove_handler,
+ multiple_handlers,
+ add_remove_filter,
+ change_config,
+ set_formatter,
+ log_no_levels,
+ log_all_levels_api,
+ macros,
+ set_level,
+ set_module_level,
+ set_application_level,
+ cache_module_level,
+ format_report,
+ filter_failed,
+ handler_failed,
+ config_sanity_check,
+ log_failed,
+ emulator,
+ via_logger_process,
+ other_node,
+ compare_levels,
+ process_metadata,
+ app_config,
+ kernel_config].
+
+start_stop(_Config) ->
+ S = whereis(logger),
+ true = is_pid(S),
+ ok.
+
+add_remove_handler(_Config) ->
+ register(callback_receiver,self()),
+ Hs0 = logger:get_handler_config(),
+ {error,{not_found,h1}} = logger:get_handler_config(h1),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ [add] = test_server:messages_get(),
+ Hs = logger:get_handler_config(),
+ Hs0 = lists:filter(fun(#{id:=h1}) -> false; (_) -> true end, Hs),
+ {ok,#{module:=?MODULE,level:=all,filters:=[],filter_default:=log}} = %defaults
+ logger:get_handler_config(h1),
+ ok = logger:set_handler_config(h1,filter_default,stop),
+ [changing_config] = test_server:messages_get(),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_no_log(),
+ ok = logger:set_handler_config(h1,filter_default,log),
+ [changing_config] = test_server:messages_get(),
+ {ok,#{filter_default:=log}} = logger:get_handler_config(h1),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = logger:remove_handler(h1),
+ [remove] = test_server:messages_get(),
+ Hs0 = logger:get_handler_config(),
+ {error,{not_found,h1}} = logger:get_handler_config(h1),
+ {error,{not_found,h1}} = logger:remove_handler(h1),
+ logger:notice("hello",[]),
+ ok = check_no_log(),
+ ok.
+
+add_remove_handler(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+multiple_handlers(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ ok = logger:add_handler(h2,?MODULE,#{level=>error,filter_default=>log}),
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = check_no_log(),
+ ok.
+
+multiple_handlers(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+add_remove_filter(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ LF = {fun(Log,_) -> Log#{level=>error} end, []},
+ ok = logger:add_primary_filter(lf,LF),
+ {error,{already_exist,lf}} = logger:add_primary_filter(lf,LF),
+ {error,{already_exist,lf}} = logger:add_primary_filter(lf,{fun(Log,_) ->
+ Log
+ end, []}),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_no_log(),
+
+ ok = logger:add_handler(h2,?MODULE,#{level=>notice,filter_default=>log}),
+ HF = {fun(#{level:=error}=Log,_) ->
+ Log#{level=>mylevel};
+ (_,_) ->
+ ignore
+ end,
+ []},
+ ok = logger:add_handler_filter(h1,hf,HF),
+ {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,HF),
+ {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,{fun(Log,_) ->
+ Log
+ end, []}),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(mylevel,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+
+ ok = logger:remove_primary_filter(lf),
+ {error,{not_found,lf}} = logger:remove_primary_filter(lf),
+
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = check_logged(notice,"hello",[],?MY_LOC(2)),
+
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(mylevel,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+
+ ok = logger:remove_handler_filter(h1,hf),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = check_logged(notice,"hello",[],?MY_LOC(2)),
+
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+ ok.
+
+add_remove_filter(cleanup,_Config) ->
+ logger:remove_primary_filter(lf),
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+change_config(_Config) ->
+ %% Overwrite handler config - check that defaults are added
+ {error,{not_found,h1}} = logger:set_handler_config(h1,#{}),
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,custom=>custom}),
+ {ok,#{module:=?MODULE,level:=notice,filter_default:=log,custom:=custom}} =
+ logger:get_handler_config(h1),
+ register(callback_receiver,self()),
+ ok = logger:set_handler_config(h1,#{filter_default=>stop}),
+ [changing_config] = test_server:messages_get(),
+ {ok,#{module:=?MODULE,level:=all,filter_default:=stop}=C2} =
+ logger:get_handler_config(h1),
+ false = maps:is_key(custom,C2),
+ {error,fail} = logger:set_handler_config(h1,#{conf_call=>fun() -> {error,fail} end}),
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(
+ h1,#{conf_call=>fun() -> logger:set_handler_config(?MODULE,#{}) end}),
+ ok =
+ logger:set_handler_config(
+ h1,#{conf_call=>fun() -> logger:set_module_level(?MODULE,debug) end}),
+ {ok,C2} = logger:get_handler_config(h1),
+
+ %% Change handler config: Single key
+ {error,fail} = logger:set_handler_config(h1,conf_call,fun() -> {error,fail} end),
+ ok = logger:set_handler_config(h1,custom,custom),
+ [changing_config] = test_server:messages_get(),
+ {ok,#{custom:=custom}=C3} = logger:get_handler_config(h1),
+ C2 = maps:remove(custom,C3),
+
+ %% Change handler config: Map
+ ok = logger:update_handler_config(h1,#{custom=>new_custom}),
+ [changing_config] = test_server:messages_get(),
+ {ok,C4} = logger:get_handler_config(h1),
+ C4 = C3#{custom:=new_custom},
+
+ %% Change handler config: Id and module can not be changed
+ {error,{illegal_config_change,Old,New}} =
+ logger:set_handler_config(h1,id,newid),
+ %% Check that only the faulty field is included in return
+ [{id,h1}] = maps:to_list(Old),
+ [{id,newid}] = maps:to_list(New),
+ %% Check that both fields are included when both are changed
+ {error,{illegal_config_change,
+ #{id:=h1,module:=?MODULE},
+ #{id:=newid,module:=newmodule}}} =
+ logger:set_handler_config(h1,#{id=>newid,module=>newmodule}),
+
+ %% Change primary config: Single key
+ PConfig0 = logger:get_primary_config(),
+ ok = logger:set_primary_config(level,warning),
+ PConfig1 = logger:get_primary_config(),
+ PConfig1 = PConfig0#{level:=warning},
+
+ %% Change primary config: Map
+ ok = logger:update_primary_config(#{level=>error}),
+ PConfig2 = logger:get_primary_config(),
+ PConfig2 = PConfig1#{level:=error},
+
+ %% Overwrite primary config - check that defaults are added
+ ok = logger:set_primary_config(#{filter_default=>stop}),
+ #{level:=notice,filters:=[],filter_default:=stop}=PC1 =
+ logger:get_primary_config(),
+ 3 = maps:size(PC1),
+ %% Check that internal 'handlers' field has not been changed
+ MS = [{{{?HANDLER_KEY,'$1'},'_','_'},[],['$1']}],
+ HIds1 = lists:sort(ets:select(?LOGGER_TABLE,MS)), % dirty, internal data
+ HIds2 = lists:sort(logger:get_handler_ids()),
+ HIds1 = HIds2,
+
+ %% Cleanup
+ ok = logger:set_primary_config(PConfig0),
+ [] = test_server:messages_get(),
+
+ ok.
+
+change_config(cleanup,Config) ->
+ logger:remove_handler(h1),
+ PC = ?config(logger_config,Config),
+ logger:set_primary_config(PC),
+ ok.
+
+set_formatter(_Config) ->
+ {error,{not_found,h1}}=logger:set_handler_config(h1,formatter,{?MODULE,[]}),
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ ok = logger:set_handler_config(h1,formatter,{?MODULE,[]}),
+ logger:notice("hello",[]),
+ receive
+ {_Log,#{formatter:={?MODULE,[]}}} ->
+ ok
+ after 500 ->
+ ct:fail({timeout,no_log,process_info(self(),messages)})
+ end,
+ ok.
+
+set_formatter(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+log_no_levels(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ logger:notice(M1=?map_rep),
+ ok = check_logged(notice,M1,#{}),
+
+ Levels = [emergency,alert,critical,error,warning,notice,info,debug],
+ ok = logger:set_primary_config(level,none),
+ [logger:Level(#{Level=>rep}) || Level <- Levels],
+ ok = check_no_log(),
+
+ ok = logger:set_primary_config(level,all),
+ M2 = ?map_rep,
+ ?LOG_NOTICE(M2),
+ ok = check_logged(notice,M2,#{}),
+
+ ok = logger:set_module_level(?MODULE,none),
+ ?LOG_EMERGENCY(?map_rep),
+ ?LOG_ALERT(?map_rep),
+ ?LOG_CRITICAL(?map_rep),
+ ?LOG_ERROR(?map_rep),
+ ?LOG_WARNING(?map_rep),
+ ?LOG_NOTICE(?map_rep),
+ ?LOG_INFO(?map_rep),
+ ?LOG_DEBUG(?map_rep),
+ ok = check_no_log(),
+
+ ok = logger:unset_module_level(?MODULE),
+ logger:notice(M3=?map_rep),
+ ok = check_logged(notice,M3,#{}),
+
+ ok = logger:set_handler_config(h1,level,none),
+ [logger:Level(#{Level=>rep}) || Level <- Levels],
+ ok = check_no_log(),
+
+ ok.
+log_no_levels(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_primary_config(level,notice),
+ logger:unset_module_level(?MODULE),
+ ok.
+
+log_all_levels_api(_Config) ->
+ ok = logger:set_primary_config(level,all),
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ test_api(emergency),
+ test_api(alert),
+ test_api(critical),
+ test_api(error),
+ test_api(warning),
+ test_api(notice),
+ test_api(info),
+ test_api(debug),
+ test_log_function(emergency),
+ ok.
+
+log_all_levels_api(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_primary_config(level,notice),
+ ok.
+
+macros(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ test_macros(emergency),
+ test_log_macro(alert),
+ ok.
+
+macros(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:unset_module_level(?MODULE),
+ ok.
+
+set_level(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ logger:debug(?map_rep),
+ ok = check_no_log(),
+ logger:notice(M1=?map_rep),
+ ok = check_logged(notice,M1,#{}),
+ ok = logger:set_primary_config(level,debug),
+ logger:debug(M2=?map_rep),
+ ok = check_logged(debug,M2,#{}),
+ ok.
+
+set_level(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_primary_config(level,notice),
+ ok.
+
+set_module_level(_Config) ->
+ [] = logger:get_module_level([?MODULE,other]),
+ [] = logger:get_module_level(?MODULE),
+ [] = logger:get_module_level(),
+
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ {error,{invalid_level,bad}} = logger:set_module_level(?MODULE,bad),
+ {error,{not_a_list_of_modules,{bad}}} =
+ logger:set_module_level({bad},warning),
+ {error,{not_a_list_of_modules,[{bad}]}} =
+ logger:set_module_level([{bad}],warning),
+ ok = logger:set_module_level(?MODULE,warning),
+ [{?MODULE,warning}] = logger:get_module_level([?MODULE,other]),
+ [{?MODULE,warning}] = logger:get_module_level(?MODULE),
+ [{?MODULE,warning}] = logger:get_module_level(),
+ logger:notice(?map_rep,?MY_LOC(0)),
+ ok = check_no_log(),
+ logger:warning(M1=?map_rep,?MY_LOC(0)),
+ ok = check_logged(warning,M1,?MY_LOC(1)),
+ ok = logger:set_module_level(?MODULE,notice),
+ [{?MODULE,notice}] = logger:get_module_level([?MODULE,other]),
+ [{?MODULE,notice}] = logger:get_module_level(?MODULE),
+ [{?MODULE,notice}] = logger:get_module_level(),
+ logger:notice(M2=?map_rep,?MY_LOC(0)),
+ ok = check_logged(notice,M2,?MY_LOC(1)),
+
+ {error,{not_a_list_of_modules,{bad}}} = logger:unset_module_level({bad}),
+ {error,{not_a_list_of_modules,[{bad}]}} = logger:unset_module_level([{bad}]),
+ ok = logger:unset_module_level(?MODULE),
+ [] = logger:get_module_level([?MODULE,other]),
+ [] = logger:get_module_level(?MODULE),
+ [] = logger:get_module_level(),
+
+ ok = logger:set_module_level([m1,m2,m3],notice),
+ [{m1,notice},{m2,notice},{m3,notice}] = logger:get_module_level(),
+ ok = logger:unset_module_level(m2),
+ [{m1,notice},{m3,notice}] = logger:get_module_level(),
+ ok = logger:unset_module_level(),
+ [] = logger:get_module_level(),
+
+ ok.
+
+set_module_level(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:unset_module_level(?MODULE),
+ ok.
+
+set_application_level(_Config) ->
+
+ {error,{not_loaded,mnesia}} = logger:set_application_level(mnesia, warning),
+ {error,{not_loaded,mnesia}} = logger:unset_application_level(mnesia),
+
+ case application:load(mnesia) of
+ ok ->
+ {ok, Modules} = application:get_key(mnesia, modules),
+ [] = logger:get_module_level(Modules),
+
+ {error,{invalid_level,warn}} =
+ logger:set_application_level(mnesia, warn),
+
+ ok = logger:set_application_level(mnesia, debug),
+ DebugModules = lists:sort([{M,debug} || M <- Modules]),
+ DebugModules = lists:sort(logger:get_module_level(Modules)),
+
+ ok = logger:set_application_level(mnesia, warning),
+
+ WarnModules = lists:sort([{M,warning} || M <- Modules]),
+ WarnModules = lists:sort(logger:get_module_level(Modules)),
+
+ ok = logger:unset_application_level(mnesia),
+ [] = logger:get_module_level(Modules);
+ {error,{"no such file or directory","mnesia.app"}} ->
+ {skip, "Cannot load mnesia, does not exist"}
+ end.
+
+set_application_level(cleanup,_Config) ->
+ _ = logger:unset_application_level(mnesia),
+ _ = application:unload(mnesia),
+ ok.
+
+cache_module_level(_Config) ->
+ ok = logger:unset_module_level(?MODULE),
+ [] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config?
+ ?LOG_NOTICE(?map_rep),
+ %% Caching is done asynchronously, so wait a bit for the update
+ timer:sleep(100),
+ [_] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config?
+ ok = logger:unset_module_level(?MODULE),
+ [] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config?
+ ok.
+
+cache_module_level(cleanup,_Config) ->
+ logger:unset_module_level(?MODULE),
+ ok.
+
+format_report(_Config) ->
+ {"~ts",["string"]} = logger:format_report("string"),
+ {"~tp",[term]} = logger:format_report(term),
+ {"~tp",[[]]} = logger:format_report([]),
+ {" ~tp: ~tp",[key,value]} = logger:format_report([{key,value}]),
+ KeyVals = [{key1,value1},{key2,"value2"},{key3,[]}],
+ KeyValRes =
+ {" ~tp: ~tp\n ~tp: ~ts\n ~tp: ~tp",
+ [key1,value1,key2,"value2",key3,[]]} =
+ logger:format_report(KeyVals),
+ KeyValRes = logger:format_report(maps:from_list(KeyVals)),
+ KeyValRes = logger:format_otp_report(#{label=>{?MODULE,test},report=>KeyVals}),
+ {" ~tp: ~tp\n ~tp: ~tp",
+ [label,{?MODULE,test},report,KeyVals]} =
+ logger:format_report(#{label=>{?MODULE,test},report=>KeyVals}),
+
+ {" ~tp: ~tp\n ~tp",[key1,value1,term]} =
+ logger:format_report([{key1,value1},term]),
+
+ {" ~tp: ~tp\n ~tp",[key1,value1,[]]} =
+ logger:format_report([{key1,value1},[]]),
+
+ {"~tp",[[]]} = logger:format_report([[],[],[]]),
+
+ ok.
+
+filter_failed(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+
+ %% Logger filters
+ {error,{invalid_filter,_}} =
+ logger:add_primary_filter(lf,{fun(_) -> ok end,args}),
+ ok = logger:add_primary_filter(lf,
+ {fun(_,_) ->
+ erlang:error({badmatch,b})
+ end,
+ args}),
+ #{filters:=[_]} = logger:get_primary_config(),
+ ok = logger:notice(M1=?map_rep),
+ ok = check_logged(notice,M1,#{}),
+ {error,{not_found,lf}} = logger:remove_primary_filter(lf),
+
+ ok = logger:add_primary_filter(lf,{fun(_,_) -> faulty_return end,args}),
+ #{filters:=[_]} = logger:get_primary_config(),
+ ok = logger:notice(M2=?map_rep),
+ ok = check_logged(notice,M2,#{}),
+ {error,{not_found,lf}} = logger:remove_primary_filter(lf),
+
+ %% Handler filters
+ {error,{not_found,h0}} =
+ logger:add_handler_filter(h0,hf,{fun(_,_) -> ignore end,args}),
+ {error,{not_found,h0}} = logger:remove_handler_filter(h0,hf),
+ {error,{invalid_filter,_}} =
+ logger:add_handler_filter(h1,hf,{fun(_) -> ok end,args}),
+ ok = logger:add_handler_filter(h1,hf,
+ {fun(_,_) ->
+ erlang:error({badmatch,b})
+ end,
+ args}),
+ {ok,#{filters:=[_]}} = logger:get_handler_config(h1),
+ ok = logger:notice(M3=?map_rep),
+ ok = check_logged(notice,M3,#{}),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+
+ ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> faulty_return end,args}),
+ {ok,#{filters:=[_]}} = logger:get_handler_config(h1),
+ ok = logger:notice(M4=?map_rep),
+ ok = check_logged(notice,M4,#{}),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+
+ ok.
+
+filter_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+handler_failed(_Config) ->
+ register(callback_receiver,self()),
+ {error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}),
+ {error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}),
+ {error,{invalid_config,bad}} = logger:add_handler(h1,?MODULE,bad),
+ {error,{invalid_filters,false}} =
+ logger:add_handler(h1,?MODULE,#{filters=>false}),
+ {error,{invalid_filter_default,true}} =
+ logger:add_handler(h1,?MODULE,#{filter_default=>true}),
+ {error,{invalid_formatter,[]}} =
+ logger:add_handler(h1,?MODULE,#{formatter=>[]}),
+ {error,{invalid_handler,_}} = logger:add_handler(h1,nomodule,#{filter_default=>log}),
+ logger:notice(?map_rep),
+ check_no_log(),
+ H1 = logger:get_handler_config(),
+ false = lists:search(fun(#{id:=h1}) -> true; (_) -> false end,H1),
+ {error,{not_found,h1}} = logger:remove_handler(h1),
+
+ ok = logger:add_handler(h2,?MODULE,
+ #{filter_default => log,
+ log_call => fun() ->
+ erlang:error({badmatch,b})
+ end}),
+ {error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}),
+ [add] = test_server:messages_get(),
+
+ logger:notice(?map_rep),
+ [remove] = test_server:messages_get(),
+ H2 = logger:get_handler_config(),
+ false = lists:search(fun(#{id:=h2}) -> true; (_) -> false end,H2),
+ {error,{not_found,h2}} = logger:remove_handler(h2),
+
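+ %% Three misbehaving callbacks (hypothetical helpers local to this
+ %% test): one that calls back into logger from within the callback
+ %% (a synchronous call to self), one that crashes, and one that kills
+ %% the process executing the callback. Each must be reported as an
+ %% error instead of hanging the logger.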
+ CallAddHandler = fun() -> logger:add_handler(h2,?MODULE,#{}) end,
+ CrashHandler = fun() -> erlang:error({badmatch,b}) end,
+ KillHandler = fun() -> exit(self(), die) end,
+
+ {error,{handler_not_added,{attempting_syncronous_call_to_self,_}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>CallAddHandler}),
+ {error,{handler_not_added,{callback_crashed,_}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>CrashHandler}),
+ {error,{handler_not_added,{logger_process_exited,_,die}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>KillHandler}),
+
+ check_no_log(),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(h1,#{conf_call=>CallAddHandler}),
+ {error,{callback_crashed,_}} =
+ logger:set_handler_config(h1,#{conf_call=>CrashHandler}),
+ {error,{logger_process_exited,_,die}} =
+ logger:set_handler_config(h1,#{conf_call=>KillHandler}),
+
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(h1,conf_call,CallAddHandler),
+ {error,{callback_crashed,_}} =
+ logger:set_handler_config(h1,conf_call,CrashHandler),
+ {error,{logger_process_exited,_,die}} =
+ logger:set_handler_config(h1,conf_call,KillHandler),
+
+ ok = logger:remove_handler(h1),
+ [add,remove] = test_server:messages_get(),
+
+ check_no_log(),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>CallAddHandler}),
+ ok = logger:remove_handler(h1),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>CrashHandler}),
+ ok = logger:remove_handler(h1),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>KillHandler}),
+ ok = logger:remove_handler(h1),
+ [add,add,add] = test_server:messages_get(),
+
+ ok.
+
+handler_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+config_sanity_check(_Config) ->
+ %% Primary config
+ {error,{invalid_config,bad}} = logger:set_primary_config(bad),
+ {error,{invalid_filter_default,bad}} =
+ logger:set_primary_config(filter_default,bad),
+ {error,{invalid_level,bad}} = logger:set_primary_config(level,bad),
+ {error,{invalid_filters,bad}} = logger:set_primary_config(filters,bad),
+ {error,{invalid_filter,bad}} = logger:set_primary_config(filters,[bad]),
+ {error,{invalid_filter,{_,_}}} =
+ logger:set_primary_config(filters,[{id,bad}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_primary_config(filters,[{id,{bad,args}}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_primary_config(filters,[{id,{fun() -> ok end,args}}]),
+ {error,{invalid_primary_config,{bad,bad}}} =
+ logger:set_primary_config(bad,bad),
+
+ %% Handler config
+ {error,{not_found,h1}} = logger:set_handler_config(h1,a,b),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ {error,{invalid_filter_default,bad}} =
+ logger:set_handler_config(h1,filter_default,bad),
+ {error,{invalid_level,bad}} = logger:set_handler_config(h1,level,bad),
+ {error,{invalid_filters,bad}} = logger:set_handler_config(h1,filters,bad),
+ {error,{invalid_filter,bad}} = logger:set_handler_config(h1,filters,[bad]),
+ {error,{invalid_filter,{_,_}}} =
+ logger:set_handler_config(h1,filters,[{id,bad}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_handler_config(h1,filters,[{id,{bad,args}}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_handler_config(h1,filters,[{id,{fun() -> ok end,args}}]),
+ {error,{invalid_formatter,bad}} =
+ logger:set_handler_config(h1,formatter,bad),
+ {error,{invalid_module,{bad}}} =
+ logger:set_handler_config(h1,formatter,{{bad},cfg}),
+ {error,{invalid_formatter_config,logger_formatter,bad}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,bad}),
+ {error,{invalid_formatter_config,logger_formatter,{bad,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}),
+ {error,{invalid_formatter_template,logger_formatter,bad}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>bad}}),
+ {error,{invalid_formatter_template,logger_formatter,[1]}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>[1]}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>[]}}),
+ {error,{invalid_formatter_config,logger_formatter,{single_line,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{single_line=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{single_line=>true}}),
+ {error,{invalid_formatter_config,logger_formatter,{legacy_header,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{legacy_header=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{legacy_header=>true}}),
+ {error,{invalid_formatter_config,logger_formatter,{report_cb,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{report_cb=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{report_cb=>fun(R) ->
+ {"~p",[R]}
+ end}}),
+ {error,{invalid_formatter_config,logger_formatter,{chars_limit,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>4}}),
+ {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>4}}),
+ {error,{invalid_formatter_config,logger_formatter,{max_size,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>4}}),
+ ok = logger:set_handler_config(h1,formatter,{module,config}),
+ {error,{callback_crashed,{error,{badmatch,3},[{?MODULE,check_config,1,_}]}}} =
+ logger:set_handler_config(h1,formatter,{?MODULE,crash}),
+ ok = logger:set_handler_config(h1,custom,custom),
+
+ %% Old utc parameter is no longer allowed (replaced by time_offset)
+ {error,{invalid_formatter_config,logger_formatter,{utc,true}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{utc=>true}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_offset,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>0}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>""}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"Z"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"z"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"-0:0"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"+10:13"}}),
+
+ {error,{invalid_formatter_config,logger_formatter,{time_offset,"+0"}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"+0"}}),
+
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>bad}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,"s"}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>"s"}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,0}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>0}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>$\s}}),
+ ok.
+
+config_sanity_check(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+log_failed(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ {error,function_clause} = ?TRY(logger:log(bad,?map_rep)),
+ {error,function_clause} = ?TRY(logger:log(notice,?map_rep,bad)),
+ {error,function_clause} = ?TRY(logger:log(notice,fun() -> ?map_rep end,bad)),
+ {error,function_clause} = ?TRY(logger:log(notice,fun() -> ?map_rep end,bad,#{})),
+ {error,function_clause} = ?TRY(logger:log(notice,bad,bad,bad)),
+ {error,function_clause} = ?TRY(logger:log(notice,bad,bad,#{})),
+ check_no_log(),
+ ok = logger:log(notice,M1=?str,#{}),
+ check_logged(notice,M1,#{}),
+ ok = logger:log(notice,M2=?map_rep,#{}),
+ check_logged(notice,M2,#{}),
+ ok = logger:log(notice,M3=?keyval_rep,#{}),
+ check_logged(notice,M3,#{}),
+
+ %% Should we check report input more thoroughly?
+ ok = logger:log(notice,M4=?keyval_rep++[other,stuff,in,list],#{}),
+ check_logged(notice,M4,#{}),
+
+ %% This might break a handler since it is assumed to be a format
+ %% string and args, so it depends on how the handler protects itself
+ %% against something like io_lib:format("ok","ok")
+ ok = logger:log(notice,"ok","ok",#{}),
+ check_logged(notice,"ok","ok",#{}),
+
+ ok.
+
+log_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+emulator(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+ tc_proc=>self()}),
+ Msg = "Error in process ~p on node ~p with exit value:~n~p~n",
+ Error = {badmatch,4},
+ Stack = [{module, function, 2, []}],
+ Pid = spawn(?MODULE, generate_error, [Error, Stack]),
+ check_logged(error, Msg, [Pid, node(), {Error, Stack}],
+ #{gl=>group_leader(),
+ error_logger=>#{tag=>error,emulator=>true}}),
+ ok.
+
+emulator(cleanup, _Config) ->
+ logger:remove_handler(h1),
+ ok.
+
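+%% Crash in a spawned process so that the emulator's own error
+%% reporting produces the "Error in process ..." event matched above.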
+generate_error(Error, Stack) ->
+ erlang:raise(error, Error, Stack).
+
+via_logger_process(Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+ tc_proc=>self()}),
+
+ %% Explicitly send a message to the logger process
+ %% This is used by code_server, erl_prim_loader, init, prim_file, ...
+ Msg = ?str,
+ logger ! {log,error,Msg,[],#{}},
+ check_logged(error, Msg, [], #{}),
+
+ case os:type() of
+ {win32,_} ->
+ %% Skip this part on Windows - can't change file mode
+ ok;
+ _ ->
+ %% This should trigger the same thing from erl_prim_loader
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ ok = file:make_dir(Dir),
+ ok = file:change_mode(Dir,8#0222),
+ error = erl_prim_loader:list_dir(Dir),
+ check_logged(error,
+ #{report=>"File operation error: eacces. Target: " ++
+ Dir ++". Function: list_dir. "},
+ #{pid=>self(),
+ gl=>group_leader(),
+ error_logger=>#{tag=>error_report,
+ type=>std_error}}),
+ ok
+ end.
+
+via_logger_process(cleanup, Config) ->
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ _ = file:change_mode(Dir,8#0664),
+ _ = file:del_dir(Dir),
+ logger:remove_handler(h1),
+ ok.
+
+other_node(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+ tc_proc=>self()}),
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ rpc:call(Node,logger,error,[Msg=?str,#{}]),
+ check_logged(error,Msg,#{}),
+ ok.
+
+other_node(cleanup,_Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes],
+ logger:remove_handler(h1),
+ ok.
+
+compare_levels(_Config) ->
+ Levels = [emergency,alert,critical,error,warning,notice,info,debug],
+ ok = compare(Levels),
+ {error,badarg} = ?TRY(logger:compare_levels(bad,bad)),
+ {error,badarg} = ?TRY(logger:compare_levels({bad},notice)),
+ {error,badarg} = ?TRY(logger:compare_levels(notice,"bad")),
+ ok.
+
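+%% Walk the severity-ordered list: each level compares eq to itself,
+%% gt (more severe) to every level after it in the list, and lt when
+%% compared from the other direction.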
+compare([L|Rest]) ->
+ eq = logger:compare_levels(L,L),
+ [gt = logger:compare_levels(L,L1) || L1 <- Rest],
+ [lt = logger:compare_levels(L1,L) || L1 <- Rest],
+ compare(Rest);
+compare([]) ->
+ ok.
+
+process_metadata(_Config) ->
+ undefined = logger:get_process_metadata(),
+ {error,badarg} = ?TRY(logger:set_process_metadata(bad)),
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ Time = erlang:system_time(microsecond),
+ ProcMeta = #{time=>Time,line=>0,custom=>proc},
+ ok = logger:set_process_metadata(ProcMeta),
+ S1 = ?str,
+ ?LOG_NOTICE(S1,#{custom=>macro}),
+ check_logged(notice,S1,#{time=>Time,line=>0,custom=>macro}),
+
+ Time2 = erlang:system_time(microsecond),
+ S2 = ?str,
+ ?LOG_NOTICE(S2,#{time=>Time2,line=>1,custom=>macro}),
+ check_logged(notice,S2,#{time=>Time2,line=>1,custom=>macro}),
+
+ logger:notice(S3=?str,#{custom=>func}),
+ check_logged(notice,S3,#{time=>Time,line=>0,custom=>func}),
+
+ ProcMeta = logger:get_process_metadata(),
+ ok = logger:update_process_metadata(#{custom=>changed,custom2=>added}),
+ Expected = ProcMeta#{custom:=changed,custom2=>added},
+ Expected = logger:get_process_metadata(),
+ ok = logger:unset_process_metadata(),
+ undefined = logger:get_process_metadata(),
+
+ ok = logger:update_process_metadata(#{custom=>added_again}),
+ {error,badarg} = ?TRY(logger:update_process_metadata(bad)),
+ #{custom:=added_again} = logger:get_process_metadata(),
+
+ ok.
+
+process_metadata(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+app_config(Config) ->
+ %% Start a node with default configuration
+ {ok,_,Node} = logger_test_lib:setup(Config,[]),
+
+ App1Name = app1,
+ App1 = {application, App1Name,
+ [{description, "Test of app with logger config"},
+ {applications, [kernel]}]},
+ ok = rpc:call(Node,application,load,[App1]),
+ ok = rpc:call(Node,application,set_env,
+ [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+
+ %% Try to add our own default handler
+ {error,{bad_config,{handler,{app1,{already_exist,default}}}}} =
+ rpc:call(Node,logger,add_handlers,[App1Name]),
+
+ %% Add a different handler
+ ok = rpc:call(Node,application,set_env,[App1Name,logger,
+ [{handler,myh,logger_std_h,#{}}]]),
+ ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
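+ %% DF captures the boot-time default handler's filter set; it is
+ %% matched again below to check that handlers added from the
+ %% application environment get the same default filters.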
+ {ok,#{filters:=DF}} = rpc:call(Node,logger,get_handler_config,[default]),
+ {ok,#{filters:=[]}} = rpc:call(Node,logger,get_handler_config,[myh]),
+
+ true = test_server:stop_node(Node),
+
+ %% Start a node with no default handler, then add our own default handler
+ {ok,#{handlers:=[#{id:=simple}]},Node} =
+ logger_test_lib:setup(Config,[{logger,[{handler,default,undefined}]}]),
+
+ ok = rpc:call(Node,application,load,[App1]),
+ ok = rpc:call(Node,application,set_env,
+ [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+ ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
+ #{handlers:=[#{id:=default,filters:=DF}]} =
+ rpc:call(Node,logger,get_config,[]),
+
+ true = test_server:stop_node(Node),
+
+ %% Start a silent node, then add our own default handler
+ {ok,#{handlers:=[]},Node} =
+ logger_test_lib:setup(Config,[{error_logger,silent}]),
+
+ {error,{bad_config,{handler,[{some,bad,config}]}}} =
+ rpc:call(Node,logger,add_handlers,[[{some,bad,config}]]),
+ ok = rpc:call(Node,logger,add_handlers,
+ [[{handler,default,logger_std_h,#{}}]]),
+
+ #{handlers:=[#{id:=default,filters:=DF}]} =
+ rpc:call(Node,logger,get_config,[]),
+
+ ok.
+
+%% This test case is mainly to see code coverage. Note that
+%% logger_env_var_SUITE tests a lot of the same, and checks the
+%% functionality more thoroughly, but since it all happens at node
+%% start, it is not possible to see code coverage in that test.
+kernel_config(Config) ->
+ %% Start a node with simple handler only, then simulate kernel
+ %% start by calling internally exported
+ %% internal_init_logger(). This is to test all variants of kernel
+ %% config, including bad config, and see the code coverage.
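+ %% DF is bound to the filter set of the initial simple handler and is
+ %% matched against the filters that each configuration variant below
+ %% installs on the default handler.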
+ {ok,#{handlers:=[#{id:=simple,filters:=DF}]}=LC,Node} =
+ logger_test_lib:setup(Config,[{error_logger,false}]),
+
+ %% Same once more, to get coverage
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ LC = rpc:call(Node,logger,get_config,[]),
+
+ %% This shall mean the same as above, but using 'logger' parameter
+ %% instead of 'error_logger'
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{handler,default,undefined}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ LC = rpc:call(Node,logger,get_config,[]),
+
+ %% Silent
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,silent]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Default
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% error_logger=tty (same as default)
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,tty]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% error_logger={file,File}
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ F = filename:join(?config(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,{file,F}]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Same, but using 'logger' parameter instead of 'error_logger'
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_std_h,
+ #{config=>#{type=>{file,F}}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Same, but with type={file,File,Modes}
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ M = [raw,write,delayed_write],
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_std_h,
+ #{config=>#{type=>{file,F,M}}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F,M}}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Same, but with disk_log handler
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ M = [raw,write,delayed_write],
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_disk_log_h,
+ #{config=>#{file=>F}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{file:=F}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Set primary filters and module level. No default handler.
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{handler,default,undefined},
+ {filters,stop,[{f1,{fun(_,_) -> log end,ok}}]},
+ {module_level,debug,[?MODULE]}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=stop,filters:=[_]},
+ handlers:=[],
+ module_levels:=[{?MODULE,debug}]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Bad config
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,bad]),
+ {error,{bad_config,{kernel,{error_logger,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,logger_level,bad]),
+ {error,{bad_config,{kernel,{logger_level,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,unset_env,[kernel,logger_level]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[bad]}]]),
+ {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[bad]}]]),
+ {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[{f1,bad}]}]]),
+ {error,{bad_config,{kernel,{invalid_filter,{f1,bad}}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,MF=[{filters,stop,[]},{filters,log,[]}]]),
+ {error,{bad_config,{kernel,{multiple_filters,MF}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{module_level,bad,[?MODULE]}]]),
+ {error,{bad_config,{kernel,{invalid_level,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
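+%% This suite registers itself as the handler module (see the handler
+%% callbacks below), so every accepted log event is sent to the test
+%% case process. check_logged/3,4 matches the next received event, and
+%% check_no_log/0 asserts that nothing arrived.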
+check_logged(Level,Format,Args,Meta) ->
+ do_check_logged(Level,{Format,Args},Meta).
+
+check_logged(Level,Msg,Meta) when ?IS_REPORT(Msg) ->
+ do_check_logged(Level,{report,Msg},Meta);
+check_logged(Level,Msg,Meta) when ?IS_STRING(Msg) ->
+ do_check_logged(Level,{string,Msg},Meta).
+
+do_check_logged(Level,Msg0,Meta0) ->
+ receive
+ {#{level:=Level,msg:=Msg,meta:=Meta},_} ->
+ check_msg(Msg0,Msg),
+ check_maps(Meta0,Meta,meta)
+ after 500 ->
+ ct:fail({timeout,no_log,process_info(self(),messages)})
+ end.
+
+check_no_log() ->
+ receive
+ X -> ct:fail({got_unexpected_log,X})
+ after 500 ->
+ ok
+ end.
+
+check_msg(Msg,Msg) ->
+ ok;
+check_msg({report,Expected},{report,Got}) when is_map(Expected), is_map(Got) ->
+ check_maps(Expected,Got,msg);
+check_msg(Expected,Got) ->
+ ct:fail({unexpected,msg,Expected,Got}).
+
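+%% Expected is treated as a subset: merging it into Got must give back
+%% Got unchanged, i.e. every expected key must be present in Got with
+%% the same value.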
+check_maps(Expected,Got,What) ->
+ case maps:merge(Got,Expected) of
+ Got ->
+ ok;
+ _ ->
+ ct:fail({unexpected,What,Expected,Got})
+ end.
+
+%% Handler
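+%% Callbacks invoked by logger when this module is used as the handler
+%% module. A test case can plant an add_call/conf_call/rem_call/log_call
+%% fun in the handler config to make the corresponding callback
+%% misbehave on purpose (see handler_failed/1 above).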
+adding_handler(#{add_call:=Fun}) ->
+ Fun();
+adding_handler(Config) ->
+ maybe_send(add),
+ {ok,Config}.
+
+removing_handler(#{rem_call:=Fun}) ->
+ Fun();
+removing_handler(_Config) ->
+ maybe_send(remove),
+ ok.
+changing_config(_Old,#{conf_call:=Fun}) ->
+ Fun();
+changing_config(_Old,Config) ->
+ maybe_send(changing_config),
+ {ok,Config}.
+
+maybe_send(Msg) ->
+ case whereis(callback_receiver) of
+ undefined -> ok;
+ Pid -> Pid ! Msg
+ end.
+
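+%% The log/2 callback runs in the process that issued the log call, so
+%% self() defaults to the test case process when no tc_proc is given.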
+log(_Log,#{log_call:=Fun}) ->
+ Fun();
+log(Log,Config) ->
+ TcProc = maps:get(tc_proc,Config,self()),
+ TcProc ! {Log,Config},
+ ok.
+
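+%% Exercise the logger:Level/1,2,3 API for one level: report map,
+%% format string with args, and lazy fun message, including funs that
+%% return a bad term or crash.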
+test_api(Level) ->
+ logger:Level(#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},#{}),
+ logger:Level(#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},#{my=>meta}),
+ logger:Level("~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],#{}),
+ logger:Level("~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ logger:Level(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,x,
+ #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}),
+ logger:Level(fun(x) -> #{Level=>fun_to_r,meta=>true} end,x,
+ #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}),
+ logger:Level(fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,#{}),
+ logger:Level(F1=fun(x) -> {fun_to_bad} end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ logger:Level(F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_log_function(Level) ->
+ logger:log(Level,#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},#{}),
+ logger:log(Level,#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},#{my=>meta}),
+ logger:log(Level,"~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],#{}),
+ logger:log(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ logger:log(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}),
+ logger:log(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}),
+ logger:log(Level,fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,#{}),
+ logger:log(Level,F1=fun(x) -> {fun_to_bad} end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ logger:log(Level,F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
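+%% Same as test_api/1 but through the ?LOG_EMERGENCY macro. ?MY_LOC(N)
+%% is defined earlier in this suite and is expected to build the
+%% location metadata (file/line/mfa) for the macro call N lines above
+%% its own expansion.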
+test_macros(emergency=Level) ->
+ ?LOG_EMERGENCY(#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)),
+ ?LOG_EMERGENCY(#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}),
+ ?LOG_EMERGENCY("~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)),
+ ?LOG_EMERGENCY("~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],
+ (?MY_LOC(3))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},
+ (?MY_LOC(2))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)),
+ F1=fun(x) -> {fun_to_bad} end,
+ ?LOG_EMERGENCY(F1,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ F2=fun(x) -> erlang:error(fun_that_crashes) end,
+ ?LOG_EMERGENCY(F2,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_log_macro(Level) ->
+ ?LOG(Level,#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)),
+ ?LOG(Level,#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}),
+ ?LOG(Level,"~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)),
+ ?LOG(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}),
+ ?LOG(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],
+ (?MY_LOC(3))#{my=>meta}),
+ ?LOG(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},
+ (?MY_LOC(2))#{my=>meta}),
+ ?LOG(Level,fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)),
+ F1=fun(x) -> {fun_to_bad} end,
+ ?LOG(Level,F1,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ F2=fun(x) -> erlang:error(fun_that_crashes) end,
+ ?LOG(Level,F2,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R -> {C,R} end.
+
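+%% Formatter callback: config_sanity_check/1 installs {?MODULE,crash}
+%% as formatter to verify that a crashing check_config/1 is reported
+%% as callback_crashed.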
+check_config(crash) ->
+ erlang:error({badmatch,3});
+check_config(_) ->
+ ok.
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
new file mode 100644
index 0000000000..905c2c52c5
--- /dev/null
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -0,0 +1,1679 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_disk_log_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("kernel/include/file.hrl").
+
+-define(check_no_log, [] = test_server:messages_get()).
+
+-define(check(Expected),
+ receive {log,Expected} ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(bin(Msg), list_to_binary(Msg++"\n")).
+-define(log_no(File,N), lists:concat([File,".",N])).
+-define(domain,#{domain=>[?MODULE]}).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ timer:start(), % to avoid progress report
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(TestHooksCase, Config) when
+ TestHooksCase == write_failure;
+ TestHooksCase == sync_failure ->
+ case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of
+ true ->
+ {skip,"Define the TEST_HOOKS macro to run this test"};
+ false ->
+ ct:print("********** ~w **********", [TestHooksCase]),
+ Config
+ end;
+init_per_testcase(TestCase, Config) ->
+ ct:print("********** ~w **********", [TestCase]),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop_handler,
+ create_log,
+ open_existing_log,
+ disk_log_opts,
+ default_formatter,
+ logging,
+ filter_config,
+ errors,
+ formatter_fail,
+ config_fail,
+ bad_input,
+ info_and_reset,
+ reconfig,
+ sync,
+ disk_log_full,
+ disk_log_wrap,
+ disk_log_events,
+ write_failure,
+ sync_failure,
+ op_switch_to_sync,
+ op_switch_to_drop,
+ op_switch_to_flush,
+ limit_burst_disabled,
+ limit_burst_enabled_one,
+ limit_burst_enabled_period,
+ kill_disabled,
+ qlen_kill_new,
+ %% qlen_kill_std,
+ mem_kill_new,
+ %% mem_kill_std,
+ restart_after,
+ handler_requests_under_load
+ ].
+
+start_stop_handler(_Config) ->
+ ok = logger:add_handler(?MODULE, logger_disk_log_h, #{}),
+ {error,{already_exist,?MODULE}} =
+ logger:add_handler(?MODULE, logger_disk_log_h, #{}),
+ true = is_pid(whereis(h_proc_name())),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name()).
+start_stop_handler(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+create_log(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_A"])),
+ LogFile1 = filename:join(PrivDir, Name1),
+ ok = start_and_add(Name1, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:notice("hello", ?domain),
+ logger_disk_log_h:filesync(Name1),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"hello\n">>}, 5000),
+
+ %% test second handler
+ Name2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_B"])),
+ DLName = lists:concat([?FUNCTION_NAME,"_B_log"]),
+ LogFile2 = filename:join(PrivDir, DLName),
+ ok = start_and_add(Name2, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile2}),
+ logger:notice("dummy", ?domain),
+ logger_disk_log_h:filesync(Name2),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile2,1)]),
+ try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000),
+
+ remove_and_stop(Name1),
+ remove_and_stop(Name2),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"hello\ndummy\n">>}, 1),
+ try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000),
+ ok.
+
+open_existing_log(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ HName = ?FUNCTION_NAME,
+ DLName = lists:concat([?FUNCTION_NAME,"_log"]),
+ LogFile1 = filename:join(PrivDir, DLName),
+ ok = start_and_add(HName, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:notice("one", ?domain),
+ logger_disk_log_h:filesync(HName),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\n">>}, 5000),
+ logger:notice("two", ?domain),
+ ok = remove_and_stop(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\n">>}, 5000),
+
+ logger:notice("two and a half", ?domain),
+
+ ok = start_and_add(HName, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:notice("three", ?domain),
+ logger_disk_log_h:filesync(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000),
+ remove_and_stop(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000).
+
+disk_log_opts(Config) ->
+ Get = fun(Key, PL) -> proplists:get_value(Key, PL) end,
+ PrivDir = ?config(priv_dir,Config),
+ WName = list_to_atom(lists:concat([?FUNCTION_NAME,"_W"])),
+ WFile = lists:concat([?FUNCTION_NAME,"_W_log"]),
+ Size = length("12345"),
+ ConfigW = #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter => {?MODULE,no_nl}},
+ WFileFull = filename:join(PrivDir, WFile),
+ DLOptsW = #{file => WFileFull,
+ type => wrap,
+ max_no_bytes => Size,
+ max_no_files => 2},
+ ok = start_and_add(WName, ConfigW, DLOptsW),
+ WInfo1 = disk_log:info(WName),
+ ct:log("Fullname = ~s", [WFileFull]),
+ {WFileFull,wrap,{Size,2},1} = {Get(file,WInfo1),Get(type,WInfo1),
+ Get(size,WInfo1),Get(current_file,WInfo1)},
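+ %% Each wrap file holds at most 5 bytes ("12345"), so "123" plus "45"
+ %% fills file 1 exactly and "6" forces the wrap to file 2.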
+ logger:notice("123", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 1 = Get(current_file, disk_log:info(WName)),
+
+ logger:notice("45", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 1 = Get(current_file, disk_log:info(WName)),
+
+ logger:notice("6", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 2 = Get(current_file, disk_log:info(WName)),
+
+ logger:notice("7890", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 2 = Get(current_file, disk_log:info(WName)),
+
+ HName1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H1"])),
+ HFile1 = lists:concat([?FUNCTION_NAME,"_H1_log"]),
+ ConfigH = #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter => {?MODULE,no_nl}},
+ HFile1Full = filename:join(PrivDir, HFile1),
+ DLOptsH1 = #{file => HFile1Full,
+ type => halt},
+ ok = start_and_add(HName1, ConfigH, DLOptsH1),
+ HInfo1 = disk_log:info(HName1),
+ ct:log("Fullname = ~s", [HFile1Full]),
+ {HFile1Full,halt,infinity} = {Get(file,HInfo1),Get(type,HInfo1),
+ Get(size,HInfo1)},
+ logger:notice("12345", ?domain),
+ logger_disk_log_h:filesync(HName1),
+ timer:sleep(500),
+ 1 = Get(no_written_items, disk_log:info(HName1)),
+
+ HName2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H2"])),
+ HFile2 = lists:concat([?FUNCTION_NAME,"_H2_log"]),
+ HFile2Full = filename:join(PrivDir, HFile2),
+ DLOptsH2 = DLOptsH1#{file => HFile2Full,
+ max_no_bytes => 1000},
+ ok = start_and_add(HName2, ConfigH, DLOptsH2),
+ HInfo3 = disk_log:info(HName2),
+ ct:log("Fullname = ~s", [HFile2Full]),
+ {HFile2Full,halt,1000} = {Get(file,HInfo3),Get(type,HInfo3),
+ Get(size,HInfo3)},
+
+ remove_and_stop(WName),
+ remove_and_stop(HName1),
+ remove_and_stop(HName2),
+ ok.
+
+default_formatter(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ LogFile = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)),
+ HandlerConfig = #{config => #{file=>LogFile},
+ filter_default=>log},
+ ct:pal("Log: ~p", [LogFile]),
+ ok = logger:add_handler(?MODULE, logger_disk_log_h, HandlerConfig),
+ ok = logger:set_handler_config(?MODULE,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ LogName = lists:concat([LogFile, ".1"]),
+ logger:notice("dummy"),
+ wait_until_written(LogName),
+ {ok,Bin} = file:read_file(LogName),
+ match = re:run(Bin, "=NOTICE REPORT====.*\ndummy", [{capture,none}]),
+ ok.
+default_formatter(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+logging(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ LogFile = filename:join(PrivDir, Name),
+ ok = start_and_add(Name, #{filter_default=>log,
+ formatter=>{?MODULE,self()}},
+ #{file => LogFile}),
+ MsgFormatter = fun(Term) -> {io_lib:format("Term:~p",[Term]),[]} end,
+ logger:notice([{x,y}], #{report_cb => MsgFormatter}),
+ logger:notice([{x,y}], #{}),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile,1)]),
+ try_read_file(?log_no(LogFile,1), {ok,<<"Term:[{x,y}]\n x: y\n">>}, 5000).
+
+logging(cleanup, _Config) ->
+ Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ remove_and_stop(Name).
+
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([handler_pid,mode_tab],HConfig),
+
+ FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()},
+ #{config:=HConfig} =
+ logger_disk_log_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
+errors(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ LogFile1 = filename:join(PrivDir,Name1),
+ HandlerConfig = #{config=>#{file=>LogFile1},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}},
+ ok = logger:add_handler(Name1, logger_disk_log_h, HandlerConfig),
+ {error,{already_exist,Name1}} =
+ logger:add_handler(Name1, logger_disk_log_h, #{}),
+
+ %%! TODO:
+ %%! Check how bad log_opts are handled!
+
+ {error,{illegal_config_change,
+ #{config:=#{type:=wrap}},
+ #{config:=#{type:=halt}}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{type=>halt,
+ file=>LogFile1}),
+
+ {error,{illegal_config_change,
+ #{config:=#{file:=LogFile1}},
+ #{config:=#{file:="newfilename"}}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{file=>"newfilename"}),
+
+ %% Read-only fields may (accidentally) be included in the change,
+ %% but it won't take effect
+ {ok,C} = logger:get_handler_config(Name1),
+ ok = logger:set_handler_config(Name1,config,
+ #{handler_pid=>self(),
+ mode_tab=>erlang:make_ref()}),
+ {ok,C} = logger:get_handler_config(Name1),
+
+
+ ok = logger:remove_handler(Name1),
+ {error,{not_found,Name1}} = logger:remove_handler(Name1),
+ ok.
+
+errors(cleanup, _Config) ->
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ _ = logger:remove_handler(Name1).
+
+formatter_fail(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Name = ?FUNCTION_NAME,
+ LogFile = filename:join(PrivDir,Name),
+ ct:pal("Log = ~p", [LogFile]),
+ HandlerConfig = #{config => #{file=>LogFile},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])},
+ %% no formatter!
+ logger:add_handler(Name, logger_disk_log_h, HandlerConfig),
+ Pid = whereis(h_proc_name(Name)),
+ true = is_pid(Pid),
+ H = logger:get_handler_ids(),
+ true = lists:member(Name,H),
+
+ %% Formatter is added automatically
+ {ok,#{formatter:={logger_formatter,_}}} = logger:get_handler_config(Name),
+ logger:notice(M1=?msg,?domain),
+ Got1 = try_match_file(?log_no(LogFile,1),"[0-9\\+\\-T:\\.]* notice: "++M1,5000),
+
+ ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}),
+ logger:notice(M2=?msg,?domain),
+ Got2 = try_match_file(?log_no(LogFile,1),
+ escape(Got1)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M2,
+ 5000),
+
+ ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}),
+ logger:notice(M3=?msg,?domain),
+ Got3 = try_match_file(?log_no(LogFile,1),
+ escape(Got2)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M3,
+ 5000),
+
+ ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}),
+ logger:notice(?msg,?domain),
+ try_match_file(?log_no(LogFile,1),
+ escape(Got3)++"FORMATTER ERROR: bad return value",
+ 5000),
+
+ %% Check that handler is still alive and was never dead
+ Pid = whereis(h_proc_name(Name)),
+ H = logger:get_handler_ids(),
+ ok.
+
+formatter_fail(cleanup,_Config) ->
+ _ = logger:remove_handler(?FUNCTION_NAME),
+ ok.
+
+config_fail(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,{bad,bad}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{bad => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+
+ {error,{handler_not_added,{invalid_levels,{_,1,_}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{drop_mode_qlen=>1}}),
+ {error,{handler_not_added,{invalid_levels,{43,42,_}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{sync_mode_qlen=>43,
+ drop_mode_qlen=>42}}),
+ {error,{handler_not_added,{invalid_levels,{_,43,42}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{drop_mode_qlen=>43,
+ flush_qlen=>42}}),
+
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ %% can't change the disk log options for a log already in use
+ {error,{illegal_config_change,_,_}} =
+ logger:update_handler_config(?MODULE,config,
+ #{max_no_files=>2}),
+ %% can't change the name of an existing handler
+ {error,{illegal_config_change,_,_}} =
+ logger:update_handler_config(?MODULE,id,bad),
+ %% incorrect values of overload protection (OP) parameters
+ {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
+ {error,{invalid_levels,_}} =
+ logger:update_handler_config(?MODULE,config,
+ HConfig#{sync_mode_qlen=>100,
+ flush_qlen=>99}),
+ %% invalid name of config parameter
+ {error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} =
+ logger:update_handler_config(?MODULE, config,
+ HConfig#{filesync_rep_int => 2000}),
+ ok.
+config_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+bad_input(_Config) ->
+ {error,{badarg,{filesync,["BadType"]}}} =
+ logger_disk_log_h:filesync("BadType"),
+ {error,{badarg,{info,["BadType"]}}} = logger_disk_log_h:info("BadType"),
+ {error,{badarg,{reset,["BadType"]}}} = logger_disk_log_h:reset("BadType").
+
+info_and_reset(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ #{id := ?MODULE} = logger_disk_log_h:info(?MODULE),
+ ok = logger_disk_log_h:reset(?MODULE).
+info_and_reset(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+reconfig(Config) ->
+ Dir = ?config(priv_dir,Config),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{id := ?MODULE,
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
+ log_opts := #{type := ?DISK_LOG_TYPE,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ file := DiskLogFile}} =
+ logger_disk_log_h:info(?MODULE),
+ {ok,#{config :=
+ #{sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
+ file := DiskLogFile,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ type := wrap} = HConfig0}} =
+ logger:get_handler_config(?MODULE),
+
+ HConfig1 = HConfig0#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen => 3,
+ burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 10,
+ overload_kill_enable => true,
+ overload_kill_qlen => 100000,
+ overload_kill_mem_size => 10000000,
+ overload_kill_restart_after => infinity,
+ filesync_repeat_interval => no_repeat},
+ ok = logger:set_handler_config(?MODULE, config, HConfig1),
+ #{id := ?MODULE,
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity,
+ filesync_repeat_interval := no_repeat} =
+ logger_disk_log_h:info(?MODULE),
+ {ok,#{config:=HConfig1}} = logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConfig1#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = HConfig0#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = HConfig0#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = HConfig0#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir, "logfile"),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ config=>
+ #{type => halt,
+ max_no_files => 1,
+ max_no_bytes => 1024,
+ file => File}}),
+ #{log_opts := #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}} =
+ logger_disk_log_h:info(?MODULE),
+ {ok,#{config :=
+ #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}=HaltHConfig} = Config2} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, level, notice),
+ {ok,C5} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = Config2#{level => notice},
+
+ ok = logger:set_handler_config(?MODULE, level, info),
+ {ok,C6} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = Config2#{level => info},
+
+ %% You are not allowed to actively set the write-once fields
+ %% (type, max_no_files, max_no_bytes, file) at runtime.
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>wrap}),
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_files=>2}),
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}),
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}),
+ {ok,C7} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = C6,
+
+ %% ... but if you don't specify the write-once fields, then
+ %% set_handler_config shall NOT reset them to their default values
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C8}} = logger:get_handler_config(?MODULE),
+ ct:log("C8: ~p",[C8]),
+ C8 = HaltHConfig#{sync_mode_qlen=>1},
+ ok.
+
+reconfig(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+sync(Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ Log = lists:concat([File,".1"]),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{config => #{file => File},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,nl}}),
+
+ start_tracer([{disk_log,blog,2},
+ {logger_disk_log_h,disk_log_sync,2}],
+ [{disk_log,blog,<<"first\n">>},
+ {logger_disk_log_h,disk_log_sync}]),
+
+ logger:notice("first", ?domain),
+ %% wait for automatic disk_log_sync
+ check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
+
+    %% check that even if no repeated disk_log_sync is active,
+    %% a disk_log_sync is still performed when the handler goes idle
+    %% (see the sketch after this test case)
+ {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
+ HConfig1 = HConfig#{filesync_repeat_interval => no_repeat},
+ ok = logger:update_handler_config(?MODULE, config, HConfig1),
+
+ no_repeat = maps:get(filesync_repeat_interval,
+ logger_disk_log_h:info(?MODULE)),
+    %% The following sleep is to make sure the time from the last log
+    %% ("first") to the next ("second") is long enough, so that a flush
+    %% is triggered by the idle timeout between "first" and "second".
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+
+ start_tracer([{disk_log,blog,2},
+ {logger_disk_log_h,disk_log_sync,2}],
+ [{disk_log,blog,<<"second\n">>},
+ {logger_disk_log_h,disk_log_sync},
+ {disk_log,blog,<<"third\n">>},
+ {logger_disk_log_h,disk_log_sync}]),
+
+ logger:notice("second", ?domain),
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ logger:notice("third", ?domain),
+ %% wait for automatic disk_log_sync
+ check_tracer(?IDLE_DETECT_TIME_MSEC*2),
+
+ try_read_file(Log, {ok,<<"first\nsecond\nthird\n">>}, 1000),
+
+ %% switch repeated disk_log_sync on and verify that the looping works
+ SyncInt = 1000,
+ WaitT = 4500,
+ OneSync = {logger_disk_log_h,handle_cast,repeated_disk_log_sync},
+ %% receive 1 initial repeated_disk_log_sync, then 1 per sec
+ start_tracer([{logger_disk_log_h,handle_cast,2}],
+ [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+
+ HConfig2 = HConfig#{filesync_repeat_interval => SyncInt},
+ ok = logger:update_handler_config(?MODULE, config, HConfig2),
+
+ SyncInt = maps:get(filesync_repeat_interval,
+ logger_disk_log_h:info(?MODULE)),
+ timer:sleep(WaitT),
+ HConfig3 = HConfig#{filesync_repeat_interval => no_repeat},
+ ok = logger:update_handler_config(?MODULE, config, HConfig3),
+ check_tracer(100),
+ ok.
+sync(cleanup,_Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
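+%% Illustrative sketch, not executed by the suite: how the repeated
+%% disk_log_sync exercised in sync/1 above is controlled through the
+%% handler-specific config. The handler name my_handler is a
+%% hypothetical example and is assumed to be an already added
+%% logger_disk_log_h handler.
+filesync_interval_sketch() ->
+    %% disable the repeated filesync; the handler then only syncs
+    %% when it detects that it has gone idle
+    ok = logger:update_handler_config(
+           my_handler, config, #{filesync_repeat_interval => no_repeat}),
+    %% re-enable it with a one second interval
+    ok = logger:update_handler_config(
+           my_handler, config, #{filesync_repeat_interval => 1000}).
+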
+disk_log_wrap(Config) ->
+ Get = fun(Key, PL) -> proplists:get_value(Key, PL) end,
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ ct:pal("Log = ~p", [File]),
+ MaxFiles = 3,
+ MaxBytes = 5,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ config=>
+ #{type => wrap,
+ max_no_files => MaxFiles,
+ max_no_bytes => MaxBytes,
+ file => File}}),
+ Info = disk_log:info(?MODULE),
+ {File,wrap,{MaxBytes,MaxFiles},1} =
+ {Get(file,Info),Get(type,Info),Get(size,Info),Get(current_file,Info)},
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)],
+ ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]),
+ %% fill first file
+ lists:foreach(fun(N) ->
+ Log = lists:concat([File,".",N]),
+ logger:notice(Text, ?domain),
+ wait_until_written(Log),
+ ct:pal("N = ~w",
+ [N = Get(current_file,
+ disk_log:info(?MODULE))])
+ end, lists:seq(1,MaxFiles)),
+
+ %% wait for trace messages
+ timer:sleep(1000),
+ dbg:stop_clear(),
+ Received = lists:flatmap(fun({trace,_M,handle_info,
+ [{disk_log,_Node,_Name,What},_]}) ->
+ [{trace,What}];
+ ({log,_}) ->
+ []
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ Received = [{trace,{wrap,0}} || _ <- lists:seq(1,MaxFiles-1)],
+ ok.
+
+disk_log_wrap(cleanup,_Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+disk_log_full(Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ ct:pal("Log = ~p", [File]),
+ MaxBytes = 50,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ config=>
+ #{type => halt,
+ max_no_files => 1,
+ max_no_bytes => MaxBytes,
+ file => File}}),
+
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+
+ NoOfChars = 5,
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)],
+ [logger:notice(Text, ?domain) || _ <- lists:seq(1,trunc(MaxBytes/NoOfChars)+1)],
+
+ %% wait for trace messages
+ timer:sleep(2000),
+ dbg:stop_clear(),
+ Received = lists:flatmap(fun({trace,_M,handle_info,
+ [{disk_log,_Node,_Name,What},_]}) ->
+ [{trace,What}];
+ ({log,_}) ->
+ []
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ [{trace,full},
+ {trace,{error_status,{error,{full,_}}}}] = Received,
+ ok.
+disk_log_full(cleanup, _Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+disk_log_events(Config) ->
+ Node = node(),
+ Log = ?MODULE,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ %% Events copied from disk_log API
+ Events =
+ [{disk_log, Node, Log, {wrap, 0}},
+ {disk_log, Node, Log, {truncated, 0}},
+ {disk_log, Node, Log, {read_only, 42}},
+ {disk_log, Node, Log, {blocked_log, 42}},
+ {disk_log, Node, Log, {format_external, 42}},
+ {disk_log, Node, Log, full},
+ {disk_log, Node, Log, {error_status, ok}}],
+
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+
+ [whereis(h_proc_name()) ! E || E <- Events],
+ %% wait for trace messages
+ timer:sleep(2000),
+ dbg:stop_clear(),
+ Received = lists:map(fun({trace,_M,handle_info,
+ [Got,_]}) -> Got
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ NoOfEvents = length(Events),
+ NoOfEvents = length(Received),
+ lists:foreach(fun(Event) ->
+ true = lists:member(Event, Received)
+                  end, Events),
+ ok.
+disk_log_events(cleanup, _Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+write_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ Log = lists:concat([File,".1"]),
+ ct:pal("Log = ~p", [Log]),
+
+ Node = start_h_on_new_node(Config, File),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
+ HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
+ ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+
+ SyncRepInt = case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of
+ true -> 5500;
+ false -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end,
+
+ try_read_file(Log, {ok,<<"Logged1\n">>}, SyncRepInt),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,{error,no_such_log}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,
+ {error,{full,?STANDARD_HANDLER}}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,log,LogOpts,
+ {error,{full,?STANDARD_HANDLER}}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, SyncRepInt),
+ ok.
+write_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+
+sync_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ FileName = lists:concat([?MODULE,"_",?FUNCTION_NAME]),
+ File = filename:join(Dir, FileName),
+
+
+ Node = start_h_on_new_node(Config, File),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
+ HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
+ LogOpts = maps:get(log_opts, HState),
+
+ SyncInt = 500,
+ ok = rpc:call(Node, logger, update_handler_config,
+ [?STANDARD_HANDLER, config,
+ #{filesync_repeat_interval => SyncInt}]),
+ Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
+ SyncInt = maps:get(filesync_repeat_interval, Info),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,no_such_log}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,filesync,LogOpts,{error,no_such_log}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result,
+ [disk_log_sync,{error,{blocked_log,?STANDARD_HANDLER}}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,filesync,LogOpts,
+ {error,{blocked_log,?STANDARD_HANDLER}}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ ?check_no_log,
+ ok.
+sync_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+start_h_on_new_node(Config, File) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,[{handler,default,logger_disk_log_h,
+ #{ config => #{ file => File }}}]}]),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,nl}]),
+ Node.
+
+log_on_remote_node(Node,Msg) ->
+ _ = spawn_link(Node,
+ fun() -> erlang:group_leader(whereis(user),self()),
+ logger:notice(Msg)
+ end),
+ ok.
+
+%% functions for test hook macros to be called by rpc
+set_internal_log(_Mod, _Func) ->
+ ?set_internal_log({_Mod,_Func}).
+set_result(_Op, _Result) ->
+ ?set_result(_Op, _Result).
+set_defaults() ->
+ ?set_defaults().
+
+%% internal log function that sends the term to the test case process
+internal_log(Type, Term) ->
+ [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester),
+ Tester ! {log,{Type,Term}},
+ logger:internal_log(Type, Term),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload protection tests
+
+op_switch_to_sync(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 500,
+ NewHConfig =
+ HConfig#{config => DLHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => NumOfReqs+1,
+ flush_qlen => 2*NumOfReqs,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Lines = count_lines(Log),
+ NumOfReqs = Lines,
+ ok = file_delete(Log),
+ ok.
+op_switch_to_sync(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop() ->
+ [{timetrap,{seconds,180}}].
+op_switch_to_drop(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,DLHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 300,
+ Procs = 2,
+ Bursts = 10,
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen => Procs*NumOfReqs*Bursts,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+                %% It sometimes happens that the handler gets the
+                %% requests at a slow enough pace that dropping never
+                %% occurs. Therefore, let's generate a number of
+                %% bursts to increase the chance of message buildup.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1, Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]),
+ true = (Logged < (Procs*NumOfReqs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+    %% As it's tricky to get the timing right in only one go, we perform the
+    %% test repeatedly, hoping that one of the attempts will succeed.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_drop(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+op_switch_to_flush() ->
+ [{timetrap,{minutes,3}}].
+op_switch_to_flush(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,DLHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% NOTE: it's important that both async and sync
+ %% requests have been queued when the flush happens
+ %% (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{sync_mode_qlen => 2,
+ %% disable drop mode
+ drop_mode_qlen => 300,
+ flush_qlen => 300,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1500,
+ Procs = 10,
+ Bursts = 10,
+                %% It sometimes happens that the handler either gets
+                %% the requests at a slow enough pace that flushing
+                %% never occurs, or gets all messages at once, so
+                %% that everything is flushed and no dropping of
+                %% sync messages gets tested. Therefore, let's
+                %% generate a number of bursts to increase the chance
+                %% of message buildup in some random fashion.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1,Bursts)],
+ Logged = count_lines(Log),
+                ok = stop_handler(?MODULE),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]),
+ true = (Logged < (NumOfReqs*Procs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+    %% As it's tricky to get the timing right in only one go, we perform the
+    %% test repeatedly, hoping that one of the attempts will succeed.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_flush(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+
+limit_burst_disabled(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => DLHConfig#{burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ NumOfReqs = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_one(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ NewHConfig =
+ HConfig#{config => DLHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ReqLimit = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_one(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_period(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ BurstTWin = 1000,
+ NewHConfig =
+ HConfig#{config => DLHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => BurstTWin,
+ drop_mode_qlen => 20000,
+ flush_qlen => 20001}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+
+ Windows = 3,
+ Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ true = (Logged > (ReqLimit*Windows)) andalso
+ (Logged < (ReqLimit*(Windows+2))),
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_period(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+kill_disabled(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config=>DLHConfig#{overload_kill_enable=>false,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>100}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file_delete(Log),
+ true = is_pid(whereis(h_proc_name())),
+ ok.
+kill_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+qlen_kill_new(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>Mem0+50000,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_disk_log_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+qlen_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+mem_kill_new(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>50000,
+ overload_kill_mem_size=>Mem0+500,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_disk_log_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+mem_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+restart_after() ->
+ [{timetrap,{minutes,2}}].
+restart_after(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig1 =
+ HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_restart_after=>infinity}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
+ MRef1 = erlang:monitor(process, whereis(h_proc_name())),
+ %% kill handler
+ send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef1, _, _, _Reason1} ->
+ file_delete(Log),
+ error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3),
+ ok
+ after
+ 5000 ->
+            Info1 = logger_disk_log_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info1]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+
+ {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig2 =
+ HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
+ Pid0 = whereis(h_proc_name()),
+ MRef2 = erlang:monitor(process, Pid0),
+ %% kill handler
+ send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef2, _, _, _Reason2} ->
+ file_delete(Log),
+ {ok,Pid1} = wait_for_process_up(RestartAfter * 3),
+ false = (Pid1 == Pid0),
+ ok
+ after
+ 5000 ->
+            Info2 = logger_disk_log_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info2]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+ ok.
+restart_after(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
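+%% Illustrative sketch, not executed by the suite: the overload kill
+%% options exercised by qlen_kill_new/1, mem_kill_new/1 and
+%% restart_after/1 above. Handler name and values are hypothetical
+%% examples.
+overload_kill_sketch() ->
+    ok = logger:update_handler_config(
+           my_handler, config,
+           #{overload_kill_enable => true,
+             %% terminate the handler if its message queue grows
+             %% beyond this many requests ...
+             overload_kill_qlen => 20000,
+             %% ... or if its memory footprint exceeds this many bytes
+             overload_kill_mem_size => 3000000,
+             %% restart the handler after this many milliseconds;
+             %% infinity means it is never restarted
+             overload_kill_restart_after => 5000}).
+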
+%% Send handler requests (filesync, info, reset, change_config)
+%% during high load to verify that syncing, dropping and flushing
+%% are handled correctly.
+handler_requests_under_load() ->
+ [{timetrap,{minutes,5}}].
+handler_requests_under_load(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => DLHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => 1000,
+ flush_qlen => 2000,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
+ {info,[]},
+ {reset,[]},
+ {change_config,[]}])
+ end),
+ Procs = 100,
+ Sent = Procs * send_burst({n,5000}, {spawn,Procs,10}, {chars,79}, notice),
+ Pid ! {self(),finish},
+ ReqResult = receive {Pid,Result} -> Result end,
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ FindError = fun(Res) ->
+ [E || E <- Res,
+ is_tuple(E) andalso (element(1,E) == error)]
+ end,
+ Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult),
+ ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
+ ok = file_delete(Log).
+handler_requests_under_load(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
+ receive
+ {From,finish} ->
+ From ! {self(),Reqs}
+ after
+ TO ->
+ Result =
+ case Req of
+ change_config ->
+ logger:update_handler_config(HName, logger_disk_log_h,
+ #{overload_kill_enable =>
+ false});
+ Func ->
+ logger_disk_log_h:Func(HName)
+ end,
+ send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}])
+ end.
+
+%%%-----------------------------------------------------------------
+%%%
+start_handler(Name, FuncName, Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, FuncName),
+ ct:pal("Logging to ~tp", [File]),
+ FullFile = lists:concat([File,".1"]),
+ _ = file_delete(FullFile),
+ ok = logger:add_handler(Name,
+ logger_disk_log_h,
+ #{config=>#{file => File,
+ max_no_files => 1,
+ max_no_bytes => 100000000},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ formatter=>{?MODULE,op}}),
+ {ok,HConfig = #{config := DLHConfig}} = logger:get_handler_config(Name),
+ {FullFile,HConfig,DLHConfig}.
+
+stop_handler(Name) ->
+ ct:pal("Stopping handler ~p!", [Name]),
+ logger:remove_handler(Name).
+
+send_burst(NorT, Type, {chars,Sz}, Class) ->
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)],
+ case NorT of
+ {n,N} ->
+ %% process_flag(priority, high),
+ send_n_burst(N, Type, Text, Class),
+ %% process_flag(priority, normal),
+ N;
+ {t,T} ->
+ ct:pal("Sending messages sequentially for ~w ms", [T]),
+ T0 = erlang:monotonic_time(millisecond),
+ send_t_burst(T0, T, Text, Class, 0)
+ end.
+
+send_n_burst(0, _, _Text, _Class) ->
+ ok;
+send_n_burst(N, seq, Text, Class) ->
+ ok = logger:Class(Text, ?domain),
+ send_n_burst(N-1, seq, Text, Class);
+send_n_burst(N, {spawn,Ps,TO}, Text, Class) ->
+ ct:pal("~w processes each sending ~w messages", [Ps,N]),
+ MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end,
+ monitor(process,spawn_link(per_proc_fun(N,Text,Class,X)))
+ end || X <- lists:seq(1,Ps)],
+ lists:foreach(fun(MRef) ->
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ ok
+ end
+ end, MRefs),
+ ct:pal("Message burst sent", []),
+ ok.
+
+send_t_burst(T0, T, Text, Class, N) ->
+ T1 = erlang:monotonic_time(millisecond),
+ if (T1-T0) > T ->
+ N;
+ true ->
+ ok = logger:Class(Text, ?domain),
+ send_t_burst(T0, T, Text, Class, N+1)
+ end.
+
+per_proc_fun(N,Text,Class,X) when X rem 2 == 0 ->
+ fun() ->
+ process_flag(priority,high),
+ send_n_burst(N, seq, Text, Class)
+ end;
+per_proc_fun(N,Text,Class,_) ->
+ fun() ->
+ send_n_burst(N, seq, Text, Class)
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Formatter callback
+%%% Using this to send the formatted string back to the test case
+%%% process - so it can check for logged events.
+format(_,bad_return) ->
+ bad_return;
+format(_,crash) ->
+ erlang:error(formatter_crashed);
+format(#{msg:={report,R},meta:=#{report_cb:=Fun}}=Log,Config) ->
+ format(Log#{msg=>Fun(R)},Config);
+format(#{msg:={string,String0}},no_nl) ->
+ String = unicode:characters_to_list(String0),
+ String;
+format(#{msg:={string,String0}},nl) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={string,String0}},op) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={report,#{label:={supervisor,progress}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={gen_server,terminate}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={proc_lib,crash}}}},op) ->
+ "";
+format(#{msg:={F,A}},OpOrPid) when is_list(F), is_list(A) ->
+ String = lists:flatten(io_lib:format(F,A)),
+ if is_pid(OpOrPid) -> OpOrPid ! {log,String};
+ true -> ok
+ end,
+ String++"\n";
+format(#{msg:={string,String0}},Pid) ->
+ String = unicode:characters_to_list(String0),
+ Pid ! {log,String},
+ String++"\n";
+format(Msg,Tag) ->
+ Error = {unexpected_format,Msg,Tag},
+ erlang:display(Error),
+ exit(Error).
+
+remove(Handler, LogName) ->
+ logger_disk_log_h:remove(Handler, LogName),
+ HState = #{log_names := Logs} = logger_disk_log_h:info(),
+ false = maps:is_key(LogName, HState),
+ false = lists:member(LogName, Logs),
+ false = logger_config:exist(?LOGGER_TABLE, LogName),
+ {error,no_such_log} = disk_log:info(LogName),
+ ok.
+
+start_and_add(Name, Config, LogOpts) ->
+ HConfig = maps:get(config, Config, #{}),
+ HConfig1 = maps:merge(HConfig, LogOpts),
+ Config1 = Config#{config=>HConfig1},
+ ct:pal("Adding handler ~w with: ~p", [Name,Config1]),
+ ok = logger:add_handler(Name, logger_disk_log_h, Config1),
+ Pid = whereis(h_proc_name(Name)),
+ true = is_pid(Pid),
+ Name = proplists:get_value(name, disk_log:info(Name)),
+ ok.
+
+remove_and_stop(Handler) ->
+ ok = logger:remove_handler(Handler),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name(Handler)),
+ ok.
+
+try_read_file(FileName, Expected, Time) ->
+ try_read_file(FileName, Expected, Time, undefined).
+
+try_read_file(FileName, Expected, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ Expected ->
+ ok;
+ Error = {error,_Reason} ->
+ erlang:error(Error);
+ SomethingElse ->
+ ct:pal("try_read_file read unexpected: ~p~n", [SomethingElse]),
+ timer:sleep(500),
+ try_read_file(FileName, Expected, Time-500, SomethingElse)
+ end;
+
+try_read_file(_, _, _, Incorrect) ->
+ ct:pal("try_read_file got incorrect pattern: ~p~n", [Incorrect]),
+ erlang:error({error,not_matching_pattern,Incorrect}).
+
+try_match_file(FileName, Pattern, Time) ->
+ try_match_file(FileName, Pattern, Time, <<>>).
+
+try_match_file(FileName, Pattern, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ {ok, Bin} ->
+ case re:run(Bin,Pattern,[{capture,none}]) of
+ match ->
+ unicode:characters_to_list(Bin);
+ _ ->
+ timer:sleep(100),
+ try_match_file(FileName, Pattern, Time-100, Bin)
+ end;
+ Error ->
+ erlang:error(Error)
+ end;
+try_match_file(_,Pattern,_,Incorrect) ->
+ ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n",
+ [Pattern,Incorrect]),
+ erlang:error({error,not_matching_pattern,Pattern,Incorrect}).
+
+count_lines(File) ->
+ wait_until_written(File),
+ count_lines1(File).
+
+wait_until_written(File) ->
+ wait_until_written(File, -1).
+
+wait_until_written(File, Sz) ->
+ timer:sleep(2000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ timer:sleep(1000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ ok;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end.
+
+count_lines1(File) ->
+ {_,Dev} = file:open(File, [read]),
+ Lines = count_lines2(Dev, 0),
+ file:close(Dev),
+ Lines.
+
+count_lines2(Dev, LC) ->
+ case file:read_line(Dev) of
+ {ok,"Handler logger_disk_log_h_SUITE " ++_} ->
+ %% Not counting handler info
+ count_lines2(Dev,LC);
+ {ok,_} ->
+ count_lines2(Dev,LC+1);
+ eof -> LC
+ end.
+
+repeat_until_ok(Fun, N) ->
+ repeat_until_ok(Fun, 0, N, undefined).
+
+repeat_until_ok(_Fun, Stop, Stop, Reason) ->
+ {fails,Reason};
+
+repeat_until_ok(Fun, C, Stop, FirstReason) ->
+ if C > 0 -> timer:sleep(5000);
+ true -> ok
+ end,
+ try Fun() of
+ Result ->
+ {ok,{C,Result}}
+ catch
+ _:Reason:Stack ->
+ ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]),
+ if FirstReason == undefined ->
+ repeat_until_ok(Fun, C+1, Stop, {Reason,Stack});
+ true ->
+ repeat_until_ok(Fun, C+1, Stop, FirstReason)
+ end
+ end.
+
+start_tracer(Trace,Expected) ->
+ Pid = self(),
+ dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
+ dbg:p(h_proc_name(),[c]),
+ tpl(Trace),
+ ok.
+
+tpl([{M,F,A}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,c),
+ case lists:keyfind(matched,1,Match) of
+ {_,_,1} ->
+ ok;
+ _ ->
+ dbg:stop_clear(),
+ throw({skip,"Can't trace "++atom_to_list(M)++":"++
+ atom_to_list(F)++"/"++integer_to_list(A)})
+ end,
+ tpl(Trace);
+tpl([]) ->
+ ok.
+
+tracer({trace,_,call,{logger_disk_log_h,handle_cast,[Op|_]},Caller},
+ {Pid,[{Mod,Func,Op}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Op},Caller);
+tracer({trace,_,call,{Mod=disk_log,Func=blog,[_,Data]},Caller}, {Pid,[{Mod,Func,Data}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Data},Caller);
+tracer({trace,_,call,{Mod,Func,_},Caller}, {Pid,[{Mod,Func}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func},Caller);
+tracer({trace,_,call,Call,Caller}, {Pid,Expected}) ->
+ ct:log("Tracer got unexpected: ~p~nCaller: ~p~nExpected: ~p~n",[Call,Caller,Expected]),
+ Pid ! {tracer_got_unexpected,Call,Expected},
+ {Pid,Expected}.
+
+maybe_tracer_done(Pid,[],Got,Caller) ->
+ ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]),
+ Pid ! tracer_done;
+maybe_tracer_done(Pid,Expected,Got,Caller) ->
+ ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]),
+ {Pid,Expected}.
+
+check_tracer(T) ->
+ receive
+ tracer_done ->
+ dbg:stop_clear(),
+ ok;
+ {tracer_got_unexpected,Got,Expected} ->
+ dbg:stop_clear(),
+ ct:fail({tracer_got_unexpected,Got,Expected})
+ after T ->
+ dbg:stop_clear(),
+ ct:fail({timeout,tracer})
+ end.
+
+escape([$+|Rest]) ->
+ [$\\,$+|escape(Rest)];
+escape([H|T]) ->
+ [H|escape(T)];
+escape([]) ->
+ [].
+
+h_proc_name() ->
+ h_proc_name(?MODULE).
+h_proc_name(Name) ->
+ list_to_atom(lists:concat([logger_disk_log_h,"_",Name])).
+
+wait_for_process_up(T) ->
+ wait_for_process_up(?MODULE, h_proc_name(), T).
+
+wait_for_process_up(Name, RegName, T) ->
+ N = (T div 500) + 1,
+ wait_for_process_up1(Name, RegName, N).
+
+wait_for_process_up1(_Name, _RegName, 0) ->
+ error;
+wait_for_process_up1(Name, RegName, N) ->
+ timer:sleep(500),
+ case whereis(RegName) of
+ Pid when is_pid(Pid) ->
+ case logger:get_handler_config(Name) of
+ {ok,_} ->
+ %% ct:pal("Process ~p up (~p tries left)",[Name,N]),
+ {ok,Pid};
+ _ ->
+ wait_for_process_up1(Name, RegName, N-1)
+ end;
+ undefined ->
+ %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]),
+ wait_for_process_up1(Name, RegName, N-1)
+ end.
+
+file_delete(Log) ->
+ file:delete(Log).
diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl
new file mode 100644
index 0000000000..e8d1a313dc
--- /dev/null
+++ b/lib/kernel/test/logger_env_var_SUITE.erl
@@ -0,0 +1,683 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_env_var_SUITE).
+
+-compile(export_all).
+
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-import(logger_test_lib,[setup/2,log/3,sync_and_read/3]).
+
+suite() ->
+ [{timetrap,{seconds,60}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+groups() ->
+ [{error_logger,[],[error_logger_tty,
+ error_logger_tty_sasl_compatible,
+ error_logger_false,
+ error_logger_false_progress,
+ error_logger_false_sasl_compatible,
+ error_logger_silent,
+ error_logger_silent_sasl_compatible,
+ error_logger_file]},
+ {logger,[],[logger_file,
+ logger_file_sasl_compatible,
+ logger_file_log_progress,
+ logger_file_no_filter,
+ logger_file_no_filter_level,
+ logger_file_formatter,
+ logger_filters,
+ logger_filters_stop,
+ logger_module_level,
+ logger_disk_log,
+ logger_disk_log_formatter,
+ logger_undefined,
+ logger_many_handlers_default_first,
+ logger_many_handlers_default_last,
+ logger_many_handlers_default_last_broken_filter
+ ]},
+ {bad,[],[bad_error_logger,
+ bad_level,
+ bad_sasl_compatibility]}].
+
+all() ->
+ [default,
+ default_sasl_compatible,
+ sasl_compatible_false,
+ sasl_compatible_false_no_progress,
+ sasl_compatible,
+ all_logger_level,
+ {group,bad},
+ {group,error_logger},
+ {group,logger}
+ ].
+
+default(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} = setup(Config,[]),
+ notice = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+default_sasl_compatible(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} =
+ setup(Config,[{logger_sasl_compatible,true}]),
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_tty(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} =
+ setup(Config,[{error_logger,tty}]),
+ notice = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_tty_sasl_compatible(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,tty},
+ {logger_sasl_compatible,true}]),
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_false(Config) ->
+ {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ notice = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_false_progress(Config) ->
+ {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ notice = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_false_sasl_compatible(Config) ->
+ {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice},
+ {logger_sasl_compatible,true}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ info = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,SimpleFilters),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_silent(Config) ->
+ {ok,#{handlers:=Hs},_Node} = setup(Config,
+ [{error_logger,silent}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ ok.
+
+error_logger_silent_sasl_compatible(Config) ->
+ {ok,#{handlers:=Hs},_Node} = setup(Config,
+ [{error_logger,silent},
+ {logger_sasl_compatible,true}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ ok.
+
+
+error_logger_file(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+ ok.
+
+
+logger_file(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+
+ notice = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
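+%% For reference (an assumption, not used by the suite): the logger
+%% environment passed to setup/2 in the tests above corresponds to
+%% kernel parameters that would normally be given in a sys.config
+%% file, for example:
+%%
+%%   [{kernel,
+%%     [{logger_level, info},
+%%      {logger,
+%%       [{handler, default, logger_std_h,
+%%         #{config => #{type => {file, "path/to/my.log"}}}}]}]}].
+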
+logger_file_sasl_compatible(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node}
+ = setup(Config,
+ [{logger_sasl_compatible,true},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+logger_file_log_progress(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 6,% progress in std logger
+ info),
+
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+logger_file_no_filter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filter_default=>log,filters=>[],
+ config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 6),% progress in std logger
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_file_no_filter_level(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],level=>error,
+ config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0,% progress in std logger
+ error),% level
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ error = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_file_formatter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,Log}}}}]}]),
+ check_single_log(Node,Log,
+ file,% dest
+ 6),% progress in std logger
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_filters(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,primary:=P},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}},
+ {filters,log,[{stop_progress,{fun logger_filters:progress/2,stop}}]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0,% progress in std logger
+ info),
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ LoggerFilters = maps:get(filters,P),
+ true = lists:keymember(stop_progress,1,LoggerFilters),
+
+ ok.
+
+logger_filters_stop(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,primary:=P},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],
+ config=>#{type=>{file,Log}}}},
+ {filters,stop,[{log_error,{fun logger_filters:level/2,{log,gt,info}}}]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0,% progress in std logger
+ info),
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ LoggerFilters = maps:get(filters,P),
+ true = lists:keymember(log_error,1,LoggerFilters),
+
+ ok.
+
+logger_module_level(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,module_levels:=ModuleLevels},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}},
+ {module_level,error,[supervisor]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 3,% progress in std logger
+ info),
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [{supervisor,error}] = ModuleLevels,
+ ok.
+
+logger_disk_log(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_disk_log_h,
+ #{config=>#{file=>Log}}}]}]),
+ check_default_log(Node,Log,
+ disk_log,% dest
+ 0),% progress in std logger
+
+ #{module:=logger_disk_log_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_disk_log_formatter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_disk_log_h,
+ #{filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{file=>Log}}}]}]),
+ check_single_log(Node,Log,
+ disk_log,% dest
+ 6),% progress in std logger
+
+ #{module:=logger_disk_log_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_undefined(Config) ->
+ {ok,#{handlers:=Hs,primary:=P},_Node} =
+ setup(Config,[{logger,[{handler,?STANDARD_HANDLER,undefined}]}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ notice = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters),
+ false = exists(sasl,Hs),
+ ok.
+
+
+%% Test that we can add multiple handlers with the default first
+logger_many_handlers_default_first(Config) ->
+ LogErr = file(Config,logger_many_handlers_default_first_error),
+ LogInfo = file(Config,logger_many_handlers_default_first_info),
+
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,LogErr}}}
+ },
+ {handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ config=>#{type=>{file,LogInfo}}}
+ }
+ ]},
+ {logger_level,info}], LogErr, LogInfo, 6).
+
+%% Test that we can add multiple handlers with the default last
+logger_many_handlers_default_last(Config) ->
+ LogErr = file(Config,logger_many_handlers_default_last_error),
+ LogInfo = file(Config,logger_many_handlers_default_last_info),
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ config=>#{type=>{file,LogInfo}}}
+ },
+ {handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,LogErr}}}
+ }
+ ]},
+ {logger_level,info}], LogErr, LogInfo, 7).
+
+%% Check that we can handle an added handler having a broken filter.
+%% This used to cause a deadlock.
+logger_many_handlers_default_last_broken_filter(Config) ->
+    LogErr = file(Config,logger_many_handlers_default_last_broken_filter_error),
+    LogInfo = file(Config,logger_many_handlers_default_last_broken_filter_info),
+
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{broken,{fun logger_filters:level/2,broken_state}},
+ {level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ config=>#{type=>{file,LogInfo}}}
+ },
+ {handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,LogErr}}}
+ }
+ ]},
+ {logger_level,info}], LogErr, LogInfo, 7).
+
+logger_many_handlers(Config, Env, LogErr, LogInfo, NumProgress) ->
+ {ok,_,Node} = setup(Config,Env),
+ check_single_log(Node,LogErr,
+ file,% dest
+ 0,% progress in std logger
+ error), % level
+ ok = rpc:call(Node,logger_std_h,filesync,[info]),
+ {ok, Bin} = file:read_file(LogInfo),
+ ct:log("Log content:~n~s",[Bin]),
+ match(Bin,<<"info:">>,NumProgress,info,info),
+ match(Bin,<<"notice:">>,1,notice,info),
+ match(Bin,<<"alert:">>,0,alert,info),
+
+ ok.
+
+sasl_compatible_false(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {logger_sasl_compatible,false},
+ {logger_level,info}]), % to get progress
+ check_default_log(Node,Log,
+ file,% dest
+ 6,% progress in std logger
+ info),
+ ok.
+
+sasl_compatible_false_no_progress(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {logger_sasl_compatible,false}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+ ok.
+
+sasl_compatible(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {sasl_compatible,true}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+ ok.
+
+all_logger_level(Config) ->
+ [all_logger_level(Config,Level) || Level <- [none,
+ emergency,
+ alert,
+ critical,
+ error,
+ warning,
+ notice,
+ info,
+ debug,
+ all]],
+ ok.
+
+all_logger_level(Config,Level) ->
+ {ok,#{primary:=#{level:=Level}},Node} = setup(Config,[{logger_level,Level}]),
+ true = test_server:stop_node(Node),
+ ok.
+
+bad_error_logger(Config) ->
+ error = setup(Config,[{error_logger,baddest}]).
+
+bad_level(Config) ->
+ error = setup(Config,[{logger_level,badlevel}]).
+
+bad_sasl_compatibility(Config) ->
+ error = setup(Config,[{logger_sasl_compatible,badcomp}]).
+
+%%%-----------------------------------------------------------------
+%%% Internal
+file(Config,Func) ->
+ filename:join(proplists:get_value(priv_dir,Config),
+ lists:concat([Func,".log"])).
+
+check_default_log(Node,Log,Dest,NumProgress) ->
+ check_default_log(Node,Log,Dest,NumProgress,notice).
+check_default_log(Node,Log,Dest,NumProgress,Level) ->
+
+ {ok,Bin1,Bin2} = check_log(Node,Log,Dest),
+
+ match(Bin1,<<"PROGRESS REPORT">>,NumProgress,info,Level),
+ match(Bin1,<<"ALERT REPORT">>,1,alert,Level),
+ match(Bin1,<<"INFO REPORT">>,0,notice,Level),
+ match(Bin1,<<"DEBUG REPORT">>,0,debug,Level),
+
+ match(Bin2,<<"INFO REPORT">>,1,notice,Level),
+ match(Bin2,<<"DEBUG REPORT">>,0,debug,Level),
+ ok.
+
+check_single_log(Node,Log,Dest,NumProgress) ->
+ check_single_log(Node,Log,Dest,NumProgress,notice).
+check_single_log(Node,Log,Dest,NumProgress,Level) ->
+
+ {ok,Bin1,Bin2} = check_log(Node,Log,Dest),
+
+ match(Bin1,<<"info:">>,NumProgress,info,Level),
+ match(Bin1,<<"alert:">>,1,alert,Level),
+ match(Bin1,<<"debug:">>,0,debug,Level),
+
+ match(Bin2,<<"info:">>,NumProgress+1,info,Level),
+ match(Bin2,<<"debug:">>,0,debug,Level),
+
+ ok.
+
+check_log(Node,Log,Dest) ->
+
+ ok = log(Node,alert,["dummy1"]),
+ ok = log(Node,debug,["dummy1"]),
+
+    %% Check that there are progress reports (supervisor and
+    %% application_controller) and an alert report (the alert call
+    %% above) in the log. There should not be any info reports yet.
+ {ok,Bin1} = sync_and_read(Node,Dest,Log),
+ ct:log("Log content:~n~s",[Bin1]),
+
+ %% Then stop sasl and see that the info report from
+ %% application_controller is there
+ ok = rpc:call(Node,application,stop,[sasl]),
+ {ok,Bin2} = sync_and_read(Node,Dest,Log),
+ ct:log("Log content:~n~s",[Bin2]),
+ {ok,Bin1,Bin2}.
+
+match(Bin,Pattern,0,_,_) ->
+ nomatch = re:run(Bin,Pattern,[{capture,none}]);
+match(Bin,Pattern,N,LogLevel,ConfLevel) ->
+ case logger:compare_levels(LogLevel,ConfLevel) of
+ lt -> match(Bin,Pattern,0,LogLevel,ConfLevel);
+ _ ->
+ {match,M} = re:run(Bin,Pattern,[{capture,all},global]),
+ N = length(M)
+ end.
+
+find(Id,Handlers) ->
+ case lists:search(fun(#{id:=Id0}) when Id0=:=Id-> true;
+ (_) -> false end,
+ Handlers) of
+ {value,Config} ->
+ Config;
+ false ->
+ false
+ end.
+
+exists(Id,Handlers) ->
+ case find(Id,Handlers) of
+ false ->
+ false;
+ _ ->
+ true
+ end.
diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl
new file mode 100644
index 0000000000..11cce8fd20
--- /dev/null
+++ b/lib/kernel/test/logger_filters_SUITE.erl
@@ -0,0 +1,227 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_filters_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+
+-define(ndlog,
+ #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{}}).
+-define(dlog(Domain),
+ #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{domain=>Domain}}).
+-define(llog(Level),
+ #{level=>Level,msg=>{"Line: ~p",[?LINE]},meta=>#{}}).
+-define(plog,
+ #{level=>info,
+ msg=>{report,#{label=>{?MODULE,progress}}},
+ meta=>#{line=>?LINE}}).
+-define(rlog(Node),
+ #{level=>info,
+ msg=>{"Line: ~p",[?LINE]},
+ meta=>#{gl=>rpc:call(Node,erlang,whereis,[user])}}).
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [domain,
+ level,
+ progress,
+ remote_gl].
+
+domain(_Config) ->
+ L1 = logger_filters:domain(L1=?dlog([]),{log,super,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,super,[]}),
+ L2 = logger_filters:domain(L2=?dlog([]),{log,sub,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,sub,[]}),
+ L3 = logger_filters:domain(L3=?dlog([]),{log,equal,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{log,not_equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{stop,not_equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{log,undefined,[]}),
+ ignore = logger_filters:domain(?dlog([]),{stop,undefined,[]}),
+
+ L4 = logger_filters:domain(L4=?dlog([a]),{log,super,[a,b]}),
+ stop = logger_filters:domain(?dlog([a]),{stop,super,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,sub,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,sub,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,equal,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,equal,[a,b]}),
+ L5 = logger_filters:domain(L5=?dlog([a]),{log,not_equal,[a,b]}),
+ stop = logger_filters:domain(?dlog([a]),{stop,not_equal,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,undefined,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,undefined,[a,b]}),
+
+ ignore = logger_filters:domain(?dlog([a,b]),{log,super,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,super,[a]}),
+ L6 = logger_filters:domain(L6=?dlog([a,b]),{log,sub,[a]}),
+ stop = logger_filters:domain(?dlog([a,b]),{stop,sub,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,equal,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,equal,[a]}),
+ L7 = logger_filters:domain(L7=?dlog([a,b]),{log,not_equal,[a]}),
+ stop = logger_filters:domain(?dlog([a,b]),{stop,not_equal,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,undefined,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,undefined,[a]}),
+
+ ignore = logger_filters:domain(?ndlog,{log,super,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,super,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,sub,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,sub,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,equal,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,equal,[a]}),
+ L8 = logger_filters:domain(L8=?ndlog,{log,not_equal,[a]}),
+ stop = logger_filters:domain(?ndlog,{stop,not_equal,[a]}),
+ L9 = logger_filters:domain(L9=?ndlog,{log,undefined,[a]}),
+ stop = logger_filters:domain(?ndlog,{stop,undefined,[a]}),
+
+ L10 = logger_filters:domain(L10=?dlog([a,b,c,d]),{log,super,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,super,[a,b,c,d]}),
+ L11 = logger_filters:domain(L11=?dlog([a,b,c,d]),{log,sub,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,sub,[a,b,c,d]}),
+ L12 = logger_filters:domain(L12=?dlog([a,b,c,d]),{log,equal,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,undefined,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,undefined,[a,b,c,d]}),
+
+ %% A domain field in meta which is not a list is allowed by the
+ %% filter, but since MatchDomain is always a list of atoms, only
+ %% Action=not_equal can ever match.
+ ignore = logger_filters:domain(?dlog(dummy),{log,super,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,super,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,sub,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,sub,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,equal,[a,b,c,d]}),
+ L13 = logger_filters:domain(L13=?dlog(dummy),{log,not_equal,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog(dummy),{stop,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,undefined,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,undefined,[a,b,c,d]}),
+
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,super,[]})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,super,bad})),
+
+ ok.
+
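+%% Exercise logger_filters:level/2 with all compare operators
+%% (lt | gt | lteq | gteq | eq | neq) for event levels below, equal to
+%% and above the configured match level.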
+level(_Config) ->
+ ignore = logger_filters:level(?llog(info),{log,lt,info}),
+ ignore = logger_filters:level(?llog(info),{stop,lt,info}),
+ ignore = logger_filters:level(?llog(info),{log,gt,info}),
+ ignore = logger_filters:level(?llog(info),{stop,gt,info}),
+ L1 = logger_filters:level(L1=?llog(info),{log,lteq,info}),
+ stop = logger_filters:level(?llog(info),{stop,lteq,info}),
+ L2 = logger_filters:level(L2=?llog(info),{log,gteq,info}),
+ stop = logger_filters:level(?llog(info),{stop,gteq,info}),
+ L3 = logger_filters:level(L3=?llog(info),{log,eq,info}),
+ stop = logger_filters:level(?llog(info),{stop,eq,info}),
+ ignore = logger_filters:level(?llog(info),{log,neq,info}),
+ ignore = logger_filters:level(?llog(info),{stop,neq,info}),
+
+ ignore = logger_filters:level(?llog(error),{log,lt,info}),
+ ignore = logger_filters:level(?llog(error),{stop,lt,info}),
+ L4 = logger_filters:level(L4=?llog(error),{log,gt,info}),
+ stop = logger_filters:level(?llog(error),{stop,gt,info}),
+ ignore = logger_filters:level(?llog(error),{log,lteq,info}),
+ ignore = logger_filters:level(?llog(error),{stop,lteq,info}),
+ L5 = logger_filters:level(L5=?llog(error),{log,gteq,info}),
+ stop = logger_filters:level(?llog(error),{stop,gteq,info}),
+ ignore = logger_filters:level(?llog(error),{log,eq,info}),
+ ignore = logger_filters:level(?llog(error),{stop,eq,info}),
+ L6 = logger_filters:level(L6=?llog(error),{log,neq,info}),
+ stop = logger_filters:level(?llog(error),{stop,neq,info}),
+
+ L7 = logger_filters:level(L7=?llog(info),{log,lt,error}),
+ stop = logger_filters:level(?llog(info),{stop,lt,error}),
+ ignore = logger_filters:level(?llog(info),{log,gt,error}),
+ ignore = logger_filters:level(?llog(info),{stop,gt,error}),
+ L8 = logger_filters:level(L8=?llog(info),{log,lteq,error}),
+ stop = logger_filters:level(?llog(info),{stop,lteq,error}),
+ ignore = logger_filters:level(?llog(info),{log,gteq,error}),
+ ignore = logger_filters:level(?llog(info),{stop,gteq,error}),
+ ignore = logger_filters:level(?llog(info),{log,eq,error}),
+ ignore = logger_filters:level(?llog(info),{stop,eq,error}),
+ L9 = logger_filters:level(L9=?llog(info),{log,neq,error}),
+ stop = logger_filters:level(?llog(info),{stop,neq,error}),
+
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),bad)),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{bad,eq,info})),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,bad,info})),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,eq,bad})),
+
+ ok.
+
+progress(_Config) ->
+ L1 = logger_filters:progress(L1=?plog,log),
+ stop = logger_filters:progress(?plog,stop),
+ ignore = logger_filters:progress(?ndlog,log),
+ ignore = logger_filters:progress(?ndlog,stop),
+
+ {error,badarg} = ?TRY(logger_filters:progress(?plog,bad)),
+
+ ok.
+
+remote_gl(_Config) ->
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ L1 = logger_filters:remote_gl(L1=?rlog(Node),log),
+ stop = logger_filters:remote_gl(?rlog(Node),stop),
+ ignore = logger_filters:remote_gl(?ndlog,log),
+ ignore = logger_filters:remote_gl(?ndlog,stop),
+
+ {error,badarg} = ?TRY(logger_filters:remote_gl(?rlog(Node),bad)),
+ ok.
+
+remote_gl(cleanup,_Config) ->
+ [test_server:stop_node(N) || N<-nodes()].
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R -> {C,R} end.
diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl
new file mode 100644
index 0000000000..8c13f0f908
--- /dev/null
+++ b/lib/kernel/test/logger_formatter_SUITE.erl
@@ -0,0 +1,886 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_formatter_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+
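+%% ?TRY(X) evaluates X and returns {Class,Reason,TopOfStacktrace} if it
+%% raises an exception; used to check that logger_formatter fails with
+%% function_clause on faulty input.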
+-define(TRY(X), my_try(fun() -> X end)).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [default,
+ legacy_header,
+ error_logger_notice_header,
+ single_line,
+ template,
+ format_msg,
+ report_cb,
+ max_size,
+ depth,
+ chars_limit,
+ format_mfa,
+ format_time,
+ level_or_msg_in_meta,
+ faulty_log,
+ faulty_config,
+ faulty_msg,
+ check_config,
+ update_config].
+
+default(_Config) ->
+ String1 = format(info,{"~p",[term]},#{},#{}),
+ ct:log(String1),
+ [_DateTime,"info:","term\n"] = string:lexemes(String1," "),
+
+ Time = timestamp(),
+ ExpectedTimestamp = default_time_format(Time),
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{}),
+ ct:log(String2),
+ " info: term\n" = string:prefix(String2,ExpectedTimestamp),
+ ok.
+
+legacy_header(_Config) ->
+ Time = timestamp(),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true,
+ single_line=>false}),
+ ct:log(String1),
+ "=INFO REPORT==== "++Rest = String1,
+ [Timestamp,"\nterm\n"] = string:lexemes(Rest," ="),
+ [D,M,Y,H,Min,S,Micro] = string:lexemes(Timestamp,"-:."),
+ true = integer(D,31),
+ true = integer(Y,2018,infinity),
+ true = integer(H,23),
+ true = integer(Min,59),
+ true = integer(S,59),
+ true = integer(Micro,999999),
+ true = lists:member(M,["Jan","Feb","Mar","Apr","May","Jun",
+ "Jul","Aug","Sep","Oct","Nov","Dec"]),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false,
+ single_line=>false}),
+ ct:log(String2),
+ ExpectedTimestamp = default_time_format(Time),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad,
+ single_line=>false}),
+ ct:log(String3),
+ String3 = String2,
+
+ String4 = format(info,{"~p",[term]},#{time=>Time},
+ #{legacy_header=>true,
+ single_line=>true}), % <---ignored
+ ct:log(String4),
+ String4 = String1,
+
+ String5 = format(info,{"~p",[term]},#{}, % <--- no time
+ #{legacy_header=>true,
+ single_line=>false}),
+ ct:log(String5),
+ "=INFO REPORT==== "++_ = String5,
+ ok.
+
+error_logger_notice_header(_Config) ->
+ Meta1 = #{error_logger=>#{tag => info_report,type => std_info}},
+ String1 = format(notice,{"~p",[term]},Meta1,
+ #{legacy_header=>true,
+ error_logger_notice_header=>notice}),
+ ct:log(String1),
+ "=NOTICE REPORT==== "++_ = String1,
+
+ String2 = format(notice,{"~p",[term]},Meta1,
+ #{legacy_header=>true,
+ error_logger_notice_header=>info}),
+ ct:log(String2),
+ "=INFO REPORT==== "++_ = String2,
+
+ String3 = format(notice,{"~p",[term]},#{},
+ #{legacy_header=>true,
+ error_logger_notice_header=>notice}),
+ ct:log(String3),
+ "=NOTICE REPORT==== "++_ = String3,
+
+ String4 = format(notice,{"~p",[term]},#{},
+ #{legacy_header=>true,
+ error_logger_notice_header=>info}),
+ ct:log(String4),
+ "=NOTICE REPORT==== "++_ = String4,
+
+ ok.
+
+single_line(_Config) ->
+ Time = timestamp(),
+ ExpectedTimestamp = default_time_format(Time),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>true}),
+ ct:log(String1),
+ " info: term\n" = string:prefix(String1,ExpectedTimestamp),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>false}),
+ ct:log(String2),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>bad}),
+
+
+ %% Test that no extra commas/spaces are added when removing
+ %% newlines, especially not after "=>" in a map association (as
+ %% was the case in OTP-21.0, when the only single_line adjustment
+ %% was done by regexp replacement of "\n" by ", ").
+ Prefix =
+ "Some characters to fill the line ------------------------------------- ",
+ String3 = format(info,{"~s~p~n~s~p~n",[Prefix,
+ lists:seq(1,10),
+ Prefix,
+ #{a=>map,with=>a,few=>associations}]},
+ #{time=>Time},
+ #{single_line=>true}),
+ ct:log(String3),
+ match = re:run(String3,"\\[1,2,3,4,5,6,7,8,9,10\\]",[{capture,none}]),
+ match = re:run(String3,
+ "#{a => map,few => accociations,with => a}",
+ [{capture,none}]),
+
+ %% This part is added to make sure that the previous test made
+ %% sense, i.e. that there would actually be newlines inside the
+ %% list and map.
+ String4 = format(info,{"~s~p~n~s~p~n",[Prefix,
+ lists:seq(1,10),
+ Prefix,
+ #{a=>map,with=>a,few=>associations}]},
+ #{time=>Time},
+ #{single_line=>false}),
+ ct:log(String4),
+ match = re:run(String4,"\\[1,2,3,\n",[global,{capture,none}]),
+ {match,Match4} = re:run(String4,"=>\n",[global,{capture,all}]),
+ 3 = length(Match4),
+
+ %% Test that big metadata fields do not get line breaks
+ String5 = format(info,"",
+ #{mymeta=>lists:seq(1,100)},
+ #{single_line=>true,template=>[mymeta,"\n"]}),
+ ct:log(String5),
+ [_] = string:lexemes(String5,"\n"),
+
+ %% Ensure that the previous test made sense, i.e. that the
+ %% metadata field does produce multiple lines if
+ %% single_line==false.
+ String6 = format(info,"",
+ #{mymeta=>lists:seq(1,100)},
+ #{single_line=>false,template=>[mymeta,"\n"]}),
+ ct:log(String6),
+ [_,_|_] = string:lexemes(String6,"\n"),
+
+ ok.
+
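+%% Check the 'template' formatter option: single metadata keys, unknown
+%% keys, literal strings, nested key paths ([key1,key2]) and
+%% conditional elements on the form {KeyOrPath,IfExists,IfNotExists}.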
+template(_Config) ->
+ Time = timestamp(),
+
+ Template1 = [msg],
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template1}),
+ ct:log(String1),
+ "term" = String1,
+
+ Template2 = [msg,unknown],
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template2}),
+ ct:log(String2),
+ "term" = String2,
+
+ Template3 = ["string"],
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template3}),
+ ct:log(String3),
+ "string" = String3,
+
+ Template4 = ["string\nnewline"],
+ String4 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template4,
+ single_line=>true}),
+ ct:log(String4),
+ "string\nnewline" = String4,
+
+ Template5 = [],
+ String5 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template5}),
+ ct:log(String5),
+ "" = String5,
+
+ Ref6 = erlang:make_ref(),
+ Meta6 = #{atom=>some_atom,
+ integer=>632,
+ list=>[list,"string",4321,#{},{tuple}],
+ mfa=>{mod,func,0},
+ pid=>self(),
+ ref=>Ref6,
+ string=>"some string",
+ time=>Time,
+ tuple=>{1,atom,"list"},
+ nested=>#{subkey=>subvalue}},
+ Template6 = lists:join(";",lists:sort(maps:keys(maps:remove(nested,Meta6))) ++
+ [[nested,subkey]]),
+ String6 = format(info,{"~p",[term]},Meta6,#{template=>Template6,
+ single_line=>true}),
+ ct:log(String6),
+ SelfStr = pid_to_list(self()),
+ RefStr6 = ref_to_list(Ref6),
+ ListStr = "[list,\"string\",4321,#{},{tuple}]",
+ ExpectedTime6 = default_time_format(Time),
+ ["some_atom",
+ "632",
+ ListStr,
+ "mod:func/0",
+ SelfStr,
+ RefStr6,
+ "some string",
+ ExpectedTime6,
+ "{1,atom,\"list\"}",
+ "subvalue"] = string:lexemes(String6,";"),
+
+ Meta7 = #{time=>Time,
+ nested=>#{key1=>#{subkey1=>value1},
+ key2=>value2}},
+ Template7 = lists:join(";",[nested,
+ [nested,key1],
+ [nested,key1,subkey1],
+ [nested,key2],
+ [nested,key2,subkey2],
+ [nested,key3],
+ [nested,key3,subkey3]]),
+ String7 = format(info,{"~p",[term]},Meta7,#{template=>Template7,
+ single_line=>true}),
+ ct:log(String7),
+ [MultipleKeysStr7,
+ "#{subkey1 => value1}",
+ "value1",
+ "value2",
+ "",
+ "",
+ ""] = string:split(String7,";",all),
+ %% Order of keys is not fixed
+ case MultipleKeysStr7 of
+ "#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok;
+ "#{key1 => #{subkey1 => value1},key2 => value2}" -> ok;
+ _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr7})
+ end,
+
+ Meta8 = #{time=>Time,
+ nested=>#{key1=>#{subkey1=>value1},
+ key2=>value2}},
+ Template8 =
+ lists:join(
+ ";",
+ [{nested,["exist:",nested],["noexist"]},
+ {[nested,key1],["exist:",[nested,key1]],["noexist"]},
+ {[nested,key1,subkey1],["exist:",[nested,key1,subkey1]],["noexist"]},
+ {[nested,key2],["exist:",[nested,key2]],["noexist"]},
+ {[nested,key2,subkey2],["exist:",[nested,key2,subkey2]],["noexist"]},
+ {[nested,key3],["exist:",[nested,key3]],["noexist"]},
+ {[nested,key3,subkey3],["exist:",[nested,key3,subkey3]],["noexist"]}]),
+ String8 = format(info,{"~p",[term]},Meta8,#{template=>Template8,
+ single_line=>true}),
+ ct:log(String8),
+ [MultipleKeysStr8,
+ "exist:#{subkey1 => value1}",
+ "exist:value1",
+ "exist:value2",
+ "noexist",
+ "noexist",
+ "noexist"] = string:split(String8,";",all),
+ %% Order of keys is not fixed
+ case MultipleKeysStr8 of
+ "exist:#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok;
+ "exist:#{key1 => #{subkey1 => value1},key2 => value2}" -> ok;
+ _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr8})
+ end,
+
+ ok.
+
+format_msg(_Config) ->
+ Template = [msg],
+
+ String1 = format(info,{"~p",[term]},#{},#{template=>Template}),
+ ct:log(String1),
+ "term" = String1,
+
+ String2 = format(info,{"list",[term]},#{},#{template=>Template}),
+ ct:log(String2),
+ "FORMAT ERROR: \"list\" - [term]" = String2,
+
+ String3 = format(info,{report,term},#{},#{template=>Template}),
+ ct:log(String3),
+ "term" = String3,
+
+ String4 = format(info,{report,term},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String4),
+ "formatted" = String4,
+
+ String5 = format(info,{report,term},
+ #{report_cb=>fun(_)-> faulty_return end},
+ #{template=>Template}),
+ ct:log(String5),
+ "REPORT_CB/1 ERROR: term; Returned: faulty_return" = String5,
+
+ String6 = format(info,{report,term},
+ #{report_cb=>fun(_)-> erlang:error(fun_crashed) end},
+ #{template=>Template}),
+ ct:log(String6),
+ "REPORT_CB/1 CRASH: term; Reason: {error,fun_crashed,"++_ = String6,
+
+ String7 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> ['not',a,string] end},
+ #{template=>Template}),
+ ct:log(String7),
+ "REPORT_CB/2 ERROR: term; Returned: ['not',a,string]" = String7,
+
+ String8 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> faulty_return end},
+ #{template=>Template}),
+ ct:log(String8),
+ "REPORT_CB/2 ERROR: term; Returned: faulty_return" = String8,
+
+ String9 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> erlang:error(fun_crashed) end},
+ #{template=>Template}),
+ ct:log(String9),
+ "REPORT_CB/2 CRASH: term; Reason: {error,fun_crashed,"++_ = String9,
+
+ %% strings are not formatted
+ String10 = format(info,{string,"string"},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String10),
+ "string" = String10,
+
+ String11 = format(info,{string,['not',printable,list]},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log("~ts",[String11]), % avoiding ct_log crash
+ "FORMAT ERROR: \"~ts\" - [['not',printable,list]]" = String11,
+
+ String12 = format(info,{string,"string"},#{},#{template=>Template}),
+ ct:log(String12),
+ "string" = String12,
+
+ ok.
+
+report_cb(_Config) ->
+ Template = [msg],
+ MetaFun = fun(_) -> {"meta_rcb",[]} end,
+ ConfigFun = fun(_) -> {"config_rcb",[]} end,
+ "term" = format(info,{report,term},#{},#{template=>Template}),
+ "meta_rcb" =
+ format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template}),
+ "config_rcb" =
+ format(info,{report,term},#{},#{template=>Template,
+ report_cb=>ConfigFun}),
+ "config_rcb" =
+ format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template,
+ report_cb=>ConfigFun}),
+ ok.
+
+max_size(_Config) ->
+ Cfg = #{template=>[msg],
+ single_line=>false},
+ "12345678901234567890" =
+ format(info,{"12345678901234567890",[]},#{},Cfg),
+ %% application:set_env(kernel,logger_max_size,11),
+ %% "12345678901234567890" = % min value is 50, so this is not limited
+ %% format(info,{"12345678901234567890",[]},#{},Cfg),
+ %% "12345678901234567890123456789012345678901234567..." = % 50
+ %% format(info,
+ %% {"123456789012345678901234567890123456789012345678901234567890",
+ %% []},
+ %% #{},
+ %% Cfg),
+ %% application:set_env(kernel,logger_max_size,53),
+ %% "12345678901234567890123456789012345678901234567890..." = %53
+ %% format(info,
+ %% {"123456789012345678901234567890123456789012345678901234567890",
+ %% []},
+ %% #{},
+ %% Cfg),
+ "123456789012..." =
+ format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>15}),
+ "12345678901234567890" =
+ format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>unlimited}),
+ %% Check that one newline at the end of the line is kept (if it exists)
+ "12345678901...\n" =
+ format(info,{"12345678901234567890\n",[]},#{},Cfg#{max_size=>15}),
+ "12345678901...\n" =
+ format(info,{"12345678901234567890",[]},#{},Cfg#{template=>[msg,"\n"],
+ max_size=>15}),
+ ok.
+max_size(cleanup,_Config) ->
+ application:unset_env(kernel,logger_max_size),
+ ok.
+
+depth(_Config) ->
+ Template = [msg],
+ "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ application:set_env(kernel,error_logger_format_depth,11),
+ "[1,2,3,4,5,6,7,8,9,0|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ "[1,2,3,4,5,6,7,8,9,0,1,2|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template,
+ depth=>13}),
+ "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template,
+ depth=>unlimited}),
+ ok.
+depth(cleanup,_Config) ->
+ application:unset_env(kernel,error_logger_format_depth),
+ ok.
+
+chars_limit(_Config) ->
+ FA = {"LoL: ~p~nL: ~p~nMap: ~p~n",
+ [lists:duplicate(10,lists:seq(1,100)),
+ lists:seq(1,100),
+ maps:from_list(lists:zip(lists:seq(1,100),
+ lists:duplicate(100,value)))]},
+ Meta = #{time=>timestamp()},
+ Template = [time," - ", msg, "\n"],
+ FC = #{template=>Template,
+ depth=>unlimited,
+ max_size=>unlimited,
+ chars_limit=>unlimited,
+ single_line=>true},
+ CL1 = 80,
+ String1 = format(info,FA,Meta,FC#{chars_limit=>CL1}),
+ L1 = string:length(String1),
+ ct:log("String1: ~p~nLength1: ~p~n",[lists:flatten(String1),L1]),
+ true = L1 > CL1,
+ true = L1 < CL1 + 15,
+
+ String2 = format(info,FA,Meta,FC#{chars_limit=>CL1,depth=>10}),
+ L2 = string:length(String2),
+ ct:log("String2: ~p~nLength2: ~p~n",[lists:flatten(String2),L2]),
+ String2 = String1,
+
+ CL3 = 200,
+ String3 = format(info,FA,Meta,FC#{chars_limit=>CL3}),
+ L3 = string:length(String3),
+ ct:log("String3: ~p~nLength3: ~p~n",[lists:flatten(String3),L3]),
+ true = L3 > CL3,
+ true = L3 < CL3 + 15,
+
+ String4 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10}),
+ L4 = string:length(String4),
+ ct:log("String4: ~p~nLength4: ~p~n",[lists:flatten(String4),L4]),
+ true = L4 > CL3,
+ true = L4 < CL3 + 15,
+
+ %% Test that max_size truncates the string which is limited by
+ %% depth and chars_limit
+ MS5 = 150,
+ String5 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10,max_size=>MS5}),
+ L5 = string:length(String5),
+ ct:log("String5: ~p~nLength5: ~p~n",[String5,L5]),
+ L5 = MS5,
+ true = lists:prefix(lists:sublist(String5,L5-4),String4),
+
+ %% Test that chars_limit also limits plain strings
+ Str = "123456789012345678901234567890123456789012345678901234567890123456789",
+ CL6 = 80,
+ String6 = format(info,{string,Str},Meta,FC#{chars_limit=>CL6}),
+ L6 = string:length(String6),
+ ct:log("String6: ~p~nLength6: ~p~n",[String6,L6]),
+ L6 = CL6,
+
+ ok.
+
+format_mfa(_Config) ->
+ Template = [mfa],
+
+ Meta1 = #{mfa=>{mod,func,0}},
+ String1 = format(info,{"~p",[term]},Meta1,#{template=>Template}),
+ ct:log(String1),
+ "mod:func/0" = String1,
+
+ Meta2 = #{mfa=>{mod,func,[]}},
+ String2 = format(info,{"~p",[term]},Meta2,#{template=>Template}),
+ ct:log(String2),
+ "mod:func/0" = String2,
+
+ Meta3 = #{mfa=>"mod:func/0"},
+ String3 = format(info,{"~p",[term]},Meta3,#{template=>Template}),
+ ct:log(String3),
+ "mod:func/0" = String3,
+
+ Meta4 = #{mfa=>othermfa},
+ String4 = format(info,{"~p",[term]},Meta4,#{template=>Template}),
+ ct:log(String4),
+ "othermfa" = String4,
+
+ ok.
+
+format_time(_Config) ->
+ Time = timestamp(),
+ Meta = #{time=>Time},
+ FC = #{template=>[time]},
+ Msg = {string,""},
+ ExpectedLocal = default_time_format(Time,false),
+ ExpectedUtc = default_time_format(Time,true),
+
+ %% default - local time
+ ExpectedLocal = format(info,Msg,Meta,FC),
+
+ %% time_offset config parameter to formatter
+ ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}),
+ ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}),
+
+ %% stdlib utc_log works when time_offset parameter is not set
+ application:set_env(stdlib,utc_log,true),
+ ExpectedUtc = format(info,Msg,Meta,FC),
+
+ %% sasl utc_log overwrites stdlib utc_log
+ application:set_env(sasl,utc_log,false),
+ ExpectedLocal = format(info,Msg,Meta,FC),
+
+ %% sasl utc_log overwrites stdlib utc_log
+ application:set_env(sasl,utc_log,true),
+ application:set_env(stdlib,utc_log,false),
+ ExpectedUtc = format(info,Msg,Meta,FC),
+
+ %% time_offset config parameter to formatter
+ %% overwrites sasl and stdlib utc_log
+ application:set_env(sasl,utc_log,false),
+ ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}),
+
+ %% time_offset config parameter to formatter
+ %% overwrites sasl and stdlib utc_log
+ application:set_env(sasl,utc_log,true),
+ application:set_env(stdlib,utc_log,true),
+ ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}),
+
+ %% time_designator config parameter to formatter
+ ExpectedLocalS = default_time_format(Time,false,$\s),
+ ExpectedUtcS = default_time_format(Time,true,$\s),
+
+ ExpectedLocalS = format(info,Msg,Meta,FC#{time_offset=>"",
+ time_designator=>$\s}),
+ ExpectedUtcS = format(info,Msg,Meta,FC#{time_offset=>"Z",
+ time_designator=>$\s}),
+
+ ok.
+
+format_time(cleanup,_Config) ->
+ application:unset_env(sasl,utc_log),
+ application:unset_env(stdlib,utc_log),
+ ok.
+
+level_or_msg_in_meta(_Config) ->
+ %% The template contains atoms to pick out values from meta, or
+ %% level/msg to add these from the log event. What if you have a
+ %% key named 'level' or 'msg' in meta and want to display its
+ %% value?
+ %% For now we simply ignore such keys in meta and display the
+ %% actual level and msg from the log event.
+
+ Meta = #{level=>mylevel,
+ msg=>"metamsg"},
+ Template = [level,";",msg],
+ String = format(info,{"~p",[term]},Meta,#{template=>Template}),
+ ct:log(String),
+ "info;term" = String, % so mylevel and "metamsg" are ignored
+
+ ok.
+
+faulty_log(_Config) ->
+ %% Unexpected log (should be type logger:log_event()) - print error
+ {error,
+ function_clause,
+ {logger_formatter,format,[_,_],_}} =
+ ?TRY(logger_formatter:format(unexp_log,#{})),
+ ok.
+
+faulty_config(_Config) ->
+ {error,
+ function_clause,
+ {logger_formatter,format,[_,_],_}} =
+ ?TRY(logger_formatter:format(#{level=>info,
+ msg=>{"~p",[term]},
+ meta=>#{time=>timestamp()}},
+ unexp_config)),
+ ok.
+
+faulty_msg(_Config) ->
+ {error,
+ function_clause,
+ {logger_formatter,_,_,_}} =
+ ?TRY(logger_formatter:format(#{level=>info,
+ msg=>term,
+ meta=>#{time=>timestamp()}},
+ #{})),
+ ok.
+
+-define(cfgerr(X), {error,{invalid_formatter_config,logger_formatter,X}}).
+check_config(_Config) ->
+ ok = logger_formatter:check_config(#{}),
+ ?cfgerr(bad) = logger_formatter:check_config(bad),
+
+ C1 = #{chars_limit => 1,
+ depth => 1,
+ legacy_header => true,
+ error_logger_notice_header => info,
+ max_size => 1,
+ report_cb => fun(R) -> {"~p",[R]} end,
+ single_line => false,
+ template => [],
+ time_designator => $T,
+ time_offset => 0},
+ ok = logger_formatter:check_config(C1),
+
+ ok = logger_formatter:check_config(#{chars_limit => unlimited}),
+ ?cfgerr({chars_limit,bad}) =
+ logger_formatter:check_config(#{chars_limit => bad}),
+
+ ok = logger_formatter:check_config(#{depth => unlimited}),
+ ?cfgerr({depth,bad}) =
+ logger_formatter:check_config(#{depth => bad}),
+
+ ok = logger_formatter:check_config(#{legacy_header => false}),
+ ?cfgerr({legacy_header,bad}) =
+ logger_formatter:check_config(#{legacy_header => bad}),
+
+ ok = logger_formatter:check_config(#{error_logger_notice_header => notice}),
+ ?cfgerr({error_logger_notice_header,bad}) =
+ logger_formatter:check_config(#{error_logger_notice_header => bad}),
+
+ ok = logger_formatter:check_config(#{max_size => unlimited}),
+ ?cfgerr({max_size,bad}) =
+ logger_formatter:check_config(#{max_size => bad}),
+
+ ok =
+ logger_formatter:check_config(#{report_cb => fun(_,_) -> "" end}),
+ ?cfgerr({report_cb,F}) =
+ logger_formatter:check_config(#{report_cb => F=fun(_,_,_) -> {"",[]} end}),
+ ?cfgerr({report_cb,bad}) =
+ logger_formatter:check_config(#{report_cb => bad}),
+
+ ok = logger_formatter:check_config(#{single_line => true}),
+ ?cfgerr({single_line,bad}) =
+ logger_formatter:check_config(#{single_line => bad}),
+
+ Ts = [[key],
+ [[key1,key2]],
+ [{key,[key],[]}],
+ [{[key1,key2],[[key1,key2]],["noexist"]}],
+ ["string"]],
+ [begin
+ ct:log("check template: ~p",[T]),
+ ok = logger_formatter:check_config(#{template => T})
+ end
+ || T <- Ts],
+
+ ETs = [bad,
+ [{key,bad}],
+ [{key,[key],bad}],
+ [{key,[key],"bad"}],
+ "bad",
+ [[key,$a,$b,$c]],
+ [[$a,$b,$c,key]]],
+ [begin
+ ct:log("check template: ~p",[T]),
+ {error,{invalid_formatter_template,logger_formatter,T}} =
+ logger_formatter:check_config(#{template => T})
+ end
+ || T <- ETs],
+
+ ?cfgerr({time_designator,bad}) =
+ logger_formatter:check_config(#{time_designator => bad}),
+ ?cfgerr({time_designator,"b"}) =
+ logger_formatter:check_config(#{time_designator => "b"}),
+
+ ok = logger_formatter:check_config(#{time_offset => -1}),
+ ok = logger_formatter:check_config(#{time_offset => "+02:00"}),
+ ok = logger_formatter:check_config(#{time_offset => "-23:59"}),
+ ok = logger_formatter:check_config(#{time_offset => "+24:00"}),
+ ok = logger_formatter:check_config(#{time_offset => "-25:00"}),
+ ?cfgerr({time_offset,bad}) =
+ logger_formatter:check_config(#{time_offset => bad}),
+ ?cfgerr({time_offset,"02:00"}) =
+ logger_formatter:check_config(#{time_offset => "02:00"}),
+ ?cfgerr({time_offset,"+02"}) =
+ logger_formatter:check_config(#{time_offset => "+02"}),
+
+ ok.
+
+%% Test that formatter config can be changed, and that the default
+%% template is updated accordingly
+update_config(_Config) ->
+ {error,{not_found,?MODULE}} = logger:update_formatter_config(?MODULE,#{}),
+
+ logger:add_handler_filter(default,silence,{fun(_,_) -> stop end,ok}),
+ ok = logger:add_handler(?MODULE,?MODULE,#{}),
+ D = lists:seq(1,1000),
+ logger:notice("~p~n",[D]),
+ {Lines1,C1} = check_log(),
+ [ct:log(L) || L <- Lines1],
+ ct:log("~p",[C1]),
+ [Line1] = Lines1,
+ [_Time,"notice: "++D1] = string:split(Line1," "),
+ true = length(D1)>3000,
+ true = #{}==C1,
+
+ ok = logger:update_formatter_config(?MODULE,single_line,false),
+ logger:notice("~p~n",[D]),
+ {Lines2,C2} = check_log(),
+ [ct:log(L) || L <- Lines2],
+ ct:log("~p",[C2]),
+ true = length(Lines2)>50,
+ true = #{single_line=>false}==C2,
+
+ ok = logger:update_formatter_config(?MODULE,#{legacy_header=>true}),
+ logger:notice("~p~n",[D]),
+ {Lines3,C3} = check_log(),
+ [ct:log(L) || L <- Lines3],
+ ct:log("~p",[C3]),
+ ["=NOTICE REPORT==== "++_|D3] = Lines3,
+ true = length(D3)>50,
+ true = #{legacy_header=>true,single_line=>false}==C3,
+
+ ok = logger:update_formatter_config(?MODULE,single_line,true),
+ logger:notice("~p~n",[D]),
+ {Lines4,C4} = check_log(),
+ [ct:log(L) || L <- Lines4],
+ ct:log("~p",[C4]),
+ ["=NOTICE REPORT==== "++_,D4] = Lines4,
+ true = length(D4)>3000,
+ true = #{legacy_header=>true,single_line=>true}==C4,
+
+ %% Finally, check that error_logger_notice_header works; the default is info
+ error_logger:info_msg("~p",[D]),
+ {Lines5,C5} = check_log(),
+ [ct:log(L) || L <- Lines5],
+ ct:log("~p",[C5]),
+ ["=INFO REPORT==== "++_,_D5] = Lines5,
+
+ ok=logger:update_formatter_config(?MODULE,error_logger_notice_header,notice),
+ error_logger:info_msg("~p",[D]),
+ {Lines6,C6} = check_log(),
+ [ct:log(L) || L <- Lines6],
+ ct:log("~p",[C6]),
+ ["=NOTICE REPORT==== "++_,_D6] = Lines6,
+
+ {error,{invalid_formatter_config,bad}} =
+ logger:update_formatter_config(?MODULE,bad),
+ {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} =
+ logger:update_formatter_config(?MODULE,depth,bad),
+
+ ok.
+
+update_config(cleanup,_Config) ->
+ _ = logger:remove_handler(?MODULE),
+ _ = logger:remove_handler_filter(default,silence),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
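+%% format/4 builds a log event from Level, Msg and Meta (adding a
+%% timestamp if none is given) and returns the output of
+%% logger_formatter:format/2 as a flat string.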
+format(Level,Msg,Meta,Config) ->
+ format(#{level=>Level,msg=>Msg,meta=>add_time(Meta)},Config).
+
+format(Log,Config) ->
+ lists:flatten(logger_formatter:format(Log,Config)).
+
+default_time_format(Timestamp) ->
+ default_time_format(Timestamp,false).
+
+default_time_format(Timestamp,Utc) ->
+ default_time_format(Timestamp,Utc,$T).
+
+default_time_format(Timestamp,Utc,Sep) ->
+ Offset = if Utc -> "Z";
+ true -> ""
+ end,
+ calendar:system_time_to_rfc3339(Timestamp,[{unit,microsecond},
+ {time_designator,Sep},
+ {offset,Offset}]).
+
+integer(Str) ->
+ is_integer(list_to_integer(Str)).
+integer(Str,Max) ->
+ integer(Str,0,Max).
+integer(Str,Min,Max) ->
+ Int = list_to_integer(Str),
+ Int >= Min andalso Int =<Max.
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R:S -> {C,R,hd(S)} end.
+
+timestamp() ->
+ erlang:system_time(microsecond).
+
+%% Add a timestamp to the metadata unless the test case already set one.
+add_time(#{time:=_}=Meta) ->
+ Meta;
+add_time(Meta) ->
+ Meta#{time=>timestamp()}.
+
+%%%-----------------------------------------------------------------
+%%% handler callback
+log(Log,#{formatter:={M,C}}) ->
+ put(log,{M:format(Log,C),C}),
+ ok.
+
+check_log() ->
+ {S,C} = erase(log),
+ {string:lexemes(S,"\n"),C}.
diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl
new file mode 100644
index 0000000000..c3cab07d81
--- /dev/null
+++ b/lib/kernel/test/logger_legacy_SUITE.erl
@@ -0,0 +1,288 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_legacy_SUITE).
+
+-compile(export_all).
+-compile({nowarn_deprecated_function,[{gen_fsm,start,3},
+ {gen_fsm,send_all_state_event,2}]}).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+%%%-----------------------------------------------------------------
+%%% This test suite tests that log events from within OTP can be
+%%% delivered to legacy error_logger event handlers in the same format
+%%% as before 'logger' was introduced.
+%%%
+%%% Before changing the expected format of any of the log events in
+%%% this suite, please make sure that the backwards incompatibility it
+%%% introduces is acceptable.
+%%% -----------------------------------------------------------------
+
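+%% ?check(Expected) waits for Expected to arrive from the error_logger
+%% event handler below and then asserts that no more messages are
+%% queued; ?check_no_flush(Expected) only waits for Expected, leaving
+%% any remaining messages for subsequent checks.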
+-define(check(Expected),
+ receive Expected ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+-define(check_no_flush(Expected),
+ receive Expected ->
+ ok
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ logger:add_handler(error_logger,error_logger,
+ #{level=>info,filter_default=>stop}),
+ Config.
+
+end_per_suite(_Config) ->
+ logger:remove_handler(error_logger),
+ ok.
+
+init_per_group(std, Config) ->
+ ok = logger:set_handler_config(
+ error_logger,filters,
+ [{domain,{fun logger_filters:domain/2,{log,super,[otp]}}}]),
+ Config;
+init_per_group(sasl, Config) ->
+ %% Since the default level is notice, and progress reports are info,
+ %% we need to raise the primary logger level to info in order to
+ %% receive them.
+ ok = logger:set_primary_config(level,info),
+ ok = logger:set_handler_config(
+ error_logger,filters,
+ [{domain,{fun logger_filters:domain/2,{log,super,[otp,sasl]}}}]),
+
+ %% cth_log_redirect checks whether sasl is started before displaying
+ %% any sasl reports - so start sasl here just to see the real sasl
+ %% reports in the tc log:
+ {ok,Apps} = application:ensure_all_started(sasl),
+ [{stop_apps,Apps}|Config];
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(sasl, Config) ->
+ Apps = ?config(stop_apps,Config),
+ [application:stop(App) || App <- Apps],
+ ok = logger:set_primary_config(level,notice),
+ ok;
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ error_logger:add_report_handler(?MODULE,{event_handler,self()}),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ %% Using gen_event directly here, instead of
+ %% error_logger:delete_report_handler. This is to avoid
+ %% automatically stopping the error_logger process due to removing
+ %% the last handler.
+ gen_event:delete_handler(error_logger,?MODULE,[]),
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [{std,[],[gen_server,
+ gen_event,
+ gen_fsm,
+ gen_statem]},
+ {sasl,[],[sasl_reports,
+ supervisor_handle_info]}].
+
+all() ->
+ [{group,std},
+ {group,sasl}].
+
+gen_server(_Config) ->
+ {ok,Pid} = gen_server:start(?MODULE,gen_server,[]),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}),
+ ok = gen_server:cast(Pid,Msg),
+ ?check({error,"** Generic server ~tp terminating"++_,
+ [Pid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}).
+
+gen_event(_Config) ->
+ {ok,Pid} = gen_event:start(),
+ ok = gen_event:add_handler(Pid,?MODULE,gen_event),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~tp"++_,[?MODULE,Msg]}),
+ gen_event:notify(Pid,Msg),
+ ?check({error,"** gen_event handler ~p crashed."++_,
+ [?MODULE,Pid,Msg,gen_event,{{badmatch,b},_}]}).
+
+gen_fsm(_Config) ->
+ {ok,Pid} = gen_fsm:start(?MODULE,gen_fsm,[]),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}),
+ gen_fsm:send_all_state_event(Pid,Msg),
+ ?check({error,"** State machine ~tp terminating"++_,
+ [Pid,Msg,mystate,gen_fsm,{{badmatch,b},_}]}).
+
+gen_statem(_Config) ->
+ {ok,Pid} = gen_statem:start(?MODULE,gen_statem,[]),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({error,"** State machine ~tp terminating"++_,
+ [Pid,{info,Msg},{mystate,gen_statem},error,{badmatch,b}|_]}).
+
+sasl_reports(Config) ->
+ App = {application,?MODULE,[{description, ""},
+ {vsn, "1.0"},
+ {modules, [?MODULE]},
+ {registered, []},
+ {applications, []},
+ {mod, {?MODULE, []}}]},
+ AppStr = io_lib:format("~p.",[App]),
+ Dir = ?config(priv_dir,Config),
+ AppFile = filename:join(Dir,?MODULE_STRING++".app"),
+ ok = file:write_file(AppFile,AppStr),
+ true = code:add_patha(Dir),
+ ok = application:start(?MODULE),
+ SupName = sup_name(),
+ Pid = whereis(SupName),
+ [{ch,ChPid,_,_}] = supervisor:which_children(Pid),
+ Node = node(),
+ ?check_no_flush({info_report,progress,[{application,?MODULE},
+ {started_at,Node}]}),
+ ?check({info_report,progress,[{supervisor,{local,SupName}},
+ {started,[{pid,ChPid}|_]}]}),
+ ok = gen_server:cast(ChPid, fun() ->
+ spawn_link(fun() -> receive x->ok end end)
+ end),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ ok = gen_server:cast(ChPid,Msg),
+ ?check_no_flush({error,"** Generic server ~tp terminating"++_,
+ [ChPid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}),
+ ?check_no_flush({error_report,crash_report,
+ [[{initial_call,_},
+ {pid,ChPid},
+ {registered_name,[]},
+ {error_info,{error,{badmatch,b},_}},
+ {ancestors,_},
+ {message_queue_len,_},
+ {messages,_},
+ {links,[Pid,Neighbour]},
+ {dictionary,_},
+ {trap_exit,_},
+ {status,_},
+ {heap_size,_},
+ {stack_size,_},
+ {reductions,_}],
+ [{neighbour,[{pid,Neighbour},
+ {registered_name,_},
+ {initial_call,_},
+ {current_function,_},
+ {ancestors,_},
+ {message_queue_len,_},
+ {links,[ChPid]},
+ {trap_exit,_},
+ {status,_},
+ {heap_size,_},
+ {stack_size,_},
+ {reductions,_},
+ {current_stacktrace,_}]}]]}),
+ ?check_no_flush({error_report,supervisor_report,
+ [{supervisor,{local,SupName}},
+ {errorContext,child_terminated},
+ {reason,{{badmatch,b},_}},
+ {offender,[{pid,ChPid}|_]}]}),
+ ?check({info_report,progress,[{supervisor,{local,SupName}},
+ {started,_}]}),
+
+ ok = application:stop(?MODULE),
+ ?check({info_report,std_info,[{application,?MODULE},
+ {exited,stopped},
+ {type,temporary}]}).
+
+sasl_reports(cleanup,_Config) ->
+ application:stop(?MODULE).
+
+supervisor_handle_info(_Config) ->
+ {ok,Pid} = supervisor:start_link({local,sup_name()},?MODULE,supervisor),
+ ?check({info_report,progress,[{supervisor,_},{started,_}]}),
+ Pid ! msg,
+ ?check({error,"Supervisor received unexpected message: ~tp~n",[msg]}).
+
+supervisor_handle_info(cleanup,_Config) ->
+ Pid = whereis(sup_name()),
+ unlink(Pid),
+ exit(Pid,shutdown).
+
+%%%-----------------------------------------------------------------
+%%% Callbacks for error_logger event handler, gen_server, gen_statem,
+%%% gen_fsm, gen_event, supervisor and application.
+start(_,_) ->
+ supervisor:start_link({local,sup_name()},?MODULE,supervisor).
+
+init(supervisor) ->
+ {ok,{#{},[#{id=>ch,start=>{gen_server,start_link,[?MODULE,gen_server,[]]}}]}};
+init(StateMachine) when StateMachine==gen_statem; StateMachine==gen_fsm ->
+ {ok,mystate,StateMachine};
+init(State) ->
+ {ok,State}.
+
+%% error_logger event handler
+handle_event({Tag,_Gl,{_Pid,Type,Report}},{_,Pid}=State) ->
+ Pid ! {Tag,Type,Report},
+ {ok,State};
+%% other gen_event
+handle_event(Fun,State) when is_function(Fun) ->
+ Fun(),
+ {next_state,State}.
+
+%% gen_fsm
+handle_event(Fun,State,Data) when is_function(Fun) ->
+ Fun(),
+ {next_state,State,Data}.
+
+%% gen_statem
+handle_event(info,Fun,State,Data) when is_function(Fun) ->
+ Fun(),
+ {next_state,State,Data}.
+
+%% gen_server
+handle_cast(Fun,State) when is_function(Fun) ->
+ Fun(),
+ {noreply,State}.
+
+%% gen_statem
+callback_mode() ->
+ handle_event_function.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+sup_name() ->
+ list_to_atom(?MODULE_STRING++"_sup").
diff --git a/lib/kernel/test/logger_simple_h_SUITE.erl b/lib/kernel/test/logger_simple_h_SUITE.erl
new file mode 100644
index 0000000000..e0ad792bdb
--- /dev/null
+++ b/lib/kernel/test/logger_simple_h_SUITE.erl
@@ -0,0 +1,209 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_simple_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-import(logger_test_lib, [setup/2, log/3, sync_and_read/3]).
+
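+%% ?check(Expected) waits for a {log,Expected} message and then asserts
+%% that the message queue is empty; ?check_no_log asserts that nothing
+%% is queued. ?str, ?map_rep and ?keyval_rep build a log string, a map
+%% report and a key-value list report for the current test case.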
+-define(check_no_log,[] = test_server:messages_get()).
+-define(check(Expected),
+ receive {log,Expected} ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {expected,Expected},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks, [logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Hs0 = logger:get_handler_config(),
+ Hs = lists:keydelete(cth_log_redirect,1,Hs0),
+ [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs],
+ Env = [{App,Key,application:get_env(App,Key)} ||
+ {App,Key} <- [{kernel,logger_level}]],
+ [{env,Env},{logger,Hs}|Config].
+
+end_per_suite(Config) ->
+ [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)],
+ Hs = ?config(logger,Config),
+ [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs],
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop,
+ replace_default,
+ replace_file,
+ replace_disk_log
+ ].
+
+start_stop(_Config) ->
+ undefined = whereis(logger_simple_h),
+ register(logger_simple_h,self()),
+ {error,_} = logger:add_handler(simple,
+ logger_simple_h,
+ #{filter_default=>log}),
+ unregister(logger_simple_h),
+ ok = logger:add_handler(simple,logger_simple_h,#{filter_default=>log}),
+ Pid = whereis(logger_simple_h),
+ true = is_pid(Pid),
+ ok = logger:remove_handler(simple),
+ false = is_pid(whereis(logger_simple_h)),
+ ok.
+start_stop(cleanup,_Config) ->
+ logger:remove_handler(simple).
+
+%% This test case only checks that nothing crashes; the default handler
+%% prints to stdout, which we cannot read from in a detached slave.
+replace_default(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [?str]),
+ log(Node, alert, [?str,[]]),
+ log(Node, error, [?map_rep]),
+ log(Node, info, [?keyval_rep]),
+ log(Node, info, [?keyval_rep++[not_key_val]]),
+ rpc:call(Node, error_logger, error_report, [some_type,?map_rep]),
+ rpc:call(Node, error_logger, warning_report, ["some_type",?map_rep]),
+ log(Node, critical, [?str,[?keyval_rep]]),
+ log(Node, notice, [["fake",string,"line:",?LINE]]),
+
+ ok = rpc:call(Node, logger, add_handlers, [kernel]),
+
+ ok.
+
+replace_file(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [M1=?str]),
+ log(Node, alert, [M2=?str,[]]),
+ log(Node, error, [?map_rep]),
+ log(Node, warning, [?keyval_rep]),
+ log(Node, warning, [?keyval_rep++[not_key_val]]),
+ log(Node, critical, [?str,[?keyval_rep]]),
+ log(Node, notice, [["fake",string,"line:",?LINE]]),
+
+ File = filename:join(proplists:get_value(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+
+ ok = rpc:call(Node, logger, add_handlers,
+ [[{handler, default, logger_std_h,
+ #{ config => #{ type => {file, File} },
+ formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]),
+
+ {ok,Bin} = sync_and_read(Node, file, File),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ _,
+ "=CRITICAL REPORT===="++_,
+ _,
+ _,
+ "=NOTICE REPORT===="++_,
+ _
+ ] = Lines,
+ ok.
+
+replace_disk_log(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [M1=?str]),
+ log(Node, alert, [M2=?str,[]]),
+ log(Node, error, [?map_rep]),
+ log(Node, warning, [?keyval_rep]),
+ log(Node, warning, [?keyval_rep++[not_key_val]]),
+ log(Node, critical, [?str,[?keyval_rep]]),
+ log(Node, notice, [["fake",string,"line:",?LINE]]),
+
+ File = filename:join(proplists:get_value(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+
+ ok = rpc:call(Node, logger, add_handlers,
+ [[{handler, default, logger_disk_log_h,
+ #{ config => #{ file => File },
+ formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]),
+ {ok,Bin} = sync_and_read(Node, disk_log, File),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ _,
+ "=CRITICAL REPORT===="++_,
+ _,
+ _,
+ "=NOTICE REPORT===="++_,
+ _
+ ] = Lines,
+ ok.
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
new file mode 100644
index 0000000000..b6a09f4980
--- /dev/null
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -0,0 +1,1718 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_std_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("kernel/include/file.hrl").
+
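+%% ?check(Expected) waits for a {log,Expected} message and then asserts
+%% that the message queue is empty; ?check_no_log asserts that nothing
+%% is queued. ?msg and ?bin build the logged text and its expected file
+%% content, and ?domain sets the event's domain metadata to [?MODULE]
+%% so that it matches the ?DEFAULT_HANDLER_FILTERS([?MODULE]) filters
+%% installed on the test handlers.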
+-define(check_no_log, [] = test_server:messages_get()).
+-define(check(Expected),
+ receive
+ {log,Expected} ->
+ [] = test_server:messages_get()
+ after 5000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {expected,Expected},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(bin(Msg), list_to_binary(Msg++"\n")).
+-define(domain,#{domain=>[?MODULE]}).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ timer:start(), % to avoid progress report
+ {ok,#{formatter:=OrigFormatter}} =
+ logger:get_handler_config(?STANDARD_HANDLER),
+ [{formatter,OrigFormatter}|Config].
+
+end_per_suite(Config) ->
+ {OrigMod,OrigConf} = proplists:get_value(formatter,Config),
+ logger:set_handler_config(?STANDARD_HANDLER,formatter,{OrigMod,OrigConf}),
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(TestHooksCase, Config) when
+ TestHooksCase == write_failure;
+ TestHooksCase == sync_failure ->
+ case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of
+ true ->
+ {skip,"Define the TEST_HOOKS macro to run this test"};
+ false ->
+ ct:print("********** ~w **********", [TestHooksCase]),
+ Config
+ end;
+init_per_testcase(OPCase, Config) when
+ OPCase == qlen_kill_new;
+ OPCase == restart_after ->
+ case re:run(erlang:system_info(system_version),
+ "dirty-schedulers-TEST",
+ [{capture,none}]) of
+ match ->
+ {skip,"Overload protection test skipped on dirty-schedulers-TEST"};
+ nomatch ->
+ ct:print("********** ~w **********", [OPCase]),
+ Config
+ end;
+init_per_testcase(TestCase, Config) ->
+ ct:print("********** ~w **********", [TestCase]),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [add_remove_instance_tty,
+ add_remove_instance_standard_io,
+ add_remove_instance_standard_error,
+ add_remove_instance_file1,
+ add_remove_instance_file2,
+ default_formatter,
+ filter_config,
+ errors,
+ formatter_fail,
+ config_fail,
+ crash_std_h_to_file,
+ crash_std_h_to_disk_log,
+ bad_input,
+ info_and_reset,
+ reconfig,
+ file_opts,
+ sync,
+ write_failure,
+ sync_failure,
+ op_switch_to_sync_file,
+ op_switch_to_sync_tty,
+ op_switch_to_drop_file,
+ op_switch_to_drop_tty,
+ op_switch_to_flush_file,
+ op_switch_to_flush_tty,
+ limit_burst_disabled,
+ limit_burst_enabled_one,
+ limit_burst_enabled_period,
+ kill_disabled,
+ qlen_kill_new,
+ qlen_kill_std,
+ mem_kill_new,
+ mem_kill_std,
+ restart_after,
+ handler_requests_under_load
+ ].
+
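+%% logger_std_h rejects 'tty' as handler type, so adding such an
+%% instance is expected to fail.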
+add_remove_instance_tty(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_std_h,{type,tty}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{type => tty},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ ok.
+
+add_remove_instance_standard_io(_Config) ->
+ add_remove_instance_nofile(standard_io).
+add_remove_instance_standard_io(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_standard_error(_Config) ->
+ add_remove_instance_nofile(standard_error).
+add_remove_instance_standard_error(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file1(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog1.txt"),
+ Type = {file,Log},
+ add_remove_instance_file(Log, Type).
+add_remove_instance_file1(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file2(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog2.txt"),
+ Type = {file,Log,[raw,append]},
+ add_remove_instance_file(Log, Type).
+add_remove_instance_file2(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file(Log, Type) ->
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => Type},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Pid = whereis(h_proc_name()),
+ true = is_pid(Pid),
+ logger:notice(M1=?msg,?domain),
+ ?check(M1),
+ B1 = ?bin(M1),
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name()),
+ logger:notice(?msg,?domain),
+ ?check_no_log,
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
+ ok.
+
+default_formatter(_Config) ->
+ ok = logger:set_handler_config(?STANDARD_HANDLER,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ ct:capture_start(),
+ logger:notice(M1=?msg),
+ timer:sleep(100),
+ ct:capture_stop(),
+ [Msg] = ct:capture_get(),
+ match = re:run(Msg,"=NOTICE REPORT====.*\n"++M1,[{capture,none}]),
+ ok.
+
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([handler_pid,mode_tab],HConfig),
+
+ FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()},
+ #{config:=HConfig} =
+ logger_std_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
+errors(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,?FUNCTION_NAME),
+
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {error,{already_exist,?MODULE}} =
+ logger:add_handler(?MODULE,logger_std_h,#{}),
+
+ {error,{not_found,no_such_name}} = logger:remove_handler(no_such_name),
+
+ ok = logger:remove_handler(?MODULE),
+ {error,{not_found,?MODULE}} = logger:remove_handler(?MODULE),
+
+ {error,
+ {handler_not_added,
+ {invalid_config,logger_std_h,{type,faulty_type}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{type => faulty_type}}),
+
+ case os:type() of
+ {win32,_} ->
+ %% No use in testing file access on Windows
+ ok;
+ _ ->
+ NoDir = lists:concat(["/",?MODULE,"_dir"]),
+ {error,
+ {handler_not_added,{{open_failed,NoDir,eacces},_}}} =
+ logger:add_handler(myh2,logger_std_h,
+ #{config=>#{type=>{file,NoDir}}})
+ end,
+
+ {error,
+ {handler_not_added,{{open_failed,Log,_},_}}} =
+ logger:add_handler(myh3,logger_std_h,
+ #{config=>#{type=>{file,Log,[bad_file_opt]}}}),
+
+ ok = logger:notice(?msg).
+
+errors(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+formatter_fail(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,?FUNCTION_NAME),
+
+ %% no formatter
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => {file,Log}},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}),
+ Pid = whereis(h_proc_name()),
+ true = is_pid(Pid),
+ H = logger:get_handler_ids(),
+ true = lists:member(?MODULE,H),
+
+ %% Formatter is added automatically
+ {ok,#{formatter:={logger_formatter,_}}} = logger:get_handler_config(?MODULE),
+ logger:notice(M1=?msg,?domain),
+ Got1 = try_match_file(Log,"[0-9\\+\\-T:\\.]* notice: "++M1,5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}),
+ logger:notice(M2=?msg,?domain),
+ Got2 = try_match_file(Log,
+ escape(Got1)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M2,
+ 5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}),
+ logger:notice(M3=?msg,?domain),
+ Got3 = try_match_file(Log,
+ escape(Got2)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M3,
+ 5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}),
+ logger:notice(?msg,?domain),
+ try_match_file(Log,
+ escape(Got3)++"FORMATTER ERROR: bad return value",
+ 5000),
+
+ %% Check that the handler is still alive and has not been restarted (same pid)
+ Pid = whereis(h_proc_name()),
+ H = logger:get_handler_ids(),
+
+ ok.
+
+formatter_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+config_fail(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_std_h,{bad,bad}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{bad => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_config,logger_std_h,
+ {restart_type,bad}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{restart_type => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_levels,{_,1,_}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{drop_mode_qlen=>1}}),
+ {error,{handler_not_added,{invalid_levels,{43,42,_}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{sync_mode_qlen=>43,
+ drop_mode_qlen=>42}}),
+ {error,{handler_not_added,{invalid_levels,{_,43,42}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{drop_mode_qlen=>43,
+ flush_qlen=>42}}),
+
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{illegal_config_change,#{config:=#{type:=_}},#{config:=#{type:=_}}}} =
+ logger:set_handler_config(?MODULE,config,
+ #{type=>{file,"file"}}),
+
+ {error,{invalid_levels,_}} =
+ logger:set_handler_config(?MODULE,config,
+ #{sync_mode_qlen=>100,
+ flush_qlen=>99}),
+ {error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} =
+ logger:set_handler_config(?MODULE, config,
+ #{filesync_rep_int => 2000}),
+
+ %% Read-only fields may (accidentally) be included in the change,
+ %% but they will not take effect
+ {ok,C} = logger:get_handler_config(?MODULE),
+ ok = logger:set_handler_config(?MODULE,config,
+ #{handler_pid=>self(),
+ mode_tab=>erlang:make_ref()}),
+ {ok,C} = logger:get_handler_config(?MODULE),
+
+ ok.
+
+config_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+crash_std_h_to_file(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])),
+ crash_std_h(Config,?FUNCTION_NAME,
+ [{handler,default,logger_std_h,
+ #{ config => #{ type => {file, Log} }}}],
+ file, Log).
+crash_std_h_to_file(cleanup,_Config) ->
+ crash_std_h(cleanup).
+
+crash_std_h_to_disk_log(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])),
+ crash_std_h(Config,?FUNCTION_NAME,
+ [{handler,default,logger_disk_log_h,
+ #{ config => #{ file => Log }}}],
+ disk_log,Log).
+crash_std_h_to_disk_log(cleanup,_Config) ->
+ crash_std_h(cleanup).
+
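+%% Start a peer node with the given handler configuration, log a
+%% message from the remote node, then kill the handler process and
+%% verify that the logged contents survive.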
+crash_std_h(Config,Func,Var,Type,Log) ->
+ Dir = ?config(priv_dir,Config),
+ SysConfig = filename:join(Dir,lists:concat([?MODULE,"_",Func,".config"])),
+ ok = file:write_file(SysConfig, io_lib:format("[{kernel,[{logger,~p}]}].",[Var])),
+ Pa = filename:dirname(code:which(?MODULE)),
+ Name = lists:concat([?MODULE,"_",Func]),
+ Args = lists:concat([" -config ",filename:rootname(SysConfig)," -pa ",Pa]),
+ ct:pal("Starting ~p with ~tp", [Name,Args]),
+ %% Start a node which prints kernel logs to the destination specified by Type
+ {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]),
+ HProcName =
+ case Type of
+ file -> ?name_to_reg_name(logger_std_h,?STANDARD_HANDLER);
+ disk_log -> ?name_to_reg_name(logger_disk_log_h,?STANDARD_HANDLER)
+ end,
+ Pid = rpc:call(Node,erlang,whereis,[HProcName]),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,self()}]),
+ ok = log_on_remote_node(Node,"dummy1"),
+ ?check("dummy1"),
+ {ok,Bin1} = sync_and_read(Node,Type,Log),
+ <<"dummy1\n">> = binary:part(Bin1,{byte_size(Bin1),-7}),
+
+ %% Kill the logger_std_h process
+ exit(Pid, kill),
+
+ %% Wait a bit, then check that it is gone
+ timer:sleep(2000),
+ undefined = rpc:call(Node,erlang,whereis,[HProcName]),
+
+ %% Check that file is not empty
+ {ok,Bin2} = sync_and_read(Node,Type,Log),
+ <<"dummy1\n">> = binary:part(Bin2,{byte_size(Bin2),-7}),
+ ok.
+
+%% We cannot use rpc:call here, since the code would then execute on a
+%% process whose group_leader is on this (the calling) node, and thus
+%% logger would send the log event to the logger process here instead
+%% of logging it on the remote node.
+log_on_remote_node(Node,Msg) ->
+ _ = spawn_link(Node,
+ fun() -> erlang:group_leader(whereis(user),self()),
+ logger:notice(Msg)
+ end),
+ ok.
+
+
+crash_std_h(cleanup) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
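+%% Sync the handler on Node and read the resulting log file; if the
+%% file is still empty, wait a while and read it again.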
+sync_and_read(Node,disk_log,Log) ->
+ rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]),
+ case file:read_file(Log ++ ".1") of
+ {ok,<<>>} ->
+ timer:sleep(5000),
+ file:read_file(Log ++ ".1");
+ Ok ->
+ Ok
+ end;
+sync_and_read(Node,file,Log) ->
+ rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]),
+ case file:read_file(Log) of
+ {ok,<<>>} ->
+ timer:sleep(5000),
+ file:read_file(Log);
+ Ok ->
+ Ok
+ end.
+
+bad_input(_Config) ->
+ {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType"),
+ {error,{badarg,{info,["BadType"]}}} = logger_std_h:info("BadType"),
+ {error,{badarg,{reset,["BadType"]}}} = logger_std_h:reset("BadType").
+
+
+info_and_reset(_Config) ->
+ #{id := ?STANDARD_HANDLER} = logger_std_h:info(?STANDARD_HANDLER),
+ ok = logger_std_h:reset(?STANDARD_HANDLER).
+
+reconfig(Config) ->
+ Dir = ?config(priv_dir,Config),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => standard_io},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{id := ?MODULE,
+ type := standard_io,
+ file_ctrl_pid := FileCtrlPid,
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = DefaultInfo =
+ logger_std_h:info(?MODULE),
+
+ {ok,
+ #{config:=
+ #{type := standard_io,
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} =
+ DefaultHConf}}
+ = logger:get_handler_config(?MODULE),
+
+ ok = logger:set_handler_config(?MODULE, config,
+ #{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen => 3,
+ burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 10,
+ overload_kill_enable => true,
+ overload_kill_qlen => 100000,
+ overload_kill_mem_size => 10000000,
+ overload_kill_restart_after => infinity,
+ filesync_repeat_interval => no_repeat}),
+ #{id := ?MODULE,
+ type := standard_io,
+ file_ctrl_pid := FileCtrlPid,
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity,
+ filesync_repeat_interval := no_repeat} = Info = logger_std_h:info(?MODULE),
+
+ {ok,#{config :=
+ #{type := standard_io,
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity,
+ filesync_repeat_interval := no_repeat} = HConf}} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConf#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = DefaultHConf#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = DefaultHConf#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = DefaultHConf#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir,lists:concat([?FUNCTION_NAME,".log"])),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => {file,File}},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ {ok,#{config:=#{filesync_repeat_interval:=FSI}=FileHConfig}} =
+ logger:get_handler_config(?MODULE),
+ ok = logger:update_handler_config(?MODULE,config,
+ #{filesync_repeat_interval=>FSI+2000}),
+ {ok,#{config:=C5}} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = FileHConfig#{filesync_repeat_interval=>FSI+2000},
+
+ %% You are not allowed to actively set 'type' at runtime, since
+ %% it is a write-once field.
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>standard_io}),
+ {ok,#{config:=C6}} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = C5,
+
+ %% ... but if you don't specify 'type', then set_handler_config shall
+ %% NOT reset it to its default value
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C7}} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = FileHConfig#{sync_mode_qlen=>1},
+ ok.
+
+reconfig(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+
+file_opts(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+ BadFileOpts = [raw],
+ BadType = {file,Log,BadFileOpts},
+ {error,{handler_not_added,{{open_failed,Log,enoent},_}}} =
+ logger:add_handler(?MODULE, logger_std_h,
+ #{config => #{type => BadType}}),
+
+ OkFileOpts = [raw,append],
+ OkType = {file,Log,OkFileOpts},
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => OkType},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ #{type := OkType} = logger_std_h:info(?MODULE),
+ logger:notice(M1=?msg,?domain),
+ ?check(M1),
+ B1 = ?bin(M1),
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
+ ok.
+file_opts(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+
+sync(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+ Type = {file,Log},
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => Type},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,nl}}),
+
+ %% check repeated filesync happens
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {logger_std_h, sync_dev, 4},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"first\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync}]),
+
+ logger:notice("first", ?domain),
+ %% wait for automatic filesync
+ check_tracer(filesync_rep_int()*2),
+
+ %% check that explicit filesync is only done once
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {logger_std_h, sync_dev, 4},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"second\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync},
+ {no_more,500}
+ ]),
+ logger:notice("second", ?domain),
+ %% do explicit sync
+ logger_std_h:filesync(?MODULE),
+ %% a second sync should be ignored
+ logger_std_h:filesync(?MODULE),
+ check_tracer(100),
+
+ %% check that if no repeated filesync is active,
+ %% a filesync is still performed when the handler goes idle
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
+ no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+ %% The following timer is to make sure the time from the last log
+ %% ("second") to the next ("third") is long enough, so that a flush is
+ %% triggered by the idle timeout between "third" and "fourth".
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {logger_std_h, sync_dev, 4},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"third\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync},
+ {logger_std_h, write_to_dev, <<"fourth\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync}]),
+ logger:notice("third", ?domain),
+ %% wait for automatic filesync
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ logger:notice("fourth", ?domain),
+ %% wait for automatic filesync
+ check_tracer(?IDLE_DETECT_TIME_MSEC*2),
+
+ %% switch repeated filesync on and verify that the looping works
+ SyncInt = 1000,
+ WaitT = 4500,
+ OneSync = {logger_std_h,handle_cast,repeated_filesync},
+ %% receive 1 initial repeated_filesync, then 1 per sec
+ start_tracer([{logger_std_h,handle_cast,2}],
+ [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => SyncInt}),
+ SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+ timer:sleep(WaitT),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
+ check_tracer(100),
+ ok.
+sync(cleanup, _Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+write_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ Log = filename:join(Dir, File),
+ Node = start_std_h_on_new_node(Config, Log),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\n">>}, filesync_rep_int()),
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,{error,terminated}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,write,Log,{error,terminated}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,{error,eacces}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,write,Log,{error,eacces}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, filesync_rep_int()),
+ ok.
+write_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+sync_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ Log = filename:join(Dir, File),
+ Node = start_std_h_on_new_node(Config, Log),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
+
+ SyncInt = 500,
+ ok = rpc:call(Node, logger, update_handler_config,
+ [?STANDARD_HANDLER, config,
+ #{filesync_repeat_interval => SyncInt}]),
+ Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]),
+ SyncInt = maps:get(filesync_repeat_interval, Info),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,terminated}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,terminated}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,eacces}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,eacces}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ ?check_no_log,
+ ok.
+sync_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
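+%% Start a new node with a default logger_std_h handler writing to Log,
+%% and replace its formatter with the test formatter.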
+start_std_h_on_new_node(Config, Log) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,[{handler,default,logger_std_h,
+ #{ config => #{ type => {file,Log}}}}]}]),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,nl}]),
+ Node.
+
+%% functions for test hook macros to be called by rpc
+set_internal_log(_Mod, _Func) ->
+ ?set_internal_log({_Mod,_Func}).
+set_result(_Op, _Result) ->
+ ?set_result(_Op, _Result).
+set_defaults() ->
+ ?set_defaults().
+
+%% internal log function that sends the term to the test case process
+internal_log(Type, Term) ->
+ [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester),
+ Tester ! {log,{Type,Term}},
+ logger:internal_log(Type, Term),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload protection tests
+
+op_switch_to_sync_file(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 500,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => NumOfReqs+1,
+ flush_qlen => 2*NumOfReqs,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ %% TRecvPid = start_op_trace(),
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Lines = count_lines(Log),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(async,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(sync,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_switch(async,sync,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(drop,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(flush,Events) end),
+ %% stop_op_trace(TRecvPid),
+ NumOfReqs = Lines,
+ ok = file_delete(Log),
+ ok.
+op_switch_to_sync_file(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_sync_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NumOfReqs = 500,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 3,
+ drop_mode_qlen => NumOfReqs+1,
+ flush_qlen => 2*NumOfReqs,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ ok.
+op_switch_to_sync_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop_file() ->
+ [{timetrap,{seconds,180}}].
+op_switch_to_drop_file(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 300,
+ Procs = 2,
+ Bursts = 10,
+ NewHConfig =
+ HConfig#{config =>
+ StdHConfig#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen =>
+ Procs*NumOfReqs*Bursts,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ %% It sometimes happens that the handler receives the
+ %% requests at a slow enough pace that dropping
+ %% never occurs. Therefore, let's generate a number of
+ %% bursts to increase the chance of message buildup.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1, Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]),
+ true = (Logged < (Procs*NumOfReqs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+ %% test repeatedly, hoping that this will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_drop_file(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+op_switch_to_drop_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NumOfReqs = 300,
+ Procs = 2,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen =>
+ Procs*NumOfReqs+1,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ ok.
+op_switch_to_drop_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_flush_file() ->
+ [{timetrap,{minutes,5}}].
+op_switch_to_flush_file(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% NOTE: it's important that both async and sync
+ %% requests have been queued when the flush happens
+ %% (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{config =>
+ StdHConfig#{sync_mode_qlen => 2,
+ %% disable drop mode
+ drop_mode_qlen => 300,
+ flush_qlen => 300,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1500,
+ Procs = 10,
+ Bursts = 10,
+ %% It sometimes happens that the handler either receives
+ %% the requests at a slow enough pace that flushing
+ %% never occurs, or receives all messages at once,
+ %% causing all of them to be flushed (so that no dropping of
+ %% sync messages gets tested). Therefore, let's
+ %% generate a number of bursts to increase the chance
+ %% of message buildup in some random fashion.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1,Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]),
+ true = (Logged < (NumOfReqs*Procs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+ %% test repeatedly, hoping that this will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_flush_file(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+op_switch_to_flush_tty() ->
+ [{timetrap,{minutes,5}}].
+op_switch_to_flush_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+
+ %% it's important that both async and sync requests have been queued
+ %% when the flush happens (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 2,
+ %% disable drop mode
+ drop_mode_qlen => 100,
+ flush_qlen => 100,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1000,
+ Procs = 100,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ ok.
+op_switch_to_flush_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_disabled(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => StdHConfig#{burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ NumOfReqs = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_one(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ReqLimit = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_one(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_period(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ BurstTWin = 1000,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => BurstTWin,
+ drop_mode_qlen => 20000,
+ flush_qlen => 20001}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+
+ Windows = 3,
+ Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ true = (Logged > (ReqLimit*Windows)) andalso
+ (Logged < (ReqLimit*(Windows+2))),
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_period(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+kill_disabled(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>false,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>100}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file_delete(Log),
+ true = is_pid(whereis(h_proc_name())),
+ ok.
+kill_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+qlen_kill_new(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>Mem0+50000,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_std_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+qlen_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% Choke the standard handler on a remote node to verify that the
+%% termination works as expected.
+qlen_kill_std(_Config) ->
+ %%! HERE
+ %% Dir = ?config(priv_dir, Config),
+ %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ %% Log = filename:join(Dir, File),
+ %% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
+ %% ok = rpc:call(Node, logger, update_handler_config,
+ %% [?STANDARD_HANDLER, config,
+ %% #{overload_kill_enable=>true,
+ %% overload_kill_qlen=>10,
+ %% overload_kill_mem_size=>100000}]),
+ {skip,"Not done yet"}.
+
+mem_kill_new(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>50000,
+ overload_kill_mem_size=>Mem0+500,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_std_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+mem_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% Choke the standard handler on a remote node to verify that the
+%% termination works as expected.
+mem_kill_std(_Config) ->
+ {skip,"Not done yet"}.
+
+restart_after() ->
+ [{timetrap,{minutes,2}}].
+restart_after(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig1 =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_restart_after=>infinity}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
+ MRef1 = erlang:monitor(process, whereis(h_proc_name())),
+ %% kill handler
+ send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef1, _, _, _Reason1} ->
+ file_delete(Log),
+ error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3),
+ ok
+ after
+ 5000 ->
+ Info1 = logger_std_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info1]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+
+ {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+
+ NewHConfig2 =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
+ Pid0 = whereis(h_proc_name()),
+ MRef2 = erlang:monitor(process, Pid0),
+ %% kill handler
+ send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef2, _, _, _Reason2} ->
+ file_delete(Log),
+ {ok,Pid1} = wait_for_process_up(RestartAfter * 3),
+ false = (Pid1 == Pid0),
+ ok
+ after
+ 5000 ->
+ Info2 = logger_std_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info2]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+ ok.
+restart_after(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% Send handler requests (sync, info, reset, change_config)
+%% during high load to verify that syncing, dropping, and flushing are
+%% handled correctly.
+handler_requests_under_load() ->
+ [{timetrap,{minutes,3}}].
+handler_requests_under_load(Config) ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => 1000,
+ flush_qlen => 2000,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
+ {info,[]},
+ {reset,[]},
+ {change_config,[]}])
+ end),
+ Sent = send_burst({t,10000}, seq, {chars,79}, notice),
+ Pid ! {self(),finish},
+ ReqResult = receive {Pid,Result} -> Result end,
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ FindError = fun(Res) ->
+ [E || E <- Res,
+ is_tuple(E) andalso (element(1,E) == error)]
+ end,
+ Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult),
+ ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
+ ok = file_delete(Log).
+handler_requests_under_load(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
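+%% Issue the given handler requests, one every TO ms, cycling through
+%% the request list until told to finish; the results are collected per
+%% request type.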
+send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
+ receive
+ {From,finish} ->
+ From ! {self(),Reqs}
+ after
+ TO ->
+ Result =
+ case Req of
+ change_config ->
+ logger:update_handler_config(HName, config,
+ #{overload_kill_enable =>
+ false});
+ Func ->
+ logger_std_h:Func(HName)
+ end,
+ send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}])
+ end.
+
+
+%%%-----------------------------------------------------------------
+%%%
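+%% Add a logger_std_h handler named Name, logging either to the given
+%% tty device or to a file named after the test case, and return its
+%% configuration.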
+start_handler(Name, TTY, Config) when TTY == standard_io;
+ TTY == standard_error ->
+ ok = logger:add_handler(Name,
+ logger_std_h,
+ #{config => #{type => TTY},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ formatter=>{?MODULE,op}}),
+ {ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
+ {HConfig,StdHConfig};
+
+start_handler(Name, FuncName, Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([FuncName,".log"])),
+ ct:pal("Logging to ~tp", [Log]),
+ Type = {file,Log},
+ _ = file_delete(Log),
+ ok = logger:add_handler(Name,
+ logger_std_h,
+ #{config => #{type => Type},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ formatter=>{?MODULE,op}}),
+ {ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
+ {Log,HConfig,StdHConfig}.
+
+stop_handler(Name) ->
+ R = logger:remove_handler(Name),
+ ct:pal("Handler ~p stopped! Result: ~p", [Name,R]),
+ R.
+
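+%% Count the log lines in File, excluding handler info printouts,
+%% after waiting for the file size to stabilize.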
+count_lines(File) ->
+ wait_until_written(File, -1),
+ count_lines1(File).
+
+wait_until_written(File, Sz) ->
+ timer:sleep(2000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ timer:sleep(1000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ ok;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end.
+
+count_lines1(File) ->
+ {_,Dev} = file:open(File, [read]),
+ Lines = count_lines2(Dev, 0),
+ file:close(Dev),
+ Lines.
+
+count_lines2(Dev, LC) ->
+ case file:read_line(Dev) of
+ {ok,"Handler logger_std_h_SUITE " ++_} ->
+ %% Not counting handler info
+ count_lines2(Dev,LC);
+ {ok,_} ->
+ count_lines2(Dev,LC+1);
+ eof -> LC
+ end.
+
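+%% Send a burst of log events, each with Sz characters of random text:
+%% either a fixed number of events ({n,N}), sent sequentially or from a
+%% number of spawned processes, or sequential events for T ms ({t,T}).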
+send_burst(NorT, Type, {chars,Sz}, Class) ->
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)],
+ case NorT of
+ {n,N} ->
+ %% process_flag(priority, high),
+ send_n_burst(N, Type, Text, Class),
+ %% process_flag(priority, normal),
+ N;
+ {t,T} ->
+ ct:pal("Sending messages sequentially for ~w ms", [T]),
+ T0 = erlang:monotonic_time(millisecond),
+ send_t_burst(T0, T, Text, Class, 0)
+ end.
+
+send_n_burst(0, _, _Text, _Class) ->
+ ok;
+send_n_burst(N, seq, Text, Class) ->
+ ok = logger:Class(Text, ?domain),
+ send_n_burst(N-1, seq, Text, Class);
+send_n_burst(N, {spawn,Ps,TO}, Text, Class) ->
+ ct:pal("~w processes each sending ~w messages", [Ps,N]),
+ MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end,
+ monitor(process,spawn_link(per_proc_fun(N,Text,Class,X)))
+ end || X <- lists:seq(1,Ps)],
+ lists:foreach(fun(MRef) ->
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ ok
+ end
+ end, MRefs),
+ ct:pal("Message burst sent", []),
+ ok.
+
+send_t_burst(T0, T, Text, Class, N) ->
+ T1 = erlang:monotonic_time(millisecond),
+ if (T1-T0) > T ->
+ N;
+ true ->
+ ok = logger:Class(Text, ?domain),
+ send_t_burst(T0, T, Text, Class, N+1)
+ end.
+
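+%% Every second spawned burst process runs at high priority, to vary
+%% the scheduling of the burst.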
+per_proc_fun(N,Text,Class,X) when X rem 2 == 0 ->
+ fun() ->
+ process_flag(priority,high),
+ send_n_burst(N, seq, Text, Class)
+ end;
+per_proc_fun(N,Text,Class,_) ->
+ fun() ->
+ send_n_burst(N, seq, Text, Class)
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Formatter callback
+%%% Using this to send the formatted string back to the test case
+%%% process - so it can check for logged events.
+format(_,bad_return) ->
+ bad_return;
+format(_,crash) ->
+ erlang:error(formatter_crashed);
+format(#{msg:={string,String0}},no_nl) ->
+ String = unicode:characters_to_list(String0),
+ String;
+format(#{msg:={string,String0}},nl) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={string,String0}},op) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={report,#{label:={supervisor,progress}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={gen_server,terminate}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={proc_lib,crash}}}},op) ->
+ "";
+format(#{msg:={F,A}},OpOrPid) when is_list(F), is_list(A) ->
+ String = lists:flatten(io_lib:format(F,A)),
+ if is_pid(OpOrPid) -> OpOrPid ! {log,String};
+ true -> ok
+ end,
+ String++"\n";
+format(#{msg:={string,String0}},Pid) ->
+ String = unicode:characters_to_list(String0),
+ Pid ! {log,String},
+ String++"\n".
+
+add_remove_instance_nofile(Type) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{type => Type},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Pid = whereis(h_proc_name()),
+ true = is_pid(Pid),
+ group_leader(group_leader(),Pid), % to get printouts in test log
+ logger:notice(M1=?msg,?domain),
+ ?check(M1),
+ %% check that sync doesn't do damage even if not relevant
+ ok = logger_std_h:filesync(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name()),
+ logger:notice(?msg,?domain),
+ ?check_no_log,
+ ok.
+
+logger_std_h_remove() ->
+ logger:remove_handler(?MODULE).
+logger_std_h_remove(Id) ->
+ logger:remove_handler(Id).
+
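+%% Poll FileName every 500 ms, for at most Time ms, until
+%% file:read_file/1 returns the Expected result.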
+try_read_file(FileName, Expected, Time) when Time > 0 ->
+ case file:read_file(FileName) of
+ Expected ->
+ ok;
+ Error = {error,_Reason} ->
+ ct:pal("Can't read ~tp: ~tp", [FileName,Error]),
+ erlang:error(Error);
+ Got ->
+ ct:pal("try_read_file got ~tp", [Got]),
+ timer:sleep(500),
+ try_read_file(FileName, Expected, Time-500)
+ end;
+try_read_file(FileName, Expected, _) ->
+ ct:pal("Missing pattern ~tp in ~tp", [Expected,FileName]),
+ erlang:error({error,missing_expected_pattern}).
+
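+%% Poll FileName, for at most Time ms, until its contents match the
+%% regexp Pattern; returns the file contents as a string.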
+try_match_file(FileName, Pattern, Time) ->
+ try_match_file(FileName, Pattern, Time, <<>>).
+
+try_match_file(FileName, Pattern, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ {ok, Bin} ->
+ case re:run(Bin,Pattern,[{capture,none}]) of
+ match ->
+ unicode:characters_to_list(Bin);
+ _ ->
+ timer:sleep(100),
+ try_match_file(FileName, Pattern, Time-100, Bin)
+ end;
+ Error ->
+ erlang:error(Error)
+ end;
+try_match_file(_,Pattern,_,Incorrect) ->
+ ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n",
+ [Pattern,Incorrect]),
+ erlang:error({error,not_matching_pattern,Pattern,Incorrect}).
+
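+%% Call Fun at most N times, sleeping 5 seconds between attempts.
+%% Returns {ok,{FailedAttempts,Result}} on the first success, or
+%% {fails,FirstReason} if all attempts fail.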
+repeat_until_ok(Fun, N) ->
+ repeat_until_ok(Fun, 0, N, undefined).
+
+repeat_until_ok(_Fun, Stop, Stop, Reason) ->
+ {fails,Reason};
+
+repeat_until_ok(Fun, C, Stop, FirstReason) ->
+ if C > 0 -> timer:sleep(5000);
+ true -> ok
+ end,
+ try Fun() of
+ Result ->
+ {ok,{C,Result}}
+ catch
+ _:Reason:Stack ->
+ ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]),
+ if FirstReason == undefined ->
+ repeat_until_ok(Fun, C+1, Stop, {Reason,Stack});
+ true ->
+ repeat_until_ok(Fun, C+1, Stop, FirstReason)
+ end
+ end.
+
+
+%%%-----------------------------------------------------------------
+%%%
+start_op_trace() ->
+ TraceFun = fun({trace,_,call,{_Mod,Func,Details}}, Pid) ->
+ Pid ! {trace_call,Func,Details},
+ Pid;
+ ({trace,_,return_from,{_Mod,Func,_},RetVal}, Pid) ->
+ Pid ! {trace_return,Func,RetVal},
+ Pid
+ end,
+ TRecvPid = spawn_link(fun() -> trace_receiver(5000) end),
+ {ok,_} = dbg:tracer(process, {TraceFun, TRecvPid}),
+
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:p(self(), [c]),
+
+ MS1 = dbg:fun2ms(fun([_]) -> return_trace() end),
+ {ok,_} = dbg:tp(logger_h_common, check_load, 1, MS1),
+
+ {ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []),
+
+ MS2 = dbg:fun2ms(fun([_,mode]) -> return_trace() end),
+ {ok,_} = dbg:tpl(ets, lookup, 2, MS2),
+
+ ct:pal("Tracing started!", []),
+ TRecvPid.
+
+stop_op_trace(TRecvPid) ->
+ dbg:stop_clear(),
+ unlink(TRecvPid),
+ exit(TRecvPid, kill),
+ ok.
+
+find_mode(flush, Events) ->
+ lists:any(fun({trace_call,flush_log_requests,[_,_]}) -> true;
+ (_) -> false
+ end, Events);
+find_mode(Mode, Events) ->
+ lists:keymember([{mode,Mode}], 3, Events).
+
+%% find_switch(_From, To, Events) ->
+%% try lists:foldl(fun({trace_return,check_load,{To,_,_,_}},
+%% {trace_call,check_load,[#{mode := From}]}) ->
+%% throw(match);
+%% (Event, _) ->
+%% Event
+%% end, undefined, Events) of
+%% _ -> false
+%% catch
+%% throw:match -> true
+%% end.
+
+analyse_trace(TRecvPid, TestFun) ->
+ TRecvPid ! {test,self(),TestFun},
+ receive
+ {result,TRecvPid,Result} ->
+ Result
+ after
+ 60000 ->
+ fails
+ end.
+
+trace_receiver(IdleT) ->
+ Msgs = receive_until_idle(IdleT, 5, []),
+ ct:pal("~w trace events generated", [length(Msgs)]),
+ analyse(Msgs).
+
+receive_until_idle(IdleT, WaitN, Msgs) ->
+ receive
+ Msg = {trace_call,_,_} ->
+ receive_until_idle(IdleT, 5, [Msg | Msgs]);
+ Msg = {trace_return,_,_} ->
+ receive_until_idle(IdleT, 5, [Msg | Msgs])
+ after
+ IdleT ->
+ if WaitN == 0 ->
+ Msgs;
+ true ->
+ receive_until_idle(IdleT, WaitN-1, Msgs)
+ end
+ end.
+
+analyse(Msgs) ->
+ receive
+ {test,From,TestFun} ->
+ From ! {result,self(),TestFun(Msgs)},
+ analyse(Msgs)
+ end.
+
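+%% Set up call tracing on the handler and file_ctrl processes. The
+%% tracer fun matches the Expected events in order and notifies the test
+%% process when all of them have been seen (checked by check_tracer/1).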
+start_tracer(Trace,Expected) ->
+ Pid = self(),
+ FileCtrlPid = maps:get(file_ctrl_pid, logger_std_h:info(?MODULE)),
+ dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
+ dbg:p(whereis(h_proc_name()),[c]),
+ dbg:p(FileCtrlPid,[c]),
+ tpl(Trace),
+ ok.
+
+tpl([{M,F,A}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,[]),
+ case lists:keyfind(matched,1,Match) of
+ {_,_,1} ->
+ ok;
+ _ ->
+ dbg:stop_clear(),
+ throw({skip,"Can't trace "++atom_to_list(M)++":"++
+ atom_to_list(F)++"/"++integer_to_list(A)})
+ end,
+ tpl(Trace);
+tpl([]) ->
+ ok.
+
+tracer({trace,_,call,{logger_std_h,handle_cast,[Op|_]}},
+ {Pid,[{Mod,Func,Op}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
+tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}},
+ {Pid,[{Mod,Func,Data}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
+tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func});
+tracer({trace,_,call,Call}, {Pid,Expected}) ->
+ ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]),
+ Pid ! {tracer_got_unexpected,Call,Expected},
+ {Pid,Expected}.
+
+maybe_tracer_done(Pid,[]=Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! {tracer_done,0},
+ {Pid,Expected};
+maybe_tracer_done(Pid,[{no_more,T}]=Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! {tracer_done,T},
+ {Pid,Expected};
+maybe_tracer_done(Pid,Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ {Pid,Expected}.
+
+check_tracer(T) ->
+ check_tracer(T,fun() -> ct:fail({timeout,tracer}) end).
+check_tracer(T,TimeoutFun) ->
+ receive
+ {tracer_done,Delay} ->
+ %% Possibly wait Delay ms to check that no unexpected
+ %% traces are received
+ check_tracer(Delay,fun() -> ok end);
+ {tracer_got_unexpected,Got,Expected} ->
+ dbg:stop_clear(),
+ ct:fail({tracer_got_unexpected,Got,Expected})
+ after T ->
+ dbg:stop_clear(),
+ TimeoutFun()
+ end.
+
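+%% Escape '+' characters so that previously matched log text can be
+%% reused as a literal regexp prefix.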
+escape([$+|Rest]) ->
+ [$\\,$+|escape(Rest)];
+escape([H|T]) ->
+ [H|escape(T)];
+escape([]) ->
+ [].
+
+h_proc_name() ->
+ h_proc_name(?MODULE).
+h_proc_name(Name) ->
+ ?name_to_reg_name(logger_std_h,Name).
+
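+%% Poll every 500 ms, for roughly T ms, until the handler process is
+%% registered and its configuration is available.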
+wait_for_process_up(T) ->
+ wait_for_process_up(?MODULE, h_proc_name(), T).
+
+wait_for_process_up(Name, RegName, T) ->
+ N = (T div 500) + 1,
+ wait_for_process_up1(Name, RegName, N).
+
+wait_for_process_up1(_Name, _RegName, 0) ->
+ error;
+wait_for_process_up1(Name, RegName, N) ->
+ timer:sleep(500),
+ case whereis(RegName) of
+ Pid when is_pid(Pid) ->
+ case logger:get_handler_config(Name) of
+ {ok,_} ->
+ %% ct:pal("Process ~p up (~p tries left)",[Name,N]),
+ {ok,Pid};
+ _ ->
+ wait_for_process_up1(Name, RegName, N-1)
+ end;
+ undefined ->
+ %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]),
+ wait_for_process_up1(Name, RegName, N-1)
+ end.
+
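+%% Time to wait for a repeated filesync, with some margin. Falls back
+%% to a fixed 5500 ms when ?FILESYNC_REPEAT_INTERVAL is an atom, i.e.
+%% when repeated filesync is disabled.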
+filesync_rep_int() ->
+ case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of
+ true -> 5500;
+ false -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end.
+
+
+file_delete(Log) ->
+ file:delete(Log).
+
diff --git a/lib/kernel/test/logger_test_lib.erl b/lib/kernel/test/logger_test_lib.erl
new file mode 100644
index 0000000000..81eb9ce5eb
--- /dev/null
+++ b/lib/kernel/test/logger_test_lib.erl
@@ -0,0 +1,82 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_test_lib).
+
+-include_lib("kernel/src/logger_internal.hrl").
+
+-export([setup/2, log/3, sync_and_read/3]).
+
+-export([init/2,
+ pre_init_per_suite/3, pre_init_per_testcase/4,
+ post_end_per_testcase/5, post_end_per_suite/3]).
+
+setup(Config,Vars) ->
+ FuncStr = lists:concat([proplists:get_value(suite, Config), "_",
+ proplists:get_value(tc, Config)]),
+ ConfigFileName = filename:join(proplists:get_value(priv_dir, Config), FuncStr),
+ file:write_file(ConfigFileName ++ ".config", io_lib:format("[{kernel, ~p}].",[Vars])),
+ case test_server:start_node(proplists:get_value(tc, Config), slave,
+ [{args, ["-pa ",filename:dirname(code:which(?MODULE)),
+ " -boot start_sasl -kernel start_timer true "
+ "-config ",ConfigFileName]}]) of
+ {ok, Node} ->
+ L = rpc:call(Node, logger, get_config, []),
+ ct:log("~p",[L]),
+ {ok, L, Node};
+ {error, Reason} ->
+ ct:log("Failed to start node: ~p",[Reason]),
+ error
+ end.
+
+log(Node, F, A) ->
+ log(Node, logger, F, A).
+log(Node, M, F, A) ->
+ MD = #{ gl => rpc:call(Node, erlang, whereis, [logger]) },
+ rpc:call(Node, M, F, A ++ [MD]).
+
+sync_and_read(Node,disk_log,Log) ->
+ rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]),
+ file:read_file(Log ++ ".1");
+sync_and_read(Node, file,Log) ->
+ ok = rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]),
+ file:read_file(Log).
+
+
+init(_, _) ->
+ {ok, []}.
+
+pre_init_per_suite(_Suite, Config, State) ->
+ {[{nodes, nodes()} | Config], State}.
+
+pre_init_per_testcase(Suite, TC, Config, State) ->
+ cleanup(Config),
+ {[{suite, Suite}, {tc, TC} | Config], State}.
+
+post_end_per_testcase(_, _TC, Config, Res, State) ->
+ cleanup(Config),
+ {Res, State}.
+
+post_end_per_suite(_, Config, State) ->
+ cleanup(Config),
+ {Config, State}.
+
+cleanup(Config) ->
+ [test_server:stop_node(N) || N <- nodes(),
+ not lists:member(N, proplists:get_value(nodes, Config))].
diff --git a/lib/kernel/test/loose_node.erl b/lib/kernel/test/loose_node.erl
index ba293a821a..cc3f9bbea0 100644
--- a/lib/kernel/test/loose_node.erl
+++ b/lib/kernel/test/loose_node.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2004-2010. All Rights Reserved.
+%% Copyright Ericsson AB 2004-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -57,9 +57,16 @@
%%
stop(Node) when is_atom(Node) ->
+ erlang:monitor_node(Node, true),
rpc:cast(Node, erlang, halt, []),
- io:format("Stopped loose node ~p~n", [Node]),
- ok.
+ receive
+ {nodedown, Node} ->
+ io:format("Stopped loose node ~p~n", [Node]),
+ ok
+ after 10000 ->
+ io:format("Failed to stop loose node: ~p~n", [Node]),
+ {error, node_not_stopped}
+ end.
start(Name, Args) ->
start(Name, Args, -1).
diff --git a/lib/kernel/test/multi_load_SUITE.erl b/lib/kernel/test/multi_load_SUITE.erl
new file mode 100644
index 0000000000..f3258ea520
--- /dev/null
+++ b/lib/kernel/test/multi_load_SUITE.erl
@@ -0,0 +1,419 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1999-2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(multi_load_SUITE).
+-export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
+ init_per_group/2,end_per_group/2,
+ basic_atomic_load/1,basic_errors/1,sticky_dir/1,
+ on_load_failing/1,ensure_modules_loaded/1,
+ native_code/1]).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("syntax_tools/include/merl.hrl").
+
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
+
+all() ->
+ [basic_atomic_load,basic_errors,sticky_dir,on_load_failing,
+ ensure_modules_loaded,native_code].
+
+groups() ->
+ [].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_GroupName, Config) ->
+ Config.
+
+end_per_group(_GroupName, Config) ->
+ Config.
+
+basic_atomic_load(Config) ->
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Dir = filename:join(PrivDir, multi_load_sticky_dir),
+ _ = file:make_dir(Dir),
+
+ OldPath = code:get_path(),
+ try
+ code:add_patha(Dir),
+ do_basic(Dir)
+ after
+ code:set_path(OldPath)
+ end,
+
+ ok.
+
+do_basic(Dir) ->
+ MsVer1_0 = make_modules(5, versioned_module(1)),
+ MsVer1 = [{M,filename:absname(F, Dir),Bin} || {M,F,Bin} <- MsVer1_0],
+ _ = [ok = file:write_file(F, Bin) || {_,F,Bin} <- MsVer1],
+
+ Ms = [M || {M,_,_} <- MsVer1],
+ [] = [loaded || M <- Ms, is_loaded(M)],
+
+ ok = code:atomic_load(Ms),
+ _ = [1 = M:M() || M <- Ms],
+ _ = [F = code:which(M) || {M,F,_} <- MsVer1],
+ [] = [not_loaded || M <- Ms, not is_loaded(M)],
+
+ MsVer2 = update_modules(Ms, versioned_module(2)),
+ {ok,Prepared} = code:prepare_loading(MsVer2),
+ ok = code:finish_loading(Prepared),
+ _ = [2 = M:M() || M <- Ms],
+ _ = [F = code:which(M) || {M,F,_} <- MsVer2],
+ [] = [not_loaded || M <- Ms, not is_loaded(M)],
+
+ MsVer3 = update_modules(Ms, versioned_module(2)),
+ NotPurged = lists:sort([{M,not_purged} || M <- Ms]),
+ NotPurged = atomic_load_error(MsVer3, true),
+
+ ok.
+
+versioned_module(Ver) ->
+ fun(Mod) ->
+ ?Q(["-module('@Mod@').\n",
+ "-export(['@Mod@'/0]).\n",
+ "'@Mod@'() -> _@Ver@.\n"])
+ end.
+
+basic_errors(_Config) ->
+ atomic_load_fc([42]),
+ atomic_load_fc([{"mod","file","bin"}]),
+
+ finish_loading_fc(atom),
+ {ok,{PrepTag,_}} = code:prepare_loading([code]),
+ finish_loading_fc({PrepTag,[x]}),
+ finish_loading_fc({PrepTag,[{m,{<<>>,"",<<>>}}]}),
+ Prep = prepared_with_wrong_magic_bin(),
+ finish_loading_fc(Prep),
+
+ [{x,badfile}] = atomic_load_error([{x,"x",<<"bad">>}], false),
+ [{a,badfile},{some_nonexistent_file,nofile}] =
+ atomic_load_error([some_nonexistent_file,{a,"a",<<>>}],
+ false),
+
+ %% Modules mentioned more than once.
+ Mods = make_modules(2, fun basic_module/1),
+ Ms = [M || {M,_,_} <- Mods],
+ DupMods = Mods ++ [mnesia] ++ Mods ++ [mnesia],
+ DupErrors0 = lists:sort([mnesia|Ms]),
+ DupErrors = [{M,duplicated} || M <- DupErrors0],
+ DupErrors = atomic_load_error(DupMods, false),
+
+ ok.
+
+atomic_load_fc(L) ->
+ {'EXIT',{function_clause,[{code,atomic_load,[L],_}|_]}} =
+ (catch code:atomic_load(L)),
+ {'EXIT',{function_clause,[{code,prepare_loading,[L],_}|_]}} =
+ (catch code:prepare_loading(L)).
+
+finish_loading_fc(Term) ->
+ {'EXIT',{function_clause,[{code,finish_loading,[Term],_}|_]}} =
+ (catch code:finish_loading(Term)).
+
+prepared_with_wrong_magic_bin() ->
+ {ok,Prep} = code:prepare_loading([?MODULE]),
+ prep_magic(Prep).
+
+prep_magic([H|T]) ->
+ [prep_magic(H)|prep_magic(T)];
+prep_magic(Tuple) when is_tuple(Tuple) ->
+ L = prep_magic(tuple_to_list(Tuple)),
+ list_to_tuple(L);
+prep_magic(Ref) when is_reference(Ref) ->
+ try erlang:has_prepared_code_on_load(Ref) of
+ false ->
+ %% Create a different kind of magic ref.
+ ets:match_spec_compile([{'_',[true],['$_']}])
+ catch
+ _:_ ->
+ Ref
+ end;
+prep_magic(Other) ->
+ Other.
+
+sticky_dir(_Config) ->
+ Mod0 = make_module(lists, fun basic_module/1),
+ Mod1 = make_module(gen_server, fun basic_module/1),
+ Ms = [Mod0,Mod1],
+ SD = sticky_directory,
+ StickyErrors = [{gen_server,SD},{lists,SD}],
+ StickyErrors = atomic_load_error(Ms, true),
+
+ ok.
+
+on_load_failing(_Config) ->
+ OnLoad = make_modules(1, fun on_load_module/1),
+ [{OnLoadMod,_,_}] = OnLoad,
+ Ms = make_modules(10, fun basic_module/1) ++ OnLoad,
+
+ %% Fail because there is a module with on_load in the list.
+ on_load_failure(OnLoadMod, Ms),
+ on_load_failure(OnLoadMod, [lists:last(Ms)]),
+
+ %% Fail because there already is a pending on_load.
+ [{HangingOnLoad,_,_}|_] = Ms,
+ spawn_hanging_on_load(HangingOnLoad),
+ NoOnLoadMs = lists:droplast(Ms),
+ {error,[{HangingOnLoad,pending_on_load}]} =
+ code:atomic_load(NoOnLoadMs),
+ hanging_on_load ! stop_hanging_and_unload,
+
+ ok.
+
+on_load_failure(OnLoadMod, Ms) ->
+ [{OnLoadMod,on_load_not_allowed}] = atomic_load_error(Ms, false).
+
+spawn_hanging_on_load(Mod) ->
+ {Mod,Name,Bin} = make_module(Mod, "unknown",
+ fun(_) ->
+ hanging_on_load_module(Mod)
+ end),
+ spawn_link(fun() ->
+ {error,on_load_failure} =
+ code:load_binary(Mod, Name, Bin)
+ end).
+
+hanging_on_load_module(Mod) ->
+ ?Q(["-module('@Mod@').\n",
+ "-on_load(hang/0).\n",
+ "hang() ->\n"
+ " register(hanging_on_load, self()),\n"
+ " receive _ -> unload end.\n"]).
+
+ensure_modules_loaded(Config) ->
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Dir = filename:join(PrivDir, multi_load_ensure_modules_loaded),
+ _ = file:make_dir(Dir),
+
+ OldPath = code:get_path(),
+ try
+ code:add_patha(Dir),
+ do_ensure_modules_loaded(Dir)
+ after
+ code:set_path(OldPath)
+ end,
+
+ ok.
+
+do_ensure_modules_loaded(Dir) ->
+ %% Create a dummy "lists" module and place it in our code path.
+ {lists,ListsFile,ListsCode} = make_module(lists, fun basic_module/1),
+ ok = file:write_file(filename:absname(ListsFile, Dir), ListsCode),
+ {error,sticky_directory} = code:load_file(lists),
+
+ %% Make a new module that we can load.
+ Mod = make_module_file(Dir, fun basic_module/1),
+ false = is_loaded(Mod),
+
+ %% Make a new module with an on_load function.
+ OLMod = make_module_file(Dir, fun on_load_module/1),
+ false = is_loaded(OLMod),
+
+ %% lists should not be loaded again; Mod and OLMod should be
+ %% loaded. ?MODULE should not be reloaded, but there is no easy
+ %% way to test that. Repeating modules is OK.
+ ok = code:ensure_modules_loaded([?MODULE,lists,Mod,OLMod,
+ Mod,OLMod,Mod,lists]),
+ last = lists:last([last]),
+ true = is_loaded(Mod),
+ ok = Mod:Mod(),
+ true = is_loaded(OLMod),
+ _ = OLMod:module_info(),
+
+ %% Unload the modules that were loaded.
+ [begin
+ code:purge(M),
+ code:delete(M),
+ code:purge(M),
+ false = is_loaded(M)
+ end || M <- [Mod,OLMod]],
+
+ %% If there are some errors, all other modules should be loaded
+ %% anyway.
+ [{BadMod,BadFile,_}] = make_modules(1, fun basic_module/1),
+ ok = file:write_file(filename:absname(BadFile, Dir), <<"bad_code">>),
+ BadOLMod = make_module_file(Dir, fun failing_on_load_module/1),
+ BadEgg = bad__egg,
+ NativeMod = a_native_module,
+ NativeModFile = atom_to_list(NativeMod) ++ ".beam",
+ {NativeMod,_,NativeCode} = make_module(NativeMod, NativeModFile,
+ fun basic_module/1, [native]),
+ ok = file:write_file(filename:absname(NativeModFile, Dir), NativeCode),
+ ModulesToLoad = [OLMod,?MODULE,Mod,BadOLMod,NativeMod,
+ BadEgg,BadMod,lists],
+ {error,Error0} = code:ensure_modules_loaded(ModulesToLoad),
+ Error = lists:sort([{BadEgg,nofile},
+ {BadMod,badfile},
+ {BadOLMod,on_load_failure}]),
+ Error = lists:sort(Error0),
+ true = is_loaded(Mod),
+ true = is_loaded(OLMod),
+ true = is_loaded(NativeMod),
+
+ ModuleNative = case erlang:system_info(hipe_architecture) of
+ undefined -> false;
+ _ -> true
+ end,
+ ModuleNative = NativeMod:module_info(native),
+
+ ok.
+
+failing_on_load_module(Mod) ->
+ ?Q(["-module('@Mod@').\n",
+ "-on_load(f/0).\n",
+ "f() -> fail.\n"]).
+
+native_code(_Config) ->
+ case erlang:system_info(hipe_architecture) of
+ undefined ->
+ {skip,"No native support"};
+ _ ->
+ do_native_code()
+ end.
+
+do_native_code() ->
+ CalledMod = native_called_module,
+ CallingMod = native_calling_module,
+
+ %% Create a module in native code that calls another module.
+ CallingMod = make_and_load(CallingMod,
+ calling_module_fun(CalledMod),
+ [native]),
+
+ %% Create a threaded-code module.
+ _ = make_and_load(CalledMod, called_module_fun(42), []),
+ 42 = CallingMod:call(),
+
+ %% Now replace it with a changed module in native code.
+ code:purge(CalledMod),
+ make_and_load(CalledMod, called_module_fun(43), [native]),
+ true = test_server:is_native(CalledMod),
+ 43 = CallingMod:call(),
+
+ %% Reload the called module and call it.
+ code:purge(CalledMod),
+ ModVer3 = make_module(CalledMod, "", called_module_fun(changed)),
+ ok = code:atomic_load([ModVer3]),
+ false = test_server:is_native(CalledMod),
+ changed = CallingMod:call(),
+ code:purge(CalledMod),
+
+ ok.
+
+make_and_load(Mod, Fun, Opts) ->
+ {Mod,_,Code} = make_module(Mod, "", Fun, Opts),
+ {module,Mod} = code:load_binary(Mod, "", Code),
+ Mod.
+
+calling_module_fun(Called) ->
+ fun(Mod) ->
+ ?Q(["-module('@Mod@').\n",
+ "-export([call/0]).\n",
+ "call() -> _@Called@:f().\n"])
+ end.
+
+called_module_fun(Ret) ->
+ fun(Mod) ->
+ ?Q(["-module('@Mod@').\n",
+ "-export([f/0]).\n",
+ "f() -> _@Ret@.\n"])
+ end.
+
+%%%
+%%% Common utilities
+%%%
+
+atomic_load_error(Modules, ErrorInFinishLoading) ->
+ {error,Errors0} = code:atomic_load(Modules),
+ {Errors1,Bool} =
+ case code:prepare_loading(Modules) of
+ {ok,Prepared} ->
+ {error,Es0} = code:finish_loading(Prepared),
+ {Es0,true};
+ {error,Es0} ->
+ {Es0,false}
+ end,
+ Errors = lists:sort(Errors0),
+ Errors = lists:sort(Errors1),
+ case {ErrorInFinishLoading,Bool} of
+ {B,B} ->
+ Errors;
+ {false,true} ->
+ ct:fail("code:prepare_loading/1 should have failed");
+ {true,false} ->
+ ct:fail("code:prepare_loading/1 should have succeeded")
+ end.
+
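atomic_load_error/2 exercises both the one-shot code:atomic_load/1 and the two-phase code:prepare_loading/1 plus code:finish_loading/1 API. A minimal sketch of the two-phase flow, where the expensive preparation is done up front and the switch to the new code happens later in one step; when and why finish_loading/1 is delayed is an illustrative assumption:

    two_phase_load(Mods) ->
        case code:prepare_loading(Mods) of
            {ok, Prepared} ->
                %% ... do any other upgrade work here; nothing has
                %% been made current yet ...
                code:finish_loading(Prepared);
            {error, Reasons} ->
                {error, Reasons}
        end.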
+is_loaded(Mod) ->
+ case erlang:module_loaded(Mod) of
+ false ->
+ false = code:is_loaded(Mod);
+ true ->
+ {file,_} = code:is_loaded(Mod),
+ true
+ end.
+
+basic_module(Mod) ->
+ ?Q(["-module('@Mod@').\n"
+ "-export(['@Mod@'/0]).\n",
+ "'@Mod@'() -> ok."]).
+
+on_load_module(Mod) ->
+ ?Q(["-module('@Mod@').\n",
+ "-on_load(f/0).\n",
+ "f() -> ok.\n"]).
+
+make_module_file(Dir, Fun) ->
+ [{Mod,File,Code}] = make_modules(1, Fun),
+ ok = file:write_file(filename:absname(File, Dir), Code),
+ Mod.
+
+make_modules(0, _) ->
+ [];
+make_modules(N, Fun) ->
+ U = erlang:unique_integer([positive]),
+ ModName = "m__" ++ integer_to_list(N) ++ "_" ++ integer_to_list(U),
+ Mod = list_to_atom(ModName),
+ ModItem = make_module(Mod, Fun),
+ [ModItem|make_modules(N-1, Fun)].
+
+update_modules(Ms, Fun) ->
+ [make_module(M, Fun) || M <- Ms].
+
+make_module(Mod, Fun) ->
+ Filename = atom_to_list(Mod) ++ ".beam",
+ make_module(Mod, Filename, Fun).
+
+make_module(Mod, Filename, Fun) ->
+ make_module(Mod, Filename, Fun, []).
+
+make_module(Mod, Filename, Fun, Opts) ->
+ Tree = Fun(Mod),
+ merl:print(Tree),
+ {ok,Mod,Code} = merl:compile(Tree, Opts),
+ {Mod,Filename,Code}.
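The make_module helpers above build their test modules with merl (from syntax_tools). A standalone sketch of the same pattern, generating, compiling and loading a module at run time; the module name demo_mod and its single function are illustrative assumptions:

    make_and_load_demo() ->
        Tree = merl:quote(["-module(demo_mod).\n",
                           "-export([hello/0]).\n",
                           "hello() -> world.\n"]),
        {ok, demo_mod, Bin} = merl:compile(Tree),
        {module, demo_mod} = code:load_binary(demo_mod, "demo_mod.beam", Bin),
        world = demo_mod:hello(),
        ok.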
diff --git a/lib/kernel/test/myApp.erl b/lib/kernel/test/myApp.erl
index add1d5d500..0318e55c52 100644
--- a/lib/kernel/test/myApp.erl
+++ b/lib/kernel/test/myApp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/os_SUITE.erl b/lib/kernel/test/os_SUITE.erl
index 29d8d10262..710b9b115c 100644
--- a/lib/kernel/test/os_SUITE.erl
+++ b/lib/kernel/test/os_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -20,17 +20,27 @@
-module(os_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
- init_per_group/2,end_per_group/2]).
--export([space_in_cwd/1, quoting/1, cmd_unicode/1, space_in_name/1, bad_command/1,
- find_executable/1, unix_comment_in_command/1, deep_list_command/1, evil/1]).
+ init_per_group/2,end_per_group/2,
+ init_per_testcase/2,end_per_testcase/2]).
+-export([space_in_cwd/1, quoting/1, cmd_unicode/1,
+ null_in_command/1, space_in_name/1, bad_command/1,
+ find_executable/1, unix_comment_in_command/1, deep_list_command/1,
+ large_output_command/1, background_command/0, background_command/1,
+ message_leak/1, close_stdin/0, close_stdin/1, max_size_command/1,
+ perf_counter_api/1]).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
- [space_in_cwd, quoting, cmd_unicode, space_in_name, bad_command,
- find_executable, unix_comment_in_command, deep_list_command, evil].
+ [space_in_cwd, quoting, cmd_unicode, null_in_command,
+ space_in_name, bad_command,
+ find_executable, unix_comment_in_command, deep_list_command,
+ large_output_command, background_command, message_leak,
+ close_stdin, max_size_command, perf_counter_api].
groups() ->
[].
@@ -47,16 +57,27 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
+init_per_testcase(TC, Config)
+ when TC =:= background_command; TC =:= close_stdin ->
+ case os:type() of
+ {win32, _} ->
+ {skip,"Should not work on windows"};
+ _ ->
+ Config
+ end;
+init_per_testcase(_TC,Config) ->
+ Config.
+
+end_per_testcase(_,_Config) ->
+ ok.
-space_in_cwd(doc) ->
- "Test that executing a command in a current working directory "
- "with space in its name works.";
-space_in_cwd(suite) -> [];
+%% Test that executing a command in a current working directory
+%% with space in its name works.
space_in_cwd(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line Dirname = filename:join(PrivDir, "cwd with space"),
- ?line ok = file:make_dir(Dirname),
- ?line ok = file:set_cwd(Dirname),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Dirname = filename:join(PrivDir, "cwd with space"),
+ ok = file:make_dir(Dirname),
+ ok = file:set_cwd(Dirname),
    %% Using `more' gives almost the same result on both Unix and Windows.
@@ -67,70 +88,74 @@ space_in_cwd(Config) when is_list(Config) ->
"more </dev/null"
end,
- ?line case os:cmd(Cmd) of
- [] -> ok; % Unix.
- "\r\n" -> ok; % Windows.
- Other ->
- ?line test_server:fail({unexpected, Other})
- end,
+ case os:cmd(Cmd) of
+ [] -> ok; % Unix.
+ "\r\n" -> ok; % Windows.
+ Other ->
+ ct:fail({unexpected, Other})
+ end,
- ?t:sleep(5),
- ?line [] = receive_all(),
+ ct:sleep(5),
+ [] = receive_all(),
ok.
-quoting(doc) -> "Test that various ways of quoting arguments work.";
-quoting(suite) -> [];
+%% Test that various ways of quoting arguments work.
quoting(Config) when is_list(Config) ->
- ?line DataDir = ?config(data_dir, Config),
- ?line Echo = filename:join(DataDir, "my_echo"),
-
- ?line comp("one", os:cmd(Echo ++ " one")),
- ?line comp("one::two", os:cmd(Echo ++ " one two")),
- ?line comp("one two", os:cmd(Echo ++ " \"one two\"")),
- ?line comp("x::one two::y", os:cmd(Echo ++ " x \"one two\" y")),
- ?line comp("x::one two", os:cmd(Echo ++ " x \"one two\"")),
- ?line comp("one two::y", os:cmd(Echo ++ " \"one two\" y")),
- ?line comp("x::::y", os:cmd(Echo ++ " x \"\" y")),
- ?t:sleep(5),
- ?line [] = receive_all(),
+ DataDir = proplists:get_value(data_dir, Config),
+ Echo = filename:join(DataDir, "my_echo"),
+
+ comp("one", os:cmd(Echo ++ " one")),
+ comp("one::two", os:cmd(Echo ++ " one two")),
+ comp("one two", os:cmd(Echo ++ " \"one two\"")),
+ comp("x::one two::y", os:cmd(Echo ++ " x \"one two\" y")),
+ comp("x::one two", os:cmd(Echo ++ " x \"one two\"")),
+ comp("one two::y", os:cmd(Echo ++ " \"one two\" y")),
+ comp("x::::y", os:cmd(Echo ++ " x \"\" y")),
+ ct:sleep(5),
+ [] = receive_all(),
ok.
-cmd_unicode(doc) -> "Test that unicode arguments work.";
-cmd_unicode(suite) -> [];
+%% Test that unicode arguments work.
cmd_unicode(Config) when is_list(Config) ->
- ?line DataDir = ?config(data_dir, Config),
- ?line Echo = filename:join(DataDir, "my_echo"),
-
- ?line comp("one", os:cmd(Echo ++ " one")),
- ?line comp("one::two", os:cmd(Echo ++ " one two")),
- ?line comp("åäö::ϼΩ", os:cmd(Echo ++ " åäö " ++ [1020, 937])),
- ?t:sleep(5),
- ?line [] = receive_all(),
+ DataDir = proplists:get_value(data_dir, Config),
+ Echo = filename:join(DataDir, "my_echo"),
+
+ comp("one", os:cmd(Echo ++ " one")),
+ comp("one::two", os:cmd(Echo ++ " one two")),
+ comp("åäö::ϼΩ", os:cmd(Echo ++ " åäö " ++ [1020, 937])),
+ ct:sleep(5),
+ [] = receive_all(),
ok.
+null_in_command(Config) ->
+ {Ok, Error} = case os:type() of
+ {win32,_} -> {"dir", "di\0r"};
+ _ -> {"ls", "l\0s"}
+ end,
+ true = is_list(try os:cmd(Ok) catch Class0:_ -> Class0 end),
+ error = try os:cmd(Error) catch Class1:_ -> Class1 end,
+ ok.
-space_in_name(doc) ->
- "Test that program with a space in its name can be executed.";
-space_in_name(suite) -> [];
+%% Test that program with a space in its name can be executed.
space_in_name(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line DataDir = ?config(data_dir, Config),
- ?line Spacedir = filename:join(PrivDir, "program files"),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ DataDir = proplists:get_value(data_dir, Config),
+ Spacedir = filename:join(PrivDir, "program files"),
Ext = case os:type() of
{win32,_} -> ".exe";
_ -> ""
end,
- ?line OrigEcho = filename:join(DataDir, "my_echo" ++ Ext),
- ?line Echo0 = filename:join(Spacedir, "my_echo" ++ Ext),
+ OrigEcho = filename:join(DataDir, "my_echo" ++ Ext),
+ Echo0 = filename:join(Spacedir, "my_echo" ++ Ext),
%% Copy the `my_echo' program to a directory whose name contains a space.
- ?line ok = file:make_dir(Spacedir),
- ?line {ok, Bin} = file:read_file(OrigEcho),
- ?line ok = file:write_file(Echo0, Bin),
- ?line Echo = filename:nativename(Echo0),
- ?line ok = file:change_mode(Echo, 8#777), % Make it executable on Unix.
+ ok = file:make_dir(Spacedir),
+ {ok, Bin} = file:read_file(OrigEcho),
+ ok = file:write_file(Echo0, Bin),
+ Echo = filename:nativename(Echo0),
+ ok = file:change_mode(Echo, 8#777), % Make it executable on Unix.
%% Run the echo program.
%% Quoting on windows depends on if the full path of the executable
@@ -146,78 +171,74 @@ space_in_name(Config) when is_list(Config) ->
_ ->
"\""
end,
- ?line comp("", os:cmd(Quote ++ Echo ++ Quote)),
- ?line comp("a::b::c", os:cmd(Quote ++ Echo ++ Quote ++ " a b c")),
- ?t:sleep(5),
- ?line [] = receive_all(),
+ comp("", os:cmd(Quote ++ Echo ++ Quote)),
+ comp("a::b::c", os:cmd(Quote ++ Echo ++ Quote ++ " a b c")),
+ ct:sleep(5),
+ [] = receive_all(),
ok.
-bad_command(doc) ->
- "Check that a bad command doesn't crasch the server or the emulator (it used to).";
-bad_command(suite) -> [];
+%% Check that a bad command doesn't crash the server or the emulator (it used to).
bad_command(Config) when is_list(Config) ->
- ?line catch os:cmd([a|b]),
- ?line catch os:cmd({bad, thing}),
+ catch os:cmd([a|b]),
+ catch os:cmd({bad, thing}),
%% This should at least not crash (on Unix it typically returns
%% a message from the shell).
- ?line os:cmd("xxxxx"),
+ os:cmd("xxxxx"),
ok.
-find_executable(suite) -> [];
-find_executable(doc) -> [];
find_executable(Config) when is_list(Config) ->
case os:type() of
{win32, _} ->
- ?line DataDir = filename:join(?config(data_dir, Config), "win32"),
- ?line ok = file:set_cwd(filename:join([DataDir, "current"])),
- ?line Bin = filename:join(DataDir, "bin"),
- ?line Abin = filename:join(DataDir, "abin"),
- ?line UsrBin = filename:join([DataDir, "usr", "bin"]),
- ?line {ok, Current} = file:get_cwd(),
+ DataDir = filename:join(proplists:get_value(data_dir, Config), "win32"),
+ ok = file:set_cwd(filename:join([DataDir, "current"])),
+ Bin = filename:join(DataDir, "bin"),
+ Abin = filename:join(DataDir, "abin"),
+ UsrBin = filename:join([DataDir, "usr", "bin"]),
+ {ok, Current} = file:get_cwd(),
- ?line Path = lists:concat([Bin, ";", Abin, ";", UsrBin]),
- ?line io:format("Path = ~s", [Path]),
+ Path = lists:concat([Bin, ";", Abin, ";", UsrBin]),
+ io:format("Path = ~s", [Path]),
%% Search for programs in Bin (second element in PATH).
- ?line find_exe(Abin, "my_ar", ".exe", Path),
- ?line find_exe(Abin, "my_ascii", ".com", Path),
- ?line find_exe(Abin, "my_adb", ".bat", Path),
+ find_exe(Abin, "my_ar", ".exe", Path),
+ find_exe(Abin, "my_ascii", ".com", Path),
+ find_exe(Abin, "my_adb", ".bat", Path),
%% OTP-3626 find names of executables given with extension
- ?line find_exe(Abin, "my_ar.exe", "", Path),
- ?line find_exe(Abin, "my_ascii.com", "", Path),
- ?line find_exe(Abin, "my_adb.bat", "", Path),
- ?line find_exe(Abin, "my_ar.EXE", "", Path),
- ?line find_exe(Abin, "my_ascii.COM", "", Path),
- ?line find_exe(Abin, "MY_ADB.BAT", "", Path),
+ find_exe(Abin, "my_ar.exe", "", Path),
+ find_exe(Abin, "my_ascii.com", "", Path),
+ find_exe(Abin, "my_adb.bat", "", Path),
+ find_exe(Abin, "my_ar.EXE", "", Path),
+ find_exe(Abin, "my_ascii.COM", "", Path),
+ find_exe(Abin, "MY_ADB.BAT", "", Path),
%% Search for programs in Abin (second element in PATH).
- ?line find_exe(Abin, "my_ar", ".exe", Path),
- ?line find_exe(Abin, "my_ascii", ".com", Path),
- ?line find_exe(Abin, "my_adb", ".bat", Path),
+ find_exe(Abin, "my_ar", ".exe", Path),
+ find_exe(Abin, "my_ascii", ".com", Path),
+ find_exe(Abin, "my_adb", ".bat", Path),
%% Search for programs in the current working directory.
- ?line find_exe(Current, "my_program", ".exe", Path),
- ?line find_exe(Current, "my_command", ".com", Path),
- ?line find_exe(Current, "my_batch", ".bat", Path),
+ find_exe(Current, "my_program", ".exe", Path),
+ find_exe(Current, "my_command", ".com", Path),
+ find_exe(Current, "my_batch", ".bat", Path),
ok;
{unix, _} ->
- DataDir = ?config(data_dir, Config),
+ DataDir = proplists:get_value(data_dir, Config),
%% Smoke test.
- case lib:progname() of
- erl ->
- ?line ErlPath = os:find_executable("erl"),
- ?line true = is_list(ErlPath),
- ?line true = filelib:is_regular(ErlPath);
+ case ct:get_progname() of
+ "erl" ->
+ ErlPath = os:find_executable("erl"),
+ true = is_list(ErlPath),
+ true = filelib:is_regular(ErlPath);
_ ->
%% Don't bother -- the progname could include options.
ok
end,
%% Never return a directory name.
- ?line false = os:find_executable("unix", [DataDir]),
+ false = os:find_executable("unix", [DataDir]),
ok
end.
@@ -233,29 +254,23 @@ find_exe(Where, Name, Ext, Path) ->
Other ->
io:format("Expected ~p; got (converted to absolute) ~p",
[Expected, Other]),
- test_server:fail()
+ ct:fail(failed)
end;
Other ->
io:format("Expected ~p; got ~p", [Expected, Other]),
- test_server:fail()
+ ct:fail(failed)
end.
-unix_comment_in_command(doc) ->
- "OTP-1805: Test that os:cmd(\"ls #\") works correctly (used to hang).";
-unix_comment_in_command(suite) -> [];
+%% OTP-1805: Test that os:cmd("ls #") works correctly (used to hang).
unix_comment_in_command(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(20)),
- ?line Priv = ?config(priv_dir, Config),
- ?line ok = file:set_cwd(Priv),
- ?line _ = os:cmd("ls #"), % Any result is ok.
- ?t:sleep(5),
- ?line [] = receive_all(),
- ?line test_server:timetrap_cancel(Dog),
+ Priv = proplists:get_value(priv_dir, Config),
+ ok = file:set_cwd(Priv),
+ _ = os:cmd("ls #"), % Any result is ok.
+ ct:sleep(5),
+ [] = receive_all(),
ok.
-deep_list_command(doc) ->
- "Check that a deep list in command works equally on unix and on windows.";
-deep_list_command(suite) -> [];
+%% Check that a deep list in command works equally on unix and on windows.
deep_list_command(Config) when is_list(Config) ->
%% As a 'io_lib' module description says: "There is no guarantee that the
%% character lists returned from some of the functions are flat, they can
@@ -267,50 +282,101 @@ deep_list_command(Config) when is_list(Config) ->
%% FYI: [$e, $c, "ho"] =:= io_lib:format("ec~s", ["ho"])
ok.
+%% Test to make sure that the correct data is
+%% received when a command produces a large amount of output.
+large_output_command(Config) when is_list(Config) ->
+ %% Maximum allowed on windows is 8192, so we test well below that
+ AAA = lists:duplicate(7000, $a),
+ comp(AAA,os:cmd("echo " ++ AAA)).
--define(EVIL_PROCS, 100).
--define(EVIL_LOOPS, 100).
--define(PORT_CREATOR, os_cmd_port_creator).
-evil(Config) when is_list(Config) ->
- Dog = test_server:timetrap(test_server:minutes(5)),
- Parent = self(),
- Ps = lists:map(fun (N) ->
- spawn_link(fun () ->
- evil_loop(Parent, ?EVIL_LOOPS,N)
- end)
- end, lists:seq(1, ?EVIL_PROCS)),
- Devil = spawn_link(fun () -> devil(hd(Ps), hd(lists:reverse(Ps))) end),
- lists:foreach(fun (P) -> receive {P, done} -> ok end end, Ps),
- unlink(Devil),
- exit(Devil, kill),
- test_server:timetrap_cancel(Dog),
- ok.
+%% Test that it is possible on unix to start a background task using os:cmd.
+background_command() ->
+ [{timetrap, {seconds, 5}}].
+background_command(_Config) ->
+    %% This test case fails if the os:cmd call takes
+    %% longer than the 5 second timeout.
+ os:cmd("sleep 10&").
+
+%% Test that messages do not leak to the calling process
+message_leak(_Config) ->
+ process_flag(trap_exit, true),
+
+ os:cmd("echo hello"),
+ [] = receive_all(),
+
+ case os:type() of
+ {unix, _} ->
+ os:cmd("for i in $(seq 1 100); do echo hello; done&"),
+ [] = receive_all();
+ _ ->
+ ok % Cannot background on non-unix
+ end,
+
+ process_flag(trap_exit, false).
+
+%% Test that os:cmd closes stdin of the program that is executed
+close_stdin() ->
+ [{timetrap, {seconds, 5}}].
+close_stdin(Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ Fds = filename:join(DataDir, "my_fds"),
+
+ "-1" = os:cmd(Fds).
+
+max_size_command(_Config) ->
+
+ Res20 = os:cmd("cat /dev/zero", #{ max_size => 20 }),
+ 20 = length(Res20),
+
+ Res0 = os:cmd("cat /dev/zero", #{ max_size => 0 }),
+ 0 = length(Res0),
+
+ Res32768 = os:cmd("cat /dev/zero", #{ max_size => 32768 }),
+ 32768 = length(Res32768),
+
+ ResHello = string:trim(os:cmd("echo hello", #{ max_size => 20 })),
+ 5 = length(ResHello).
+
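max_size_command/1 exercises the max_size option that os:cmd/2 accepts in its option map, capping how many bytes of output are kept. A small sketch of using it to read only the head of a potentially huge command output; the command and the 1024-byte limit are illustrative, and a Unix shell is assumed:

    %% Return at most Limit bytes of the command's output.
    head_of_output(Cmd, Limit) ->
        os:cmd(Cmd, #{max_size => Limit}).

    %% e.g. head_of_output("ls -lR /", 1024) never returns more than 1 kB.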
+%% Test that the os:perf_counter api works as expected
+perf_counter_api(_Config) ->
+
+ true = is_integer(os:perf_counter()),
+ true = os:perf_counter() > 0,
+
+ Conv = fun(T1, T2) ->
+ erlang:convert_time_unit(T2 - T1, perf_counter, nanosecond)
+ end,
+
+ do_perf_counter_test([], Conv, 120000000, 80000000),
+ do_perf_counter_test([1000], fun(T1, T2) -> T2 - T1 end, 120, 80).
+
+do_perf_counter_test(CntArgs, Conv, Upper, Lower) ->
+    %% We run the test multiple times to try to get a reasonably
+    %% stable value. What does this test? That
+    %% calculate_perf_counter_unit in sys_time.c works reasonably well.
+ do_perf_counter_test(CntArgs, Conv, Upper, Lower, 10).
+
+do_perf_counter_test(CntArgs, _Conv, Upper, Lower, 0) ->
+ ct:fail("perf_counter_test ~p ~p ~p",[CntArgs, Upper, Lower]);
+do_perf_counter_test(CntArgs, Conv, Upper, Lower, Iters) ->
+
+ T1 = apply(os, perf_counter, CntArgs),
+ timer:sleep(100),
+ T2 = apply(os, perf_counter, CntArgs),
+ TsDiff = Conv(T1, T2),
+ ct:log("T1: ~p~n"
+ "T2: ~p~n"
+ "TsDiff: ~p~n",
+ [T1,T2,TsDiff]),
+
+ if
+ TsDiff < Upper, TsDiff > Lower ->
+ ok;
+ true ->
+ do_perf_counter_test(CntArgs, Conv, Upper, Lower, Iters-1)
+ end.
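do_perf_counter_test/4 checks that a ~100 ms sleep, measured with os:perf_counter and converted with erlang:convert_time_unit/3, lands between the given bounds. A minimal sketch of timing an arbitrary function the same way; the choice of microseconds as the result unit is an illustrative assumption:

    %% Measure the duration of Fun() in microseconds using the
    %% perf_counter time source.
    time_us(Fun) ->
        T1 = os:perf_counter(),
        Result = Fun(),
        T2 = os:perf_counter(),
        {erlang:convert_time_unit(T2 - T1, perf_counter, microsecond), Result}.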
-devil(P1, P2) ->
- erlang:display({?PORT_CREATOR, whereis(?PORT_CREATOR)}),
- (catch ?PORT_CREATOR ! lists:seq(1,1000000)),
- (catch ?PORT_CREATOR ! lists:seq(1,666)),
- (catch ?PORT_CREATOR ! grrrrrrrrrrrrrrrr),
- (catch ?PORT_CREATOR ! {'EXIT', P1, buhuuu}),
- (catch ?PORT_CREATOR ! {'EXIT', hd(erlang:ports()), buhuuu}),
- (catch ?PORT_CREATOR ! {'EXIT', P2, arggggggg}),
- receive after 500 -> ok end,
- (catch exit(whereis(?PORT_CREATOR), kill)),
- (catch ?PORT_CREATOR ! ">8|"),
- receive after 500 -> ok end,
- (catch exit(whereis(?PORT_CREATOR), diiiiiiiiiiiiiiiiiiiie)),
- receive after 100 -> ok end,
- devil(P1, P2).
-
-evil_loop(Parent, Loops, N) ->
- Res = integer_to_list(N),
- evil_loop(Parent, Loops, Res, "echo " ++ Res).
-
-evil_loop(Parent, 0, _Res, _Cmd) ->
- Parent ! {self(), done};
-evil_loop(Parent, Loops, Res, Cmd) ->
- comp(Res, os:cmd(Cmd)),
- evil_loop(Parent, Loops-1, Res, Cmd).
+%% Util functions
comp(Expected, Got) ->
case strip_nl(Got) of
@@ -319,10 +385,10 @@ comp(Expected, Got) ->
Other ->
ok = io:format("Expected: ~ts\n", [Expected]),
ok = io:format("Got: ~ts\n", [Other]),
- test_server:fail()
+ ct:fail(failed)
end.
-%% Like lib:nonl/1, but strips \r as well as \n.
+%% Strips \n and \r\n from the end of the string.
strip_nl([$\r, $\n]) -> [];
strip_nl([$\n]) -> [];
diff --git a/lib/kernel/test/os_SUITE_data/Makefile.src b/lib/kernel/test/os_SUITE_data/Makefile.src
index 912d0cbcb1..f83f781411 100644
--- a/lib/kernel/test/os_SUITE_data/Makefile.src
+++ b/lib/kernel/test/os_SUITE_data/Makefile.src
@@ -3,7 +3,7 @@ LD = @LD@
CFLAGS = @CFLAGS@ -I@erl_include@ @DEFS@
CROSSLDFLAGS = @CROSSLDFLAGS@
-PROGS = my_echo@exe@
+PROGS = my_echo@exe@ my_fds@exe@
all: $(PROGS)
@@ -12,3 +12,9 @@ my_echo@exe@: my_echo@obj@
my_echo@obj@: my_echo.c
$(CC) -c -o my_echo@obj@ $(CFLAGS) my_echo.c
+
+my_fds@exe@: my_fds@obj@
+ $(LD) $(CROSSLDFLAGS) -o my_fds my_fds@obj@ @LIBS@
+
+my_fds@obj@: my_fds.c
+ $(CC) -c -o my_fds@obj@ $(CFLAGS) my_fds.c
diff --git a/lib/kernel/test/os_SUITE_data/my_fds.c b/lib/kernel/test/os_SUITE_data/my_fds.c
new file mode 100644
index 0000000000..704a4d1e1d
--- /dev/null
+++ b/lib/kernel/test/os_SUITE_data/my_fds.c
@@ -0,0 +1,9 @@
+#include <stdio.h>
+#include <unistd.h>
+int
+main(int argc, char** argv)
+{
+ char buff[1];
+    int res = read(fileno(stdin), buff, 1);
+ printf("%d", res);
+}
diff --git a/lib/kernel/test/pdict_SUITE.erl b/lib/kernel/test/pdict_SUITE.erl
index 6de4ff9f77..3685e51c10 100644
--- a/lib/kernel/test/pdict_SUITE.erl
+++ b/lib/kernel/test/pdict_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -18,10 +18,9 @@
%% %CopyrightEnd%
%%
-module(pdict_SUITE).
-%% NB: The ?line macro cannot be used when testing the dictionary.
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-define(M(A,B),m(A,B,?MODULE,?LINE)).
-ifdef(DEBUG).
@@ -32,22 +31,30 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
+ mixed/1,
+ literals/1,
+ destructive/1,
simple/1, complicated/1, heavy/1, simple_all_keys/1, info/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([other_process/2]).
+-export([put_do/2, get_do/1, erase_do/1]).
+
init_per_testcase(_Case, Config) ->
- ?line Dog = ?t:timetrap(test_server:minutes(10)),
- [{watchdog, Dog} | Config].
-end_per_testcase(_Case, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog),
+ Config.
+
+end_per_testcase(_Case, _Config) ->
ok.
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
- [simple, complicated, heavy, simple_all_keys, info].
+ [simple, complicated, heavy, simple_all_keys, info,
+ literals,
+ destructive,
+ mixed].
groups() ->
[].
@@ -65,10 +72,7 @@ end_per_group(_GroupName, Config) ->
Config.
-simple(doc) ->
- ["Tests simple functionality in process dictionary."];
-simple(suite) ->
- [];
+%% Tests simple functionality in process dictionary.
simple(Config) when is_list(Config) ->
XX = get(),
ok = match_keys(XX),
@@ -109,7 +113,7 @@ complicated(Config) when is_list(Config) ->
Previous = get(),
ok = match_keys(Previous),
Previous = erase(),
- N = case ?t:is_debug() of
+ N = case test_server:is_debug() of
false -> 500000;
true -> 5000
end,
@@ -143,10 +147,7 @@ comp_4([{{key,_}=K,{value,_}=Val}|T]) ->
comp_4(T);
comp_4([]) -> ok.
-heavy(doc) ->
- ["Tests heavy usage of the process dictionary"];
-heavy(suite) ->
- [];
+%% Tests heavy usage of the process dictionary.
heavy(Config) when is_list(Config) ->
XX = get(),
erase(),
@@ -156,7 +157,7 @@ heavy(Config) when is_list(Config) ->
?M([],get()),
time(5000),
?M([],get()),
- case {os:type(),?t:is_debug()} of
+ case {os:type(),test_server:is_debug()} of
{_,true} -> ok;
_ ->
time(50000),
@@ -185,10 +186,7 @@ simple_all_keys_del_loop([K|Ks]) ->
ok = match_keys(get()),
simple_all_keys_del_loop(Ks).
-info(doc) ->
- ["Tests process_info(Pid, dictionary)"];
-info(suite) ->
- [];
+%% Tests process_info(Pid, dictionary).
info(Config) when is_list(Config) ->
L = [a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,
q,r,s,t,u,v,x,y,z,'A','B','C','D'],
@@ -369,3 +367,146 @@ match_keys(All) ->
Ks = lists:sort([K||{K,_}<-All]),
Ks = lists:sort(erlang:get_keys()),
ok.
+
+
+%% Test that the destructive put optimization of immediate values
+%% does not affect get/0 or process_info.
+destructive(_Config) ->
+ Keys = lists:seq(1,100),
+ [put(Key, 17) || Key <- Keys],
+ Get1 = get(),
+ {dictionary,PI1} = process_info(self(), dictionary),
+
+ [begin
+ {Key, 17} = lists:keyfind(Key, 1, Get1),
+ {Key, 17} = lists:keyfind(Key, 1, PI1)
+ end
+ || Key <- Keys],
+
+ [17 = put(Key, 42) || Key <- Keys], % Mutate
+
+ Get2 = get(),
+ {dictionary,PI2} = process_info(self(), dictionary),
+
+ [begin
+ {Key, 17} = lists:keyfind(Key, 1, Get1),
+ {Key, 17} = lists:keyfind(Key, 1, PI1),
+ {Key, 42} = lists:keyfind(Key, 1, Get2),
+ {Key, 42} = lists:keyfind(Key, 1, PI2)
+
+ end
+ || Key <- Keys],
+
+ ok.
+
+%% Do random mixed put/erase to test grow/shrink
+%% Written to exercise a bug that existed in the GC during shrink.
+mixed(_Config) ->
+ Rand0 = rand:seed_s(exsplus),
+ io:format("Random seed = ~p\n\n", [rand:export_seed_s(Rand0)]),
+
+ erts_debug:set_internal_state(available_internal_state, true),
+ try
+ C = do_mixed([10,0,100,50,1000,500,600,100,150,1,11,2,30,0],
+ 0,
+ array:new(),
+ 1,
+ Rand0),
+ io:format("\nDid total of ~p operations\n", [C])
+ after
+ erts_debug:set_internal_state(available_internal_state, false)
+ end.
+
+do_mixed([], _, _, C, _) ->
+ C;
+do_mixed([GoalN | Tail], GoalN, Array, C, Rand0) ->
+ io:format("Reached goal of ~p keys in dict after ~p mixed ops\n",[GoalN, C]),
+ GoalN = array:size(Array),
+ do_mixed(Tail, GoalN, Array, C, Rand0);
+do_mixed([GoalN | _]=Goals, CurrN, Array0, C, Rand0) ->
+ CurrN = array:size(Array0),
+ GrowPercent = case GoalN > CurrN of
+ true when CurrN == 0 -> 100;
+ true -> 75;
+ false -> 25
+ end,
+ {R, Rand1} = rand:uniform_s(100, Rand0),
+ case R of
+ _ when R =< GrowPercent -> %%%%%%%%%%%%% GROW
+ {Key, Rand2} = rand:uniform_s(10000, Rand1),
+ case put(Key, {Key,C}) of
+ undefined ->
+ Array1 = array:set(CurrN, Key, Array0),
+ do_mixed(Goals, CurrN+1, Array1, C+1, Rand2);
+ _ ->
+ do_mixed(Goals, CurrN, Array0, C+1, Rand2)
+ end;
+
+ _ -> %%%%%%%%%% SHRINK
+ {Kix, Rand2} = rand:uniform_s(CurrN, Rand1),
+ Key = array:get(Kix-1, Array0),
+
+ %% provoke GC during shrink
+ erts_debug:set_internal_state(fill_heap, true),
+
+ {Key, _} = erase(Key),
+ Array1 = array:set(Kix-1, array:get(CurrN-1, Array0), Array0),
+ Array2 = array:resize(CurrN-1, Array1),
+ do_mixed(Goals, CurrN-1, Array2, C+1, Rand2)
+ end.
+
+%% Test hash precalculation of literal keys
+literals(_Config) ->
+ %% Put literal -> get variable
+ put(1742, "1742"),
+ "1742" = ?MODULE:get_do(1742),
+ "1742" = ?MODULE:erase_do(1742),
+
+ put(-1742, "-1742"),
+ "-1742" = ?MODULE:get_do(-1742),
+ "-1742" = ?MODULE:erase_do(-1742),
+
+ put([], "NIL"),
+ "NIL" = ?MODULE:get_do([]),
+ "NIL" = ?MODULE:erase_do([]),
+
+ put(<<"binary">>, "binary"),
+ "binary" = ?MODULE:get_do(<<"binary">>),
+ "binary" = ?MODULE:erase_do(<<"binary">>),
+
+ BigBin = <<"A large binary with a lot of bytes to make it go off heap as shared and reference counted">>,
+ put(BigBin, "bigbin"),
+ "bigbin" = ?MODULE:get_do(BigBin),
+ "bigbin" = ?MODULE:erase_do(BigBin),
+
+ %% Put variable -> get literal
+ ?MODULE:put_do(4217, "4217"),
+ "4217" = get(4217),
+ "4217" = erase(4217),
+
+ ?MODULE:put_do(-4217, "-4217"),
+ "-4217" = get(-4217),
+ "-4217" = erase(-4217),
+
+ ?MODULE:put_do([], "NIL"),
+ "NIL" = get([]),
+ "NIL" = erase([]),
+
+ ?MODULE:put_do(<<"bytes">>, "bytes"),
+ "bytes" = get(<<"bytes">>),
+ "bytes" = erase(<<"bytes">>),
+
+ ?MODULE:put_do(BigBin, "BigBin"),
+ "BigBin" = get(BigBin),
+ "BigBin" = erase(BigBin),
+
+ ok.
+
+put_do(K, V) ->
+ put(K, V).
+
+get_do(K) ->
+ get(K).
+
+erase_do(K) ->
+ erase(K).
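destructive/1 and literals/1 both compare the result of get/0 with process_info(Pid, dictionary). A minimal sketch of inspecting another process's dictionary the same way; the spawned function and the key are illustrative assumptions:

    peek_dictionary() ->
        Parent = self(),
        Pid = spawn(fun() ->
                            put(answer, 42),
                            Parent ! ready,
                            receive stop -> ok end
                    end),
        receive ready -> ok end,
        {dictionary, D} = process_info(Pid, dictionary),
        {answer, 42} = lists:keyfind(answer, 1, D),
        Pid ! stop,
        ok.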
diff --git a/lib/kernel/test/pg2_SUITE.erl b/lib/kernel/test/pg2_SUITE.erl
index 832d2d1c27..acecd34ead 100644
--- a/lib/kernel/test/pg2_SUITE.erl
+++ b/lib/kernel/test/pg2_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,9 +21,9 @@
%%-----------------------------------------------------------------
-module(pg2_SUITE).
--include_lib("test_server/include/test_server.hrl").
--define(datadir, ?config(data_dir, Config)).
--define(privdir, ?config(priv_dir, Config)).
+-include_lib("common_test/include/ct.hrl").
+-define(datadir, proplists:get_value(data_dir, Config)).
+-define(privdir, proplists:get_value(priv_dir, Config)).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
@@ -31,36 +31,32 @@
-export([
otp_7277/1, otp_8259/1, otp_8653/1,
- compat/1, basic/1]).
-
-% Default timetrap timeout (set in init_per_testcase).
--define(default_timeout, ?t:minutes(1)).
+ basic/1]).
-define(TESTCASE, testcase_name).
--define(testcase, ?config(?TESTCASE, Config)).
+-define(testcase, proplists:get_value(?TESTCASE, Config)).
%% Internal export.
-export([mk_part_node_and_group/3, part2/4,
mk_part_node/3, part1/5, p_init/3, start_proc/1, sane/0]).
init_per_testcase(Case, Config) ->
- ?line Dog = ?t:timetrap(?default_timeout),
- [{?TESTCASE, Case}, {watchdog, Dog} | Config].
+ [{?TESTCASE, Case}| Config].
end_per_testcase(_Case, _Config) ->
test_server_ctrl:kill_slavenodes(),
- Dog = ?config(watchdog, _Config),
- test_server:timetrap_cancel(Dog),
ok.
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[{group, tickets}].
groups() ->
[{tickets, [],
- [otp_7277, otp_8259, otp_8653, compat, basic]}].
+ [otp_7277, otp_8259, otp_8653, basic]}].
init_per_suite(Config) ->
Config.
@@ -76,67 +72,59 @@ end_per_group(_GroupName, Config) ->
-otp_7277(doc) ->
- "OTP-7277. Bugfix leave().";
-otp_7277(suite) -> [];
+%% OTP-7277. Bugfix leave().
otp_7277(Config) when is_list(Config) ->
- ?line ok = pg2:create(a),
- ?line ok = pg2:create(b),
+ ok = pg2:create(a),
+ ok = pg2:create(b),
P = spawn(forever()),
- ?line ok = pg2:join(a, P),
- ?line ok = pg2:leave(b, P),
- ?line true = exit(P, kill),
+ ok = pg2:join(a, P),
+ ok = pg2:leave(b, P),
+ true = exit(P, kill),
case {pg2:get_members(a), pg2:get_local_members(a)} of
{[], []} ->
ok;
_ ->
timer:sleep(100),
- ?line [] = pg2:get_members(a),
- ?line [] = pg2:get_local_members(a)
+ [] = pg2:get_members(a),
+ [] = pg2:get_local_members(a)
end,
- ?line _ = pg2:delete(a),
- ?line _ = pg2:delete(b),
+ _ = pg2:delete(a),
+ _ = pg2:delete(b),
ok.
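otp_7277 exercises pg2 create/join/leave/get_members on a single node. A minimal sketch of the same calls in isolation; the group name my_group is an illustrative assumption:

    pg2_demo() ->
        ok = pg2:create(my_group),
        Self = self(),
        ok = pg2:join(my_group, Self),
        [Self] = pg2:get_members(my_group),
        [Self] = pg2:get_local_members(my_group),
        ok = pg2:leave(my_group, Self),
        [] = pg2:get_members(my_group),
        ok = pg2:delete(my_group),
        ok.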
-define(UNTIL(Seq), loop_until_true(fun() -> Seq end, Config)).
-define(UNTIL_LOOP, 300).
-otp_8653(suite) -> [];
-otp_8653(doc) ->
- ["OTP-8259. Member was not removed after being killed."];
+%% OTP-8259. Member was not removed after being killed.
otp_8653(Config) when is_list(Config) ->
- Timeout = 15,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
-
- ?line [A, B, C] = start_nodes([a, b, c], peer, Config),
+ [A, B, C] = start_nodes([a, b, c], peer, Config),
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
- % make b and c connected, partitioned from node() and a
- ?line rpc_cast(B, ?MODULE, part2, [Config, node(), A, C]),
- ?line ?UNTIL(is_ready_partition(Config)),
+ %% make b and c connected, partitioned from node() and a
+ rpc_cast(B, ?MODULE, part2, [Config, node(), A, C]),
+ ?UNTIL(is_ready_partition(Config)),
- % Connect to the other partition.
- ?line pong = net_adm:ping(B),
+ %% Connect to the other partition.
+ pong = net_adm:ping(B),
timer:sleep(100),
- ?line pong = net_adm:ping(C),
- ?line _ = global:sync(),
- ?line [A, B, C] = lists:sort(nodes()),
+ pong = net_adm:ping(C),
+ _ = global:sync(),
+ [A, B, C] = lists:sort(nodes()),
G = pg2_otp_8653,
- ?line ?UNTIL(begin
- GA = lists:sort(rpc:call(A, pg2, get_members, [G])),
- GB = lists:sort(rpc:call(B, pg2, get_members, [G])),
- GC = lists:sort(rpc:call(C, pg2, get_members, [G])),
- GT = lists:sort(pg2:get_members(G)),
- GA =:= GB andalso
- GB =:= GC andalso
- GC =:= GT andalso
- 8 =:= length(GA)
- end),
- ?line ok = pg2:delete(G),
- ?line stop_nodes([A,B,C]),
- ?line test_server:timetrap_cancel(Dog),
+ ?UNTIL(begin
+ GA = lists:sort(rpc:call(A, pg2, get_members, [G])),
+ GB = lists:sort(rpc:call(B, pg2, get_members, [G])),
+ GC = lists:sort(rpc:call(C, pg2, get_members, [G])),
+ GT = lists:sort(pg2:get_members(G)),
+ GA =:= GB andalso
+ GB =:= GC andalso
+ GC =:= GT andalso
+ 8 =:= length(GA)
+ end),
+ ok = pg2:delete(G),
+ stop_nodes([A,B,C]),
ok.
part2(Config, Main, A, C) ->
@@ -158,54 +146,48 @@ mk_part_node_and_group(File, MyPart0, Config) ->
_ = [ok = pg2:join(G, Pid) || _ <- [1,1]],
touch(File, "done").
-otp_8259(suite) -> [];
-otp_8259(doc) ->
- ["OTP-8259. Member was not removed after being killed."];
+%% OTP-8259. Member was not removed after being killed.
otp_8259(Config) when is_list(Config) ->
- Timeout = 15,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ [A, B, C] = start_nodes([a, b, c], peer, Config),
- ?line [A, B, C] = start_nodes([a, b, c], peer, Config),
-
- ?line wait_for_ready_net(Config),
+ wait_for_ready_net(Config),
G = pg2_otp_8259,
Name = otp_8259_a_global_name,
- % start different processes in both partitions
- ?line {Pid, yes} = rpc:call(A, ?MODULE, start_proc, [Name]),
+ %% start different processes in both partitions
+ {Pid, yes} = rpc:call(A, ?MODULE, start_proc, [Name]),
- ?line ok = pg2:create(G),
- ?line ok = pg2:join(G, Pid),
+ ok = pg2:create(G),
+ ok = pg2:join(G, Pid),
- % make b and c connected, partitioned from node() and a
- ?line rpc_cast(B, ?MODULE, part1, [Config, node(), A, C, Name]),
- ?line ?UNTIL(is_ready_partition(Config)),
+ %% make b and c connected, partitioned from node() and a
+ rpc_cast(B, ?MODULE, part1, [Config, node(), A, C, Name]),
+ ?UNTIL(is_ready_partition(Config)),
- % Connect to the other partition.
- % The resolver on node b will be called.
- ?line pong = net_adm:ping(B),
+ %% Connect to the other partition.
+ %% The resolver on node b will be called.
+ pong = net_adm:ping(B),
timer:sleep(100),
- ?line pong = net_adm:ping(C),
- ?line _ = global:sync(),
- ?line [A, B, C] = lists:sort(nodes()),
+ pong = net_adm:ping(C),
+ _ = global:sync(),
+ [A, B, C] = lists:sort(nodes()),
%% Pid has been killed by the resolver.
%% Pid has been removed from pg2 on all nodes, in particular node B.
- ?line ?UNTIL([] =:= rpc:call(B, pg2, get_members, [G])),
- ?line ?UNTIL([] =:= pg2:get_members(G)),
- ?line ?UNTIL([] =:= rpc:call(A, pg2, get_members, [G])),
- ?line ?UNTIL([] =:= rpc:call(C, pg2, get_members, [G])),
-
- ?line ok = pg2:delete(G),
- ?line stop_nodes([A,B,C]),
- ?line test_server:timetrap_cancel(Dog),
+ ?UNTIL([] =:= rpc:call(B, pg2, get_members, [G])),
+ ?UNTIL([] =:= pg2:get_members(G)),
+ ?UNTIL([] =:= rpc:call(A, pg2, get_members, [G])),
+ ?UNTIL([] =:= rpc:call(C, pg2, get_members, [G])),
+
+ ok = pg2:delete(G),
+ stop_nodes([A,B,C]),
ok.
part1(Config, Main, A, C, Name) ->
case catch begin
make_partition(Config, [Main, A], [node(), C]),
- ?line {_Pid, yes} = start_proc(Name)
+ {_Pid, yes} = start_proc(Name)
end of
{_, yes} -> ok
end.
@@ -236,39 +218,10 @@ loop() ->
exit(normal)
end.
-compat(suite) -> [];
-compat(doc) ->
- ["OTP-8259. Check that 'exchange' and 'del_member' work."];
-compat(Config) when is_list(Config) ->
- case ?t:is_release_available("r13b") of
- true ->
- Timeout = 15,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
- Pid = spawn(forever()),
- G = a,
- ?line ok = pg2:create(G),
- ?line ok = pg2:join(G, Pid),
- ?line ok = pg2:join(G, Pid),
- ?line {ok, A} = start_node_rel(r13, r13b, slave),
- ?line pong = net_adm:ping(A),
- ?line wait_for_ready_net(Config),
- ?line {ok, _} = rpc:call(A, pg2, start, []),
- ?line ?UNTIL([Pid,Pid] =:= rpc:call(A, pg2, get_members, [a])),
- ?line true = exit(Pid, kill),
- ?line ?UNTIL([] =:= pg2:get_members(a)),
- ?line ?UNTIL([] =:= rpc:call(A, pg2, get_members, [a])),
- ?t:stop_node(A),
- ?line test_server:timetrap_cancel(Dog);
- false ->
- {skipped, "No support for old node"}
- end.
-
-basic(suite) -> [];
-basic(doc) ->
- ["OTP-8259. Some basic tests."];
+%% OTP-8259. Some basic tests.
basic(Config) when is_list(Config) ->
_ = [pg2:delete(G) || G <- pg2:which_groups()],
- ?line _ = [do(Cs, T, Config) || {T,Cs} <- ts()],
+ _ = [do(Cs, T, Config) || {T,Cs} <- ts()],
ok.
ts() ->
@@ -373,7 +326,7 @@ ts() ->
].
do(Cs, T, Config) ->
- ?t:format("*** Test ~p ***~n", [T]),
+ io:format("*** Test ~p ***~n", [T]),
{ok,T} = (catch {do(Cs, [], [], Config),T}).
do([{nodeup,N} | Cs], Ps, Ns, Config) ->
@@ -425,7 +378,7 @@ doit(N, C, Ps, Ns) ->
Result when Result =:= R orelse R =:= ignore ->
sane(Ns);
Else ->
- ?t:format("~p and ~p: expected ~p, but got ~p~n",
+ io:format("~p and ~p: expected ~p, but got ~p~n",
[F, As, R, Else]),
throw({error,{F, As, R, Else}})
end.
@@ -446,8 +399,8 @@ killit(N, P, Ps, Ns) ->
lists:keydelete(P, 1, Ps).
pr(Node, C) ->
- _ = [?t:format("~p: ", [Node]) || Node =/= node()],
- ?t:format("do ~p~n", [C]).
+ _ = [io:format("~p: ", [Node]) || Node =/= node()],
+ io:format("do ~p~n", [C]).
get_node(N, Ns) ->
if
@@ -475,7 +428,7 @@ replace_pids(A, Ps) ->
sane(Ns) ->
Nodes = [node()] ++ [NN || {_,NN} <- Ns],
- _ = [?t:format("~p, pg2_table:~n ~p~n", % debug
+ _ = [io:format("~p, pg2_table:~n ~p~n", % debug
[N, rpc:call(N, ets, tab2list, [pg2_table])]) ||
N <- Nodes],
R = [case rpc:call(Node, ?MODULE, sane, []) of
@@ -509,7 +462,7 @@ wsane(Ns) ->
Pid when is_pid(Pid), node(Pid) =:= N ->
true =
lists:member(Pid, rpc:call(N, pg2, get_local_members, [G]));
-%% FIXME. Om annan nod: member, local = [].
+                 %% FIXME. If on another node: member, local = [].
_ -> [] = rpc:call(N, pg2, get_local_members, [G])
end || N <- Ns]
|| G <- pg2:which_groups()].
@@ -558,21 +511,21 @@ start_node_rel(Name, Rel, How) ->
Rel when is_atom(Rel) ->
{[{release, atom_to_list(Rel)}], ""};
RelList ->
- {RelList, ""}
- end,
- ?line Pa = filename:dirname(code:which(?MODULE)),
- ?line Res = test_server:start_node(Name, How,
- [{args,
- Compat ++
- " -kernel net_setuptime 100 "
- " -pa " ++ Pa},
- {erl, Release}]),
+ {RelList, ""}
+ end,
+ Pa = filename:dirname(code:which(?MODULE)),
+ Res = test_server:start_node(Name, How,
+ [{args,
+ Compat ++
+ " -kernel net_setuptime 100 "
+ " -pa " ++ Pa},
+ {erl, Release}]),
Res.
start_nodes(L, How, Config) ->
start_nodes2(L, How, 0, Config),
Nodes = collect_nodes(0, length(L)),
- ?line ?UNTIL([] =:= Nodes -- nodes()),
+ ?UNTIL([] =:= Nodes -- nodes()),
%% Pinging doesn't help, we have to wait too, for nodes() to become
%% correct on the other node.
lists:foreach(fun(E) ->
@@ -588,7 +541,7 @@ verify_nodes(Nodes, Config) ->
verify_nodes([], _N, _Config) ->
[];
verify_nodes([Node | Rest], N, Config) ->
- ?line ?UNTIL(
+ ?UNTIL(
case rpc:call(Node, erlang, nodes, []) of
Nodes when is_list(Nodes) ->
case N =:= lists:sort([Node | Nodes]) of
@@ -620,7 +573,7 @@ start_nodes2([Name | Rest], How, N, Config) ->
Self ! {N, R},
%% sleeping is necessary, or with peer nodes, they will
%% go down again, despite {linked, false}.
- test_server:sleep(100000)
+ ct:sleep(100000)
end),
start_nodes2(Rest, How, N+1, Config).
@@ -640,7 +593,7 @@ start_node(Name0, How, Args, Config) ->
Pa = filename:dirname(code:which(?MODULE)),
test_server:start_node(Name, How, [{args,
Args ++ " " ++
- "-kernel net_setuptime 100 "
+ "-kernel net_setuptime 100 "
"-noshell "
"-pa " ++ Pa},
{linked, false}]).
@@ -648,7 +601,7 @@ stop_nodes(Nodes) ->
lists:foreach(fun(Node) -> stop_node(Node) end, Nodes).
stop_node(Node) ->
- ?t:stop_node(Node).
+ test_server:stop_node(Node).
get_known(Node) ->
case catch gen_server:call({global_name_server,Node},get_known,infinity) of
@@ -672,7 +625,7 @@ make_partition(Config, Part1, Part2) ->
make_partition(Config, Part1, Part2, mk_part_node).
make_partition(Config, Part1, Part2, Function) ->
- Dir = ?config(priv_dir, Config),
+ Dir = proplists:get_value(priv_dir, Config),
Ns = [begin
Name = lists:concat([atom_to_list(N),"_",msec(),".part"]),
File = filename:join([Dir, Name]),
@@ -726,13 +679,13 @@ wait_for_ready_net(Config) ->
wait_for_ready_net(Nodes0, Config) ->
Nodes = lists:sort(Nodes0),
- ?t:format("wait_for_ready_net ~p~n", [Nodes]),
+ io:format("wait_for_ready_net ~p~n", [Nodes]),
?UNTIL(begin
lists:all(fun(N) -> Nodes =:= get_known(N) end, Nodes) and
- lists:all(fun(N) ->
- LNs = rpc:call(N, erlang, nodes, []),
- Nodes =:= lists:sort([N | LNs])
- end, Nodes)
+ lists:all(fun(N) ->
+ LNs = rpc:call(N, erlang, nodes, []),
+ Nodes =:= lists:sort([N | LNs])
+ end, Nodes)
end).
%% To make it less probable that some low-level problem causes
@@ -788,11 +741,11 @@ file_contents(File, ContentsList, Config, LogFile) ->
end).
make_partition_file(Config) ->
- Dir = ?config(priv_dir, Config),
+ Dir = proplists:get_value(priv_dir, Config),
filename:join([Dir, atom_to_list(make_partition_done)]).
msec() ->
msec(now()).
msec(T) ->
- element(1,T)*1000000000 + element(2,T)*1000 + element(3,T) div 1000.
+ element(1,T)*1000000000 + element(2,T)*1000 + element(3,T) div 1000.
diff --git a/lib/kernel/test/prim_file_SUITE.erl b/lib/kernel/test/prim_file_SUITE.erl
index 3e6d8492f7..2f465a15bc 100644
--- a/lib/kernel/test/prim_file_SUITE.erl
+++ b/lib/kernel/test/prim_file_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2014. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,40 +19,25 @@
%%
-module(prim_file_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
- init_per_group/2,end_per_group/2,
- read_write_file/1]).
--export([cur_dir_0a/1, cur_dir_0b/1,
- cur_dir_1a/1, cur_dir_1b/1,
- make_del_dir_a/1, make_del_dir_b/1,
- pos1/1, pos2/1]).
--export([close/1,
- delete_a/1, delete_b/1]).
--export([ open1/1, modes/1]).
--export([
- file_info_basic_file_a/1, file_info_basic_file_b/1,
- file_info_basic_directory_a/1, file_info_basic_directory_b/1,
- file_info_bad_a/1, file_info_bad_b/1,
- file_info_times_a/1, file_info_times_b/1,
- file_write_file_info_a/1, file_write_file_info_b/1,
- file_read_file_info_opts/1, file_write_file_info_opts/1,
- file_write_read_file_info_opts/1
- ]).
--export([rename_a/1, rename_b/1,
- access/1, truncate/1, datasync/1, sync/1,
+ init_per_group/2,end_per_group/2, init_per_testcase/2, end_per_testcase/2,
+ read_write_file/1, free_memory/0]).
+-export([cur_dir_0/1, cur_dir_1/1,
+ make_del_dir/1, pos1/1, pos2/1]).
+-export([close/1, delete/1]).
+-export([open1/1, modes/1]).
+-export([file_info_basic_file/1, file_info_basic_directory/1, file_info_bad/1,
+ file_info_times/1, file_write_file_info/1,
+ file_read_file_info_opts/1, file_write_file_info_opts/1,
+ file_write_read_file_info_opts/1]).
+-export([rename/1, access/1, truncate/1, datasync/1, sync/1,
read_write/1, pread_write/1, append/1, exclusive/1]).
--export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]).
+-export([e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]).
--export([ read_not_really_compressed/1,
- read_compressed/1, write_compressed/1,
- compress_errors/1]).
-
--export([
- make_link_a/1, make_link_b/1,
- read_link_info_for_non_link/1,
- symlinks_a/1, symlinks_b/1,
- list_dir_limit/1,
- list_dir_error/1,
- list_dir/1]).
+-export([make_link/1, read_link_info_for_non_link/1,
+ symlinks/1,
+ list_dir_limit/1,
+ list_dir_error/1,
+ list_dir/1]).
-export([advise/1]).
-export([large_write/1]).
@@ -62,34 +47,21 @@
-export([allocate/1]).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/file.hrl").
-define(PRIM_FILE, prim_file).
-%% Calls ?PRIM_FILE:F with arguments A and an optional handle H
-%% as first argument, unless the handle is [], i.e no handle.
-%% This is a macro to give the compiler and thereby
-%% the cross reference tool the possibility to interprete
-%% the call, since M, F, A (or [H | A]) can all be known at
-%% compile time.
--define(PRIM_FILE_call(F, H, A),
- case H of
- [] -> apply(?PRIM_FILE, F, A);
- _ -> apply(?PRIM_FILE, F, [H | A])
- end).
-
suite() -> [].
all() ->
[read_write_file, {group, dirs}, {group, files},
- delete_a, delete_b, rename_a, rename_b, {group, errors},
- {group, compression}, {group, links}, list_dir_limit, list_dir].
+ delete, rename, {group, errors}, {group, links},
+ list_dir_limit, list_dir].
groups() ->
[{dirs, [],
- [make_del_dir_a, make_del_dir_b, cur_dir_0a, cur_dir_0b,
- cur_dir_1a, cur_dir_1b]},
+ [make_del_dir, cur_dir_0, cur_dir_1]},
{files, [],
[{group, open}, {group, pos}, {group, file_info},
truncate, sync, datasync, advise, large_write, allocate]},
@@ -98,22 +70,26 @@ groups() ->
append, exclusive]},
{pos, [], [pos1, pos2]},
{file_info, [],
- [file_info_basic_file_a, file_info_basic_file_b,
- file_info_basic_directory_a,
- file_info_basic_directory_b, file_info_bad_a,
- file_info_bad_b, file_info_times_a, file_info_times_b,
- file_write_file_info_a, file_write_file_info_b,
- file_read_file_info_opts, file_write_file_info_opts,
- file_write_read_file_info_opts
- ]},
+ [file_info_basic_file,file_info_basic_directory, file_info_bad,
+ file_info_times, file_write_file_info, file_read_file_info_opts,
+ file_write_file_info_opts, file_write_read_file_info_opts
+ ]},
{errors, [],
[e_delete, e_rename, e_make_dir, e_del_dir]},
- {compression, [],
- [read_compressed, read_not_really_compressed,
- write_compressed, compress_errors]},
{links, [],
- [make_link_a, make_link_b, read_link_info_for_non_link,
- symlinks_a, symlinks_b, list_dir_error]}].
+ [make_link, read_link_info_for_non_link, symlinks, list_dir_error]}].
+
+init_per_testcase(large_write, Config) ->
+ {ok, Started} = application:ensure_all_started(os_mon),
+ [{started, Started}|Config];
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(large_write, Config) ->
+ [application:stop(App) || App <- lists:reverse(proplists:get_value(started, Config))],
+ ok;
+end_per_testcase(_, _Config) ->
+ ok.
init_per_group(_GroupName, Config) ->
Config.
@@ -125,7 +101,7 @@ end_per_group(_GroupName, Config) ->
init_per_suite(Config) when is_list(Config) ->
case os:type() of
{win32, _} ->
- Priv = ?config(priv_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
HasAccessTime =
case file:read_file_info(Priv) of
{ok, #file_info{atime={_, {0, 0, 0}}}} ->
@@ -181,114 +157,96 @@ time_dist({_D1, _T1} = DT1, {_D2, _T2} = DT2) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-read_write_file(suite) -> [];
-read_write_file(doc) -> [];
read_write_file(Config) when is_list(Config) ->
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_read_write_file"),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_read_write_file"),
%% Try writing and reading back some term
- ?line SomeTerm = {"This term",{will,be},[written,$t,$o],1,file,[]},
- ?line ok = ?PRIM_FILE:write_file(Name,term_to_binary(SomeTerm)),
- ?line {ok,Bin1} = ?PRIM_FILE:read_file(Name),
- ?line SomeTerm = binary_to_term(Bin1),
-
+ SomeTerm = {"This term",{will,be},[written,$t,$o],1,file,[]},
+ ok = ?PRIM_FILE:write_file(Name,term_to_binary(SomeTerm)),
+ {ok,Bin1} = ?PRIM_FILE:read_file(Name),
+ SomeTerm = binary_to_term(Bin1),
+
%% Try a "null" term
- ?line NullTerm = [],
- ?line ok = ?PRIM_FILE:write_file(Name,term_to_binary(NullTerm)),
- ?line {ok,Bin2} = ?PRIM_FILE:read_file(Name),
- ?line NullTerm = binary_to_term(Bin2),
+ NullTerm = [],
+ ok = ?PRIM_FILE:write_file(Name,term_to_binary(NullTerm)),
+ {ok,Bin2} = ?PRIM_FILE:read_file(Name),
+ NullTerm = binary_to_term(Bin2),
%% Try some "complicated" types
- ?line BigNum = 123456789012345678901234567890,
- ?line ComplTerm = {self(),make_ref(),BigNum,3.14159},
- ?line ok = ?PRIM_FILE:write_file(Name,term_to_binary(ComplTerm)),
- ?line {ok,Bin3} = ?PRIM_FILE:read_file(Name),
- ?line ComplTerm = binary_to_term(Bin3),
+ BigNum = 123456789012345678901234567890,
+ ComplTerm = {self(),make_ref(),BigNum,3.14159},
+ ok = ?PRIM_FILE:write_file(Name,term_to_binary(ComplTerm)),
+ {ok,Bin3} = ?PRIM_FILE:read_file(Name),
+ ComplTerm = binary_to_term(Bin3),
%% Try reading a nonexistent file
- ?line Name2 = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_nonexistent_file"),
- ?line {error, enoent} = ?PRIM_FILE:read_file(Name2),
- ?line {error, enoent} = ?PRIM_FILE:read_file(""),
-
- % Try writing to a bad filename
- ?line {error, enoent} =
+ Name2 = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_nonexistent_file"),
+ {error, enoent} = ?PRIM_FILE:read_file(Name2),
+ {error, enoent} = ?PRIM_FILE:read_file(""),
+
+ %% Try writing to a bad filename
+ {error, enoent} =
?PRIM_FILE:write_file("",term_to_binary(NullTerm)),
- % Try writing something else than a binary
- ?line {error, badarg} = ?PRIM_FILE:write_file(Name,{1,2,3}),
- ?line {error, badarg} = ?PRIM_FILE:write_file(Name,self()),
+ %% Try writing something else than a binary
+ {error, badarg} = ?PRIM_FILE:write_file(Name,{1,2,3}),
+ {error, badarg} = ?PRIM_FILE:write_file(Name,self()),
%% Some non-term binaries
- ?line ok = ?PRIM_FILE:write_file(Name,[]),
- ?line {ok,Bin4} = ?PRIM_FILE:read_file(Name),
- ?line 0 = byte_size(Bin4),
+ ok = ?PRIM_FILE:write_file(Name,[]),
+ {ok,Bin4} = ?PRIM_FILE:read_file(Name),
+ 0 = byte_size(Bin4),
- ?line ok = ?PRIM_FILE:write_file(Name,[Bin1,[],[[Bin2]]]),
- ?line {ok,Bin5} = ?PRIM_FILE:read_file(Name),
- ?line {Bin1,Bin2} = split_binary(Bin5,byte_size(Bin1)),
+ ok = ?PRIM_FILE:write_file(Name,[Bin1,[],[[Bin2]]]),
+ {ok,Bin5} = ?PRIM_FILE:read_file(Name),
+ {Bin1,Bin2} = split_binary(Bin5,byte_size(Bin1)),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-make_del_dir_a(suite) -> [];
-make_del_dir_a(doc) -> [];
-make_del_dir_a(Config) when is_list(Config) ->
- make_del_dir(Config, [], "_a").
-
-make_del_dir_b(suite) -> [];
-make_del_dir_b(doc) -> [];
-make_del_dir_b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = make_del_dir(Config, Handle, "_b"),
- ?line ok = ?PRIM_FILE:stop(Handle),
- %% Just to make sure the state of the server makes a difference
- ?line {error, einval} = ?PRIM_FILE_call(get_cwd, Handle, []),
- Result.
-
-make_del_dir(Config, Handle, Suffix) ->
- ?line RootDir = ?config(priv_dir,Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_mk-dir"++Suffix),
- ?line ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
- ?line {error, eexist} = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
- ?line ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]),
- ?line {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [NewDir]),
-
- % Make sure we are not in a directory directly under test_server
- % as that would result in eacces errors when trying to delete '..',
- % because there are processes having that directory as current.
- ?line ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
- ?line {ok, CurrentDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
+make_del_dir(Config) when is_list(Config) ->
+ RootDir = proplists:get_value(priv_dir,Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_mk-dir"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
+ {error, eexist} = ?PRIM_FILE:make_dir(NewDir),
+ ok = ?PRIM_FILE:del_dir(NewDir),
+ {error, enoent} = ?PRIM_FILE:del_dir(NewDir),
+
+ %% Make sure we are not in a directory directly under test_server
+ %% as that would result in eacces errors when trying to delete '..',
+ %% because there are processes having that directory as current.
+ ok = ?PRIM_FILE:make_dir(NewDir),
+ {ok, CurrentDir} = ?PRIM_FILE:get_cwd(),
case {os:type(), length(NewDir) >= 260 } of
{{win32,_}, true} ->
io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH)\n", []),
io:format("\nNewDir = ~p\n", [NewDir]);
_ ->
- ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir])
+ ok = ?PRIM_FILE:set_cwd(NewDir)
end,
try
%% Check that we get an error when trying to create...
%% a deep directory
- ?line NewDir2 = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_mk-dir-noexist/foo"),
- ?line {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [NewDir2]),
+ NewDir2 = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_mk-dir-noexist/foo"),
+ {error, enoent} = ?PRIM_FILE:make_dir(NewDir2),
%% a nameless directory
- ?line {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [""]),
+ {error, enoent} = ?PRIM_FILE:make_dir(""),
%% a directory with illegal name
- ?line {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, ['mk-dir']),
-
+ {error, badarg} = ?PRIM_FILE:make_dir('mk-dir'),
+
%% a directory with illegal name, even if it's a (bad) list
- ?line {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, [[1,2,3,{}]]),
-
+ {error, badarg} = ?PRIM_FILE:make_dir([1,2,3,{}]),
+
%% Maybe this isn't an error, exactly, but worth mentioning anyway:
%% ok = ?PRIM_FILE:make_dir([$f,$o,$o,0,$b,$a,$r])),
%% The above line works, and created a directory "./foo"
@@ -296,137 +254,105 @@ make_del_dir(Config, Handle, Suffix) ->
%% a directory, but with a name that incorporates the "bar" part of
%% the list, so that [$f,$o,$o,0,$f,$o,$o] wouldn't refer to the same
%% dir. But this would slow it down.
-
+
%% Try deleting some bad directories
%% Deleting the parent directory to the current, sounds dangerous, huh?
%% Don't worry ;-) the parent directory should never be empty, right?
- ?line case ?PRIM_FILE_call(del_dir, Handle, [".."]) of
- {error, eexist} -> ok;
- {error, eacces} -> ok; %OpenBSD
- {error, einval} -> ok %FreeBSD
- end,
- ?line {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [""]),
- ?line {error, badarg} = ?PRIM_FILE_call(del_dir, Handle, [[3,2,1,{}]])
+ case ?PRIM_FILE:del_dir("..") of
+ {error, eexist} -> ok;
+ {error, eacces} -> ok; %OpenBSD
+ {error, einval} -> ok %FreeBSD
+ end,
+ {error, enoent} = ?PRIM_FILE:del_dir(""),
+ {error, badarg} = ?PRIM_FILE:del_dir([3,2,1,{}])
after
- ?line ok = ?PRIM_FILE_call(set_cwd, Handle, [CurrentDir])
+ ok = ?PRIM_FILE:set_cwd(CurrentDir)
end,
ok.
-cur_dir_0a(suite) -> [];
-cur_dir_0a(doc) -> [];
-cur_dir_0a(Config) when is_list(Config) ->
- cur_dir_0(Config, []).
-
-cur_dir_0b(suite) -> [];
-cur_dir_0b(doc) -> [];
-cur_dir_0b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = cur_dir_0(Config, Handle),
- ?line ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-cur_dir_0(Config, Handle) ->
+cur_dir_0(Config) when is_list(Config) ->
%% Find out the current dir, and cd to it ;-)
- ?line {ok,BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
- ?line Dir1 = BaseDir ++ "", %% Check that it's a string
- ?line ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]),
- ?line DirName = atom_to_list(?MODULE) ++
- case Handle of
- [] ->
- "_curdir";
- _ ->
- "_curdir_h"
- end,
+ {ok,BaseDir} = ?PRIM_FILE:get_cwd(),
+ Dir1 = BaseDir ++ "", %% Check that it's a string
+ ok = ?PRIM_FILE:set_cwd(Dir1),
+ DirName = atom_to_list(?MODULE) ++ "_curdir",
%% Make a new dir, and cd to that
- ?line RootDir = ?config(priv_dir,Config),
- ?line NewDir = filename:join(RootDir, DirName),
- ?line ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
+ RootDir = proplists:get_value(priv_dir,Config),
+ NewDir = filename:join(RootDir, DirName),
+ ok = ?PRIM_FILE:make_dir(NewDir),
case {os:type(), length(NewDir) >= 260} of
{{win32,_}, true} ->
io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH):\n"),
io:format("\nNewDir = ~p\n", [NewDir]);
_ ->
io:format("cd to ~s",[NewDir]),
- ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]),
+ ok = ?PRIM_FILE:set_cwd(NewDir),
%% Create a file in the new current directory, and check that it
%% really is created there
UncommonName = "uncommon.fil",
{ok,Fd} = ?PRIM_FILE:open(UncommonName, [read, write]),
ok = ?PRIM_FILE:close(Fd),
- {ok,NewDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]),
+ {ok,NewDirFiles} = ?PRIM_FILE:list_dir("."),
true = lists:member(UncommonName,NewDirFiles),
%% Delete the directory and return to the old current directory
%% and check that the created file isn't there (too!)
expect({error, einval}, {error, eacces}, {error, eexist},
- ?PRIM_FILE_call(del_dir, Handle, [NewDir])),
- ?PRIM_FILE_call(delete, Handle, [UncommonName]),
- {ok,[]} = ?PRIM_FILE_call(list_dir, Handle, ["."]),
- ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]),
+ ?PRIM_FILE:del_dir(NewDir)),
+ ?PRIM_FILE:delete(UncommonName),
+ {ok,[]} = ?PRIM_FILE:list_dir("."),
+ ok = ?PRIM_FILE:set_cwd(Dir1),
io:format("cd back to ~s",[Dir1]),
- ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]),
- {error, enoent} = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]),
- ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]),
+ ok = ?PRIM_FILE:del_dir(NewDir),
+ {error, enoent} = ?PRIM_FILE:set_cwd(NewDir),
+ ok = ?PRIM_FILE:set_cwd(Dir1),
io:format("cd back to ~s",[Dir1]),
- {ok,OldDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]),
+ {ok,OldDirFiles} = ?PRIM_FILE:list_dir("."),
false = lists:member(UncommonName,OldDirFiles)
end,
%% Try doing some bad things
- ?line {error, badarg} =
- ?PRIM_FILE_call(set_cwd, Handle, [{foo,bar}]),
- ?line {error, enoent} =
- ?PRIM_FILE_call(set_cwd, Handle, [""]),
- ?line {error, enoent} =
- ?PRIM_FILE_call(set_cwd, Handle, [".......a......"]),
- ?line {ok,BaseDir} =
- ?PRIM_FILE_call(get_cwd, Handle, []), %% Still there?
+ {error, badarg} =
+ ?PRIM_FILE:set_cwd({foo,bar}),
+ {error, enoent} =
+ ?PRIM_FILE:set_cwd(""),
+ {error, enoent} =
+ ?PRIM_FILE:set_cwd(".......a......"),
+ {ok,BaseDir} =
+ ?PRIM_FILE:get_cwd(), %% Still there?
%% On Windows, there should only be slashes, no backslashes,
%% in the return value of get_cwd().
%% (The test is harmless on Unix, because filenames usually
%% don't contain backslashes.)
- ?line {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
- ?line false = lists:member($\\, BaseDir),
+ {ok, BaseDir} = ?PRIM_FILE:get_cwd(),
+ false = lists:member($\\, BaseDir),
ok.
%% Tests ?PRIM_FILE:get_cwd/1.
-cur_dir_1a(suite) -> [];
-cur_dir_1a(doc) -> [];
-cur_dir_1a(Config) when is_list(Config) ->
- cur_dir_1(Config, []).
-
-cur_dir_1b(suite) -> [];
-cur_dir_1b(doc) -> [];
-cur_dir_1b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = cur_dir_1(Config, Handle),
- ?line ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-cur_dir_1(Config, Handle) ->
- ?line case os:type() of
- {win32, _} ->
- win_cur_dir_1(Config, Handle);
- _ ->
- ?line {error, enotsup} =
- ?PRIM_FILE_call(get_cwd, Handle, ["d:"])
- end,
+cur_dir_1(Config) when is_list(Config) ->
+ case os:type() of
+ {win32, _} ->
+ win_cur_dir_1(Config);
+ _ ->
+ {error, enotsup} =
+ ?PRIM_FILE:get_cwd("d:")
+ end,
ok.
-
-win_cur_dir_1(_Config, Handle) ->
- ?line {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
+
+win_cur_dir_1(_Config) ->
+ {ok, BaseDir} = ?PRIM_FILE:get_cwd(),
%% Get the drive letter from the current directory,
%% and try to get current directory for that drive.
- ?line [Drive, $:|_] = BaseDir,
- ?line {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, [[Drive, $:]]),
+ [Drive, $:|_] = BaseDir,
+ {ok, BaseDir} = ?PRIM_FILE:get_cwd([Drive, $:]),
io:format("BaseDir = ~s\n", [BaseDir]),
%% Unfortunately, there is no way to move away from the
@@ -439,526 +365,439 @@ win_cur_dir_1(_Config, Handle) ->
-open1(suite) -> [];
-open1(doc) -> [];
open1(Config) when is_list(Config) ->
- ?line RootDir = ?config(priv_dir,Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_files"),
- ?line ok = ?PRIM_FILE:make_dir(NewDir),
- ?line Name = filename:join(NewDir, "foo1.fil"),
- ?line {ok,Fd1} = ?PRIM_FILE:open(Name, [read, write]),
- ?line {ok,Fd2} = ?PRIM_FILE:open(Name, [read]),
- ?line Str = "{a,tuple}.\n",
- ?line Length = length(Str),
- ?line ?PRIM_FILE:write(Fd1,Str),
- ?line {ok,0} = ?PRIM_FILE:position(Fd1,bof),
- ?line {ok, Str} = ?PRIM_FILE:read(Fd1,Length),
- ?line case ?PRIM_FILE:read(Fd2,Length) of
- {ok,Str} -> Str;
- eof -> Str
- end,
- ?line ok = ?PRIM_FILE:close(Fd2),
- ?line {ok,0} = ?PRIM_FILE:position(Fd1,bof),
- ?line ok = ?PRIM_FILE:truncate(Fd1),
- ?line eof = ?PRIM_FILE:read(Fd1,Length),
- ?line ok = ?PRIM_FILE:close(Fd1),
- ?line {ok,Fd3} = ?PRIM_FILE:open(Name, [read]),
- ?line eof = ?PRIM_FILE:read(Fd3,Length),
- ?line ok = ?PRIM_FILE:close(Fd3),
+ RootDir = proplists:get_value(priv_dir,Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_files"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
+ Name = filename:join(NewDir, "foo1.fil"),
+ {ok,Fd1} = ?PRIM_FILE:open(Name, [read, write]),
+ {ok,Fd2} = ?PRIM_FILE:open(Name, [read]),
+ Bin = list_to_binary("{a,tuple}.\n"),
+ Length = byte_size(Bin),
+ ?PRIM_FILE:write(Fd1,Bin),
+ {ok,0} = ?PRIM_FILE:position(Fd1,bof),
+ {ok, Bin} = ?PRIM_FILE:read(Fd1,Length),
+ {ok, Bin} = ?PRIM_FILE:read(Fd2,Length),
+ ok = ?PRIM_FILE:close(Fd2),
+ {ok,0} = ?PRIM_FILE:position(Fd1,bof),
+ ok = ?PRIM_FILE:truncate(Fd1),
+ eof = ?PRIM_FILE:read(Fd1,Length),
+ ok = ?PRIM_FILE:close(Fd1),
+ {ok,Fd3} = ?PRIM_FILE:open(Name, [read]),
+ eof = ?PRIM_FILE:read(Fd3,Length),
+ ok = ?PRIM_FILE:close(Fd3),
ok.
%% Tests all open modes.
-modes(suite) -> [];
-modes(doc) -> [];
modes(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_open_modes"),
- ?line ok = ?PRIM_FILE:make_dir(NewDir),
- ?line Name1 = filename:join(NewDir, "foo1.fil"),
- ?line Marker = "hello, world",
- ?line Length = length(Marker),
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_open_modes"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
+ Name1 = filename:join(NewDir, "foo1.fil"),
+ Marker = <<"hello, world">>,
+ Length = byte_size(Marker),
%% write
- ?line {ok, Fd1} = ?PRIM_FILE:open(Name1, [write]),
- ?line ok = ?PRIM_FILE:write(Fd1, Marker),
- ?line ok = ?PRIM_FILE:write(Fd1, ".\n"),
- ?line ok = ?PRIM_FILE:close(Fd1),
+ {ok, Fd1} = ?PRIM_FILE:open(Name1, [write]),
+ ok = ?PRIM_FILE:write(Fd1, Marker),
+ ok = ?PRIM_FILE:write(Fd1, <<".\n">>),
+ ok = ?PRIM_FILE:close(Fd1),
%% read
- ?line {ok, Fd2} = ?PRIM_FILE:open(Name1, [read]),
- ?line {ok, Marker} = ?PRIM_FILE:read(Fd2, Length),
- ?line ok = ?PRIM_FILE:close(Fd2),
+ {ok, Fd2} = ?PRIM_FILE:open(Name1, [read]),
+ {ok, Marker} = ?PRIM_FILE:read(Fd2, Length),
+ ok = ?PRIM_FILE:close(Fd2),
%% read and write
- ?line {ok, Fd3} = ?PRIM_FILE:open(Name1, [read, write]),
- ?line {ok, Marker} = ?PRIM_FILE:read(Fd3, Length),
- ?line ok = ?PRIM_FILE:write(Fd3, Marker),
- ?line ok = ?PRIM_FILE:close(Fd3),
+ {ok, Fd3} = ?PRIM_FILE:open(Name1, [read, write]),
+ {ok, Marker} = ?PRIM_FILE:read(Fd3, Length),
+ ok = ?PRIM_FILE:write(Fd3, Marker),
+ ok = ?PRIM_FILE:close(Fd3),
%% read by default
- ?line {ok, Fd4} = ?PRIM_FILE:open(Name1, []),
- ?line {ok, Marker} = ?PRIM_FILE:read(Fd4, Length),
- ?line ok = ?PRIM_FILE:close(Fd4),
-
- %% read and binary
- ?line BinaryMarker = list_to_binary(Marker),
- ?line {ok, Fd5} = ?PRIM_FILE:open(Name1, [read, binary]),
- ?line {ok, BinaryMarker} = ?PRIM_FILE:read(Fd5, Length),
- ?line ok = ?PRIM_FILE:close(Fd5),
+ {ok, Fd4} = ?PRIM_FILE:open(Name1, []),
+ {ok, Marker} = ?PRIM_FILE:read(Fd4, Length),
+ ok = ?PRIM_FILE:close(Fd4),
- ?line test_server:timetrap_cancel(Dog),
ok.
-close(suite) -> [];
-close(doc) -> [];
close(Config) when is_list(Config) ->
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_close.fil"),
- ?line {ok,Fd1} = ?PRIM_FILE:open(Name, [read, write]),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_close.fil"),
+ {ok,Fd1} = ?PRIM_FILE:open(Name, [read, write]),
%% Just closing it is no fun, we did that a million times already
%% This is a common error, for code written before Erlang 4.3
%% because then ?PRIM_FILE:open just returned a Pid, and not everyone
%% really checked what they got.
- ?line {'EXIT',_Msg} = (catch ok = ?PRIM_FILE:close({ok,Fd1})),
- ?line ok = ?PRIM_FILE:close(Fd1),
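+ %% Passing the whole {ok, Fd} tuple instead of the file descriptor itself must fail.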
+ {'EXIT',_Msg} = (catch ok = ?PRIM_FILE:close({ok,Fd1})),
+ ok = ?PRIM_FILE:close(Fd1),
%% Try closing one more time
- ?line Val = ?PRIM_FILE:close(Fd1),
- ?line io:format("Second close gave: ~p", [Val]),
+ Val = ?PRIM_FILE:close(Fd1),
+ io:format("Second close gave: ~p", [Val]),
ok.
-access(suite) -> [];
-access(doc) -> [];
access(Config) when is_list(Config) ->
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_access.fil"),
- ?line Str = "ABCDEFGH",
- ?line {ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?line ?PRIM_FILE:write(Fd1,Str),
- ?line ok = ?PRIM_FILE:close(Fd1),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_access.fil"),
+ Bin = <<"ABCDEFGH">>,
+ {ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
+ ?PRIM_FILE:write(Fd1,Bin),
+ ok = ?PRIM_FILE:close(Fd1),
%% Check that we can't write when in read only mode
- ?line {ok,Fd2} = ?PRIM_FILE:open(Name, [read]),
- ?line case catch ?PRIM_FILE:write(Fd2,"XXXX") of
- ok ->
- test_server:fail({access,write});
- _ ->
- ok
- end,
- ?line ok = ?PRIM_FILE:close(Fd2),
- ?line {ok, Fd3} = ?PRIM_FILE:open(Name, [read]),
- ?line {ok, Str} = ?PRIM_FILE:read(Fd3,length(Str)),
- ?line ok = ?PRIM_FILE:close(Fd3),
+ {ok,Fd2} = ?PRIM_FILE:open(Name, [read]),
+ case catch ?PRIM_FILE:write(Fd2,"XXXX") of
+ ok ->
+ ct:fail({access,write});
+ _ ->
+ ok
+ end,
+ ok = ?PRIM_FILE:close(Fd2),
+ {ok, Fd3} = ?PRIM_FILE:open(Name, [read]),
+ {ok, Bin} = ?PRIM_FILE:read(Fd3,byte_size(Bin)),
+ ok = ?PRIM_FILE:close(Fd3),
ok.
%% Tests ?PRIM_FILE:read/2 and ?PRIM_FILE:write/2.
-read_write(suite) -> [];
-read_write(doc) -> [];
read_write(Config) when is_list(Config) ->
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_read_write"),
- ?line ok = ?PRIM_FILE:make_dir(NewDir),
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_read_write"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
%% Raw file.
- ?line Name = filename:join(NewDir, "raw.fil"),
- ?line {ok, Fd} = ?PRIM_FILE:open(Name, [read, write]),
- ?line read_write_test(Fd),
+ Name = filename:join(NewDir, "raw.fil"),
+ {ok, Fd} = ?PRIM_FILE:open(Name, [read, write]),
+ read_write_test(Fd),
ok.
read_write_test(File) ->
- ?line Marker = "hello, world",
- ?line ok = ?PRIM_FILE:write(File, Marker),
- ?line {ok, 0} = ?PRIM_FILE:position(File, 0),
- ?line {ok, Marker} = ?PRIM_FILE:read(File, 100),
- ?line eof = ?PRIM_FILE:read(File, 100),
- ?line ok = ?PRIM_FILE:close(File),
+ Marker = <<"hello, world">>,
+ ok = ?PRIM_FILE:write(File, Marker),
+ {ok, 0} = ?PRIM_FILE:position(File, 0),
+ {ok, Marker} = ?PRIM_FILE:read(File, 100),
+ eof = ?PRIM_FILE:read(File, 100),
+ ok = ?PRIM_FILE:close(File),
ok.
%% Tests ?PRIM_FILE:pread/2 and ?PRIM_FILE:pwrite/2.
-pread_write(suite) -> [];
-pread_write(doc) -> [];
pread_write(Config) when is_list(Config) ->
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_pread_write"),
- ?line ok = ?PRIM_FILE:make_dir(NewDir),
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_pread_write"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
%% Raw file.
- ?line Name = filename:join(NewDir, "raw.fil"),
- ?line {ok, Fd} = ?PRIM_FILE:open(Name, [read, write]),
- ?line pread_write_test(Fd),
+ Name = filename:join(NewDir, "raw.fil"),
+ {ok, Fd} = ?PRIM_FILE:open(Name, [read, write]),
+ pread_write_test(Fd),
ok.
pread_write_test(File) ->
- ?line Marker = "hello, world",
- ?line Len = length(Marker),
- ?line ok = ?PRIM_FILE:write(File, Marker),
- ?line {ok, Marker} = ?PRIM_FILE:pread(File, 0, 100),
- ?line eof = ?PRIM_FILE:pread(File, 100, 1),
- ?line ok = ?PRIM_FILE:pwrite(File, Len, Marker),
- ?line {ok, Marker} = ?PRIM_FILE:pread(File, Len, 100),
- ?line eof = ?PRIM_FILE:pread(File, 100, 1),
- ?line MM = Marker ++ Marker,
- ?line {ok, MM} = ?PRIM_FILE:pread(File, 0, 100),
- ?line ok = ?PRIM_FILE:close(File),
+ Marker = <<"hello, world">>,
+ Len = byte_size(Marker),
+ ok = ?PRIM_FILE:write(File, Marker),
+ {ok, Marker} = ?PRIM_FILE:pread(File, 0, 100),
+ eof = ?PRIM_FILE:pread(File, 100, 1),
+ ok = ?PRIM_FILE:pwrite(File, Len, Marker),
+ {ok, Marker} = ?PRIM_FILE:pread(File, Len, 100),
+ eof = ?PRIM_FILE:pread(File, 100, 1),
+ MM = <<Marker/binary,Marker/binary>>,
+ {ok, MM} = ?PRIM_FILE:pread(File, 0, 100),
+ ok = ?PRIM_FILE:close(File),
ok.
-append(doc) -> "Test appending to a file.";
-append(suite) -> [];
+%% Test appending to a file.
append(Config) when is_list(Config) ->
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_append"),
- ?line ok = ?PRIM_FILE:make_dir(NewDir),
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_append"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
- ?line First = "First line\n",
- ?line Second = "Seond lines comes here\n",
- ?line Third = "And here is the third line\n",
+ First = "First line\n",
+ Second = "Seond lines comes here\n",
+ Third = "And here is the third line\n",
%% Write a small text file.
- ?line Name1 = filename:join(NewDir, "a_file.txt"),
- ?line {ok, Fd1} = ?PRIM_FILE:open(Name1, [write]),
- ?line ok = ?PRIM_FILE:write(Fd1, First),
- ?line ok = ?PRIM_FILE:write(Fd1, Second),
- ?line ok = ?PRIM_FILE:close(Fd1),
+ Name1 = filename:join(NewDir, "a_file.txt"),
+ {ok, Fd1} = ?PRIM_FILE:open(Name1, [write]),
+ ok = ?PRIM_FILE:write(Fd1, First),
+ ok = ?PRIM_FILE:write(Fd1, Second),
+ ok = ?PRIM_FILE:close(Fd1),
%% Open it again and append a line to it.
- ?line {ok, Fd2} = ?PRIM_FILE:open(Name1, [append]),
- ?line ok = ?PRIM_FILE:write(Fd2, Third),
- ?line ok = ?PRIM_FILE:close(Fd2),
+ {ok, Fd2} = ?PRIM_FILE:open(Name1, [append]),
+ ok = ?PRIM_FILE:write(Fd2, Third),
+ ok = ?PRIM_FILE:close(Fd2),
%% Read it back and verify.
- ?line Expected = list_to_binary([First, Second, Third]),
- ?line {ok, Expected} = ?PRIM_FILE:read_file(Name1),
+ Expected = list_to_binary([First, Second, Third]),
+ {ok, Expected} = ?PRIM_FILE:read_file(Name1),
ok.
-exclusive(suite) -> [];
-exclusive(doc) -> "Test exclusive access to a file.";
+%% Test exclusive access to a file.
exclusive(Config) when is_list(Config) ->
- ?line RootDir = ?config(priv_dir,Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_exclusive"),
- ?line ok = ?PRIM_FILE:make_dir(NewDir),
- ?line Name = filename:join(NewDir, "ex_file.txt"),
- ?line {ok,Fd} = ?PRIM_FILE:open(Name, [write, exclusive]),
- ?line {error, eexist} = ?PRIM_FILE:open(Name, [write, exclusive]),
- ?line ok = ?PRIM_FILE:close(Fd),
+ RootDir = proplists:get_value(priv_dir,Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_exclusive"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
+ Name = filename:join(NewDir, "ex_file.txt"),
+ {ok,Fd} = ?PRIM_FILE:open(Name, [write, exclusive]),
+ {error, eexist} = ?PRIM_FILE:open(Name, [write, exclusive]),
+ ok = ?PRIM_FILE:close(Fd),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-pos1(suite) -> [];
-pos1(doc) -> [];
pos1(Config) when is_list(Config) ->
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_pos1.fil"),
- ?line {ok, Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?line ?PRIM_FILE:write(Fd1,"ABCDEFGH"),
- ?line ok = ?PRIM_FILE:close(Fd1),
- ?line {ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_pos1.fil"),
+ {ok, Fd1} = ?PRIM_FILE:open(Name, [write]),
+ ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>),
+ ok = ?PRIM_FILE:close(Fd1),
+ {ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
%% Start pos is first char
- ?line io:format("Relative positions"),
- ?line {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
- ?line {ok, 2} = ?PRIM_FILE:position(Fd2,{cur,1}),
- ?line {ok, "C"} = ?PRIM_FILE:read(Fd2,1),
- ?line {ok, 0} = ?PRIM_FILE:position(Fd2,{cur,-3}),
- ?line {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
+ io:format("Relative positions"),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
+ {ok, 2} = ?PRIM_FILE:position(Fd2,{cur,1}),
+ {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1),
+ {ok, 0} = ?PRIM_FILE:position(Fd2,{cur,-3}),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
%% Backwards from first char should be an error
- ?line {ok,0} = ?PRIM_FILE:position(Fd2,{cur,-1}),
- ?line {error, einval} = ?PRIM_FILE:position(Fd2,{cur,-1}),
+ {ok,0} = ?PRIM_FILE:position(Fd2,{cur,-1}),
+ {error, einval} = ?PRIM_FILE:position(Fd2,{cur,-1}),
%% Reset position and move again
- ?line {ok, 0} = ?PRIM_FILE:position(Fd2,0),
- ?line {ok, 2} = ?PRIM_FILE:position(Fd2,{cur,2}),
- ?line {ok, "C"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, 0} = ?PRIM_FILE:position(Fd2,0),
+ {ok, 2} = ?PRIM_FILE:position(Fd2,{cur,2}),
+ {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1),
%% Go a lot forwards
- ?line {ok, 13} = ?PRIM_FILE:position(Fd2,{cur,10}),
- ?line eof = ?PRIM_FILE:read(Fd2,1),
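+ %% Positioning beyond end-of-file succeeds; the following read returns eof.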
+ {ok, 13} = ?PRIM_FILE:position(Fd2,{cur,10}),
+ eof = ?PRIM_FILE:read(Fd2,1),
%% Try some fixed positions
- ?line io:format("Fixed positions"),
- ?line {ok, 8} = ?PRIM_FILE:position(Fd2,8),
- ?line eof = ?PRIM_FILE:read(Fd2,1),
- ?line {ok, 8} = ?PRIM_FILE:position(Fd2,cur),
- ?line eof = ?PRIM_FILE:read(Fd2,1),
- ?line {ok, 7} = ?PRIM_FILE:position(Fd2,7),
- ?line {ok, "H"} = ?PRIM_FILE:read(Fd2,1),
- ?line {ok, 0} = ?PRIM_FILE:position(Fd2,0),
- ?line {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
- ?line {ok, 3} = ?PRIM_FILE:position(Fd2,3),
- ?line {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
- ?line {ok, 12} = ?PRIM_FILE:position(Fd2,12),
- ?line eof = ?PRIM_FILE:read(Fd2,1),
- ?line {ok, 3} = ?PRIM_FILE:position(Fd2,3),
- ?line {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
+ io:format("Fixed positions"),
+ {ok, 8} = ?PRIM_FILE:position(Fd2,8),
+ eof = ?PRIM_FILE:read(Fd2,1),
+ {ok, 8} = ?PRIM_FILE:position(Fd2,cur),
+ eof = ?PRIM_FILE:read(Fd2,1),
+ {ok, 7} = ?PRIM_FILE:position(Fd2,7),
+ {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1),
+ {ok, 0} = ?PRIM_FILE:position(Fd2,0),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
+ {ok, 3} = ?PRIM_FILE:position(Fd2,3),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
+ {ok, 12} = ?PRIM_FILE:position(Fd2,12),
+ eof = ?PRIM_FILE:read(Fd2,1),
+ {ok, 3} = ?PRIM_FILE:position(Fd2,3),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
%% Try the {bof,X} notation
- ?line {ok, 3} = ?PRIM_FILE:position(Fd2,{bof,3}),
- ?line {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, 3} = ?PRIM_FILE:position(Fd2,{bof,3}),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
%% Try eof positions
- ?line io:format("EOF positions"),
- ?line {ok, 8} = ?PRIM_FILE:position(Fd2,{eof,0}),
- ?line eof = ?PRIM_FILE:read(Fd2,1),
- ?line {ok, 7} = ?PRIM_FILE:position(Fd2,{eof,-1}),
- ?line {ok, "H"} = ?PRIM_FILE:read(Fd2,1),
- ?line {ok, 0} = ?PRIM_FILE:position(Fd2,{eof,-8}),
- ?line {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
- ?line {error, einval} = ?PRIM_FILE:position(Fd2,{eof,-9}),
+ io:format("EOF positions"),
+ {ok, 8} = ?PRIM_FILE:position(Fd2,{eof,0}),
+ eof = ?PRIM_FILE:read(Fd2,1),
+ {ok, 7} = ?PRIM_FILE:position(Fd2,{eof,-1}),
+ {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1),
+ {ok, 0} = ?PRIM_FILE:position(Fd2,{eof,-8}),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
+ {error, einval} = ?PRIM_FILE:position(Fd2,{eof,-9}),
ok.
-pos2(suite) -> [];
-pos2(doc) -> [];
pos2(Config) when is_list(Config) ->
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_pos2.fil"),
- ?line {ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?line ?PRIM_FILE:write(Fd1,"ABCDEFGH"),
- ?line ok = ?PRIM_FILE:close(Fd1),
- ?line {ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
- ?line {error, einval} = ?PRIM_FILE:position(Fd2,-1),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_pos2.fil"),
+ {ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
+ ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>),
+ ok = ?PRIM_FILE:close(Fd1),
+ {ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
+ {error, einval} = ?PRIM_FILE:position(Fd2,-1),
%% Make sure that we still can search after an error.
- ?line {ok, 0} = ?PRIM_FILE:position(Fd2, 0),
- ?line {ok, 3} = ?PRIM_FILE:position(Fd2, {bof,3}),
- ?line {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, 0} = ?PRIM_FILE:position(Fd2, 0),
+ {ok, 3} = ?PRIM_FILE:position(Fd2, {bof,3}),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
- ?line io:format("DONE"),
+ io:format("DONE"),
ok.
-
-file_info_basic_file_a(suite) -> [];
-file_info_basic_file_a(doc) -> [];
-file_info_basic_file_a(Config) when is_list(Config) ->
- file_info_basic_file(Config, [], "_a").
-
-file_info_basic_file_b(suite) -> [];
-file_info_basic_file_b(doc) -> [];
-file_info_basic_file_b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_basic_file(Config, Handle, "_b"),
- ?line ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_basic_file(Config, Handle, Suffix) ->
- ?line RootDir = ?config(priv_dir, Config),
+file_info_basic_file(Config) when is_list(Config) ->
+ RootDir = proplists:get_value(priv_dir, Config),
%% Create a short file.
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_basic_test"++Suffix++".fil"),
- ?line {ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?line ?PRIM_FILE:write(Fd1, "foo bar"),
- ?line ok = ?PRIM_FILE:close(Fd1),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_basic_test"".fil"),
+ {ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
+ ?PRIM_FILE:write(Fd1, "foo bar"),
+ ok = ?PRIM_FILE:close(Fd1),
%% Test that the file has the expected attributes.
%% The times are tricky, so we will save them to a separate test case.
- ?line {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]),
- ?line #file_info{size = Size, type = Type, access = Access,
- atime = AccessTime, mtime = ModifyTime} =
+ {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name),
+ #file_info{size = Size, type = Type, access = Access,
+ atime = AccessTime, mtime = ModifyTime} =
FileInfo,
- ?line io:format("Access ~p, Modify ~p", [AccessTime, ModifyTime]),
- ?line Size = 7,
- ?line Type = regular,
- ?line Access = read_write,
- ?line true = abs(time_dist(filter_atime(AccessTime, Config),
- filter_atime(ModifyTime,
- Config))) < 2,
- ?line {AD, AT} = AccessTime,
- ?line all_integers(tuple_to_list(AD) ++ tuple_to_list(AT)),
- ?line {MD, MT} = ModifyTime,
- ?line all_integers(tuple_to_list(MD) ++ tuple_to_list(MT)),
+ io:format("Access ~p, Modify ~p", [AccessTime, ModifyTime]),
+ Size = 7,
+ Type = regular,
+ Access = read_write,
+ true = abs(time_dist(filter_atime(AccessTime, Config),
+ filter_atime(ModifyTime,
+ Config))) < 2,
+ {AD, AT} = AccessTime,
+ all_integers(tuple_to_list(AD) ++ tuple_to_list(AT)),
+ {MD, MT} = ModifyTime,
+ all_integers(tuple_to_list(MD) ++ tuple_to_list(MT)),
ok.
-file_info_basic_directory_a(suite) -> [];
-file_info_basic_directory_a(doc) -> [];
-file_info_basic_directory_a(Config) when is_list(Config) ->
- file_info_basic_directory(Config, []).
-
-file_info_basic_directory_b(suite) -> [];
-file_info_basic_directory_b(doc) -> [];
-file_info_basic_directory_b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_basic_directory(Config, Handle),
- ?line ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_basic_directory(Config, Handle) ->
+file_info_basic_directory(Config) when is_list(Config) ->
%% Note: filename:join/1 removes any trailing slash,
%% which is essential for ?PRIM_FILE:read_file_info/1 to work on
%% platforms such as Windows95.
- ?line RootDir = filename:join([?config(priv_dir, Config)]),
+ RootDir = filename:join([proplists:get_value(priv_dir, Config)]),
%% Test that the RootDir directory has the expected attributes.
- ?line test_directory(RootDir, read_write, Handle),
+ test_directory(RootDir, read_write),
%% Note that on Windows file systems, "/" or "c:/" are *NOT* directories.
%% Therefore, test that ?PRIM_FILE:read_file_info/1 behaves
%% as if they were directories.
- ?line case os:type() of
- {win32, _} ->
- ?line test_directory("/", read_write, Handle),
- ?line test_directory("c:/", read_write, Handle),
- ?line test_directory("c:\\", read_write, Handle);
- _ ->
- ?line test_directory("/", read, Handle)
- end,
+ case os:type() of
+ {win32, _} ->
+ test_directory("/", read_write),
+ test_directory("c:/", read_write),
+ test_directory("c:\\", read_write);
+ _ ->
+ test_directory("/", read)
+ end,
ok.
-test_directory(Name, ExpectedAccess, Handle) ->
- ?line {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]),
- ?line #file_info{size = Size, type = Type, access = Access,
- atime = AccessTime, mtime = ModifyTime} =
+test_directory(Name, ExpectedAccess) ->
+ {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name),
+ #file_info{size = Size, type = Type, access = Access,
+ atime = AccessTime, mtime = ModifyTime} =
FileInfo,
- ?line io:format("Testing directory ~s", [Name]),
- ?line io:format("Directory size is ~p", [Size]),
- ?line io:format("Access ~p", [Access]),
- ?line io:format("Access time ~p; Modify time~p",
- [AccessTime, ModifyTime]),
- ?line Type = directory,
- ?line Access = ExpectedAccess,
- ?line {AD, AT} = AccessTime,
- ?line all_integers(tuple_to_list(AD) ++ tuple_to_list(AT)),
- ?line {MD, MT} = ModifyTime,
- ?line all_integers(tuple_to_list(MD) ++ tuple_to_list(MT)),
+ io:format("Testing directory ~s", [Name]),
+ io:format("Directory size is ~p", [Size]),
+ io:format("Access ~p", [Access]),
+ io:format("Access time ~p; Modify time~p",
+ [AccessTime, ModifyTime]),
+ Type = directory,
+ Access = ExpectedAccess,
+ {AD, AT} = AccessTime,
+ all_integers(tuple_to_list(AD) ++ tuple_to_list(AT)),
+ {MD, MT} = ModifyTime,
+ all_integers(tuple_to_list(MD) ++ tuple_to_list(MT)),
ok.
all_integers([Int|Rest]) when is_integer(Int) ->
- ?line all_integers(Rest);
+ all_integers(Rest);
all_integers([]) ->
ok.
%% Try something nonexistent.
-file_info_bad_a(suite) -> [];
-file_info_bad_a(doc) -> [];
-file_info_bad_a(Config) when is_list(Config) ->
- file_info_bad(Config, []).
-
-file_info_bad_b(suite) -> [];
-file_info_bad_b(doc) -> [];
-file_info_bad_b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_bad(Config, Handle),
- ?line ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_bad(Config, Handle) ->
- ?line RootDir = filename:join([?config(priv_dir, Config)]),
- ?line {error, enoent} =
- ?PRIM_FILE_call(
- read_file_info, Handle,
- [filename:join(RootDir,
- atom_to_list(?MODULE)++"_nonexistent")]),
+file_info_bad(Config) when is_list(Config) ->
+ RootDir = filename:join([proplists:get_value(priv_dir, Config)]),
+ NonExistent = filename:join(RootDir, atom_to_list(?MODULE)++"_nonexistent"),
+ {error, enoent} = ?PRIM_FILE:read_file_info(NonExistent),
ok.
%% Test that the file times behave as they should.
-file_info_times_a(suite) -> [];
-file_info_times_a(doc) -> [];
-file_info_times_a(Config) when is_list(Config) ->
- file_info_times(Config, [], "_a").
-
-file_info_times_b(suite) -> [];
-file_info_times_b(doc) -> [];
-file_info_times_b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_times(Config, Handle, "_b"),
- ?line ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_times(Config, Handle, Suffix) ->
- ?line Dog = test_server:timetrap(test_server:seconds(60)),
+file_info_times(Config) when is_list(Config) ->
%% We have to try this twice, since if the test runs across the change
%% of a month the time diff calculations will fail. But it won't happen
%% if you run it twice in succession.
- ?line test_server:m_out_of_n(
- 1,2,
- fun() -> ?line file_info_int(Config, Handle, Suffix) end),
- ?line test_server:timetrap_cancel(Dog),
+ test_server:m_out_of_n(
+ 1,2,
+ fun() -> file_info_int(Config) end),
ok.
-file_info_int(Config, Handle, Suffix) ->
+file_info_int(Config) ->
%% Note: filename:join/1 removes any trailing slash,
%% which is essential for ?PRIM_FILE:read_file_info/1 to work on
%% platforms such as Windows95.
- ?line RootDir = filename:join([?config(priv_dir, Config)]),
- ?line test_server:format("RootDir = ~p", [RootDir]),
+ RootDir = filename:join([proplists:get_value(priv_dir, Config)]),
+ io:format("RootDir = ~p", [RootDir]),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_file_info"++Suffix++".fil"),
- ?line {ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?line ?PRIM_FILE:write(Fd1,"foo"),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_file_info.fil"),
+ {ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
+ ?PRIM_FILE:write(Fd1,"foo"),
%% check that the file got a modify date max a few seconds away from now
- ?line {ok, #file_info{type = regular,
- atime = AccTime1, mtime = ModTime1}} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
- ?line Now = erlang:localtime(),
- ?line io:format("Now ~p",[Now]),
- ?line io:format("Open file Acc ~p Mod ~p",[AccTime1,ModTime1]),
- ?line true = abs(time_dist(filter_atime(Now, Config),
- filter_atime(AccTime1,
- Config))) < 8,
- ?line true = abs(time_dist(Now, ModTime1)) < 8,
-
+ {ok, #file_info{type = regular,
+ atime = AccTime1, mtime = ModTime1}} =
+ ?PRIM_FILE:read_file_info(Name),
+ Now = erlang:localtime(),
+ io:format("Now ~p",[Now]),
+ io:format("Open file Acc ~p Mod ~p",[AccTime1,ModTime1]),
+ true = abs(time_dist(filter_atime(Now, Config),
+ filter_atime(AccTime1,
+ Config))) < 8,
+ true = abs(time_dist(Now, ModTime1)) < 8,
+
%% Sleep until we can be sure the seconds value has changed.
%% Note: FAT-based filesystems (like on Windows 95) have
%% a resolution of 2 seconds.
- ?line test_server:sleep(test_server:seconds(2.2)),
+ ct:sleep({seconds,2.2}),
%% close the file, and watch the modify date change
- ?line ok = ?PRIM_FILE:close(Fd1),
- ?line {ok, #file_info{size = Size, type = regular, access = Access,
- atime = AccTime2, mtime = ModTime2}} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
- ?line io:format("Closed file Acc ~p Mod ~p",[AccTime2,ModTime2]),
- ?line true = time_dist(ModTime1, ModTime2) >= 0,
+ ok = ?PRIM_FILE:close(Fd1),
+ {ok, #file_info{size = Size, type = regular, access = Access,
+ atime = AccTime2, mtime = ModTime2}} =
+ ?PRIM_FILE:read_file_info(Name),
+ io:format("Closed file Acc ~p Mod ~p",[AccTime2,ModTime2]),
+ true = time_dist(ModTime1, ModTime2) >= 0,
%% this file is supposed to be binary, so it'd better keep its size
- ?line Size = 3,
- ?line Access = read_write,
+ Size = 3,
+ Access = read_write,
%% Do some directory checking
- ?line {ok, #file_info{size = DSize, type = directory,
- access = DAccess,
- atime = AccTime3, mtime = ModTime3}} =
- ?PRIM_FILE_call(read_file_info, Handle, [RootDir]),
+ {ok, #file_info{size = DSize, type = directory,
+ access = DAccess,
+ atime = AccTime3, mtime = ModTime3}} =
+ ?PRIM_FILE:read_file_info(RootDir),
%% this dir was modified only a few secs ago
- ?line io:format("Dir Acc ~p; Mod ~p; Now ~p",
- [AccTime3, ModTime3, Now]),
- ?line true = abs(time_dist(Now, ModTime3)) < 5,
- ?line DAccess = read_write,
- ?line io:format("Dir size is ~p",[DSize]),
+ io:format("Dir Acc ~p; Mod ~p; Now ~p",
+ [AccTime3, ModTime3, Now]),
+ true = abs(time_dist(Now, ModTime3)) < 5,
+ DAccess = read_write,
+ io:format("Dir size is ~p",[DSize]),
ok.
%% Filter access times, to cope with a deficiency of FAT file systems
@@ -969,9 +808,9 @@ filter_atime(Atime, Config) ->
true ->
case Atime of
{Date, _} ->
- {Date, {0, 0, 0}};
+ {Date, {0, 0, 0}};
{Y, M, D, _, _, _} ->
- {Y, M, D, 0, 0, 0}
+ {Y, M, D, 0, 0, 0}
end;
false ->
Atime
@@ -979,186 +818,151 @@ filter_atime(Atime, Config) ->
%% Test the write_file_info/2 function.
-file_write_file_info_a(suite) -> [];
-file_write_file_info_a(doc) -> [];
-file_write_file_info_a(Config) when is_list(Config) ->
- file_write_file_info(Config, [], "_a").
-
-file_write_file_info_b(suite) -> [];
-file_write_file_info_b(doc) -> [];
-file_write_file_info_b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_write_file_info(Config, Handle, "_b"),
- ?line ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_write_file_info(Config, Handle, Suffix) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = get_good_directory(Config),
- ?line test_server:format("RootDir = ~p", [RootDir]),
+file_write_file_info(Config) when is_list(Config) ->
+ RootDir = get_good_directory(Config),
+ io:format("RootDir = ~p", [RootDir]),
%% Set the file to read only AND update the file times at the same time.
%% (This used to fail on Windows NT/95 for a local filesystem.)
%% Note: Seconds must be even; see note in file_info_times/1.
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_write_file_info_ro"++Suffix),
- ?line ok = ?PRIM_FILE:write_file(Name, "hello"),
- ?line Time = {{1997, 01, 02}, {12, 35, 42}},
- ?line Info = #file_info{mode=8#400, atime=Time, mtime=Time, ctime=Time},
- ?line ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, Info]),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_write_file_info_ro"),
+ ok = ?PRIM_FILE:write_file(Name, "hello"),
+ Time = {{1997, 01, 02}, {12, 35, 42}},
+ Info = #file_info{mode=8#400, atime=Time, mtime=Time, ctime=Time},
+ ok = ?PRIM_FILE:write_file_info(Name, Info),
%% Read back the times.
- ?line {ok, ActualInfo} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
- ?line #file_info{mode=_Mode, atime=ActAtime, mtime=Time,
- ctime=ActCtime} = ActualInfo,
- ?line FilteredAtime = filter_atime(Time, Config),
- ?line FilteredAtime = filter_atime(ActAtime, Config),
- ?line case os:type() of
- {win32, _} ->
- %% On Windows, "ctime" means creation time and it can
- %% be set.
- ActCtime = Time;
- _ ->
- ok
- end,
- ?line {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"),
+ {ok, ActualInfo} =
+ ?PRIM_FILE:read_file_info(Name),
+ #file_info{mode=_Mode, atime=ActAtime, mtime=Time,
+ ctime=ActCtime} = ActualInfo,
+ FilteredAtime = filter_atime(Time, Config),
+ FilteredAtime = filter_atime(ActAtime, Config),
+ case os:type() of
+ {win32, _} ->
+ %% On Windows, "ctime" means creation time and it can
+ %% be set.
+ ActCtime = Time;
+ _ ->
+ ok
+ end,
+ {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"),
%% Make the file writable again.
-
- ?line ?PRIM_FILE_call(write_file_info, Handle,
- [Name, #file_info{mode=8#600}]),
- ?line ok = ?PRIM_FILE:write_file(Name, "hello again"),
+ ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}),
+ ok = ?PRIM_FILE:write_file(Name, "hello again"),
%% And unwritable.
- ?line ?PRIM_FILE_call(write_file_info, Handle,
- [Name, #file_info{mode=8#400}]),
- ?line {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"),
+ ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#400}),
+ {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"),
%% Write the times again.
%% Note: Seconds must be even; see note in file_info_times/1.
- ?line NewTime = {{1997, 02, 15}, {13, 18, 20}},
- ?line NewInfo = #file_info{atime=NewTime, mtime=NewTime, ctime=NewTime},
- ?line ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, NewInfo]),
- ?line {ok, ActualInfo2} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
- ?line #file_info{atime=NewActAtime, mtime=NewTime,
- ctime=NewActCtime} = ActualInfo2,
- ?line NewFilteredAtime = filter_atime(NewTime, Config),
- ?line NewFilteredAtime = filter_atime(NewActAtime, Config),
- ?line case os:type() of
- {win32, _} -> NewActCtime = NewTime;
- _ -> ok
- end,
+ NewTime = {{1997, 02, 15}, {13, 18, 20}},
+ NewInfo = #file_info{atime=NewTime, mtime=NewTime, ctime=NewTime},
+ ok = ?PRIM_FILE:write_file_info(Name, NewInfo),
+ {ok, ActualInfo2} =
+ ?PRIM_FILE:read_file_info(Name),
+ #file_info{atime=NewActAtime, mtime=NewTime,
+ ctime=NewActCtime} = ActualInfo2,
+ NewFilteredAtime = filter_atime(NewTime, Config),
+ NewFilteredAtime = filter_atime(NewActAtime, Config),
+ case os:type() of
+ {win32, _} -> NewActCtime = NewTime;
+ _ -> ok
+ end,
%% The file should still be unwritable.
- ?line {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"),
+ {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"),
%% Make the file writeable again, so that we can remove the
%% test suites ... :-)
- ?line ?PRIM_FILE_call(write_file_info, Handle,
- [Name, #file_info{mode=8#600}]),
- ?line test_server:timetrap_cancel(Dog),
+ ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}),
ok.
%% Test the write_file_info/3 function.
-file_write_file_info_opts(suite) -> [];
-file_write_file_info_opts(doc) -> [];
file_write_file_info_opts(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Dog = test_server:timetrap(test_server:seconds(10)),
RootDir = get_good_directory(Config),
- test_server:format("RootDir = ~p", [RootDir]),
+ io:format("RootDir = ~p", [RootDir]),
Name = filename:join(RootDir, atom_to_list(?MODULE) ++"_write_file_info_opts"),
ok = ?PRIM_FILE:write_file(Name, "hello_opts"),
lists:foreach(fun
- ({FI, Opts}) ->
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts])
- end, [
- {#file_info{ mode=8#600, atime = Time, mtime = Time, ctime = Time}, Opts} ||
- Opts <- [[{time, posix}]],
- Time <- [ 0,1,-1,100,-100,1000,-1000,10000,-10000 ]
- ]),
-
- % REM: determine date range dependent on time_t = Uint32 | Sint32 | Sint64
- % Determine time_t on os:type()?
- lists:foreach(fun
- ({FI, Opts}) ->
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts])
- end, [
- {#file_info{ mode=8#400, atime = Time, mtime = Time, ctime = Time}, Opts} ||
- Opts <- [[{time, universal}],[{time, local}]],
- Time <- [
- {{1970,1,1},{0,0,0}},
- {{1970,1,1},{0,0,1}},
- {{1969,12,31},{23,59,59}},
- {{1908,2,3},{23,59,59}},
- {{2012,2,3},{23,59,59}},
- {{2037,2,3},{23,59,59}},
- erlang:localtime()
- ]]),
- ok = ?PRIM_FILE:stop(Handle),
- test_server:timetrap_cancel(Dog),
+ ({FI, Opts}) ->
+ ok = ?PRIM_FILE:write_file_info(Name, FI, Opts)
+ end, [
+ {#file_info{ mode=8#600, atime = Time, mtime = Time, ctime = Time}, Opts} ||
+ Opts <- [[{time, posix}]],
+ Time <- [ 0,1,-1,100,-100,1000,-1000,10000,-10000 ]
+ ]),
+
+ %% REM: determine date range dependent on time_t = Uint32 | Sint32 | Sint64 | Uint64
+ %% Determine time_t on os:type()?
+ lists:foreach(fun ({FI, Opts}) ->
+ ok = ?PRIM_FILE:write_file_info(Name, FI, Opts)
+ end, [ {#file_info{ mode=8#400, atime = Time, mtime = Time, ctime = Time}, Opts} ||
+ Opts <- [[{time, universal}],[{time, local}]],
+ Time <- [
+ {{1970,1,1},{0,0,0}},
+ {{1970,1,1},{0,0,1}},
+ % {{1969,12,31},{23,59,59}},
+ % {{1908,2,3},{23,59,59}},
+ {{2012,2,3},{23,59,59}},
+ {{2037,2,3},{23,59,59}},
+ erlang:localtime()
+ ]]),
ok.
-file_read_file_info_opts(suite) -> [];
-file_read_file_info_opts(doc) -> [];
file_read_file_info_opts(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Dog = test_server:timetrap(test_server:seconds(10)),
RootDir = get_good_directory(Config),
- test_server:format("RootDir = ~p", [RootDir]),
+ io:format("RootDir = ~p", [RootDir]),
Name = filename:join(RootDir, atom_to_list(?MODULE) ++"_read_file_info_opts"),
ok = ?PRIM_FILE:write_file(Name, "hello_opts"),
lists:foreach(fun
- (Opts) ->
- {ok,_} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts])
- end, [[{time, Type}] || Type <- [local, universal, posix]]),
- ok = ?PRIM_FILE:stop(Handle),
- test_server:timetrap_cancel(Dog),
+ (Opts) ->
+ {ok,_} = ?PRIM_FILE:read_file_info(Name, Opts)
+ end, [[{time, Type}] || Type <- [local, universal, posix]]),
ok.
%% Test the write and read back *_file_info/3 functions.
-file_write_read_file_info_opts(suite) -> [];
-file_write_read_file_info_opts(doc) -> [];
file_write_read_file_info_opts(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Dog = test_server:timetrap(test_server:seconds(10)),
RootDir = get_good_directory(Config),
- test_server:format("RootDir = ~p", [RootDir]),
+ io:format("RootDir = ~p", [RootDir]),
Name = filename:join(RootDir, atom_to_list(?MODULE) ++"_read_write_file_info_opts"),
ok = ?PRIM_FILE:write_file(Name, "hello_opts2"),
- ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]),
- ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]),
- ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]),
- ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]),
- ok = file_write_read_file_info_opts(Handle, Name, 1, [{time, posix}]),
- ok = file_write_read_file_info_opts(Handle, Name, -1, [{time, posix}]),
- ok = file_write_read_file_info_opts(Handle, Name, 300000, [{time, posix}]),
- ok = file_write_read_file_info_opts(Handle, Name, -300000, [{time, posix}]),
- ok = file_write_read_file_info_opts(Handle, Name, 0, [{time, posix}]),
-
- ok = ?PRIM_FILE:stop(Handle),
- test_server:timetrap_cancel(Dog),
+ ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]),
+ ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]),
+ %% will not work on platforms with unsigned time_t
+ %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]),
+ %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]),
+ ok = file_write_read_file_info_opts(Name, 1, [{time, posix}]),
+ %% will not work on platforms with unsigned time_t
+ %ok = file_write_read_file_info_opts(Name, -1, [{time, posix}]),
+ %ok = file_write_read_file_info_opts(Name, -300000, [{time, posix}]),
+ ok = file_write_read_file_info_opts(Name, 300000, [{time, posix}]),
+ ok = file_write_read_file_info_opts(Name, 0, [{time, posix}]),
+
ok.
-file_write_read_file_info_opts(Handle, Name, Mtime, Opts) ->
- {ok, FI} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]),
+file_write_read_file_info_opts(Name, Mtime, Opts) ->
+ {ok, FI} = ?PRIM_FILE:read_file_info(Name, Opts),
FI2 = FI#file_info{ mtime = Mtime },
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI2, Opts]),
- {ok, FI2} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]),
+ ok = ?PRIM_FILE:write_file_info(Name, FI2, Opts),
+ {ok, FI3} = ?PRIM_FILE:read_file_info(Name, Opts),
+ io:format("Expecting mtime = ~p, got ~p~n", [FI2#file_info.mtime, FI3#file_info.mtime]),
+ FI2 = FI3,
ok.
@@ -1166,136 +970,131 @@ file_write_read_file_info_opts(Handle, Name, Mtime, Opts) ->
%% Returns a directory on a file system that has correct file times.
get_good_directory(Config) ->
- ?line ?config(priv_dir, Config).
+ proplists:get_value(priv_dir, Config).
-truncate(suite) -> [];
-truncate(doc) -> [];
truncate(Config) when is_list(Config) ->
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_truncate.fil"),
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_truncate.fil"),
%% Create a file with some data.
- ?line MyData = "0123456789abcdefghijklmnopqrstuvxyz",
- ?line ok = ?PRIM_FILE:write_file(Name, MyData),
+ MyData = "0123456789abcdefghijklmnopqrstuvxyz",
+ ok = ?PRIM_FILE:write_file(Name, MyData),
%% Truncate the file to 10 characters.
- ?line {ok, Fd} = ?PRIM_FILE:open(Name, [read, write]),
- ?line {ok, 10} = ?PRIM_FILE:position(Fd, 10),
- ?line ok = ?PRIM_FILE:truncate(Fd),
- ?line ok = ?PRIM_FILE:close(Fd),
+ {ok, Fd} = ?PRIM_FILE:open(Name, [read, write]),
+ {ok, 10} = ?PRIM_FILE:position(Fd, 10),
+ ok = ?PRIM_FILE:truncate(Fd),
+ ok = ?PRIM_FILE:close(Fd),
%% Read back the file and check that it has been truncated.
- ?line Expected = list_to_binary("0123456789"),
- ?line {ok, Expected} = ?PRIM_FILE:read_file(Name),
+ Expected = list_to_binary("0123456789"),
+ {ok, Expected} = ?PRIM_FILE:read_file(Name),
%% Open the file read only and verify that it is not possible to
%% truncate it, OTP-1960
- ?line {ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
- ?line {ok, 5} = ?PRIM_FILE:position(Fd2, 5),
- ?line {error, _} = ?PRIM_FILE:truncate(Fd2),
+ {ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
+ {ok, 5} = ?PRIM_FILE:position(Fd2, 5),
+ {error, _} = ?PRIM_FILE:truncate(Fd2),
ok.
-datasync(suite) -> [];
-datasync(doc) -> "Tests that ?PRIM_FILE:datasync/1 at least doesn't crash.";
+%% Tests that ?PRIM_FILE:datasync/1 at least doesn't crash.
datasync(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line Sync = filename:join(PrivDir,
- atom_to_list(?MODULE)
- ++"_sync.fil"),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Sync = filename:join(PrivDir,
+ atom_to_list(?MODULE)
+ ++"_sync.fil"),
%% Raw open.
- ?line {ok, Fd} = ?PRIM_FILE:open(Sync, [write]),
- ?line ok = ?PRIM_FILE:datasync(Fd),
- ?line ok = ?PRIM_FILE:close(Fd),
+ {ok, Fd} = ?PRIM_FILE:open(Sync, [write]),
+ ok = ?PRIM_FILE:datasync(Fd),
+ ok = ?PRIM_FILE:close(Fd),
ok.
-sync(suite) -> [];
-sync(doc) -> "Tests that ?PRIM_FILE:sync/1 at least doesn't crash.";
+%% Tests that ?PRIM_FILE:sync/1 at least doesn't crash.
sync(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line Sync = filename:join(PrivDir,
- atom_to_list(?MODULE)
- ++"_sync.fil"),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Sync = filename:join(PrivDir,
+ atom_to_list(?MODULE)
+ ++"_sync.fil"),
%% Raw open.
- ?line {ok, Fd} = ?PRIM_FILE:open(Sync, [write]),
- ?line ok = ?PRIM_FILE:sync(Fd),
- ?line ok = ?PRIM_FILE:close(Fd),
+ {ok, Fd} = ?PRIM_FILE:open(Sync, [write]),
+ ok = ?PRIM_FILE:sync(Fd),
+ ok = ?PRIM_FILE:close(Fd),
ok.
-advise(suite) -> [];
-advise(doc) -> "Tests that ?PRIM_FILE:advise/4 at least doesn't crash.";
+%% Tests that ?PRIM_FILE:advise/4 at least doesn't crash.
advise(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line Advise = filename:join(PrivDir,
- atom_to_list(?MODULE)
- ++"_advise.fil"),
-
- Line1 = "Hello\n",
- Line2 = "World!\n",
-
- ?line {ok, Fd} = ?PRIM_FILE:open(Advise, [write]),
- ?line ok = ?PRIM_FILE:advise(Fd, 0, 0, normal),
- ?line ok = ?PRIM_FILE:write(Fd, Line1),
- ?line ok = ?PRIM_FILE:write(Fd, Line2),
- ?line ok = ?PRIM_FILE:close(Fd),
-
- ?line {ok, Fd2} = ?PRIM_FILE:open(Advise, [write]),
- ?line ok = ?PRIM_FILE:advise(Fd2, 0, 0, random),
- ?line ok = ?PRIM_FILE:write(Fd2, Line1),
- ?line ok = ?PRIM_FILE:write(Fd2, Line2),
- ?line ok = ?PRIM_FILE:close(Fd2),
-
- ?line {ok, Fd3} = ?PRIM_FILE:open(Advise, [write]),
- ?line ok = ?PRIM_FILE:advise(Fd3, 0, 0, sequential),
- ?line ok = ?PRIM_FILE:write(Fd3, Line1),
- ?line ok = ?PRIM_FILE:write(Fd3, Line2),
- ?line ok = ?PRIM_FILE:close(Fd3),
-
- ?line {ok, Fd4} = ?PRIM_FILE:open(Advise, [write]),
- ?line ok = ?PRIM_FILE:advise(Fd4, 0, 0, will_need),
- ?line ok = ?PRIM_FILE:write(Fd4, Line1),
- ?line ok = ?PRIM_FILE:write(Fd4, Line2),
- ?line ok = ?PRIM_FILE:close(Fd4),
-
- ?line {ok, Fd5} = ?PRIM_FILE:open(Advise, [write]),
- ?line ok = ?PRIM_FILE:advise(Fd5, 0, 0, dont_need),
- ?line ok = ?PRIM_FILE:write(Fd5, Line1),
- ?line ok = ?PRIM_FILE:write(Fd5, Line2),
- ?line ok = ?PRIM_FILE:close(Fd5),
-
- ?line {ok, Fd6} = ?PRIM_FILE:open(Advise, [write]),
- ?line ok = ?PRIM_FILE:advise(Fd6, 0, 0, no_reuse),
- ?line ok = ?PRIM_FILE:write(Fd6, Line1),
- ?line ok = ?PRIM_FILE:write(Fd6, Line2),
- ?line ok = ?PRIM_FILE:close(Fd6),
-
- ?line {ok, Fd7} = ?PRIM_FILE:open(Advise, [write]),
- ?line {error, einval} = ?PRIM_FILE:advise(Fd7, 0, 0, bad_advise),
- ?line ok = ?PRIM_FILE:close(Fd7),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Advise = filename:join(PrivDir,
+ atom_to_list(?MODULE)
+ ++"_advise.fil"),
+
+ Line1 = <<"Hello\n">>,
+ Line2 = <<"World!\n">>,
+
+ {ok, Fd} = ?PRIM_FILE:open(Advise, [write]),
+ ok = ?PRIM_FILE:advise(Fd, 0, 0, normal),
+ ok = ?PRIM_FILE:write(Fd, Line1),
+ ok = ?PRIM_FILE:write(Fd, Line2),
+ ok = ?PRIM_FILE:close(Fd),
+
+ {ok, Fd2} = ?PRIM_FILE:open(Advise, [write]),
+ ok = ?PRIM_FILE:advise(Fd2, 0, 0, random),
+ ok = ?PRIM_FILE:write(Fd2, Line1),
+ ok = ?PRIM_FILE:write(Fd2, Line2),
+ ok = ?PRIM_FILE:close(Fd2),
+
+ {ok, Fd3} = ?PRIM_FILE:open(Advise, [write]),
+ ok = ?PRIM_FILE:advise(Fd3, 0, 0, sequential),
+ ok = ?PRIM_FILE:write(Fd3, Line1),
+ ok = ?PRIM_FILE:write(Fd3, Line2),
+ ok = ?PRIM_FILE:close(Fd3),
+
+ {ok, Fd4} = ?PRIM_FILE:open(Advise, [write]),
+ ok = ?PRIM_FILE:advise(Fd4, 0, 0, will_need),
+ ok = ?PRIM_FILE:write(Fd4, Line1),
+ ok = ?PRIM_FILE:write(Fd4, Line2),
+ ok = ?PRIM_FILE:close(Fd4),
+
+ {ok, Fd5} = ?PRIM_FILE:open(Advise, [write]),
+ ok = ?PRIM_FILE:advise(Fd5, 0, 0, dont_need),
+ ok = ?PRIM_FILE:write(Fd5, Line1),
+ ok = ?PRIM_FILE:write(Fd5, Line2),
+ ok = ?PRIM_FILE:close(Fd5),
+
+ {ok, Fd6} = ?PRIM_FILE:open(Advise, [write]),
+ ok = ?PRIM_FILE:advise(Fd6, 0, 0, no_reuse),
+ ok = ?PRIM_FILE:write(Fd6, Line1),
+ ok = ?PRIM_FILE:write(Fd6, Line2),
+ ok = ?PRIM_FILE:close(Fd6),
+
+ {ok, Fd7} = ?PRIM_FILE:open(Advise, [write]),
+ {error, einval} = ?PRIM_FILE:advise(Fd7, 0, 0, bad_advise),
+ ok = ?PRIM_FILE:close(Fd7),
%% test write without advise, then a read after an advise
- ?line {ok, Fd8} = ?PRIM_FILE:open(Advise, [write]),
- ?line ok = ?PRIM_FILE:write(Fd8, Line1),
- ?line ok = ?PRIM_FILE:write(Fd8, Line2),
- ?line ok = ?PRIM_FILE:close(Fd8),
- ?line {ok, Fd9} = ?PRIM_FILE:open(Advise, [read]),
+ {ok, Fd8} = ?PRIM_FILE:open(Advise, [write]),
+ ok = ?PRIM_FILE:write(Fd8, Line1),
+ ok = ?PRIM_FILE:write(Fd8, Line2),
+ ok = ?PRIM_FILE:close(Fd8),
+ {ok, Fd9} = ?PRIM_FILE:open(Advise, [read]),
Offset = 0,
%% same as a 0 length in some implementations
- Length = length(Line1) + length(Line2),
- ?line ok = ?PRIM_FILE:advise(Fd9, Offset, Length, sequential),
- ?line {ok, Line1} = ?PRIM_FILE:read_line(Fd9),
- ?line {ok, Line2} = ?PRIM_FILE:read_line(Fd9),
- ?line eof = ?PRIM_FILE:read_line(Fd9),
- ?line ok = ?PRIM_FILE:close(Fd9),
+ Length = byte_size(Line1) + byte_size(Line2),
+ ok = ?PRIM_FILE:advise(Fd9, Offset, Length, sequential),
+ {ok, Line1} = ?PRIM_FILE:read_line(Fd9),
+ {ok, Line2} = ?PRIM_FILE:read_line(Fd9),
+ eof = ?PRIM_FILE:read_line(Fd9),
+ ok = ?PRIM_FILE:close(Fd9),
ok.
@@ -1307,7 +1106,6 @@ large_write(Config) when is_list(Config) ->
"_large_write").
do_large_write(Name) ->
- Dog = test_server:timetrap(test_server:minutes(60)),
ChunkSize = (256 bsl 20) + 1, % 256 M + 1
Chunks = 16, % times 16 -> 4 G + 16
Base = 100,
@@ -1315,192 +1113,159 @@ do_large_write(Name) ->
Chunk = <<0:ChunkSize/unit:8>>,
Data = zip_data(lists:duplicate(Chunks, Chunk), Interleave),
Size = Chunks * ChunkSize + Chunks, % 4 G + 32
- Wordsize = erlang:system_info(wordsize),
- case prim_file:write_file(Name, Data) of
- ok when Wordsize =:= 8 ->
- {ok,#file_info{size=Size}} = file:read_file_info(Name),
- {ok,Fd} = prim_file:open(Name, [read]),
- check_large_write(Dog, Fd, ChunkSize, 0, Interleave);
- {error,einval} when Wordsize =:= 4 ->
- ok
- end.
+ ok = ?PRIM_FILE:write_file(Name, Data),
+ {ok,#file_info{size=Size}} = file:read_file_info(Name),
+ {ok,Fd} = ?PRIM_FILE:open(Name, [read]),
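+ %% check_large_write/4 seeks past each chunk and verifies the interleaved marker byte.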
+ check_large_write(Fd, ChunkSize, 0, Interleave).
-check_large_write(Dog, Fd, ChunkSize, Pos, [X|Interleave]) ->
+check_large_write(Fd, ChunkSize, Pos, [X|Interleave]) ->
Pos1 = Pos + ChunkSize,
- {ok,Pos1} = prim_file:position(Fd, {cur,ChunkSize}),
- {ok,[X]} = prim_file:read(Fd, 1),
- check_large_write(Dog, Fd, ChunkSize, Pos1+1, Interleave);
-check_large_write(Dog, Fd, _, _, []) ->
- eof = prim_file:read(Fd, 1),
- test_server:timetrap_cancel(Dog),
+ {ok,Pos1} = ?PRIM_FILE:position(Fd, {cur,ChunkSize}),
+ {ok,<<X>>} = ?PRIM_FILE:read(Fd, 1),
+ check_large_write(Fd, ChunkSize, Pos1+1, Interleave);
+check_large_write(Fd, _, _, []) ->
+ eof = ?PRIM_FILE:read(Fd, 1),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-allocate(suite) -> [];
-allocate(doc) -> "Tests that ?PRIM_FILE:allocate/3 at least doesn't crash.";
+%% Tests that ?PRIM_FILE:allocate/3 at least doesn't crash.
allocate(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
- ?line Allocate = filename:join(PrivDir,
- atom_to_list(?MODULE)
- ++"_allocate.fil"),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Allocate = filename:join(PrivDir,
+ atom_to_list(?MODULE)
+ ++"_allocate.fil"),
Line1 = "Hello\n",
Line2 = "World!\n",
- ?line {ok, Fd} = ?PRIM_FILE:open(Allocate, [write, binary]),
+ {ok, Fd} = ?PRIM_FILE:open(Allocate, [write, binary]),
allocate_and_assert(Fd, 1, iolist_size([Line1, Line2])),
- ?line ok = ?PRIM_FILE:write(Fd, Line1),
- ?line ok = ?PRIM_FILE:write(Fd, Line2),
- ?line ok = ?PRIM_FILE:close(Fd),
+ ok = ?PRIM_FILE:write(Fd, Line1),
+ ok = ?PRIM_FILE:write(Fd, Line2),
+ ok = ?PRIM_FILE:close(Fd),
- ?line {ok, Fd2} = ?PRIM_FILE:open(Allocate, [write, binary]),
+ {ok, Fd2} = ?PRIM_FILE:open(Allocate, [write, binary]),
allocate_and_assert(Fd2, 1, iolist_size(Line1)),
- ?line ok = ?PRIM_FILE:write(Fd2, Line1),
- ?line ok = ?PRIM_FILE:write(Fd2, Line2),
- ?line ok = ?PRIM_FILE:close(Fd2),
+ ok = ?PRIM_FILE:write(Fd2, Line1),
+ ok = ?PRIM_FILE:write(Fd2, Line2),
+ ok = ?PRIM_FILE:close(Fd2),
- ?line {ok, Fd3} = ?PRIM_FILE:open(Allocate, [write, binary]),
+ {ok, Fd3} = ?PRIM_FILE:open(Allocate, [write, binary]),
allocate_and_assert(Fd3, 1, iolist_size(Line1) + 1),
- ?line ok = ?PRIM_FILE:write(Fd3, Line1),
- ?line ok = ?PRIM_FILE:write(Fd3, Line2),
- ?line ok = ?PRIM_FILE:close(Fd3),
+ ok = ?PRIM_FILE:write(Fd3, Line1),
+ ok = ?PRIM_FILE:write(Fd3, Line2),
+ ok = ?PRIM_FILE:close(Fd3),
- ?line {ok, Fd4} = ?PRIM_FILE:open(Allocate, [write, binary]),
+ {ok, Fd4} = ?PRIM_FILE:open(Allocate, [write, binary]),
allocate_and_assert(Fd4, 1, 4 * iolist_size([Line1, Line2])),
- ?line ok = ?PRIM_FILE:write(Fd4, Line1),
- ?line ok = ?PRIM_FILE:write(Fd4, Line2),
- ?line ok = ?PRIM_FILE:close(Fd4),
+ ok = ?PRIM_FILE:write(Fd4, Line1),
+ ok = ?PRIM_FILE:write(Fd4, Line2),
+ ok = ?PRIM_FILE:close(Fd4),
ok.
allocate_and_assert(Fd, Offset, Length) ->
- % Just verify that calls to ?PRIM_FILE:allocate/3 don't crash or have
- % any other negative side effect. We can't really asssert against a
- % specific return value, because support for file space pre-allocation
- % depends on the OS, OS version and underlying filesystem.
- %
- % The Linux kernel added support for fallocate() in version 2.6.23,
- % which currently works only for the ext4, ocfs2, xfs and btrfs file
- % systems. posix_fallocate() is available in glibc as of version
- % 2.1.94, but it was buggy until glibc version 2.7.
- %
- % Mac OS X, as of version 10.3, supports the fcntl operation F_PREALLOCATE.
- %
- % Solaris supports posix_fallocate() but only for the UFS file system
- % apparently (not supported for ZFS).
- %
- % FreeBSD 9.0 is the first FreeBSD release supporting posix_fallocate().
- %
- % For Windows there's apparently no way to pre-allocate file space, at
- % least with similar API/semantics as posix_fallocate(), fallocate() or
- % fcntl F_PREALLOCATE.
+ %% Just verify that calls to ?PRIM_FILE:allocate/3 don't crash or have
+ %% any other negative side effect. We can't really assert against a
+ %% specific return value, because support for file space pre-allocation
+ %% depends on the OS, OS version and underlying filesystem.
+ %%
+ %% The Linux kernel added support for fallocate() in version 2.6.23,
+ %% which currently works only for the ext4, ocfs2, xfs and btrfs file
+ %% systems. posix_fallocate() is available in glibc as of version
+ %% 2.1.94, but it was buggy until glibc version 2.7.
+ %%
+ %% Mac OS X, as of version 10.3, supports the fcntl operation F_PREALLOCATE.
+ %%
+ %% Solaris supports posix_fallocate() but only for the UFS file system
+ %% apparently (not supported for ZFS).
+ %%
+ %% FreeBSD 9.0 is the first FreeBSD release supporting posix_fallocate().
+ %%
+ %% For Windows there's apparently no way to pre-allocate file space, at
+ %% least with similar API/semantics as posix_fallocate(), fallocate() or
+ %% fcntl F_PREALLOCATE.
Result = ?PRIM_FILE:allocate(Fd, Offset, Length),
case os:type() of
{win32, _} ->
- ?line {error, enotsup} = Result;
+ {error, enotsup} = Result;
_ ->
- ?line _ = Result
+ _ = Result
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-delete_a(suite) -> [];
-delete_a(doc) -> [];
-delete_a(Config) when is_list(Config) ->
- delete(Config, [], "_a").
-
-delete_b(suite) -> [];
-delete_b(doc) -> [];
-delete_b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = delete(Config, Handle, "_b"),
- ?line ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-delete(Config, Handle, Suffix) ->
- ?line RootDir = ?config(priv_dir,Config),
- ?line Name = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_delete"++Suffix++".fil"),
- ?line {ok, Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?line ?PRIM_FILE:write(Fd1,"ok.\n"),
- ?line ok = ?PRIM_FILE:close(Fd1),
+delete(Config) when is_list(Config) ->
+ RootDir = proplists:get_value(priv_dir,Config),
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_delete.fil"),
+ {ok, Fd1} = ?PRIM_FILE:open(Name, [write]),
+ ?PRIM_FILE:write(Fd1,"ok.\n"),
+ ok = ?PRIM_FILE:close(Fd1),
%% Check that the file is readable
- ?line {ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
- ?line ok = ?PRIM_FILE:close(Fd2),
- ?line ok = ?PRIM_FILE_call(delete, Handle, [Name]),
+ {ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
+ ok = ?PRIM_FILE:close(Fd2),
+ ok = ?PRIM_FILE:delete(Name),
%% Check that the file is not readable anymore
- ?line {error, _} = ?PRIM_FILE:open(Name, [read]),
+ {error, _} = ?PRIM_FILE:open(Name, [read]),
%% Try deleting a nonexistent file
- ?line {error, enoent} = ?PRIM_FILE_call(delete, Handle, [Name]),
+ {error, enoent} = ?PRIM_FILE:delete(Name),
ok.
-rename_a(suite) ->[];
-rename_a(doc) ->[];
-rename_a(Config) when is_list(Config) ->
- rename(Config, [], "_a").
-
-rename_b(suite) ->[];
-rename_b(doc) ->[];
-rename_b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = rename(Config, Handle, "_b"),
- ?line ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-rename(Config, Handle, Suffix) ->
- ?line RootDir = ?config(priv_dir,Config),
- ?line FileName1 = atom_to_list(?MODULE)++"_rename"++Suffix++".fil",
- ?line FileName2 = atom_to_list(?MODULE)++"_rename"++Suffix++".ful",
- ?line Name1 = filename:join(RootDir, FileName1),
- ?line Name2 = filename:join(RootDir, FileName2),
- ?line {ok,Fd1} = ?PRIM_FILE:open(Name1, [write]),
- ?line ok = ?PRIM_FILE:close(Fd1),
+rename(Config) when is_list(Config) ->
+ RootDir = proplists:get_value(priv_dir,Config),
+ FileName1 = atom_to_list(?MODULE)++"_rename.fil",
+ FileName2 = atom_to_list(?MODULE)++"_rename.ful",
+ Name1 = filename:join(RootDir, FileName1),
+ Name2 = filename:join(RootDir, FileName2),
+ {ok,Fd1} = ?PRIM_FILE:open(Name1, [write]),
+ ok = ?PRIM_FILE:close(Fd1),
%% Rename, and check that it really changed name
- ?line ok = ?PRIM_FILE_call(rename, Handle, [Name1, Name2]),
- ?line {error, _} = ?PRIM_FILE:open(Name1, [read]),
- ?line {ok, Fd2} = ?PRIM_FILE:open(Name2, [read]),
- ?line ok = ?PRIM_FILE:close(Fd2),
+ ok = ?PRIM_FILE:rename(Name1, Name2),
+ {error, _} = ?PRIM_FILE:open(Name1, [read]),
+ {ok, Fd2} = ?PRIM_FILE:open(Name2, [read]),
+ ok = ?PRIM_FILE:close(Fd2),
%% Try renaming something to itself
- ?line ok = ?PRIM_FILE_call(rename, Handle, [Name2, Name2]),
+ ok = ?PRIM_FILE:rename(Name2, Name2),
%% Try renaming something that doesn't exist
- ?line {error, enoent} =
- ?PRIM_FILE_call(rename, Handle, [Name1, Name2]),
+ {error, enoent} =
+ ?PRIM_FILE:rename(Name1, Name2),
%% Try renaming to something else than a string
- ?line {error, badarg} =
- ?PRIM_FILE_call(rename, Handle, [Name1, foobar]),
-
+ {error, badarg} =
+ ?PRIM_FILE:rename(Name1, foobar),
+
%% Move between directories
- ?line DirName1 = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_rename_dir"++Suffix),
- ?line DirName2 = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_second_rename_dir"++Suffix),
- ?line Name1foo = filename:join(DirName1, "foo.fil"),
- ?line Name2foo = filename:join(DirName2, "foo.fil"),
- ?line Name2bar = filename:join(DirName2, "bar.dir"),
- ?line ok = ?PRIM_FILE:make_dir(DirName1),
+ DirName1 = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_rename_dir"),
+ DirName2 = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_second_rename_dir"),
+ Name1foo = filename:join(DirName1, "foo.fil"),
+ Name2foo = filename:join(DirName2, "foo.fil"),
+ Name2bar = filename:join(DirName2, "bar.dir"),
+ ok = ?PRIM_FILE:make_dir(DirName1),
%% The name has to include the full file name, path is not enough
- ?line expect(
- {error, eexist}, {error, eisdir},
- ?PRIM_FILE_call(rename, Handle, [Name2, DirName1])),
- ?line ok =
- ?PRIM_FILE_call(rename, Handle, [Name2, Name1foo]),
+ expect(
+ {error, eexist}, {error, eisdir},
+ ?PRIM_FILE:rename(Name2, DirName1)),
+ ok =
+ ?PRIM_FILE:rename(Name2, Name1foo),
%% Now rename the directory
- ?line ok = ?PRIM_FILE_call(rename, Handle, [DirName1, DirName2]),
+ ok = ?PRIM_FILE:rename(DirName1, DirName2),
%% And check that the file is there now
- ?line {ok,Fd3} = ?PRIM_FILE:open(Name2foo, [read]),
- ?line ok = ?PRIM_FILE:close(Fd3),
+ {ok,Fd3} = ?PRIM_FILE:open(Name2foo, [read]),
+ ok = ?PRIM_FILE:close(Fd3),
%% Try some dirty things now: move the directory into itself
- ?line {error, Msg1} =
- ?PRIM_FILE_call(rename, Handle, [DirName2, Name2bar]),
- ?line io:format("Errmsg1: ~p",[Msg1]),
+ {error, Msg1} =
+ ?PRIM_FILE:rename(DirName2, Name2bar),
+ io:format("Errmsg1: ~p",[Msg1]),
%% move dir into a file in itself
- ?line {error, Msg2} =
- ?PRIM_FILE_call(rename, Handle, [DirName2, Name2foo]),
- ?line io:format("Errmsg2: ~p",[Msg2]),
+ {error, Msg2} =
+ ?PRIM_FILE:rename(DirName2, Name2foo),
+ io:format("Errmsg2: ~p",[Msg2]),
ok.
@@ -1509,45 +1274,42 @@ rename(Config, Handle, Suffix) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-e_delete(suite) -> [];
-e_delete(doc) -> [];
e_delete(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line Base = filename:join(RootDir,
- atom_to_list(?MODULE)++"_e_delete"),
- ?line ok = ?PRIM_FILE:make_dir(Base),
+ RootDir = proplists:get_value(priv_dir, Config),
+ Base = filename:join(RootDir,
+ atom_to_list(?MODULE)++"_e_delete"),
+ ok = ?PRIM_FILE:make_dir(Base),
%% Delete a non-existing file.
- ?line {error, enoent} =
+ {error, enoent} =
?PRIM_FILE:delete(filename:join(Base, "non_existing")),
%% Delete a directory.
- ?line {error, eperm} = ?PRIM_FILE:delete(Base),
+ {error, eperm} = ?PRIM_FILE:delete(Base),
%% Use a path-name with a non-directory component.
- ?line Afile = filename:join(Base, "a_file"),
- ?line ok = ?PRIM_FILE:write_file(Afile, "hello\n"),
- ?line {error, E} =
+ Afile = filename:join(Base, "a_file"),
+ ok = ?PRIM_FILE:write_file(Afile, "hello\n"),
+ {error, E} =
expect(
{error, enotdir}, {error, enoent},
?PRIM_FILE:delete(filename:join(Afile, "another_file"))),
- ?line io:format("Result: ~p~n", [E]),
+ io:format("Result: ~p~n", [E]),
%% No permission.
- ?line case os:type() of
- {win32, _} ->
- %% Remove a character device.
- ?line {error, eacces} = ?PRIM_FILE:delete("nul");
- _ ->
- ?line ?PRIM_FILE:write_file_info(
- Base, #file_info {mode=0}),
- ?line {error, eacces} = ?PRIM_FILE:delete(Afile),
- ?line ?PRIM_FILE:write_file_info(
- Base, #file_info {mode=8#600})
- end,
-
- ?line test_server:timetrap_cancel(Dog),
+ case os:type() of
+ {win32, _} ->
+ %% Remove a character device.
+ expect({error, eacces}, {error, einval},
+ ?PRIM_FILE:delete("nul"));
+ _ ->
+ ?PRIM_FILE:write_file_info(
+ Base, #file_info {mode=0}),
+ {error, eacces} = ?PRIM_FILE:delete(Afile),
+ ?PRIM_FILE:write_file_info(
+ Base, #file_info {mode=8#700})
+ end,
+
ok.
%%% FreeBSD gives EEXIST when renaming a file to an empty dir, although the
@@ -1555,66 +1317,63 @@ e_delete(Config) when is_list(Config) ->
%%% (What about FreeBSD? We store our nightly build results on a FreeBSD
%%% file system, that's what.)
-e_rename(suite) -> [];
-e_rename(doc) -> [];
e_rename(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line Base = filename:join(RootDir,
- atom_to_list(?MODULE)++"_e_rename"),
- ?line ok = ?PRIM_FILE:make_dir(Base),
+ RootDir = proplists:get_value(priv_dir, Config),
+ Base = filename:join(RootDir,
+ atom_to_list(?MODULE)++"_e_rename"),
+ ok = ?PRIM_FILE:make_dir(Base),
%% Create an empty directory.
- ?line EmptyDir = filename:join(Base, "empty_dir"),
- ?line ok = ?PRIM_FILE:make_dir(EmptyDir),
+ EmptyDir = filename:join(Base, "empty_dir"),
+ ok = ?PRIM_FILE:make_dir(EmptyDir),
%% Create a non-empty directory.
- ?line NonEmptyDir = filename:join(Base, "non_empty_dir"),
- ?line ok = ?PRIM_FILE:make_dir(NonEmptyDir),
- ?line ok = ?PRIM_FILE:write_file(
- filename:join(NonEmptyDir, "a_file"),
- "hello\n"),
+ NonEmptyDir = filename:join(Base, "non_empty_dir"),
+ ok = ?PRIM_FILE:make_dir(NonEmptyDir),
+ ok = ?PRIM_FILE:write_file(
+ filename:join(NonEmptyDir, "a_file"),
+ "hello\n"),
%% Create another non-empty directory.
- ?line ADirectory = filename:join(Base, "a_directory"),
- ?line ok = ?PRIM_FILE:make_dir(ADirectory),
- ?line ok = ?PRIM_FILE:write_file(
- filename:join(ADirectory, "a_file"),
- "howdy\n\n"),
+ ADirectory = filename:join(Base, "a_directory"),
+ ok = ?PRIM_FILE:make_dir(ADirectory),
+ ok = ?PRIM_FILE:write_file(
+ filename:join(ADirectory, "a_file"),
+ "howdy\n\n"),
%% Create a data file.
- ?line File = filename:join(Base, "just_a_file"),
- ?line ok = ?PRIM_FILE:write_file(File, "anything goes\n\n"),
+ File = filename:join(Base, "just_a_file"),
+ ok = ?PRIM_FILE:write_file(File, "anything goes\n\n"),
%% Move an existing directory to a non-empty directory.
- ?line {error, eexist} =
- ?PRIM_FILE:rename(ADirectory, NonEmptyDir),
+ {error, eexist} =
+ ?PRIM_FILE:rename(ADirectory, NonEmptyDir),
%% Move a root directory.
- ?line {error, einval} = ?PRIM_FILE:rename("/", "arne"),
+ {error, einval} = ?PRIM_FILE:rename("/", "arne"),
%% Move Base into Base/new_name.
- ?line {error, einval} =
- ?PRIM_FILE:rename(Base, filename:join(Base, "new_name")),
+ {error, einval} =
+ ?PRIM_FILE:rename(Base, filename:join(Base, "new_name")),
%% Overwrite a directory with a file.
- ?line expect({error, eexist}, % FreeBSD (?)
- {error, eisdir},
- ?PRIM_FILE:rename(File, EmptyDir)),
- ?line expect({error, eexist}, % FreeBSD (?)
- {error, eisdir},
- ?PRIM_FILE:rename(File, NonEmptyDir)),
+ expect({error, eexist}, % FreeBSD (?)
+ {error, eisdir},
+ ?PRIM_FILE:rename(File, EmptyDir)),
+ expect({error, eexist}, % FreeBSD (?)
+ {error, eisdir},
+ ?PRIM_FILE:rename(File, NonEmptyDir)),
%% Move a non-existing file.
- ?line NonExistingFile = filename:join(
- Base, "non_existing_file"),
- ?line {error, enoent} =
- ?PRIM_FILE:rename(NonExistingFile, NonEmptyDir),
+ NonExistingFile = filename:join(
+ Base, "non_existing_file"),
+ {error, enoent} =
+ ?PRIM_FILE:rename(NonExistingFile, NonEmptyDir),
%% Overwrite a file with a directory.
- ?line expect({error, eexist}, % FreeBSD (?)
- {error, enotdir},
- ?PRIM_FILE:rename(ADirectory, File)),
+ expect({error, eexist}, % FreeBSD (?)
+ {error, enotdir},
+ ?PRIM_FILE:rename(ADirectory, File)),
%% Move a file to another filesystem.
%% XXX - This test case is bogus. We cannot be guaranteed that
@@ -1622,315 +1381,136 @@ e_rename(Config) when is_list(Config) ->
%% different filesystems.
%%
%% XXX - Gross hack!
- ?line Comment =
- case os:type() of
- {win32, _} ->
- %% At least Windows NT can
- %% successfully move a file to
- %% another drive.
- ok;
- {unix, _ } ->
- OtherFs = "/tmp",
- ?line NameOnOtherFs =
- filename:join(OtherFs,
- filename:basename(File)),
- ?line {ok, Com} =
- case ?PRIM_FILE:rename(
- File, NameOnOtherFs) of
- {error, exdev} ->
- %% The file could be in
- %% the same filesystem!
- {ok, ok};
- ok ->
- {ok, {comment,
- "Moving between filesystems "
- "suceeded, files are probably "
- "in the same filesystem!"}};
- {error, eperm} ->
- {ok, {comment, "SBS! You don't "
- "have the permission to do "
- "this test!"}};
- Else ->
- Else
- end,
- Com;
- {ose, _} ->
- %% disabled for now
- ok
- end,
- ?line test_server:timetrap_cancel(Dog),
+ Comment =
+ case os:type() of
+ {win32, _} ->
+ %% At least Windows NT can
+ %% successfully move a file to
+ %% another drive.
+ ok;
+ _ ->
+ OtherFs = "/tmp",
+ NameOnOtherFs =
+ filename:join(OtherFs,
+ filename:basename(File)),
+ {ok, Com} =
+ case ?PRIM_FILE:rename(
+ File, NameOnOtherFs) of
+ {error, exdev} ->
+ %% The file could be in
+ %% the same filesystem!
+ {ok, ok};
+ ok ->
+ {ok, {comment,
+ "Moving between filesystems "
+                                   "succeeded, files are probably "
+ "in the same filesystem!"}};
+ {error, eperm} ->
+ {ok, {comment, "SBS! You don't "
+ "have the permission to do "
+ "this test!"}};
+ Else ->
+ Else
+ end,
+ Com
+ end,
Comment.
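
The exdev branch above is the same situation ordinary applications hit when
moving a file across filesystems. A minimal sketch (illustrative only, not
part of this patch) of the usual copy-then-delete fallback, using the public
file module:

rename_or_copy(Src, Dst) ->
    case file:rename(Src, Dst) of
        ok ->
            ok;
        {error, exdev} ->
            %% Src and Dst live on different filesystems: copy the
            %% contents, then remove the original. Note that this does
            %% not preserve metadata such as timestamps or permissions.
            {ok, _BytesCopied} = file:copy(Src, Dst),
            file:delete(Src);
        {error, _} = Error ->
            Error
    end.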
-e_make_dir(suite) -> [];
-e_make_dir(doc) -> [];
e_make_dir(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line Base = filename:join(RootDir,
- atom_to_list(?MODULE)++"_e_make_dir"),
- ?line ok = ?PRIM_FILE:make_dir(Base),
+ RootDir = proplists:get_value(priv_dir, Config),
+ Base = filename:join(RootDir,
+ atom_to_list(?MODULE)++"_e_make_dir"),
+ ok = ?PRIM_FILE:make_dir(Base),
%% A component of the path does not exist.
- ?line {error, enoent} =
+ {error, enoent} =
?PRIM_FILE:make_dir(filename:join([Base, "a", "b"])),
%% Use a path-name with a non-directory component.
- ?line Afile = filename:join(Base, "a_directory"),
- ?line ok = ?PRIM_FILE:write_file(Afile, "hello\n"),
- ?line case ?PRIM_FILE:make_dir(
- filename:join(Afile, "another_directory")) of
- {error, enotdir} -> io:format("Result: enotdir");
- {error, enoent} -> io:format("Result: enoent")
- end,
+ Afile = filename:join(Base, "a_directory"),
+ ok = ?PRIM_FILE:write_file(Afile, "hello\n"),
+ case ?PRIM_FILE:make_dir(
+ filename:join(Afile, "another_directory")) of
+ {error, enotdir} -> io:format("Result: enotdir");
+ {error, enoent} -> io:format("Result: enoent")
+ end,
%% No permission (on Unix only).
case os:type() of
{win32, _} ->
ok;
_ ->
- ?line ?PRIM_FILE:write_file_info(Base, #file_info {mode=0}),
- ?line {error, eacces} =
+ ?PRIM_FILE:write_file_info(Base, #file_info {mode=0}),
+ {error, eacces} =
?PRIM_FILE:make_dir(filename:join(Base, "xxxx")),
- ?line
- ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#600})
+ ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#700})
end,
- ?line test_server:timetrap_cancel(Dog),
ok.
-e_del_dir(suite) -> [];
-e_del_dir(doc) -> [];
e_del_dir(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line Base = filename:join(RootDir,
- atom_to_list(?MODULE)++"_e_del_dir"),
- ?line io:format("Base: ~p", [Base]),
- ?line ok = ?PRIM_FILE:make_dir(Base),
+ RootDir = proplists:get_value(priv_dir, Config),
+ Base = filename:join(RootDir,
+ atom_to_list(?MODULE)++"_e_del_dir"),
+ io:format("Base: ~p", [Base]),
+ ok = ?PRIM_FILE:make_dir(Base),
%% Delete a non-existent directory.
- ?line {error, enoent} =
+ {error, enoent} =
?PRIM_FILE:del_dir(filename:join(Base, "non_existing")),
%% Use a path-name with a non-directory component.
- ?line Afile = filename:join(Base, "a_directory"),
- ?line ok = ?PRIM_FILE:write_file(Afile, "hello\n"),
- ?line {error, E1} =
+ Afile = filename:join(Base, "a_directory"),
+ ok = ?PRIM_FILE:write_file(Afile, "hello\n"),
+ {error, E1} =
expect({error, enotdir}, {error, enoent},
?PRIM_FILE:del_dir(
filename:join(Afile, "another_directory"))),
- ?line io:format("Result: ~p", [E1]),
+ io:format("Result: ~p", [E1]),
%% Delete a non-empty directory.
- ?line {error, E2} =
+ {error, E2} =
expect({error, enotempty}, {error, eexist}, {error, eacces},
?PRIM_FILE:del_dir(Base)),
- ?line io:format("Result: ~p", [E2]),
+ io:format("Result: ~p", [E2]),
%% Remove the current directory.
- ?line {error, E3} =
+ {error, E3} =
expect({error, einval},
{error, eperm}, % Linux and DUX
{error, eacces},
{error, ebusy},
?PRIM_FILE:del_dir(".")),
- ?line io:format("Result: ~p", [E3]),
+ io:format("Result: ~p", [E3]),
%% No permission.
case os:type() of
{win32, _} ->
ok;
_ ->
- ?line ADirectory = filename:join(Base, "no_perm"),
- ?line ok = ?PRIM_FILE:make_dir(ADirectory),
- ?line ?PRIM_FILE:write_file_info(Base, #file_info {mode=0}),
- ?line {error, eacces} = ?PRIM_FILE:del_dir(ADirectory),
- ?line ?PRIM_FILE:write_file_info(
- Base, #file_info {mode=8#600})
- end,
- ?line test_server:timetrap_cancel(Dog),
- ok.
-
-
-%% Trying reading and positioning from a compressed file.
-
-read_compressed(suite) -> [];
-read_compressed(doc) -> [];
-read_compressed(Config) when is_list(Config) ->
- ?line Data = ?config(data_dir, Config),
- ?line Real = filename:join(Data, "realmen.html.gz"),
- ?line {ok, Fd} = ?PRIM_FILE:open(Real, [read, compressed]),
- ?line try_read_file(Fd).
-
-%% Trying reading and positioning from an uncompressed file,
-%% but with the compressed flag given.
-
-read_not_really_compressed(suite) -> [];
-read_not_really_compressed(doc) -> [];
-read_not_really_compressed(Config) when is_list(Config) ->
- ?line Data = ?config(data_dir, Config),
- ?line Priv = ?config(priv_dir, Config),
-
- %% The file realmen.html might have got CRs added (by WinZip).
- %% Remove them, or the file positions will not be correct.
-
- ?line Real = filename:join(Data, "realmen.html"),
- ?line RealPriv = filename:join(Priv,
- atom_to_list(?MODULE)++"_realmen.html"),
- ?line {ok, RealDataBin} = ?PRIM_FILE:read_file(Real),
- ?line RealData = remove_crs(binary_to_list(RealDataBin), []),
- ?line ok = ?PRIM_FILE:write_file(RealPriv, RealData),
- ?line {ok, Fd} = ?PRIM_FILE:open(RealPriv, [read, compressed]),
- ?line try_read_file(Fd).
-
-remove_crs([$\r|Rest], Result) ->
- remove_crs(Rest, Result);
-remove_crs([C|Rest], Result) ->
- remove_crs(Rest, [C|Result]);
-remove_crs([], Result) ->
- lists:reverse(Result).
-
-try_read_file(Fd) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
-
- %% Seek to the current position (nothing should happen).
-
- ?line {ok, 0} = ?PRIM_FILE:position(Fd, 0),
- ?line {ok, 0} = ?PRIM_FILE:position(Fd, {cur, 0}),
-
- %% Read a few lines from a compressed file.
-
- ?line ShouldBe = "<TITLE>Real Programmers Don't Use PASCAL</TITLE>\n",
- ?line {ok, ShouldBe} = ?PRIM_FILE:read(Fd, length(ShouldBe)),
-
- %% Now seek forward.
-
- ?line {ok, 381} = ?PRIM_FILE:position(Fd, 381),
- ?line Back = "Back in the good old days -- the \"Golden Era\" " ++
- "of computers, it was\n",
- ?line {ok, Back} = ?PRIM_FILE:read(Fd, length(Back)),
-
- %% Try to search forward relative to the current position.
-
- ?line {ok, CurPos} = ?PRIM_FILE:position(Fd, {cur, 0}),
- ?line RealPos = 4273,
- ?line {ok, RealPos} = ?PRIM_FILE:position(Fd, {cur, RealPos-CurPos}),
- ?line RealProg = "<LI> Real Programmers aren't afraid to use GOTOs.\n",
- ?line {ok, RealProg} = ?PRIM_FILE:read(Fd, length(RealProg)),
-
- %% Seek backward.
-
- ?line AfterTitle = length("<TITLE>"),
- ?line {ok, AfterTitle} = ?PRIM_FILE:position(Fd, AfterTitle),
- ?line Title = "Real Programmers Don't Use PASCAL</TITLE>\n",
- ?line {ok, Title} = ?PRIM_FILE:read(Fd, length(Title)),
-
- %% Done.
-
- ?line ?PRIM_FILE:close(Fd),
- ?line test_server:timetrap_cancel(Dog),
- ok.
-
-write_compressed(suite) -> [];
-write_compressed(doc) -> [];
-write_compressed(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line Priv = ?config(priv_dir, Config),
- ?line MyFile = filename:join(Priv,
- atom_to_list(?MODULE)++"_test.gz"),
-
- %% Write a file.
-
- ?line {ok, Fd} = ?PRIM_FILE:open(MyFile, [write, compressed]),
- ?line {ok, 0} = ?PRIM_FILE:position(Fd, 0),
- ?line Prefix = "hello\n",
- ?line End = "end\n",
- ?line ok = ?PRIM_FILE:write(Fd, Prefix),
- ?line {ok, 143} = ?PRIM_FILE:position(Fd, 143),
- ?line ok = ?PRIM_FILE:write(Fd, End),
- ?line ok = ?PRIM_FILE:close(Fd),
-
- %% Read the file and verify the contents.
-
- ?line {ok, Fd1} = ?PRIM_FILE:open(MyFile, [read, compressed]),
- ?line {ok, Prefix} = ?PRIM_FILE:read(Fd1, length(Prefix)),
- ?line Second = lists:duplicate(143-length(Prefix), 0) ++ End,
- ?line {ok, Second} = ?PRIM_FILE:read(Fd1, length(Second)),
- ?line ok = ?PRIM_FILE:close(Fd1),
-
- %% Ensure that the file is compressed.
-
- TotalSize = 143 + length(End),
- case ?PRIM_FILE:read_file_info(MyFile) of
- {ok, #file_info{size=Size}} when Size < TotalSize ->
- ok;
- {ok, #file_info{size=Size}} when Size == TotalSize ->
- test_server:fail(file_not_compressed)
+ ADirectory = filename:join(Base, "no_perm"),
+ ok = ?PRIM_FILE:make_dir(ADirectory),
+ ?PRIM_FILE:write_file_info(Base, #file_info {mode=0}),
+ {error, eacces} = ?PRIM_FILE:del_dir(ADirectory),
+ ?PRIM_FILE:write_file_info(
+ Base, #file_info {mode=8#700})
end,
-
- %% Write again to ensure that the file is truncated.
-
- ?line {ok, Fd2} = ?PRIM_FILE:open(MyFile, [write, compressed]),
- ?line NewString = "aaaaaaaaaaa",
- ?line ok = ?PRIM_FILE:write(Fd2, NewString),
- ?line ok = ?PRIM_FILE:close(Fd2),
- ?line {ok, Fd3} = ?PRIM_FILE:open(MyFile, [read, compressed]),
- ?line {ok, NewString} = ?PRIM_FILE:read(Fd3, 1024),
- ?line ok = ?PRIM_FILE:close(Fd3),
-
- %% Done.
-
- ?line test_server:timetrap_cancel(Dog),
- ok.
-
-compress_errors(suite) -> [];
-compress_errors(doc) -> [];
-compress_errors(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line Data = ?config(data_dir, Config),
- ?line {error, enoent} = ?PRIM_FILE:open("non_existing__",
- [compressed, read]),
- ?line {error, einval} = ?PRIM_FILE:open("non_existing__",
- [compressed, read, write]),
-
- %% Read a corrupted .gz file.
-
- ?line Corrupted = filename:join(Data, "corrupted.gz"),
- ?line {ok, Fd} = ?PRIM_FILE:open(Corrupted, [read, compressed]),
- ?line {error, eio} = ?PRIM_FILE:read(Fd, 100),
- ?line ?PRIM_FILE:close(Fd),
-
- ?line test_server:timetrap_cancel(Dog),
ok.
-make_link_a(doc) -> "Test creating a hard link.";
-make_link_a(suite) -> [];
-make_link_a(Config) when is_list(Config) ->
- make_link(Config, [], "_a").
+make_link(Config) when is_list(Config) ->
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_make_link"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
-make_link_b(doc) -> "Test creating a hard link.";
-make_link_b(suite) -> [];
-make_link_b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = make_link(Config, Handle, "_b"),
- ?line ok = ?PRIM_FILE:stop(Handle),
- Result.
+ Name = filename:join(NewDir, "a_file"),
+ ok = ?PRIM_FILE:write_file(Name, "some contents\n"),
-make_link(Config, Handle, Suffix) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_make_link"++Suffix),
- ?line ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
-
- ?line Name = filename:join(NewDir, "a_file"),
- ?line ok = ?PRIM_FILE:write_file(Name, "some contents\n"),
-
- ?line Alias = filename:join(NewDir, "an_alias"),
- ?line Result =
- case ?PRIM_FILE_call(make_link, Handle, [Name, Alias]) of
+ Alias = filename:join(NewDir, "an_alias"),
+ Result =
+ case ?PRIM_FILE:make_link(Name, Alias) of
{error, enotsup} ->
{skipped, "Links not supported on this platform"};
ok ->
@@ -1939,133 +1519,102 @@ make_link(Config, Handle, Suffix) ->
                %% which should behave exactly as
%% ?PRIM_FILE:read_file_info/1
%% since they are not used on symbolic links.
-
- ?line {ok, Info} =
- ?PRIM_FILE_call(read_link_info, Handle, [Name]),
- ?line {ok, Info} =
- ?PRIM_FILE_call(read_link_info, Handle, [Alias]),
- ?line #file_info{links = 2, type = regular} = Info,
- ?line {error, eexist} =
- ?PRIM_FILE_call(make_link, Handle, [Name, Alias]),
+
+ {ok, Info} =
+ ?PRIM_FILE:read_link_info(Name),
+ {ok, Info} =
+ ?PRIM_FILE:read_link_info(Alias),
+ #file_info{links = 2, type = regular} = Info,
+ {error, eexist} =
+ ?PRIM_FILE:make_link(Name, Alias),
ok
end,
-
- ?line test_server:timetrap_cancel(Dog),
+
Result.
-read_link_info_for_non_link(doc) ->
- "Test that reading link info for an ordinary file or directory works "
- "(on all platforms).";
-read_link_info_for_non_link(suite) -> [];
+%% Test that reading link info for an ordinary file or directory works
+%% (on all platforms).
read_link_info_for_non_link(Config) when is_list(Config) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
-
- ?line {ok, #file_info{type=directory}} = ?PRIM_FILE:read_link_info("."),
-
- ?line test_server:timetrap_cancel(Dog),
+ {ok, #file_info{type=directory}} = ?PRIM_FILE:read_link_info("."),
ok.
-
-symlinks_a(doc) -> "Test operations on symbolic links (for Unix).";
-symlinks_a(suite) -> [];
-symlinks_a(Config) when is_list(Config) ->
- symlinks(Config, [], "_a").
-
-symlinks_b(doc) -> "Test operations on symbolic links (for Unix).";
-symlinks_b(suite) -> [];
-symlinks_b(Config) when is_list(Config) ->
- ?line {ok, Handle} = ?PRIM_FILE:start(),
- Result = symlinks(Config, Handle, "_b"),
- ?line ok = ?PRIM_FILE:stop(Handle),
- Result.
-symlinks(Config, Handle, Suffix) ->
- ?line Dog = test_server:timetrap(test_server:seconds(10)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)
- ++"_make_symlink"++Suffix),
- ?line ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
-
- ?line Name = filename:join(NewDir, "a_plain_file"),
- ?line ok = ?PRIM_FILE:write_file(Name, "some stupid content\n"),
-
- ?line Alias = filename:join(NewDir, "a_symlink_alias"),
- ?line Result =
- case ?PRIM_FILE_call(make_symlink, Handle, [Name, Alias]) of
+symlinks(Config) when is_list(Config) ->
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)
+ ++"_make_symlink"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
+
+ Name = filename:join(NewDir, "a_plain_file"),
+ ok = ?PRIM_FILE:write_file(Name, "some stupid content\n"),
+
+ Alias = filename:join(NewDir, "a_symlink_alias"),
+ Result =
+ case ?PRIM_FILE:make_symlink(Name, Alias) of
{error, enotsup} ->
{skipped, "Links not supported on this platform"};
{error, eperm} ->
{win32,_} = os:type(),
{skipped, "Windows user not privileged to create links"};
ok ->
- ?line {ok, Info1} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
- ?line {ok, Info1} =
- ?PRIM_FILE_call(read_file_info, Handle, [Alias]),
- ?line {ok, Info1} =
- ?PRIM_FILE_call(read_link_info, Handle, [Name]),
- ?line #file_info{links = 1, type = regular} = Info1,
-
- ?line {ok, Info2} =
- ?PRIM_FILE_call(read_link_info, Handle, [Alias]),
- ?line #file_info{links=1, type=symlink} = Info2,
- ?line {ok, Name} =
- ?PRIM_FILE_call(read_link, Handle, [Alias]),
+ {ok, Info1} =
+ ?PRIM_FILE:read_file_info(Name),
+ {ok, Info1} =
+ ?PRIM_FILE:read_file_info(Alias),
+ {ok, Info1} =
+ ?PRIM_FILE:read_link_info(Name),
+ #file_info{links = 1, type = regular} = Info1,
+
+ {ok, Info2} =
+ ?PRIM_FILE:read_link_info(Alias),
+ #file_info{links=1, type=symlink} = Info2,
+ {ok, Name} =
+ ?PRIM_FILE:read_link(Alias),
{ok, Name} =
- ?PRIM_FILE_call(read_link_all, Handle, [Alias]),
+ ?PRIM_FILE:read_link_all(Alias),
%% If all is good, delete dir again (avoid hanging dir on windows)
rm_rf(?PRIM_FILE,NewDir),
ok
end,
-
- ?line test_server:timetrap_cancel(Dog),
+
Result.
%% Creates as many files as possible during a certain time,
%% periodically calls list_dir/2 to check if it works,
%% then deletes all files.
-list_dir_limit(doc) ->
- "Tests if large directories can be read";
-list_dir_limit(suite) ->
- [];
+%% Tests if large directories can be read.
list_dir_limit(Config) when is_list(Config) ->
- ?line MaxTime = 120,
- ?line MaxNumber = 20000,
- ?line Dog = test_server:timetrap(
- test_server:seconds(2*MaxTime + MaxTime)),
- ?line RootDir = ?config(priv_dir, Config),
- ?line NewDir = filename:join(RootDir,
- atom_to_list(?MODULE)++"_list_dir_limit"),
- ?line {ok, Handle1} = ?PRIM_FILE:start(),
- ?line ok = ?PRIM_FILE_call(make_dir, Handle1, [NewDir]),
+ MaxTime = 120,
+ MaxNumber = 20000,
+ ct:timetrap({seconds,2*MaxTime + MaxTime}),
+ RootDir = proplists:get_value(priv_dir, Config),
+ NewDir = filename:join(RootDir,
+ atom_to_list(?MODULE)++"_list_dir_limit"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
Ref = erlang:start_timer(MaxTime*1000, self(), []),
- ?line Result = list_dir_limit_loop(NewDir, Handle1, Ref, MaxNumber, 0),
- ?line Time = case erlang:cancel_timer(Ref) of
- false -> MaxTime;
- T -> MaxTime - (T div 1000)
- end,
- ?line Number = case Result of
- {ok, N} -> N;
- {error, _Reason, N} -> N;
- _ -> 0
- end,
- ?line {ok, Handle2} = ?PRIM_FILE:start(),
- ?line list_dir_limit_cleanup(NewDir, Handle2, Number, 0),
- ?line ok = ?PRIM_FILE:stop(Handle1),
- ?line ok = ?PRIM_FILE:stop(Handle2),
- ?line {ok, Number} = Result,
- ?line test_server:timetrap_cancel(Dog),
+ Result = list_dir_limit_loop(NewDir, Ref, MaxNumber, 0),
+ Time = case erlang:cancel_timer(Ref) of
+ false -> MaxTime;
+ T -> MaxTime - (T div 1000)
+ end,
+ Number = case Result of
+ {ok, N} -> N;
+ {error, _Reason, N} -> N;
+ _ -> 0
+ end,
+ list_dir_limit_cleanup(NewDir, Number, 0),
+ {ok, Number} = Result,
{comment,
"Created " ++ integer_to_list(Number) ++ " files in "
++ integer_to_list(Time) ++ " seconds."}.
-list_dir_limit_loop(Dir, Handle, _Ref, N, Cnt) when Cnt >= N ->
- list_dir_check(Dir, Handle, Cnt);
-list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) ->
+list_dir_limit_loop(Dir, _Ref, N, Cnt) when Cnt >= N ->
+ list_dir_check(Dir, Cnt);
+list_dir_limit_loop(Dir, Ref, N, Cnt) ->
receive
{timeout, Ref, []} ->
- list_dir_check(Dir, Handle, Cnt)
+ list_dir_check(Dir, Cnt)
after 0 ->
Name = integer_to_list(Cnt),
case ?PRIM_FILE:write_file(filename:join(Dir, Name), Name) of
@@ -2073,23 +1622,23 @@ list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) ->
Next = Cnt + 1,
case Cnt rem 100 of
0 ->
- case list_dir_check(Dir, Handle, Next) of
+ case list_dir_check(Dir, Next) of
{ok, Next} ->
list_dir_limit_loop(
- Dir, Handle, Ref, N, Next);
+ Dir, Ref, N, Next);
Other ->
Other
end;
_ ->
- list_dir_limit_loop(Dir, Handle, Ref, N, Next)
+ list_dir_limit_loop(Dir, Ref, N, Next)
end;
{error, Reason} ->
{error, Reason, Cnt}
end
end.
-list_dir_check(Dir, Handle, Cnt) ->
- case ?PRIM_FILE:list_dir(Handle, Dir) of
+list_dir_check(Dir, Cnt) ->
+ case ?PRIM_FILE:list_dir(Dir) of
{ok, ListDir} ->
case length(ListDir) of
Cnt ->
@@ -2106,27 +1655,27 @@ list_dir_check(Dir, Handle, Cnt) ->
%% Deletes N files while ignoring errors, then continues deleting
%% as long as they exist.
-list_dir_limit_cleanup(Dir, Handle, N, Cnt) when Cnt >= N ->
+list_dir_limit_cleanup(Dir, N, Cnt) when Cnt >= N ->
Name = integer_to_list(Cnt),
- case ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)) of
+ case ?PRIM_FILE:delete(filename:join(Dir, Name)) of
ok ->
- list_dir_limit_cleanup(Dir, Handle, N, Cnt+1);
+ list_dir_limit_cleanup(Dir, N, Cnt+1);
_ ->
ok
end;
-list_dir_limit_cleanup(Dir, Handle, N, Cnt) ->
+list_dir_limit_cleanup(Dir, N, Cnt) ->
Name = integer_to_list(Cnt),
- ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)),
- list_dir_limit_cleanup(Dir, Handle, N, Cnt+1).
+ ?PRIM_FILE:delete(filename:join(Dir, Name)),
+ list_dir_limit_cleanup(Dir, N, Cnt+1).
%%%
%%% Test list_dir() on a non-existing pathname.
%%%
list_dir_error(Config) ->
- Priv = ?config(priv_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
NonExisting = filename:join(Priv, "non-existing-dir"),
- {error,enoent} = prim_file:list_dir(NonExisting),
+ {error,enoent} = ?PRIM_FILE:list_dir(NonExisting),
ok.
%%%
@@ -2134,7 +1683,7 @@ list_dir_error(Config) ->
%%%
list_dir(Config) ->
- RootDir = ?config(priv_dir, Config),
+ RootDir = proplists:get_value(priv_dir, Config),
TestDir = filename:join(RootDir, ?MODULE_STRING++"_list_dir"),
?PRIM_FILE:make_dir(TestDir),
list_dir_1(TestDir, 42, []).
@@ -2165,11 +1714,13 @@ run_large_file_test(Config, Run, Name) ->
{{unix,sunos},OsVersion} when OsVersion < {5,5,1} ->
{skip,"Only supported on Win32, Unix or SunOS >= 5.5.1"};
{{unix,_},_} ->
- N = unix_free(?config(priv_dir, Config)),
- io:format("Free disk: ~w KByte~n", [N]),
- if N < 5 bsl 20 ->
+ DiscFree = unix_free(proplists:get_value(priv_dir, Config)),
+ MemFree = free_memory(),
+ io:format("Free disk: ~w KByte~n", [DiscFree]),
+ io:format("Free mem: ~w MByte~n", [MemFree]),
+ if DiscFree < 5 bsl 20; MemFree < 5 bsl 10 ->
%% Less than 5 GByte free
- {skip,"Less than 5 GByte free disk"};
+ {skip,"Less than 5 GByte free disk/mem"};
true ->
do_run_large_file_test(Config, Run, Name)
end;
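
Both guards above express "less than 5 GByte free", just in the unit each
probe reports in (KByte for unix_free/1, MByte for free_memory/0, as the
io:format lines show):

%% 5 bsl 20 = 5 * 1024 * 1024 KByte = 5 GByte   (free disk threshold)
%% 5 bsl 10 = 5 * 1024        MByte = 5 GByte   (free memory threshold)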
@@ -2179,9 +1730,9 @@ run_large_file_test(Config, Run, Name) ->
do_run_large_file_test(Config, Run, Name0) ->
- Name = filename:join(?config(priv_dir, Config),
+ Name = filename:join(proplists:get_value(priv_dir, Config),
?MODULE_STRING ++ Name0),
-
+
%% Set up a process that will delete this file.
Tester = self(),
Deleter =
@@ -2192,9 +1743,9 @@ do_run_large_file_test(Config, Run, Name0) ->
{'DOWN',Mref,_,_,_} -> ok;
{Tester,done} -> ok
end,
- prim_file:delete(Name)
+ ?PRIM_FILE:delete(Name)
end),
-
+
%% Run the test case.
Res = Run(Name),
@@ -2222,6 +1773,40 @@ zip_data([], Bs) ->
zip_data(As, []) ->
As.
+%% Stolen from emulator -> alloc_SUITE
+free_memory() ->
+ %% Free memory in MB.
+ try
+ SMD = memsup:get_system_memory_data(),
+ {value, {free_memory, Free}} = lists:keysearch(free_memory, 1, SMD),
+ TotFree = (Free +
+ case lists:keysearch(cached_memory, 1, SMD) of
+ {value, {cached_memory, Cached}} -> Cached;
+ false -> 0
+ end +
+ case lists:keysearch(buffered_memory, 1, SMD) of
+ {value, {buffered_memory, Buffed}} -> Buffed;
+ false -> 0
+ end),
+ usable_mem(TotFree) div (1024*1024)
+ catch
+ error : undef ->
+ ct:fail({"os_mon not built"})
+ end.
+
+usable_mem(Memory) ->
+ case test_server:is_valgrind() of
+ true ->
+ %% Valgrind uses extra memory for the V- and A-bits.
+ %% http://valgrind.org/docs/manual/mc-manual.html#mc-manual.value
+ %% Docs says it uses "compression to represent the V bits compactly"
+ %% but let's be conservative and cut usable memory in half.
+ Memory div 2;
+ false ->
+ Memory
+ end.
+
+
%%%-----------------------------------------------------------------
%%% Utilities
rm_rf(Mod,Dir) ->
diff --git a/lib/kernel/test/ram_file_SUITE.erl b/lib/kernel/test/ram_file_SUITE.erl
index 933dc88d21..b0265393bd 100644
--- a/lib/kernel/test/ram_file_SUITE.erl
+++ b/lib/kernel/test/ram_file_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2001-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2001-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -26,9 +26,10 @@
init_per_testcase/2, end_per_testcase/2]).
-export([open_modes/1, open_old_modes/1, pread_pwrite/1, position/1,
truncate/1, sync/1, get_set_file/1, compress/1, uuencode/1,
- large_file_errors/1, large_file_light/1, large_file_heavy/1]).
+ large_file_errors/1, large_file_light/1,
+ large_file_heavy/0, large_file_heavy/1]).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/file.hrl").
-define(FILE_MODULE, file). % Name of module to test
@@ -36,7 +37,9 @@
%%--------------------------------------------------------------------------
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[open_modes, open_old_modes, pread_pwrite, position,
@@ -59,37 +62,23 @@ end_per_group(_GroupName, Config) ->
Config.
-init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
- Time =
- case Func of
- large_file_heavy ->
- ?t:minutes(5);
- _ ->
- ?t:seconds(10)
- end,
- Dog = ?t:timetrap(Time),
- %% error_logger:info_msg("~p:~p *****~n", [?MODULE, Func]),
- [{watchdog, Dog} | Config].
+init_per_testcase(Func, Config) ->
+ Config.
end_per_testcase(_Func, Config) ->
- %% error_logger:info_msg("~p:~p END *****~n", [?MODULE, Func]),
- Dog = ?config(watchdog, Config),
- ?t:timetrap_cancel(Dog).
+ Config.
%%--------------------------------------------------------------------------
%% Test suites
-open_modes(suite) ->
- [];
-open_modes(doc) ->
- ["Test that the basic read, write and binary options works for open/2."];
+%% Test that the basic read, write and binary options works for open/2.
open_modes(Config) when is_list(Config) ->
- ?line Str1 = "The quick brown fox ",
- ?line Str2 = "jumps over a lazy dog ",
- ?line Str = Str1 ++ Str2,
- ?line Bin1 = list_to_binary(Str1),
- ?line Bin2 = list_to_binary(Str2),
- ?line Bin = list_to_binary(Str),
+ Str1 = "The quick brown fox ",
+ Str2 = "jumps over a lazy dog ",
+ Str = Str1 ++ Str2,
+ Bin1 = list_to_binary(Str1),
+ Bin2 = list_to_binary(Str2),
+ Bin = list_to_binary(Str),
%%
open_read_write(?FILE_MODULE, Str1, [ram, read, write], Str2),
open_read(?FILE_MODULE, Str, [ram]),
@@ -98,18 +87,15 @@ open_modes(Config) when is_list(Config) ->
%%
ok.
-open_old_modes(suite) ->
- [];
-open_old_modes(doc) ->
- ["Test that the old style read, write and binary options ",
- "works for open/2."];
+%% Test that the old style read, write and binary options
+%% works for open/2.
open_old_modes(Config) when is_list(Config) ->
- ?line Str1 = "The quick brown fox ",
- ?line Str2 = "jumps over a lazy dog ",
- ?line Str = Str1 ++ Str2,
- ?line Bin1 = list_to_binary(Str1),
- ?line Bin2 = list_to_binary(Str2),
- ?line Bin = list_to_binary(Str),
+ Str1 = "The quick brown fox ",
+ Str2 = "jumps over a lazy dog ",
+ Str = Str1 ++ Str2,
+ Bin1 = list_to_binary(Str1),
+ Bin2 = list_to_binary(Str2),
+ Bin = list_to_binary(Str),
%%
open_read_write(?RAM_FILE_MODULE, Str1, read_write, Str2),
open_read(?RAM_FILE_MODULE, Str, read),
@@ -119,57 +105,54 @@ open_old_modes(Config) when is_list(Config) ->
ok.
open_read_write(Module, Data1, Options, Data2) ->
- ?line io:format("~p:open_read_write(~p, ~p, ~p, ~p)~n",
- [?MODULE, Module, Data1, Options, Data2]),
- %%
- ?line Size1 = sizeof(Data1),
- ?line Size2 = sizeof(Data2),
- ?line Data = append(Data1, Data2),
- ?line Size = Size1 + Size2,
- %%
- ?line {ok, Fd} = Module:open(Data1, Options),
- ?line {ok, Data1} = Module:read(Fd, Size1),
- ?line eof = Module:read(Fd, 1),
- ?line {ok, Zero} = Module:read(Fd, 0),
- ?line 0 = sizeof(Zero),
- ?line ok = Module:write(Fd, Data2),
- ?line {ok, 0} = Module:position(Fd, bof),
- ?line {ok, Data} = Module:read(Fd, Size),
- ?line eof = Module:read(Fd, 1),
- ?line {ok, Zero} = Module:read(Fd, 0),
- ?line ok = Module:close(Fd),
- %%
- ?line ok.
+ io:format("~p:open_read_write(~p, ~p, ~p, ~p)~n",
+ [?MODULE, Module, Data1, Options, Data2]),
+ %%
+ Size1 = sizeof(Data1),
+ Size2 = sizeof(Data2),
+ Data = append(Data1, Data2),
+ Size = Size1 + Size2,
+ %%
+ {ok, Fd} = Module:open(Data1, Options),
+ {ok, Data1} = Module:read(Fd, Size1),
+ eof = Module:read(Fd, 1),
+ {ok, Zero} = Module:read(Fd, 0),
+ 0 = sizeof(Zero),
+ ok = Module:write(Fd, Data2),
+ {ok, 0} = Module:position(Fd, bof),
+ {ok, Data} = Module:read(Fd, Size),
+ eof = Module:read(Fd, 1),
+ {ok, Zero} = Module:read(Fd, 0),
+ ok = Module:close(Fd),
+ %%
+ ok.
open_read(Module, Data, Options) ->
- ?line io:format("~p:open_read(~p, ~p, ~p)~n",
- [?MODULE, Module, Data, Options]),
- %%
- ?line Size = sizeof(Data),
- %%
- ?line {ok, Fd} = Module:open(Data, Options),
- ?line {ok, Data} = Module:read(Fd, Size),
- ?line eof = Module:read(Fd, 1),
- ?line {ok, Zero} = Module:read(Fd, 0),
- ?line 0 = sizeof(Zero),
- ?line {error, ebadf} = Module:write(Fd, Data),
- ?line {ok, 0} = Module:position(Fd, bof),
- ?line {ok, Data} = Module:read(Fd, Size),
- ?line eof = Module:read(Fd, 1),
- ?line {ok, Zero} = Module:read(Fd, 0),
- ?line ok = Module:close(Fd),
+ io:format("~p:open_read(~p, ~p, ~p)~n",
+ [?MODULE, Module, Data, Options]),
+ %%
+ Size = sizeof(Data),
+ %%
+ {ok, Fd} = Module:open(Data, Options),
+ {ok, Data} = Module:read(Fd, Size),
+ eof = Module:read(Fd, 1),
+ {ok, Zero} = Module:read(Fd, 0),
+ 0 = sizeof(Zero),
+ {error, ebadf} = Module:write(Fd, Data),
+ {ok, 0} = Module:position(Fd, bof),
+ {ok, Data} = Module:read(Fd, Size),
+ eof = Module:read(Fd, 1),
+ {ok, Zero} = Module:read(Fd, 0),
+ ok = Module:close(Fd),
%%
- ?line ok.
+ ok.
-pread_pwrite(suite) ->
- [];
-pread_pwrite(doc) ->
- ["Test that pread/2,3 and pwrite/2,3 works."];
+%% Test that pread/2,3 and pwrite/2,3 works.
pread_pwrite(Config) when is_list(Config) ->
- ?line Str = "Flygande bäckaziner söka hwila på mjuqa tuvor x",
- ?line Bin = list_to_binary(Str),
+ Str = "Flygande bäckaziner söka hwila på mjuqa tuvor x",
+ Bin = list_to_binary(Str),
%%
pread_pwrite_test(?FILE_MODULE, Str, [ram, read, write]),
pread_pwrite_test(?FILE_MODULE, Bin, [ram, binary, read, write]),
@@ -179,36 +162,33 @@ pread_pwrite(Config) when is_list(Config) ->
ok.
pread_pwrite_test(Module, Data, Options) ->
- ?line io:format("~p:pread_pwrite_test(~p, ~p, ~p)~n",
- [?MODULE, Module, Data, Options]),
- %%
- ?line Size = sizeof(Data),
- %%
- ?line {ok, Fd} = Module:open([], Options),
- ?line ok = Module:pwrite(Fd, 0, Data),
- ?line {ok, Data} = Module:pread(Fd, 0, Size+1),
- ?line eof = Module:pread(Fd, Size+1, 1),
- ?line {ok, Zero} = Module:pread(Fd, Size+1, 0),
- ?line 0 = sizeof(Zero),
- ?line ok = Module:pwrite(Fd, [{0, Data}, {Size+17, Data}]),
- ?line {ok, [Data,
- eof,
- Data,
- Zero]} = Module:pread(Fd, [{Size+17, Size+1},
- {2*Size+17+1, 1},
- {0, Size},
- {2*Size+17+1, 0}]),
- ?line ok = Module:close(Fd),
- %%
- ?line ok.
-
-position(suite) ->
- [];
-position(doc) ->
- ["Test that position/2 works."];
+ io:format("~p:pread_pwrite_test(~p, ~p, ~p)~n",
+ [?MODULE, Module, Data, Options]),
+ %%
+ Size = sizeof(Data),
+ %%
+ {ok, Fd} = Module:open([], Options),
+ ok = Module:pwrite(Fd, 0, Data),
+ {ok, Data} = Module:pread(Fd, 0, Size+1),
+ eof = Module:pread(Fd, Size+1, 1),
+ {ok, Zero} = Module:pread(Fd, Size+1, 0),
+ 0 = sizeof(Zero),
+ ok = Module:pwrite(Fd, [{0, Data}, {Size+17, Data}]),
+ {ok, [Data,
+ eof,
+ Data,
+ Zero]} = Module:pread(Fd, [{Size+17, Size+1},
+ {2*Size+17+1, 1},
+ {0, Size},
+ {2*Size+17+1, 0}]),
+ ok = Module:close(Fd),
+ %%
+ ok.
+
+%% Test that position/2 works.
position(Config) when is_list(Config) ->
- ?line Str = "Att vara eller icke vara, det är frågan. ",
- ?line Bin = list_to_binary(Str),
+ Str = "Att vara eller icke vara, det är frågan. ",
+ Bin = list_to_binary(Str),
%%
position_test(?FILE_MODULE, Str, [ram, read]),
position_test(?FILE_MODULE, Bin, [ram, binary]),
@@ -218,79 +198,76 @@ position(Config) when is_list(Config) ->
ok.
position_test(Module, Data, Options) ->
- ?line io:format("~p:position_test(~p, ~p, ~p)~n",
- [?MODULE, Module, Data, Options]),
- %%
- ?line Size = sizeof(Data),
- ?line Size_7 = Size+7,
- %%
- ?line Slice_0_2 = slice(Data, 0, 2),
- ?line Slice_0_3 = slice(Data, 0, 3),
- ?line Slice_2_5 = slice(Data, 2, 5),
- ?line Slice_3_4 = slice(Data, 3, 4),
- ?line Slice_5 = slice(Data, 5, Size),
- %%
- ?line {ok, Fd} = Module:open(Data, Options),
- %%
- ?line io:format("CUR positions"),
- ?line {ok, Slice_0_2} = Module:read(Fd, 2),
- ?line {ok, 2} = Module:position(Fd, cur),
- ?line {ok, Slice_2_5} = Module:read(Fd, 5),
- ?line {ok, 3} = Module:position(Fd, {cur, -4}),
- ?line {ok, Slice_3_4} = Module:read(Fd, 4),
- ?line {ok, 0} = Module:position(Fd, {cur, -7}),
- ?line {ok, Slice_0_3} = Module:read(Fd, 3),
- ?line {ok, 0} = Module:position(Fd, {cur, -3}),
- ?line {error, einval} = Module:position(Fd, {cur, -1}),
- ?line {ok, 0} = Module:position(Fd, 0),
- ?line {ok, 2} = Module:position(Fd, {cur, 2}),
- ?line {ok, Slice_2_5} = Module:read(Fd, 5),
- ?line {ok, Size_7} = Module:position(Fd, {cur, Size}),
- ?line {ok, Zero} = Module:read(Fd, 0),
- ?line 0 = sizeof(Zero),
- ?line eof = Module:read(Fd, 1),
- %%
- ?line io:format("Absolute and BOF positions"),
- ?line {ok, Size} = Module:position(Fd, Size),
- ?line eof = Module:read(Fd, 1),
- ?line {ok, 5} = Module:position(Fd, 5),
- ?line {ok, Slice_5} = Module:read(Fd, Size),
- ?line {ok, 2} = Module:position(Fd, {bof, 2}),
- ?line {ok, Slice_2_5} = Module:read(Fd, 5),
- ?line {ok, 3} = Module:position(Fd, 3),
- ?line {ok, Slice_3_4} = Module:read(Fd, 4),
- ?line {ok, 0} = Module:position(Fd, bof),
- ?line {ok, Slice_0_2} = Module:read(Fd, 2),
- ?line {ok, Size_7} = Module:position(Fd, {bof, Size_7}),
- ?line {ok, Zero} = Module:read(Fd, 0),
- %%
- ?line io:format("EOF positions"),
- ?line {ok, Size} = Module:position(Fd, eof),
- ?line eof = Module:read(Fd, 1),
- ?line {ok, 5} = Module:position(Fd, {eof, -Size+5}),
- ?line {ok, Slice_5} = Module:read(Fd, Size),
- ?line {ok, 2} = Module:position(Fd, {eof, -Size+2}),
- ?line {ok, Slice_2_5} = Module:read(Fd, 5),
- ?line {ok, 3} = Module:position(Fd, {eof, -Size+3}),
- ?line {ok, Slice_3_4} = Module:read(Fd, 4),
- ?line {ok, 0} = Module:position(Fd, {eof, -Size}),
- ?line {ok, Slice_0_2} = Module:read(Fd, 2),
- ?line {ok, Size_7} = Module:position(Fd, {eof, 7}),
- ?line {ok, Zero} = Module:read(Fd, 0),
- ?line eof = Module:read(Fd, 1),
- %%
- ?line ok.
-
-
-
-truncate(suite) ->
- [];
-truncate(doc) ->
- ["Test that truncate/1 works."];
+ io:format("~p:position_test(~p, ~p, ~p)~n",
+ [?MODULE, Module, Data, Options]),
+ %%
+ Size = sizeof(Data),
+ Size_7 = Size+7,
+ %%
+ Slice_0_2 = slice(Data, 0, 2),
+ Slice_0_3 = slice(Data, 0, 3),
+ Slice_2_5 = slice(Data, 2, 5),
+ Slice_3_4 = slice(Data, 3, 4),
+ Slice_5 = slice(Data, 5, Size),
+ %%
+ {ok, Fd} = Module:open(Data, Options),
+ %%
+ io:format("CUR positions"),
+ {ok, Slice_0_2} = Module:read(Fd, 2),
+ {ok, 2} = Module:position(Fd, cur),
+ {ok, Slice_2_5} = Module:read(Fd, 5),
+ {ok, 3} = Module:position(Fd, {cur, -4}),
+ {ok, Slice_3_4} = Module:read(Fd, 4),
+ {ok, 0} = Module:position(Fd, {cur, -7}),
+ {ok, Slice_0_3} = Module:read(Fd, 3),
+ {ok, 0} = Module:position(Fd, {cur, -3}),
+ {error, einval} = Module:position(Fd, {cur, -1}),
+ {ok, 0} = Module:position(Fd, 0),
+ {ok, 2} = Module:position(Fd, {cur, 2}),
+ {ok, Slice_2_5} = Module:read(Fd, 5),
+ {ok, Size_7} = Module:position(Fd, {cur, Size}),
+ {ok, Zero} = Module:read(Fd, 0),
+ 0 = sizeof(Zero),
+ eof = Module:read(Fd, 1),
+ %%
+ io:format("Absolute and BOF positions"),
+ {ok, Size} = Module:position(Fd, Size),
+ eof = Module:read(Fd, 1),
+ {ok, 5} = Module:position(Fd, 5),
+ {ok, Slice_5} = Module:read(Fd, Size),
+ {ok, 2} = Module:position(Fd, {bof, 2}),
+ {ok, Slice_2_5} = Module:read(Fd, 5),
+ {ok, 3} = Module:position(Fd, 3),
+ {ok, Slice_3_4} = Module:read(Fd, 4),
+ {ok, 0} = Module:position(Fd, bof),
+ {ok, Slice_0_2} = Module:read(Fd, 2),
+ {ok, Size_7} = Module:position(Fd, {bof, Size_7}),
+ {ok, Zero} = Module:read(Fd, 0),
+ %%
+ io:format("EOF positions"),
+ {ok, Size} = Module:position(Fd, eof),
+ eof = Module:read(Fd, 1),
+ {ok, 5} = Module:position(Fd, {eof, -Size+5}),
+ {ok, Slice_5} = Module:read(Fd, Size),
+ {ok, 2} = Module:position(Fd, {eof, -Size+2}),
+ {ok, Slice_2_5} = Module:read(Fd, 5),
+ {ok, 3} = Module:position(Fd, {eof, -Size+3}),
+ {ok, Slice_3_4} = Module:read(Fd, 4),
+ {ok, 0} = Module:position(Fd, {eof, -Size}),
+ {ok, Slice_0_2} = Module:read(Fd, 2),
+ {ok, Size_7} = Module:position(Fd, {eof, 7}),
+ {ok, Zero} = Module:read(Fd, 0),
+ eof = Module:read(Fd, 1),
+ %%
+ ok.
+
+
+
+%% Test that truncate/1 works.
truncate(Config) when is_list(Config) ->
- ?line Str = "Mån ädlare att lida och fördraga "
+ Str = "Mån ädlare att lida och fördraga "
++ "ett bittert ödes stygn av pilar, ",
- ?line Bin = list_to_binary(Str),
+ Bin = list_to_binary(Str),
%%
ok = truncate_test(?FILE_MODULE, Str, [ram, read, write]),
ok = truncate_test(?FILE_MODULE, Bin, [ram, binary, read, write]),
@@ -305,35 +282,32 @@ truncate(Config) when is_list(Config) ->
ok.
truncate_test(Module, Data, Options) ->
- ?line io:format("~p:truncate_test(~p, ~p, ~p)~n",
- [?MODULE, Module, Data, Options]),
- %%
- ?line Size = sizeof(Data),
- ?line Size1 = Size-2,
- ?line Data1 = slice(Data, 0, Size1),
- %%
- ?line {ok, Fd} = Module:open(Data, Options),
- ?line {ok, Size1} = Module:position(Fd, Size1),
- ?line case Module:truncate(Fd) of
- ok ->
- ?line {ok, 0} = Module:position(Fd, 0),
- ?line {ok, Data1} = Module:read(Fd, Size),
- ?line ok = Module:close(Fd),
- ?line ok;
- Error ->
- ?line ok = Module:close(Fd),
- ?line Error
- end.
-
-
-
-sync(suite) ->
- [];
-sync(doc) ->
- ["Test that sync/1 at least does not crash."];
+ io:format("~p:truncate_test(~p, ~p, ~p)~n",
+ [?MODULE, Module, Data, Options]),
+ %%
+ Size = sizeof(Data),
+ Size1 = Size-2,
+ Data1 = slice(Data, 0, Size1),
+ %%
+ {ok, Fd} = Module:open(Data, Options),
+ {ok, Size1} = Module:position(Fd, Size1),
+ case Module:truncate(Fd) of
+ ok ->
+ {ok, 0} = Module:position(Fd, 0),
+ {ok, Data1} = Module:read(Fd, Size),
+ ok = Module:close(Fd),
+ ok;
+ Error ->
+ ok = Module:close(Fd),
+ Error
+ end.
+
+
+
+%% Test that sync/1 at least does not crash.
sync(Config) when is_list(Config) ->
- ?line Str = "än att ta till vapen mot ett hav av kval. ",
- ?line Bin = list_to_binary(Str),
+ Str = "än att ta till vapen mot ett hav av kval. ",
+ Bin = list_to_binary(Str),
%%
sync_test(?FILE_MODULE, Str, [ram, read, write]),
sync_test(?FILE_MODULE, Bin, [ram, binary, read, write]),
@@ -348,28 +322,25 @@ sync(Config) when is_list(Config) ->
ok.
sync_test(Module, Data, Options) ->
- ?line io:format("~p:sync_test(~p, ~p, ~p)~n",
- [?MODULE, Module, Data, Options]),
+ io:format("~p:sync_test(~p, ~p, ~p)~n",
+ [?MODULE, Module, Data, Options]),
%%
- ?line Size = sizeof(Data),
+ Size = sizeof(Data),
%%
- ?line {ok, Fd} = Module:open(Data, Options),
- ?line ok = Module:sync(Fd),
- ?line {ok, Data} = Module:read(Fd, Size+1),
- ?line ok.
+ {ok, Fd} = Module:open(Data, Options),
+ ok = Module:sync(Fd),
+ {ok, Data} = Module:read(Fd, Size+1),
+ ok.
-get_set_file(suite) ->
- [];
-get_set_file(doc) ->
- ["Tests get_file/1, set_file/2, get_file_close/1 and get_size/1."];
+%% Tests get_file/1, set_file/2, get_file_close/1 and get_size/1.
get_set_file(Config) when is_list(Config) ->
%% These two strings should not be of equal length.
- ?line Str = "När högan nord blir snöbetäckt, ",
- ?line Str2 = "får alla harar byta dräkt. ",
- ?line Bin = list_to_binary(Str),
- ?line Bin2 = list_to_binary(Str2),
+ Str = "När högan nord blir snöbetäckt, ",
+ Str2 = "får alla harar byta dräkt. ",
+ Bin = list_to_binary(Str),
+ Bin2 = list_to_binary(Str2),
%%
ok = get_set_file_test(Str, read_write, Str2),
ok = get_set_file_test(Bin, [binary, read, write], Bin2),
@@ -379,87 +350,84 @@ get_set_file(Config) when is_list(Config) ->
ok.
get_set_file_test(Data, Options, Data2) ->
- ?line io:format("~p:get_set_file_test(~p, ~p, ~p)~n",
- [?MODULE, Data, Options, Data2]),
- %%
- ?line Size = sizeof(Data),
- ?line Size2 = sizeof(Data2),
- %%
- ?line {ok, Fd} = ?RAM_FILE_MODULE:open(Data, Options),
- ?line {ok, Size} = ?RAM_FILE_MODULE:get_size(Fd),
- ?line {ok, Data} = ?RAM_FILE_MODULE:get_file(Fd),
- ?line {ok, Data} = ?RAM_FILE_MODULE:get_file_close(Fd),
- ?line {error, einval} = ?RAM_FILE_MODULE:get_size(Fd),
- ?line {ok, Fd2} = ?RAM_FILE_MODULE:open(Data, Options),
- ?line case ?RAM_FILE_MODULE:set_file(Fd2, Data2) of
- {ok, Size2} ->
- ?line {ok, Size2} = ?RAM_FILE_MODULE:get_size(Fd2),
- ?line {ok, Data2} = ?RAM_FILE_MODULE:get_file(Fd2),
- ?line {ok, Data2} = ?RAM_FILE_MODULE:get_file_close(Fd2),
- ?line ok;
- {error, _} = Error ->
- ?line {ok, Data} = ?RAM_FILE_MODULE:get_file_close(Fd2),
- ?line Error
- end.
-
-
-
-compress(suite) ->
- [];
-compress(doc) ->
- ["Test that compress/1 and uncompress/1 works."];
+ io:format("~p:get_set_file_test(~p, ~p, ~p)~n",
+ [?MODULE, Data, Options, Data2]),
+ %%
+ Size = sizeof(Data),
+ Size2 = sizeof(Data2),
+ %%
+ {ok, Fd} = ?RAM_FILE_MODULE:open(Data, Options),
+ {ok, Size} = ?RAM_FILE_MODULE:get_size(Fd),
+ {ok, Data} = ?RAM_FILE_MODULE:get_file(Fd),
+ {ok, Data} = ?RAM_FILE_MODULE:get_file_close(Fd),
+ {error, einval} = ?RAM_FILE_MODULE:get_size(Fd),
+ {ok, Fd2} = ?RAM_FILE_MODULE:open(Data, Options),
+ case ?RAM_FILE_MODULE:set_file(Fd2, Data2) of
+ {ok, Size2} ->
+ {ok, Size2} = ?RAM_FILE_MODULE:get_size(Fd2),
+ {ok, Data2} = ?RAM_FILE_MODULE:get_file(Fd2),
+ {ok, Data2} = ?RAM_FILE_MODULE:get_file_close(Fd2),
+ ok;
+ {error, _} = Error ->
+ {ok, Data} = ?RAM_FILE_MODULE:get_file_close(Fd2),
+ Error
+ end.
+
+
+
+%% Test that compress/1 and uncompress/1 works.
compress(Config) when is_list(Config) ->
- ?line Data = ?config(data_dir, Config),
- ?line Real = filename:join(Data, "realmen.html"),
- ?line RealGz = filename:join(Data, "realmen.html.gz"),
+ Data = proplists:get_value(data_dir, Config),
+ Real = filename:join(Data, "realmen.html"),
+ RealGz = filename:join(Data, "realmen.html.gz"),
%%
%% Uncompress test
%%
- ?line {ok, FdReal} = ?FILE_MODULE:open(Real, []),
- ?line {ok, Fd} = ?FILE_MODULE:open([], [ram, read, write]),
- ?line {ok, FdRealGz} = ?FILE_MODULE:open(RealGz, []),
+ {ok, FdReal} = ?FILE_MODULE:open(Real, []),
+ {ok, Fd} = ?FILE_MODULE:open([], [ram, read, write]),
+ {ok, FdRealGz} = ?FILE_MODULE:open(RealGz, []),
%%
- ?line {ok, SzGz} = ?FILE_MODULE:copy(FdRealGz, Fd),
- ?line {ok, Sz} = ?RAM_FILE_MODULE:uncompress(Fd),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, bof),
- ?line true = compare(FdReal, Fd),
+ {ok, SzGz} = ?FILE_MODULE:copy(FdRealGz, Fd),
+ {ok, Sz} = ?RAM_FILE_MODULE:uncompress(Fd),
+ {ok, 0} = ?FILE_MODULE:position(Fd, bof),
+ true = compare(FdReal, Fd),
%%
- ?line true = (SzGz =< Sz),
+ true = (SzGz =< Sz),
%%
%% Compress and uncompress test
%%
- ?line {ok, 0} = ?FILE_MODULE:position(FdReal, bof),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, bof),
- ?line ok = ?FILE_MODULE:truncate(Fd),
- ?line {ok, Sz} = ?FILE_MODULE:copy(FdReal, Fd),
- ?line {ok, SzGz} = ?RAM_FILE_MODULE:compress(Fd),
- ?line {ok, Sz} = ?RAM_FILE_MODULE:uncompress(Fd),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, bof),
- ?line {ok, 0} = ?FILE_MODULE:position(FdReal, bof),
- ?line true = compare(FdReal, Fd),
+ {ok, 0} = ?FILE_MODULE:position(FdReal, bof),
+ {ok, 0} = ?FILE_MODULE:position(Fd, bof),
+ ok = ?FILE_MODULE:truncate(Fd),
+ {ok, Sz} = ?FILE_MODULE:copy(FdReal, Fd),
+ {ok, SzGz} = ?RAM_FILE_MODULE:compress(Fd),
+ {ok, Sz} = ?RAM_FILE_MODULE:uncompress(Fd),
+ {ok, 0} = ?FILE_MODULE:position(Fd, bof),
+ {ok, 0} = ?FILE_MODULE:position(FdReal, bof),
+ true = compare(FdReal, Fd),
%%
- ?line ok = ?FILE_MODULE:close(FdReal),
- ?line ok = ?FILE_MODULE:close(Fd),
- ?line ok = ?FILE_MODULE:close(FdRealGz),
+ ok = ?FILE_MODULE:close(FdReal),
+ ok = ?FILE_MODULE:close(Fd),
+ ok = ?FILE_MODULE:close(FdRealGz),
%% Test uncompressing data that will be expanded many times.
- ?line Huge = iolist_to_binary(mk_42(18)),
- ?line HugeSize = byte_size(Huge),
- ?line HugeGz = zlib:gzip(Huge),
+ Huge = iolist_to_binary(mk_42(18)),
+ HugeSize = byte_size(Huge),
+ HugeGz = zlib:gzip(Huge),
- ?line {ok,HugeFd} = ?FILE_MODULE:open([], [ram,read,write,binary]),
- ?line ok = ?FILE_MODULE:write(HugeFd, HugeGz),
- ?line {ok,HugeSize} = ?RAM_FILE_MODULE:uncompress(HugeFd),
- ?line {ok,0} = ?FILE_MODULE:position(HugeFd, bof),
- ?line {ok,Huge} = ?FILE_MODULE:read(HugeFd, HugeSize),
+ {ok,HugeFd} = ?FILE_MODULE:open([], [ram,read,write,binary]),
+ ok = ?FILE_MODULE:write(HugeFd, HugeGz),
+ {ok,HugeSize} = ?RAM_FILE_MODULE:uncompress(HugeFd),
+ {ok,0} = ?FILE_MODULE:position(HugeFd, bof),
+ {ok,Huge} = ?FILE_MODULE:read(HugeFd, HugeSize),
%% Uncompressing again should do nothing.
- ?line {ok,HugeSize} = ?RAM_FILE_MODULE:uncompress(HugeFd),
- ?line {ok,0} = ?FILE_MODULE:position(HugeFd, bof),
- ?line {ok,Huge} = ?FILE_MODULE:read(HugeFd, HugeSize),
+ {ok,HugeSize} = ?RAM_FILE_MODULE:uncompress(HugeFd),
+ {ok,0} = ?FILE_MODULE:position(HugeFd, bof),
+ {ok,Huge} = ?FILE_MODULE:read(HugeFd, HugeSize),
- ?line ok = ?FILE_MODULE:close(HugeFd),
+ ok = ?FILE_MODULE:close(HugeFd),
ok.
@@ -469,118 +437,108 @@ mk_42(N) ->
B = mk_42(N-1),
[B|B].
-uuencode(suite) ->
- [];
-uuencode(doc) ->
- ["Test that uuencode/1 and uudecode/1 works."];
+%% Test that uuencode/1 and uudecode/1 works.
uuencode(Config) when is_list(Config) ->
- ?line Data = ?config(data_dir, Config),
- ?line Real = filename:join(Data, "realmen.html"),
- ?line RealUu = filename:join(Data, "realmen.html.uu"),
+ Data = proplists:get_value(data_dir, Config),
+ Real = filename:join(Data, "realmen.html"),
+ RealUu = filename:join(Data, "realmen.html.uu"),
%%
%% Uudecode test
%%
- ?line {ok, FdReal} = ?FILE_MODULE:open(Real, []),
- ?line {ok, Fd} = ?FILE_MODULE:open([], [ram, read, write]),
- ?line {ok, FdRealUu} = ?FILE_MODULE:open(RealUu, []),
+ {ok, FdReal} = ?FILE_MODULE:open(Real, []),
+ {ok, Fd} = ?FILE_MODULE:open([], [ram, read, write]),
+ {ok, FdRealUu} = ?FILE_MODULE:open(RealUu, []),
%%
- ?line {ok, SzUu} = ?FILE_MODULE:copy(FdRealUu, Fd),
- ?line {ok, Sz} = ?RAM_FILE_MODULE:uudecode(Fd),
- ?line true = (Sz =< SzUu),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, bof),
- ?line true = compare(FdReal, Fd),
+ {ok, SzUu} = ?FILE_MODULE:copy(FdRealUu, Fd),
+ {ok, Sz} = ?RAM_FILE_MODULE:uudecode(Fd),
+ true = (Sz =< SzUu),
+ {ok, 0} = ?FILE_MODULE:position(Fd, bof),
+ true = compare(FdReal, Fd),
%%
%% Uuencode and decode test
%%
F = fun(Offs) ->
Size = Sz - Offs,
- ?line {ok, Offs} = ?FILE_MODULE:position(FdReal, {bof,Offs}),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, bof),
- ?line ok = ?FILE_MODULE:truncate(Fd),
- ?line {ok, Size} = ?FILE_MODULE:copy(FdReal, Fd),
- ?line {ok, SizeUu} = ?RAM_FILE_MODULE:uuencode(Fd),
- ?line true = (Size =< SizeUu),
- ?line {ok, Size} = ?RAM_FILE_MODULE:uudecode(Fd),
- ?line {ok, Offs} = ?FILE_MODULE:position(FdReal, {bof,Offs}),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, bof),
- ?line true = compare(FdReal, Fd)
+ {ok, Offs} = ?FILE_MODULE:position(FdReal, {bof,Offs}),
+ {ok, 0} = ?FILE_MODULE:position(Fd, bof),
+ ok = ?FILE_MODULE:truncate(Fd),
+ {ok, Size} = ?FILE_MODULE:copy(FdReal, Fd),
+ {ok, SizeUu} = ?RAM_FILE_MODULE:uuencode(Fd),
+ true = (Size =< SizeUu),
+ {ok, Size} = ?RAM_FILE_MODULE:uudecode(Fd),
+ {ok, Offs} = ?FILE_MODULE:position(FdReal, {bof,Offs}),
+ {ok, 0} = ?FILE_MODULE:position(Fd, bof),
+ true = compare(FdReal, Fd)
end,
lists:foreach(F, lists:seq(0,Sz-1, 43)),
- ?line ok = ?FILE_MODULE:close(FdReal),
- ?line ok = ?FILE_MODULE:close(Fd),
- ?line ok = ?FILE_MODULE:close(FdRealUu),
+ ok = ?FILE_MODULE:close(FdReal),
+ ok = ?FILE_MODULE:close(Fd),
+ ok = ?FILE_MODULE:close(FdRealUu),
%%
ok.
-
-large_file_errors(suite) ->
- [];
-large_file_errors(doc) ->
- ["Test error checking of large file offsets."];
+%% Test error checking of large file offsets.
large_file_errors(Config) when is_list(Config) ->
- ?line TwoGig = 1 bsl 31,
- ?line {ok,Fd} = ?RAM_FILE_MODULE:open("1234567890", [read,write]),
- ?line {error, einval} = ?FILE_MODULE:read(Fd, TwoGig),
- ?line {error, badarg} = ?FILE_MODULE:read(Fd, -1),
- ?line {error, einval} = ?FILE_MODULE:position(Fd, {bof,TwoGig}),
- ?line {error, einval} = ?FILE_MODULE:position(Fd, {bof,-TwoGig-1}),
- ?line {error, einval} = ?FILE_MODULE:position(Fd, {bof,-1}),
- ?line {error, einval} = ?FILE_MODULE:position(Fd, {cur,TwoGig}),
- ?line {error, einval} = ?FILE_MODULE:position(Fd, {cur,-TwoGig-1}),
- ?line {error, einval} = ?FILE_MODULE:position(Fd, {eof,TwoGig}),
- ?line {error, einval} = ?FILE_MODULE:position(Fd, {eof,-TwoGig-1}),
- ?line {error, einval} = ?FILE_MODULE:pread(Fd, TwoGig, 1),
- ?line {error, einval} = ?FILE_MODULE:pread(Fd, -TwoGig-1, 1),
- ?line {error, einval} = ?FILE_MODULE:pread(Fd, -1, 1),
- ?line {error, einval} = ?FILE_MODULE:pwrite(Fd, TwoGig, "@"),
- ?line {error, einval} = ?FILE_MODULE:pwrite(Fd, -TwoGig-1, "@"),
- ?line {error, einval} = ?FILE_MODULE:pwrite(Fd, -1, "@"),
- ?line {error, einval} = ?FILE_MODULE:pread(Fd, TwoGig, 0),
- ?line {error, einval} = ?FILE_MODULE:pread(Fd, -TwoGig-1, 0),
- ?line {error, einval} = ?FILE_MODULE:pread(Fd, -1, 0),
- ?line ok = ?FILE_MODULE:close(Fd),
+ TwoGig = 1 bsl 31,
+ {ok,Fd} = ?RAM_FILE_MODULE:open("1234567890", [read,write]),
+ {error, einval} = ?FILE_MODULE:read(Fd, TwoGig),
+ {error, badarg} = ?FILE_MODULE:read(Fd, -1),
+ {error, einval} = ?FILE_MODULE:position(Fd, {bof,TwoGig}),
+ {error, einval} = ?FILE_MODULE:position(Fd, {bof,-TwoGig-1}),
+ {error, einval} = ?FILE_MODULE:position(Fd, {bof,-1}),
+ {error, einval} = ?FILE_MODULE:position(Fd, {cur,TwoGig}),
+ {error, einval} = ?FILE_MODULE:position(Fd, {cur,-TwoGig-1}),
+ {error, einval} = ?FILE_MODULE:position(Fd, {eof,TwoGig}),
+ {error, einval} = ?FILE_MODULE:position(Fd, {eof,-TwoGig-1}),
+ {error, einval} = ?FILE_MODULE:pread(Fd, TwoGig, 1),
+ {error, einval} = ?FILE_MODULE:pread(Fd, -TwoGig-1, 1),
+ {error, einval} = ?FILE_MODULE:pread(Fd, -1, 1),
+ {error, einval} = ?FILE_MODULE:pwrite(Fd, TwoGig, "@"),
+ {error, einval} = ?FILE_MODULE:pwrite(Fd, -TwoGig-1, "@"),
+ {error, einval} = ?FILE_MODULE:pwrite(Fd, -1, "@"),
+ {error, einval} = ?FILE_MODULE:pread(Fd, TwoGig, 0),
+ {error, einval} = ?FILE_MODULE:pread(Fd, -TwoGig-1, 0),
+ {error, einval} = ?FILE_MODULE:pread(Fd, -1, 0),
+ ok = ?FILE_MODULE:close(Fd),
ok.
-large_file_light(suite) ->
- [];
-large_file_light(doc) ->
- ["Test light operations on a \"large\" ram_file."];
+%% Test light operations on a "large" ram_file.
large_file_light(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
%% Marker for next test case that is to heavy to run in a suite.
- ?line ok = ?FILE_MODULE:write_file(
- filename:join(PrivDir, "large_file_light"),
- <<"TAG">>),
- %%
- ?line Data = "abcdefghijklmnopqrstuvwzyz",
- ?line Size = sizeof(Data),
- ?line Max = (1 bsl 31) - 1,
- ?line Max__1 = Max - 1,
- ?line {ok, Fd} = ?RAM_FILE_MODULE:open(Data, [read]),
- ?line {ok, Data} = ?FILE_MODULE:read(Fd, Size+1),
- ?line {ok, Max__1} = ?FILE_MODULE:position(Fd, {eof, Max-Size-1}),
- ?line eof = ?FILE_MODULE:read(Fd, 1),
- ?line {ok, Max} = ?FILE_MODULE:position(Fd, {bof, Max}),
- ?line {ok, Zero} = ?FILE_MODULE:read(Fd, 0),
- ?line 0 = sizeof(Zero),
- ?line eof = ?FILE_MODULE:read(Fd, 1),
- ?line eof = ?FILE_MODULE:pread(Fd, Max__1, 1),
- ?line {ok, Zero} = ?FILE_MODULE:pread(Fd, Max, 0),
- ?line eof = ?FILE_MODULE:pread(Fd, Max, 1),
+ ok = ?FILE_MODULE:write_file(
+ filename:join(PrivDir, "large_file_light"),
+ <<"TAG">>),
+ %%
+ Data = "abcdefghijklmnopqrstuvwzyz",
+ Size = sizeof(Data),
+ Max = (1 bsl 31) - 1,
+ Max__1 = Max - 1,
+ {ok, Fd} = ?RAM_FILE_MODULE:open(Data, [read]),
+ {ok, Data} = ?FILE_MODULE:read(Fd, Size+1),
+ {ok, Max__1} = ?FILE_MODULE:position(Fd, {eof, Max-Size-1}),
+ eof = ?FILE_MODULE:read(Fd, 1),
+ {ok, Max} = ?FILE_MODULE:position(Fd, {bof, Max}),
+ {ok, Zero} = ?FILE_MODULE:read(Fd, 0),
+ 0 = sizeof(Zero),
+ eof = ?FILE_MODULE:read(Fd, 1),
+ eof = ?FILE_MODULE:pread(Fd, Max__1, 1),
+ {ok, Zero} = ?FILE_MODULE:pread(Fd, Max, 0),
+ eof = ?FILE_MODULE:pread(Fd, Max, 1),
ok.
-large_file_heavy(suite) ->
- [];
-large_file_heavy(doc) ->
- ["Test operations on a maximum size (2 GByte - 1) ram_file."];
+large_file_heavy() ->
+ [{timetrap,{minutes,5}}].
+
+%% Test operations on a maximum size (2 GByte - 1) ram_file.
large_file_heavy(Config) when is_list(Config) ->
- ?line PrivDir = ?config(priv_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
%% Check previous test case marker.
case ?FILE_MODULE:read_file_info(
filename:join(PrivDir, "large_file_light")) of
@@ -591,33 +549,33 @@ large_file_heavy(Config) when is_list(Config) ->
end.
do_large_file_heavy(_Config) ->
- ?line Data = "qwertyuiopasdfghjklzxcvbnm",
- ?line Size = sizeof(Data),
- ?line Max = (1 bsl 31) - 1,
- ?line Max__1 = Max - 1,
- ?line Max__3 = Max - 3,
- ?line {ok, Fd} = ?RAM_FILE_MODULE:open(Data, [read,write]),
- ?line {ok, Data} = ?FILE_MODULE:read(Fd, Size+1),
- ?line {ok, Max} = ?FILE_MODULE:position(Fd, {eof, Max-Size}),
- ?line eof = ?FILE_MODULE:read(Fd, 1),
- ?line erlang:display({allocating,2,'GByte',please,be,patient,'...'}),
- ?line ok = ?FILE_MODULE:write(Fd, ""),
- ?line erlang:display({allocating,2,'GByte',succeeded}),
- ?line {ok, Max__1} = ?FILE_MODULE:position(Fd, {eof, -1}),
- ?line {ok, [0]} = ?FILE_MODULE:read(Fd, 1),
- ?line {ok, []} = ?FILE_MODULE:read(Fd, 0),
- ?line eof = ?FILE_MODULE:read(Fd, 1),
- ?line ok = ?FILE_MODULE:pwrite(Fd, Max-3, "TAG"),
- ?line {ok, Max} = ?FILE_MODULE:position(Fd, cur),
- ?line {ok, Max__3} = ?FILE_MODULE:position(Fd, {eof, -3}),
- ?line {ok, "TAG"} = ?FILE_MODULE:read(Fd, 3+1),
- ?line {ok, Max__3} = ?FILE_MODULE:position(Fd, {cur, -3}),
- ?line ok = ?FILE_MODULE:write(Fd, "tag"),
- ?line {ok, Max} = ?FILE_MODULE:position(Fd, cur),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, bof),
- ?line {ok, "tag"} = ?FILE_MODULE:pread(Fd, Max__3, 3+1),
- ?line {ok, 0} = ?FILE_MODULE:position(Fd, cur),
- ?line ok = ?FILE_MODULE:close(Fd),
+ Data = "qwertyuiopasdfghjklzxcvbnm",
+ Size = sizeof(Data),
+ Max = (1 bsl 31) - 1,
+ Max__1 = Max - 1,
+ Max__3 = Max - 3,
+ {ok, Fd} = ?RAM_FILE_MODULE:open(Data, [read,write]),
+ {ok, Data} = ?FILE_MODULE:read(Fd, Size+1),
+ {ok, Max} = ?FILE_MODULE:position(Fd, {eof, Max-Size}),
+ eof = ?FILE_MODULE:read(Fd, 1),
+ erlang:display({allocating,2,'GByte',please,be,patient,'...'}),
+ ok = ?FILE_MODULE:write(Fd, ""),
+ erlang:display({allocating,2,'GByte',succeeded}),
+ {ok, Max__1} = ?FILE_MODULE:position(Fd, {eof, -1}),
+ {ok, [0]} = ?FILE_MODULE:read(Fd, 1),
+ {ok, []} = ?FILE_MODULE:read(Fd, 0),
+ eof = ?FILE_MODULE:read(Fd, 1),
+ ok = ?FILE_MODULE:pwrite(Fd, Max-3, "TAG"),
+ {ok, Max} = ?FILE_MODULE:position(Fd, cur),
+ {ok, Max__3} = ?FILE_MODULE:position(Fd, {eof, -3}),
+ {ok, "TAG"} = ?FILE_MODULE:read(Fd, 3+1),
+ {ok, Max__3} = ?FILE_MODULE:position(Fd, {cur, -3}),
+ ok = ?FILE_MODULE:write(Fd, "tag"),
+ {ok, Max} = ?FILE_MODULE:position(Fd, cur),
+ {ok, 0} = ?FILE_MODULE:position(Fd, bof),
+ {ok, "tag"} = ?FILE_MODULE:pread(Fd, Max__3, 3+1),
+ {ok, 0} = ?FILE_MODULE:position(Fd, cur),
+ ok = ?FILE_MODULE:close(Fd),
ok.
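The timetrap handling above is the pattern this commit migrates to throughout: the old ?t:timetrap/1 and ?t:timetrap_cancel/1 pairs disappear in favour of a suite-wide default in suite/0 and, where a single case needs more time (as large_file_heavy/0 does), a per-testcase info function. A hypothetical minimal suite showing both forms, not taken from this commit:

-module(demo_SUITE).
-export([suite/0, all/0, quick_case/1, slow_case/0, slow_case/1]).

%% Default timetrap for every case in the suite.
suite() ->
    [{timetrap,{minutes,2}}].

all() ->
    [quick_case, slow_case].

quick_case(_Config) ->
    ok.

%% Info function: overrides the suite default for this case only.
slow_case() ->
    [{timetrap,{minutes,5}}].

slow_case(_Config) ->
    ok.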
%%--------------------------------------------------------------------------
@@ -651,7 +609,7 @@ compare_data(A, B) when is_binary(A), is_list(B) ->
A == list_to_binary(B);
compare_data(A, B) when is_binary(A), is_binary(B) ->
A == B.
-
+
sizeof(Data) when is_list(Data) ->
length(Data);
sizeof(Data) when is_binary(Data) ->
diff --git a/lib/kernel/test/rpc_SUITE.erl b/lib/kernel/test/rpc_SUITE.erl
index ed30c2dffa..a89a7600a2 100644
--- a/lib/kernel/test/rpc_SUITE.erl
+++ b/lib/kernel/test/rpc_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,19 +21,22 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2]).
--export([call/1, block_call/1, multicall/1, multicall_timeout/1,
+-export([off_heap/1,
+ call/1, block_call/1, multicall/1, multicall_timeout/1,
multicall_dies/1, multicall_node_dies/1,
called_dies/1, called_node_dies/1,
called_throws/1, call_benchmark/1, async_call/1]).
-export([suicide/2, suicide/3, f/0, f2/0]).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,2}}].
all() ->
- [call, block_call, multicall, multicall_timeout,
+ [off_heap, call, block_call, multicall, multicall_timeout,
multicall_dies, multicall_node_dies, called_dies,
called_node_dies, called_throws, call_benchmark,
async_call].
@@ -53,274 +56,261 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
+off_heap(_Config) ->
+    %% The rex server process may receive a huge number of
+    %% messages. Make sure that they are stored off heap to
+    %% avoid excessive GCs.
+ MQD = message_queue_data,
+ {MQD,off_heap} = process_info(whereis(rex), MQD),
+ ok.
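The check above relies on the message_queue_data process flag introduced in OTP 19. How a server opts in is sketched below; the module and function names are placeholders and this is not how rex itself is started.

-module(offheap_demo).
-export([start/0]).

%% Spawn a process whose message queue is kept off the process heap,
%% so a long queue does not inflate its garbage collection times.
start() ->
    Pid = spawn_opt(fun loop/0, [{message_queue_data, off_heap}]),
    {message_queue_data, off_heap} = process_info(Pid, message_queue_data),
    Pid.

loop() ->
    receive
        stop -> ok;
        _Msg -> loop()
    end.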
-call(doc) -> "Test different rpc calls";
+%% Test different rpc calls.
call(Config) when is_list(Config) ->
- Timetrap = ?t:timetrap(?t:seconds(30)),
- ?line PA = filename:dirname(code:which(?MODULE)),
+ PA = filename:dirname(code:which(?MODULE)),
%% Note. First part of nodename sets response delay in seconds
- ?line {ok, N1} = ?t:start_node('3_rpc_SUITE_call', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N2} = ?t:start_node('1_rcp_SUITE_call', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N3} = ?t:start_node('4_rcp_SUITE_call', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N4} = ?t:start_node('8_rcp_SUITE_call', slave,
- [{args, "-pa " ++ PA}]),
- ?line ok = io:format("~p~n", [[N1, N2, N3]]),
- ?line {hej,_,N1} = rpc:call(N1, ?MODULE, f, []),
- ?line {hej,_,N2} = rpc:call(N2, ?MODULE, f, [], 2000),
- ?line {badrpc,timeout} = rpc:call(N3, ?MODULE, f, [], 2000),
- ?line receive after 6000 -> ok end,
- ?line [] = flush([]),
- ?line {hej,_,N4} = rpc:call(N4, ?MODULE, f, []),
- ?line ?t:stop_node(N1),
- ?line ?t:stop_node(N2),
- ?line ?t:stop_node(N3),
- ?line ?t:stop_node(N4),
- ?t:timetrap_cancel(Timetrap),
+ {ok, N1} = test_server:start_node('3_rpc_SUITE_call', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N2} = test_server:start_node('1_rcp_SUITE_call', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N3} = test_server:start_node('4_rcp_SUITE_call', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N4} = test_server:start_node('8_rcp_SUITE_call', slave,
+ [{args, "-pa " ++ PA}]),
+ ok = io:format("~p~n", [[N1, N2, N3]]),
+ {hej,_,N1} = rpc:call(N1, ?MODULE, f, []),
+ {hej,_,N2} = rpc:call(N2, ?MODULE, f, [], 2000),
+ {badrpc,timeout} = rpc:call(N3, ?MODULE, f, [], 2000),
+ receive after 6000 -> ok end,
+ [] = flush([]),
+ {hej,_,N4} = rpc:call(N4, ?MODULE, f, []),
+ test_server:stop_node(N1),
+ test_server:stop_node(N2),
+ test_server:stop_node(N3),
+ test_server:stop_node(N4),
ok.
-block_call(doc) -> "Test different rpc calls";
+%% Test different rpc calls.
block_call(Config) when is_list(Config) ->
- Timetrap = ?t:timetrap(?t:seconds(30)),
- ?line PA = filename:dirname(code:which(?MODULE)),
+ PA = filename:dirname(code:which(?MODULE)),
%% Note. First part of nodename sets response delay in seconds
- ?line {ok, N1} = ?t:start_node('3_rpc_SUITE_block_call', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N2} = ?t:start_node('1_rcp_SUITE_block_call', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N3} = ?t:start_node('4_rcp_SUITE_block_call', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N4} = ?t:start_node('8_rcp_SUITE_block_call', slave,
- [{args, "-pa " ++ PA}]),
- ?line ok = io:format("~p~n", [[N1, N2, N3]]),
- ?line {hej,_,N1} = rpc:block_call(N1, ?MODULE, f, []),
- ?line {hej,_,N2} = rpc:block_call(N2, ?MODULE, f, [], 2000),
- ?line {badrpc,timeout} = rpc:block_call(N3, ?MODULE, f, [], 2000),
- ?line receive after 6000 -> ok end,
- ?line [] = flush([]),
- ?line {hej,_,N4} = rpc:block_call(N4, ?MODULE, f, []),
- ?line ?t:stop_node(N1),
- ?line ?t:stop_node(N2),
- ?line ?t:stop_node(N3),
- ?line ?t:stop_node(N4),
- ?t:timetrap_cancel(Timetrap),
+ {ok, N1} = test_server:start_node('3_rpc_SUITE_block_call', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N2} = test_server:start_node('1_rcp_SUITE_block_call', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N3} = test_server:start_node('4_rcp_SUITE_block_call', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N4} = test_server:start_node('8_rcp_SUITE_block_call', slave,
+ [{args, "-pa " ++ PA}]),
+ ok = io:format("~p~n", [[N1, N2, N3]]),
+ {hej,_,N1} = rpc:block_call(N1, ?MODULE, f, []),
+ {hej,_,N2} = rpc:block_call(N2, ?MODULE, f, [], 2000),
+ {badrpc,timeout} = rpc:block_call(N3, ?MODULE, f, [], 2000),
+ receive after 6000 -> ok end,
+ [] = flush([]),
+ {hej,_,N4} = rpc:block_call(N4, ?MODULE, f, []),
+ test_server:stop_node(N1),
+ test_server:stop_node(N2),
+ test_server:stop_node(N3),
+ test_server:stop_node(N4),
ok.
-multicall(doc) ->
- "OTP-3449";
+%% OTP-3449.
multicall(Config) when is_list(Config) ->
- Timetrap = ?t:timetrap(?t:seconds(20)),
- ?line PA = filename:dirname(code:which(?MODULE)),
+ PA = filename:dirname(code:which(?MODULE)),
%% Note. First part of nodename sets response delay in seconds
- ?line {ok, N1} = ?t:start_node('3_rpc_SUITE_multicall', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N2} = ?t:start_node('1_rcp_SUITE_multicall', slave,
- [{args, "-pa " ++ PA}]),
- ?line ok = io:format("~p~n", [[N1, N2]]),
- ?line {[{hej,_,N1},{hej,_,N2}],[]} =
- rpc:multicall([N1, N2], ?MODULE, f, []),
- ?line Msgs = flush([]),
- ?line [] = Msgs,
- ?line ?t:stop_node(N1),
- ?line ?t:stop_node(N2),
- ?t:timetrap_cancel(Timetrap),
+ {ok, N1} = test_server:start_node('3_rpc_SUITE_multicall', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N2} = test_server:start_node('1_rcp_SUITE_multicall', slave,
+ [{args, "-pa " ++ PA}]),
+ ok = io:format("~p~n", [[N1, N2]]),
+ {[{hej,_,N1},{hej,_,N2}],[]} =
+ rpc:multicall([N1, N2], ?MODULE, f, []),
+ Msgs = flush([]),
+ [] = Msgs,
+ test_server:stop_node(N1),
+ test_server:stop_node(N2),
ok.
multicall_timeout(Config) when is_list(Config) ->
- Timetrap = ?t:timetrap(?t:seconds(30)),
- ?line PA = filename:dirname(code:which(?MODULE)),
+ PA = filename:dirname(code:which(?MODULE)),
%% Note. First part of nodename sets response delay in seconds
- ?line {ok, N1} = ?t:start_node('11_rpc_SUITE_multicall', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N2} = ?t:start_node('8_rpc_SUITE_multicall', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N3} = ?t:start_node('5_rpc_SUITE_multicall', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N4} = ?t:start_node('2_rcp_SUITE_multicall', slave,
- [{args, "-pa " ++ PA}]),
- ?line ok = io:format("~p~n", [[N1, N2]]),
- ?line {[{hej,_,N3},{hej,_,N4}],[N1, N2]} =
- rpc:multicall([N3, N1, N2, N4], ?MODULE, f, [], ?t:seconds(6)),
- ?t:sleep(?t:seconds(8)), %% Wait for late answers
- ?line Msgs = flush([]),
- ?line [] = Msgs,
- ?line ?t:stop_node(N1),
- ?line ?t:stop_node(N2),
- ?line ?t:stop_node(N3),
- ?line ?t:stop_node(N4),
- ?t:timetrap_cancel(Timetrap),
+ {ok, N1} = test_server:start_node('11_rpc_SUITE_multicall', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N2} = test_server:start_node('8_rpc_SUITE_multicall', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N3} = test_server:start_node('5_rpc_SUITE_multicall', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N4} = test_server:start_node('2_rcp_SUITE_multicall', slave,
+ [{args, "-pa " ++ PA}]),
+ ok = io:format("~p~n", [[N1, N2]]),
+ {[{hej,_,N3},{hej,_,N4}],[N1, N2]} =
+ rpc:multicall([N3, N1, N2, N4], ?MODULE, f, [], 6000),
+ ct:sleep({seconds,8}), %Wait for late answers
+ Msgs = flush([]),
+ [] = Msgs,
+ test_server:stop_node(N1),
+ test_server:stop_node(N2),
+ test_server:stop_node(N3),
+ test_server:stop_node(N4),
ok.
multicall_dies(Config) when is_list(Config) ->
- Timetrap = ?t:timetrap(?t:seconds(30)),
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line {ok, N1} = ?t:start_node('rpc_SUITE_multicall_dies_1', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N2} = ?t:start_node('rcp_SUITE_multicall_dies_2', slave,
- [{args, "-pa " ++ PA}]),
- ?line Nodes = [N1, N2],
+ PA = filename:dirname(code:which(?MODULE)),
+ {ok, N1} = test_server:start_node('rpc_SUITE_multicall_dies_1', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N2} = test_server:start_node('rcp_SUITE_multicall_dies_2', slave,
+ [{args, "-pa " ++ PA}]),
+ Nodes = [N1, N2],
%%
- ?line {[{badrpc, {'EXIT', normal}}, {badrpc, {'EXIT', normal}}], []} =
+ {[{badrpc, {'EXIT', normal}}, {badrpc, {'EXIT', normal}}], []} =
do_multicall(Nodes, erlang, exit, [normal]),
- ?line {[{badrpc, {'EXIT', abnormal}}, {badrpc, {'EXIT', abnormal}}], []} =
+ {[{badrpc, {'EXIT', abnormal}}, {badrpc, {'EXIT', abnormal}}], []} =
do_multicall(Nodes, erlang, exit, [abnormal]),
- ?line {[{badrpc, {'EXIT', {badarith, _}}},
- {badrpc, {'EXIT', {badarith, _}}}],
- []} =
+ {[{badrpc, {'EXIT', {badarith, _}}},
+ {badrpc, {'EXIT', {badarith, _}}}],
+ []} =
do_multicall(Nodes, erlang, 'div', [1, 0]),
- ?line {[{badrpc, {'EXIT', {badarg, _}}},
- {badrpc, {'EXIT', {badarg, _}}}],
- []} =
+ {[{badrpc, {'EXIT', {badarg, _}}},
+ {badrpc, {'EXIT', {badarg, _}}}],
+ []} =
do_multicall(Nodes, erlang, atom_to_list, [1]),
- ?line {[{badrpc, {'EXIT', {undef, _}}},
- {badrpc, {'EXIT', {undef, _}}}],
- []} =
+ {[{badrpc, {'EXIT', {undef, _}}},
+ {badrpc, {'EXIT', {undef, _}}}],
+ []} =
do_multicall(Nodes, ?MODULE, suicide, []),
- ?line {[timeout, timeout], []} =
+ {[timeout, timeout], []} =
do_multicall(Nodes, ?MODULE, suicide, [link, normal]),
- ?line {[{badrpc, {'EXIT', abnormal}}, {badrpc, {'EXIT', abnormal}}], []} =
+ {[{badrpc, {'EXIT', abnormal}}, {badrpc, {'EXIT', abnormal}}], []} =
do_multicall(Nodes, ?MODULE, suicide, [link, abnormal]),
- ?line {[timeout, timeout], []} =
+ {[timeout, timeout], []} =
do_multicall(Nodes, ?MODULE, suicide, [exit, normal]),
- ?line {[{badrpc, {'EXIT', abnormal}}, {badrpc, {'EXIT', abnormal}}], []} =
+ {[{badrpc, {'EXIT', abnormal}}, {badrpc, {'EXIT', abnormal}}], []} =
do_multicall(Nodes, ?MODULE, suicide, [exit, abnormal]),
- ?line {[{badrpc, {'EXIT', killed}}, {badrpc, {'EXIT', killed}}], []} =
+ {[{badrpc, {'EXIT', killed}}, {badrpc, {'EXIT', killed}}], []} =
do_multicall(Nodes, ?MODULE, suicide, [exit, kill]),
%%
- ?line ?t:stop_node(N1),
- ?line ?t:stop_node(N2),
- ?t:timetrap_cancel(Timetrap),
+ test_server:stop_node(N1),
+ test_server:stop_node(N2),
ok.
do_multicall(Nodes, Mod, Func, Args) ->
- ?line ok = io:format("~p:~p~p~n", [Mod, Func, Args]),
- ?line Result = rpc:multicall(Nodes, Mod, Func, Args),
- ?line Msgs = flush([]),
- ?line [] = Msgs,
+ ok = io:format("~p:~p~p~n", [Mod, Func, Args]),
+ Result = rpc:multicall(Nodes, Mod, Func, Args),
+ Msgs = flush([]),
+ [] = Msgs,
Result.
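As a reminder of the shape that do_multicall/4 pattern matches on: rpc:multicall/4 returns {Replies, BadNodes}, with one reply per node that answered and the unreachable nodes listed separately. A standalone sketch under that assumption; the node list is whatever the caller passes in.

-module(multicall_demo).
-export([roll_call/1]).

%% Ask every node for its own name; nodes that cannot be reached end
%% up in BadNodes instead of contributing a reply.
roll_call(Nodes) ->
    {Replies, BadNodes} = rpc:multicall(Nodes, erlang, node, []),
    io:format("answered: ~p~nunreachable: ~p~n", [Replies, BadNodes]),
    {Replies, BadNodes}.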
-multicall_node_dies(doc) ->
- "";
multicall_node_dies(Config) when is_list(Config) ->
- Timetrap = ?t:timetrap(?t:seconds(60)),
- %%
do_multicall_2_nodes_dies(?MODULE, suicide, [erlang, halt, []]),
do_multicall_2_nodes_dies(?MODULE, suicide, [init, stop, []]),
do_multicall_2_nodes_dies(?MODULE, suicide, [rpc, stop, []]),
- %%
- ?t:timetrap_cancel(Timetrap),
ok.
do_multicall_2_nodes_dies(Mod, Func, Args) ->
- ?line ok = io:format("~p:~p~p~n", [Mod, Func, Args]),
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line {ok, N1} = ?t:start_node('rpc_SUITE_multicall_node_dies_1', slave,
- [{args, "-pa " ++ PA}]),
- ?line {ok, N2} = ?t:start_node('rcp_SUITE_multicall_node_dies_2', slave,
- [{args, "-pa " ++ PA}]),
- ?line Nodes = [N1, N2],
- ?line {[], Nodes} = rpc:multicall(Nodes, Mod, Func, Args),
- ?line Msgs = flush([]),
- ?line [] = Msgs,
+ ok = io:format("~p:~p~p~n", [Mod, Func, Args]),
+ PA = filename:dirname(code:which(?MODULE)),
+ {ok, N1} = test_server:start_node('rpc_SUITE_multicall_node_dies_1', slave,
+ [{args, "-pa " ++ PA}]),
+ {ok, N2} = test_server:start_node('rcp_SUITE_multicall_node_dies_2', slave,
+ [{args, "-pa " ++ PA}]),
+ Nodes = [N1, N2],
+ {[], Nodes} = rpc:multicall(Nodes, Mod, Func, Args),
+ Msgs = flush([]),
+ [] = Msgs,
ok.
-called_dies(doc) ->
- "OTP-3766";
+%% OTP-3766.
called_dies(Config) when is_list(Config) ->
- Timetrap = ?t:timetrap(?t:seconds(210)),
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line {ok, N} = ?t:start_node(rpc_SUITE_called_dies, slave,
- [{args, "-pa " ++ PA}]),
+ PA = filename:dirname(code:which(?MODULE)),
+ {ok, N} = test_server:start_node(rpc_SUITE_called_dies, slave,
+ [{args, "-pa " ++ PA}]),
%%
- ?line rep(fun (Tag, Call, Args) ->
- {Tag,{badrpc,{'EXIT',normal}}} =
- {Tag,apply(rpc, Call, Args)}
- end, N, erlang, exit, [normal]),
- ?line rep(fun (Tag, Call, Args) ->
- {Tag,{badrpc,{'EXIT',abnormal}}} =
- {Tag,apply(rpc, Call, Args)}
- end, N, erlang, exit, [abnormal]),
- ?line rep(fun (Tag, Call, Args) ->
- {Tag,{badrpc,{'EXIT',{badarith,_}}}} =
- {Tag,apply(rpc, Call, Args)}
- end, N, erlang, 'div', [1,0]),
- ?line rep(fun (Tag, Call, Args) ->
- {Tag,{badrpc,{'EXIT',{badarg,_}}}} =
- {Tag,apply(rpc, Call, Args)}
- end, N, erlang, atom_to_list, [1]),
- ?line rep(fun (Tag, Call, Args) ->
- {Tag,{badrpc,{'EXIT',{undef,_}}}} =
- {Tag,apply(rpc, Call, Args)}
- end, N, ?MODULE, suicide, []),
+ rep(fun (Tag, Call, Args) ->
+ {Tag,{badrpc,{'EXIT',normal}}} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, erlang, exit, [normal]),
+ rep(fun (Tag, Call, Args) ->
+ {Tag,{badrpc,{'EXIT',abnormal}}} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, erlang, exit, [abnormal]),
+ rep(fun (Tag, Call, Args) ->
+ {Tag,{badrpc,{'EXIT',{badarith,_}}}} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, erlang, 'div', [1,0]),
+ rep(fun (Tag, Call, Args) ->
+ {Tag,{badrpc,{'EXIT',{badarg,_}}}} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, erlang, atom_to_list, [1]),
+ rep(fun (Tag, Call, Args) ->
+ {Tag,{badrpc,{'EXIT',{undef,_}}}} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, ?MODULE, suicide, []),
%%
TrapExit = process_flag(trap_exit, true),
%%
- ?line rep(fun (Tag, Call, Args=[Node|_]) when Node == node() ->
- {Tag,timeout} =
- {Tag,apply(rpc, Call, Args)},
- {Tag,flush,[{'EXIT',_,normal}]} =
- {Tag,flush,flush([])};
- (Tag, Call, Args) ->
- {Tag,timeout} =
- {Tag,apply(rpc, Call, Args)}
- end, N, ?MODULE, suicide, [link,normal]),
- ?line rep(fun (Tag, Call, Args=[Node|_]) when Node == node() ->
- {Tag,timeout} =
- {Tag,apply(rpc, Call, Args)},
- {Tag,flush,[{'EXIT',_,abnormal}]} =
- {Tag,flush,flush([])};
- (Tag, block_call, Args) ->
- {Tag,timeout} =
- {Tag,apply(rpc, block_call, Args)};
- (Tag, Call, Args) ->
- {Tag,{badrpc,{'EXIT',abnormal}}} =
- {Tag,apply(rpc, Call, Args)}
- end, N, ?MODULE, suicide, [link,abnormal]),
- ?line rep(fun (Tag, Call, Args=[Node|_]) when Node == node() ->
- {Tag,timeout} =
- {Tag,apply(rpc, Call, Args)},
- {Tag,flush,[{'EXIT',_,normal}]} =
- {Tag,flush,flush([])};
- (Tag, Call, Args) ->
- {Tag,timeout} =
- {Tag,apply(rpc, Call, Args)}
- end, N, ?MODULE, suicide, [exit,normal]),
- ?line rep(fun (Tag, Call, Args=[Node|_]) when Node == node() ->
- {Tag,timeout} =
- {Tag,apply(rpc, Call, Args)},
- {Tag,flush,[{'EXIT',_,abnormal}]} =
- {Tag,flush,flush([])};
- (Tag, block_call, Args) ->
- {Tag,timeout} =
- {Tag,apply(rpc, block_call, Args)};
- (Tag, Call, Args) ->
- {Tag,{badrpc,{'EXIT',abnormal}}} =
- {Tag,apply(rpc, Call, Args)}
- end, N, ?MODULE, suicide, [exit,abnormal]),
+ rep(fun (Tag, Call, Args=[Node|_]) when Node == node() ->
+ {Tag,timeout} =
+ {Tag,apply(rpc, Call, Args)},
+ {Tag,flush,[{'EXIT',_,normal}]} =
+ {Tag,flush,flush([])};
+ (Tag, Call, Args) ->
+ {Tag,timeout} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, ?MODULE, suicide, [link,normal]),
+ rep(fun (Tag, Call, Args=[Node|_]) when Node == node() ->
+ {Tag,timeout} =
+ {Tag,apply(rpc, Call, Args)},
+ {Tag,flush,[{'EXIT',_,abnormal}]} =
+ {Tag,flush,flush([])};
+ (Tag, block_call, Args) ->
+ {Tag,timeout} =
+ {Tag,apply(rpc, block_call, Args)};
+ (Tag, Call, Args) ->
+ {Tag,{badrpc,{'EXIT',abnormal}}} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, ?MODULE, suicide, [link,abnormal]),
+ rep(fun (Tag, Call, Args=[Node|_]) when Node == node() ->
+ {Tag,timeout} =
+ {Tag,apply(rpc, Call, Args)},
+ {Tag,flush,[{'EXIT',_,normal}]} =
+ {Tag,flush,flush([])};
+ (Tag, Call, Args) ->
+ {Tag,timeout} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, ?MODULE, suicide, [exit,normal]),
+ rep(fun (Tag, Call, Args=[Node|_]) when Node == node() ->
+ {Tag,timeout} =
+ {Tag,apply(rpc, Call, Args)},
+ {Tag,flush,[{'EXIT',_,abnormal}]} =
+ {Tag,flush,flush([])};
+ (Tag, block_call, Args) ->
+ {Tag,timeout} =
+ {Tag,apply(rpc, block_call, Args)};
+ (Tag, Call, Args) ->
+ {Tag,{badrpc,{'EXIT',abnormal}}} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, ?MODULE, suicide, [exit,abnormal]),
%%
process_flag(trap_exit, TrapExit),
%%
- ?line rep(fun %% A local [exit,kill] would kill the test case process
- (_Tag, _Call, [Node|_]) when Node == node() ->
- ok;
- %% A block_call [exit,kill] would kill the rpc server
- (_Tag, block_call, _Args) -> ok;
- (Tag, Call, Args) ->
- {Tag,{badrpc,{'EXIT',killed}}} =
- {Tag,apply(rpc, Call, Args)}
- end, N, ?MODULE, suicide, [exit,kill]),
+ rep(fun %% A local [exit,kill] would kill the test case process
+ (_Tag, _Call, [Node|_]) when Node == node() ->
+ ok;
+ %% A block_call [exit,kill] would kill the rpc server
+ (_Tag, block_call, _Args) -> ok;
+ (Tag, Call, Args) ->
+ {Tag,{badrpc,{'EXIT',killed}}} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, ?MODULE, suicide, [exit,kill]),
%%
- ?line [] = flush([]),
- ?line ?t:stop_node(N),
- ?t:timetrap_cancel(Timetrap),
+ [] = flush([]),
+ test_server:stop_node(N),
ok.
rep(Fun, N, M, F, A) ->
@@ -335,7 +325,7 @@ rep(Fun, N, M, F, A) ->
Fun(9, block_call, [N, M, F, A, infinity]),
Fun(10, block_call, [N, M, F, A, 3000]),
ok.
-
+
suicide(link, Reason) ->
spawn_link(
@@ -364,109 +354,83 @@ suicide(Mod, Func, Args) ->
-called_node_dies(doc) ->
- "";
-called_node_dies(suite) -> [];
called_node_dies(Config) when is_list(Config) ->
- Timetrap = ?t:timetrap(?t:minutes(2)),
- ?line PA = filename:dirname(code:which(?MODULE)),
- %%
- ?line node_rep(
- fun (Tag, Call, Args) ->
- {Tag,{badrpc,nodedown}} =
- {Tag,apply(rpc, Call, Args)}
- end, "rpc_SUITE_called_node_dies_1",
- PA, ?MODULE, suicide, [erlang,halt,[]]),
- ?line node_rep(
- fun (Tag, Call, Args) ->
- {Tag,{badrpc,nodedown}} =
- {Tag,apply(rpc, Call, Args)}
- end, "rpc_SUITE_called_node_dies_2",
- PA, ?MODULE, suicide, [init,stop,[]]),
- ?line node_rep(
- fun (Tag, Call, Args=[_|_]) ->
- {Tag,{'EXIT',{killed,_}}} =
- {Tag,catch {noexit,apply(rpc, Call, Args)}}
- end, "rpc_SUITE_called_node_dies_3",
- PA, ?MODULE, suicide, [erlang,exit,[rex,kill]]),
- ?line node_rep(
- fun %% Cannot block call rpc - will hang
- (_Tag, block_call, _Args) -> ok;
- (Tag, Call, Args=[_|_]) ->
- {Tag,{'EXIT',{normal,_}}} =
- {Tag,catch {noexit,apply(rpc, Call, Args)}}
- end, "rpc_SUITE_called_node_dies_4",
- PA, ?MODULE, suicide, [rpc,stop,[]]),
- %%
- ?t:timetrap_cancel(Timetrap),
+ PA = filename:dirname(code:which(?MODULE)),
+
+ node_rep(
+ fun (Call, Args) ->
+ {badrpc,nodedown} = apply(rpc, Call, Args)
+ end, "rpc_SUITE_called_node_dies_1",
+ PA, ?MODULE, suicide, [erlang,halt,[]]),
+
+ node_rep(
+ fun (Call, Args) ->
+ {badrpc,nodedown} = apply(rpc, Call, Args)
+ end, "rpc_SUITE_called_node_dies_2",
+ PA, ?MODULE, suicide, [init,stop,[]]),
+
+ node_rep(
+ fun (Call, Args=[_|_]) ->
+ {badrpc,{'EXIT',{killed,_}}} = apply(rpc, Call, Args)
+ end, "rpc_SUITE_called_node_dies_3",
+ PA, ?MODULE, suicide, [erlang,exit,[rex,kill]]),
+
+ node_rep(
+ fun (block_call, _Args) ->
+ %% Cannot block call rpc - will hang
+ ok;
+ (Call, Args=[_|_]) ->
+ {badrpc,{'EXIT',{normal,_}}} = apply(rpc, Call, Args)
+ end, "rpc_SUITE_called_node_dies_4",
+ PA, ?MODULE, suicide, [rpc,stop,[]]),
+
ok.
node_rep(Fun, Name, PA, M, F, A) ->
- {ok, Na} = ?t:start_node(list_to_atom(Name++"_a"), slave,
- [{args, "-pa " ++ PA}]),
- Fun(a, call, [Na, M, F, A]),
- catch ?t:stop_node(Na),
- {ok, Nb} = ?t:start_node(list_to_atom(Name++"_b"), slave,
- [{args, "-pa " ++ PA}]),
- Fun(b, call, [Nb, M, F, A, infinity]),
- catch ?t:stop_node(Nb),
- {ok, Nc} = ?t:start_node(list_to_atom(Name++"_c"), slave,
- [{args, "-pa " ++ PA}]),
- Fun(c, call, [Nc, M, F, A, infinity]),
- catch ?t:stop_node(Nc),
- %%
- {ok, Nd} = ?t:start_node(list_to_atom(Name++"_d"), slave,
- [{args, "-pa " ++ PA}]),
- Fun(d, block_call, [Nd, M, F, A]),
- catch ?t:stop_node(Nd),
- {ok, Ne} = ?t:start_node(list_to_atom(Name++"_e"), slave,
- [{args, "-pa " ++ PA}]),
- Fun(e, block_call, [Ne, M, F, A, infinity]),
- catch ?t:stop_node(Ne),
- {ok, Nf} = ?t:start_node(list_to_atom(Name++"_f"), slave,
- [{args, "-pa " ++ PA}]),
- Fun(f, block_call, [Nf, M, F, A, infinity]),
- catch ?t:stop_node(Nf),
+ node_rep_call(a, call, [M,F,A], Fun, Name, PA),
+ node_rep_call(b, call, [M,F,A,infinity], Fun, Name, PA),
+ node_rep_call(c, block_call, [M,F,A], Fun, Name, PA),
+ node_rep_call(d, block_call, [M,F,A,infinity], Fun, Name, PA).
+
+node_rep_call(Tag, Call, Args, Fun, Name0, PA) ->
+ Name = list_to_atom(Name0 ++ "_" ++ atom_to_list(Tag)),
+ {ok, N} = test_server:start_node(Name, slave,
+ [{args, "-pa " ++ PA}]),
+ Fun(Call, [N|Args]),
+ catch test_server:stop_node(N),
ok.
-
-
-called_throws(doc) ->
- "OTP-3766";
+%% OTP-3766.
called_throws(Config) when is_list(Config) ->
- Timetrap = ?t:timetrap(?t:seconds(10)),
- ?line PA = filename:dirname(code:which(?MODULE)),
+ PA = filename:dirname(code:which(?MODULE)),
%%
- ?line {ok, N} = ?t:start_node(rpc_SUITE_called_throws, slave,
- [{args, "-pa " ++ PA}]),
+ {ok, N} = test_server:start_node(rpc_SUITE_called_throws, slave,
+ [{args, "-pa " ++ PA}]),
%%
- ?line rep(fun (Tag, Call, Args) ->
- {Tag,up} =
- {Tag,apply(rpc, Call, Args)}
- end, N, erlang, throw, [up]),
- ?line rep(fun (Tag, Call, Args) ->
- {Tag,{badrpc,{'EXIT',reason}}} =
- {Tag,apply(rpc, Call, Args)}
- end, N, erlang, throw, [{'EXIT',reason}]),
+ rep(fun (Tag, Call, Args) ->
+ {Tag,up} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, erlang, throw, [up]),
+ rep(fun (Tag, Call, Args) ->
+ {Tag,{badrpc,{'EXIT',reason}}} =
+ {Tag,apply(rpc, Call, Args)}
+ end, N, erlang, throw, [{'EXIT',reason}]),
%%
- ?line ?t:stop_node(N),
- ?t:timetrap_cancel(Timetrap),
+ test_server:stop_node(N),
ok.
call_benchmark(Config) when is_list(Config) ->
- Timetrap = ?t:timetrap(?t:seconds(120)),
PA = filename:dirname(code:which(?MODULE)),
- {ok, Node} = ?t:start_node(rpc_SUITE_call_benchmark, slave,
- [{args, "-pa " ++ PA}]),
+ {ok, Node} = test_server:start_node(rpc_SUITE_call_benchmark, slave,
+ [{args, "-pa " ++ PA}]),
Iter = case erlang:system_info(modified_timing_level) of
undefined -> 10000;
_ -> 500 %Modified timing - spawn is slower
end,
Res = do_call_benchmark(Node, Iter),
- ?t:stop_node(Node),
- ?t:timetrap_cancel(Timetrap),
+ test_server:stop_node(Node),
Res.
do_call_benchmark(Node, M) when is_integer(M), M > 0 ->
@@ -486,31 +450,28 @@ do_call_benchmark(Node, I, M) ->
do_call_benchmark(Node, I+1, M).
async_call(Config) when is_list(Config) ->
- Dog = ?t:timetrap(?t:seconds(120)),
-
%% Note: First part of nodename sets response delay in seconds.
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line NodeArgs = [{args,"-pa "++ PA}],
- ?line {ok,Node1} = ?t:start_node('1_rpc_SUITE_call', slave, NodeArgs),
- ?line {ok,Node2} = ?t:start_node('10_rpc_SUITE_call', slave, NodeArgs),
- ?line {ok,Node3} = ?t:start_node('20_rpc_SUITE_call', slave, NodeArgs),
- ?line Promise1 = rpc:async_call(Node1, ?MODULE, f, []),
- ?line Promise2 = rpc:async_call(Node2, ?MODULE, f, []),
- ?line Promise3 = rpc:async_call(Node3, ?MODULE, f, []),
+ PA = filename:dirname(code:which(?MODULE)),
+ NodeArgs = [{args,"-pa "++ PA}],
+ {ok,Node1} = test_server:start_node('1_rpc_SUITE_call', slave, NodeArgs),
+ {ok,Node2} = test_server:start_node('10_rpc_SUITE_call', slave, NodeArgs),
+ {ok,Node3} = test_server:start_node('20_rpc_SUITE_call', slave, NodeArgs),
+ Promise1 = rpc:async_call(Node1, ?MODULE, f, []),
+ Promise2 = rpc:async_call(Node2, ?MODULE, f, []),
+ Promise3 = rpc:async_call(Node3, ?MODULE, f, []),
%% Test fast timeouts.
- ?line timeout = rpc:nb_yield(Promise2),
- ?line timeout = rpc:nb_yield(Promise2, 10),
+ timeout = rpc:nb_yield(Promise2),
+ timeout = rpc:nb_yield(Promise2, 10),
%% Let Node1 finish its work before yielding.
- ?t:sleep(?t:seconds(2)),
- ?line {hej,_,Node1} = rpc:yield(Promise1),
+ ct:sleep({seconds,2}),
+ {hej,_,Node1} = rpc:yield(Promise1),
%% Wait for the Node2 and Node3.
- ?line {value,{hej,_,Node2}} = rpc:nb_yield(Promise2, infinity),
- ?line {hej,_,Node3} = rpc:yield(Promise3),
+ {value,{hej,_,Node2}} = rpc:nb_yield(Promise2, infinity),
+ {hej,_,Node3} = rpc:yield(Promise3),
- ?t:timetrap_cancel(Dog),
ok.
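The test above exercises the promise API: rpc:async_call/4 returns a key immediately, rpc:nb_yield/2 polls it without blocking (returning timeout or {value, Result}), and rpc:yield/1 blocks until the answer arrives. A minimal sketch independent of the suite; the target node is whatever the caller supplies.

-module(async_call_demo).
-export([demo/1]).

%% Fire off a call, poll briefly, and fall back to a blocking wait.
demo(Node) ->
    Key = rpc:async_call(Node, erlang, node, []),
    case rpc:nb_yield(Key, 100) of
        {value, Answer} ->
            Answer;            %% answer was already in
        timeout ->
            rpc:yield(Key)     %% block until it arrives
    end.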
%%%
diff --git a/lib/kernel/test/sendfile_SUITE.erl b/lib/kernel/test/sendfile_SUITE.erl
index a82b9c5fcd..0c0b1cbcb6 100644
--- a/lib/kernel/test/sendfile_SUITE.erl
+++ b/lib/kernel/test/sendfile_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2011-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2011-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -23,37 +23,48 @@
-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/file.hrl").
--compile(export_all).
-
-all() -> [{group,async_threads},
- {group,no_async_threads}].
-
-groups() ->
- [{async_threads,[],tcs()},
- {no_async_threads,[],tcs()}].
-
-tcs() ->
- [t_sendfile_small
- ,t_sendfile_big_all
- ,t_sendfile_big_size
- ,t_sendfile_many_small
- ,t_sendfile_partial
- ,t_sendfile_offset
- ,t_sendfile_sendafter
- ,t_sendfile_recvafter
- ,t_sendfile_recvafter_remoteclose
- ,t_sendfile_sendduring
- ,t_sendfile_recvduring
- ,t_sendfile_closeduring
- ,t_sendfile_crashduring
- ].
+-export([all/0, init_per_suite/1, end_per_suite/1, init_per_testcase/2]).
+
+-export([sendfile_server/2, sendfile_do_recv/2, init/1, handle_event/2]).
+
+-export(
+ [t_sendfile_small/1,
+ t_sendfile_big_all/1,
+ t_sendfile_big_size/1,
+ t_sendfile_many_small/1,
+ t_sendfile_partial/1,
+ t_sendfile_offset/1,
+ t_sendfile_sendafter/1,
+ t_sendfile_recvafter/1,
+ t_sendfile_recvafter_remoteclose/1,
+ t_sendfile_sendduring/1,
+ t_sendfile_recvduring/1,
+ t_sendfile_closeduring/1,
+ t_sendfile_crashduring/1,
+ t_sendfile_arguments/1]).
+
+all() ->
+ [t_sendfile_small,
+ t_sendfile_big_all,
+ t_sendfile_big_size,
+ t_sendfile_many_small,
+ t_sendfile_partial,
+ t_sendfile_offset,
+ t_sendfile_sendafter,
+ t_sendfile_recvafter,
+ t_sendfile_recvafter_remoteclose,
+ t_sendfile_sendduring,
+ t_sendfile_recvduring,
+ t_sendfile_closeduring,
+ t_sendfile_crashduring,
+ t_sendfile_arguments].
init_per_suite(Config) ->
case {os:type(),os:version()} of
{{unix,sunos}, {5,8,_}} ->
{skip, "Solaris 8 not supported for now"};
_ ->
- Priv = ?config(priv_dir, Config),
+ Priv = proplists:get_value(priv_dir, Config),
SFilename = filename:join(Priv, "sendfile_small.html"),
{ok, DS} = file:open(SFilename,[write,raw]),
file:write(DS,"yo baby yo"),
@@ -72,43 +83,32 @@ init_per_suite(Config) ->
end_per_suite(Config) ->
file:delete(proplists:get_value(big_file, Config)).
-init_per_group(async_threads,Config) ->
- case erlang:system_info(thread_pool_size) of
- 0 ->
- {skip,"No async threads"};
- _ ->
- [{sendfile_opts,[{use_threads,true}]}|Config]
- end;
-init_per_group(no_async_threads,Config) ->
- [{sendfile_opts,[{use_threads,false}]}|Config].
-
-end_per_group(_,_Config) ->
- ok.
-
init_per_testcase(TC,Config) when TC == t_sendfile_recvduring;
TC == t_sendfile_sendduring ->
Filename = proplists:get_value(small_file, Config),
Send = fun(Sock) ->
{_Size, Data} = sendfile_file_info(Filename),
- {ok,D} = file:open(Filename, [raw,binary,read]),
- prim_file:sendfile(D, Sock, 0, 0, 0,
- [],[],[]),
+ {ok,Fd} = file:open(Filename, [raw,binary,read]),
+ %% Determine whether the driver has native support by
+ %% hitting the raw module directly; file:sendfile/5 will
+ %% land in the fallback if it doesn't.
+ RawModule = Fd#file_descriptor.module,
+ {ok, _Ignored} = RawModule:sendfile(Fd,Sock,0,0,0,[],[],[]),
Data
end,
%% Check if sendfile is supported on this platform
case catch sendfile_send(Send) of
ok ->
- Config;
+ init_per_testcase(t_sendfile, Config);
Error ->
ct:log("Error: ~p",[Error]),
{skip,"Not supported"}
end;
-init_per_testcase(_Tc,Config) ->
+init_per_testcase(_TC,Config) ->
Config.
-
t_sendfile_small(Config) when is_list(Config) ->
Filename = proplists:get_value(small_file, Config),
@@ -124,7 +124,7 @@ t_sendfile_small(Config) when is_list(Config) ->
t_sendfile_many_small(Config) when is_list(Config) ->
Filename = proplists:get_value(small_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
error_logger:add_report_handler(?MODULE,[self()]),
@@ -151,7 +151,7 @@ t_sendfile_many_small(Config) when is_list(Config) ->
t_sendfile_big_all(Config) when is_list(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -165,7 +165,7 @@ t_sendfile_big_all(Config) when is_list(Config) ->
t_sendfile_big_size(Config) ->
Filename = proplists:get_value(big_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
SendAll = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -180,7 +180,7 @@ t_sendfile_big_size(Config) ->
t_sendfile_partial(Config) ->
Filename = proplists:get_value(small_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
SendSingle = fun(Sock) ->
{_Size, <<Data:5/binary,_/binary>>} =
@@ -217,7 +217,7 @@ t_sendfile_partial(Config) ->
t_sendfile_offset(Config) ->
Filename = proplists:get_value(small_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{_Size, <<_:5/binary,Data:3/binary,_/binary>> = AllData} =
@@ -233,7 +233,7 @@ t_sendfile_offset(Config) ->
t_sendfile_sendafter(Config) ->
Filename = proplists:get_value(small_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{Size, Data} = sendfile_file_info(Filename),
@@ -246,7 +246,7 @@ t_sendfile_sendafter(Config) ->
t_sendfile_recvafter(Config) ->
Filename = proplists:get_value(small_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{Size, Data} = sendfile_file_info(Filename),
@@ -279,7 +279,7 @@ t_sendfile_recvafter_remoteclose(Config) ->
t_sendfile_sendduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -296,7 +296,7 @@ t_sendfile_sendduring(Config) ->
t_sendfile_recvduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -315,7 +315,7 @@ t_sendfile_recvduring(Config) ->
t_sendfile_closeduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock,SFServPid) ->
spawn_link(fun() ->
@@ -345,7 +345,7 @@ t_sendfile_closeduring(Config) ->
t_sendfile_crashduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
error_logger:add_report_handler(?MODULE,[self()]),
@@ -373,6 +373,36 @@ t_sendfile_crashduring(Config) ->
end
end.
+t_sendfile_arguments(Config) ->
+ Filename = proplists:get_value(small_file, Config),
+
+ {ok, Listener} = gen_tcp:listen(0,
+ [{packet, 0}, {active, false}, {reuseaddr, true}]),
+ {ok, Port} = inet:port(Listener),
+
+ ErrorCheck =
+ fun(Reason, Offset, Length, Opts) ->
+ {ok, Sender} = gen_tcp:connect({127, 0, 0, 1}, Port,
+ [{packet, 0}, {active, false}]),
+ {ok, Receiver} = gen_tcp:accept(Listener),
+ {ok, Fd} = file:open(Filename, [read, raw]),
+ {error, Reason} = file:sendfile(Fd, Sender, Offset, Length, Opts),
+ gen_tcp:close(Receiver),
+ gen_tcp:close(Sender),
+ file:close(Fd)
+ end,
+
+ ErrorCheck(einval, -1, 0, []),
+ ErrorCheck(einval, 0, -1, []),
+ ErrorCheck(badarg, gurka, 0, []),
+ ErrorCheck(badarg, 0, gurka, []),
+ ErrorCheck(badarg, 0, 0, gurka),
+ ErrorCheck(badarg, 0, 0, [{chunk_size, gurka}]),
+
+ gen_tcp:close(Listener),
+
+ ok.
+
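t_sendfile_arguments/1 above only probes the error returns of file:sendfile/5; for contrast, a successful whole-file transfer over an already connected gen_tcp socket looks roughly like the sketch below. This is an illustration under those assumptions, not suite code.

-module(sendfile_demo).
-export([send/2]).

%% Send all of Filename over Socket. Offset 0 and Bytes 0 mean
%% "from the beginning, until end of file".
send(Filename, Socket) ->
    {ok, Fd} = file:open(Filename, [read, raw, binary]),
    try
        {ok, _BytesSent} = file:sendfile(Fd, Socket, 0, 0, [])
    after
        file:close(Fd)
    end.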
%% Generic sendfile server code
sendfile_send(Send) ->
sendfile_send({127,0,0,1},Send).
diff --git a/lib/kernel/test/seq_trace_SUITE.erl b/lib/kernel/test/seq_trace_SUITE.erl
index 6a63f7bc9c..cf4bf11328 100644
--- a/lib/kernel/test/seq_trace_SUITE.erl
+++ b/lib/kernel/test/seq_trace_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -25,30 +25,29 @@
-export([token_set_get/1, tracer_set_get/1, print/1,
send/1, distributed_send/1, recv/1, distributed_recv/1,
trace_exit/1, distributed_exit/1, call/1, port/1,
- match_set_seq_token/1, gc_seq_token/1]).
+ match_set_seq_token/1, gc_seq_token/1, label_capability_mismatch/1]).
-% internal exports
+%% internal exports
-export([simple_tracer/2, one_time_receiver/0, one_time_receiver/1,
start_tracer/0, stop_tracer/1,
do_match_set_seq_token/1, do_gc_seq_token/1, countdown_start/2]).
- %-define(line_trace, 1).
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-define(TIMESTAMP_MODES, [no_timestamp,
timestamp,
monotonic_timestamp,
strict_monotonic_timestamp]).
--define(default_timeout, ?t:minutes(1)).
-
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[token_set_get, tracer_set_get, print, send,
distributed_send, recv, distributed_recv, trace_exit,
distributed_exit, call, port, match_set_seq_token,
- gc_seq_token].
+ gc_seq_token, label_capability_mismatch].
groups() ->
[].
@@ -67,18 +66,13 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(_Case, Config) ->
- ?line Dog = test_server:timetrap(?default_timeout),
- [{watchdog, Dog}|Config].
+ Config.
-end_per_testcase(_Case, Config) ->
- Dog=?config(watchdog, Config),
- test_server:timetrap_cancel(Dog),
+end_per_testcase(_Case, _Config) ->
ok.
%% Verifies that the set_token and get_token functions work as expected
-token_set_get(doc) -> [];
-token_set_get(suite) -> [];
token_set_get(Config) when is_list(Config) ->
do_token_set_get(timestamp),
do_token_set_get(monotonic_timestamp),
@@ -91,371 +85,436 @@ do_token_set_get(TsType) ->
strict_monotonic_timestamp -> 23;
monotonic_timestamp -> 39
end,
- ?line Self = self(),
- ?line seq_trace:reset_trace(),
+ Self = self(),
+ seq_trace:reset_trace(),
%% Test that initial seq_trace is disabled
- ?line [] = seq_trace:get_token(),
+ [] = seq_trace:get_token(),
%% Test setting and reading the different fields
- ?line 0 = seq_trace:set_token(label,17),
- ?line {label,17} = seq_trace:get_token(label),
- ?line false = seq_trace:set_token(print,true),
- ?line {print,true} = seq_trace:get_token(print),
- ?line false = seq_trace:set_token(send,true),
- ?line {send,true} = seq_trace:get_token(send),
- ?line false = seq_trace:set_token('receive',true),
- ?line {'receive',true} = seq_trace:get_token('receive'),
- ?line false = seq_trace:set_token(TsType,true),
- ?line {TsType,true} = seq_trace:get_token(TsType),
+ 0 = seq_trace:set_token(label,{my_label,1}),
+ {label,{my_label,1}} = seq_trace:get_token(label),
+ false = seq_trace:set_token(print,true),
+ {print,true} = seq_trace:get_token(print),
+ false = seq_trace:set_token(send,true),
+ {send,true} = seq_trace:get_token(send),
+ false = seq_trace:set_token('receive',true),
+ {'receive',true} = seq_trace:get_token('receive'),
+ false = seq_trace:set_token(TsType,true),
+ {TsType,true} = seq_trace:get_token(TsType),
%% Check the whole token
- ?line {Flags,17,0,Self,0} = seq_trace:get_token(), % all flags are set
+ {Flags,{my_label,1},0,Self,0} = seq_trace:get_token(), % all flags are set
%% Test setting and reading the 'serial' field
- ?line {0,0} = seq_trace:set_token(serial,{3,5}),
- ?line {serial,{3,5}} = seq_trace:get_token(serial),
+ {0,0} = seq_trace:set_token(serial,{3,5}),
+ {serial,{3,5}} = seq_trace:get_token(serial),
%% Check the whole token, test that a whole token can be set and get
- ?line {Flags,17,5,Self,3} = seq_trace:get_token(),
- ?line seq_trace:set_token({Flags,19,7,Self,5}),
- ?line {Flags,19,7,Self,5} = seq_trace:get_token(),
+ {Flags,{my_label,1},5,Self,3} = seq_trace:get_token(),
+ seq_trace:set_token({Flags,19,7,Self,5}),
+ {Flags,19,7,Self,5} = seq_trace:get_token(),
%% Check that receive timeout does not reset token
- ?line receive after 0 -> ok end,
- ?line {Flags,19,7,Self,5} = seq_trace:get_token(),
+ receive after 0 -> ok end,
+ {Flags,19,7,Self,5} = seq_trace:get_token(),
%% Check that token can be unset
- ?line {Flags,19,7,Self,5} = seq_trace:set_token([]),
- ?line [] = seq_trace:get_token(),
+ {Flags,19,7,Self,5} = seq_trace:set_token([]),
+ [] = seq_trace:get_token(),
%% Check that Previous serial counter survived unset token
- ?line 0 = seq_trace:set_token(label, 17),
- ?line {0,17,0,Self,5} = seq_trace:get_token(),
+ 0 = seq_trace:set_token(label, 17),
+ {0,17,0,Self,5} = seq_trace:get_token(),
%% Check that reset_trace resets the token and clears
%% the Previous serial counter
- ?line seq_trace:reset_trace(),
- ?line [] = seq_trace:get_token(),
- ?line 0 = seq_trace:set_token(label, 19),
- ?line {0,19,0,Self,0} = seq_trace:get_token(),
+ seq_trace:reset_trace(),
+ [] = seq_trace:get_token(),
+ 0 = seq_trace:set_token(label, 19),
+ {0,19,0,Self,0} = seq_trace:get_token(),
%% Cleanup
- ?line seq_trace:reset_trace(),
+ seq_trace:reset_trace(),
ok.
-tracer_set_get(doc) -> [];
-tracer_set_get(suite) -> [];
tracer_set_get(Config) when is_list(Config) ->
- ?line Self = self(),
- ?line seq_trace:set_system_tracer(self()),
- ?line Self = seq_trace:get_system_tracer(),
- ?line Self = seq_trace:set_system_tracer(false),
- ?line false = seq_trace:get_system_tracer(),
+ Self = self(),
+ seq_trace:set_system_tracer(self()),
+ Self = seq_trace:get_system_tracer(),
+ Self = seq_trace:set_system_tracer(false),
+ false = seq_trace:get_system_tracer(),
%% Set the system tracer to a port.
- ?line Port = load_tracer(Config),
- ?line seq_trace:set_system_tracer(Port),
- ?line Port = seq_trace:get_system_tracer(),
- ?line Port = seq_trace:set_system_tracer(false),
- ?line false = seq_trace:get_system_tracer(),
+ Port = load_tracer(Config),
+ seq_trace:set_system_tracer(Port),
+ Port = seq_trace:get_system_tracer(),
+ Port = seq_trace:set_system_tracer(false),
+ false = seq_trace:get_system_tracer(),
ok.
-print(doc) -> [];
-print(suite) -> [];
print(Config) when is_list(Config) ->
lists:foreach(fun do_print/1, ?TIMESTAMP_MODES).
do_print(TsType) ->
- ?line start_tracer(),
- ?line set_token_flags([print, TsType]),
- ?line seq_trace:print(0,print1),
- ?line seq_trace:print(1,print2),
- ?line seq_trace:print(print3),
- ?line seq_trace:reset_trace(),
- ?line [{0,{print,_,_,[],print1}, Ts0},
+ start_tracer(),
+ set_token_flags([print, TsType]),
+ seq_trace:print(0,print1),
+ seq_trace:print(1,print2),
+ seq_trace:print(print3),
+ seq_trace:reset_trace(),
+ [{0,{print,_,_,[],print1}, Ts0},
{0,{print,_,_,[],print3}, Ts1}] = stop_tracer(2),
check_ts(TsType, Ts0),
check_ts(TsType, Ts1).
-send(doc) -> [];
-send(suite) -> [];
send(Config) when is_list(Config) ->
lists:foreach(fun do_send/1, ?TIMESTAMP_MODES).
do_send(TsType) ->
- ?line seq_trace:reset_trace(),
- ?line start_tracer(),
- ?line Receiver = spawn(?MODULE,one_time_receiver,[]),
- ?line set_token_flags([send, TsType]),
- ?line Receiver ! send,
- ?line Self = self(),
- ?line seq_trace:reset_trace(),
- ?line [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
+ seq_trace:reset_trace(),
+ start_tracer(),
+ Receiver = spawn(?MODULE,one_time_receiver,[]),
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
+ set_token_flags([send, TsType]),
+ Receiver ! send,
+ Self = self(),
+ seq_trace:reset_trace(),
+ [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
check_ts(TsType, Ts).
-distributed_send(doc) -> [];
-distributed_send(suite) -> [];
distributed_send(Config) when is_list(Config) ->
lists:foreach(fun do_distributed_send/1, ?TIMESTAMP_MODES).
do_distributed_send(TsType) ->
- ?line {ok,Node} = start_node(seq_trace_other,[]),
- ?line {_,Dir} = code:is_loaded(?MODULE),
- ?line Mdir = filename:dirname(Dir),
- ?line true = rpc:call(Node,code,add_patha,[Mdir]),
- ?line seq_trace:reset_trace(),
- ?line start_tracer(),
- ?line Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
- ?line set_token_flags([send,TsType]),
- ?line Receiver ! send,
- ?line Self = self(),
- ?line seq_trace:reset_trace(),
- ?line stop_node(Node),
- ?line [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
+ {ok,Node} = start_node(seq_trace_other,[]),
+ {_,Dir} = code:is_loaded(?MODULE),
+ Mdir = filename:dirname(Dir),
+ true = rpc:call(Node,code,add_patha,[Mdir]),
+ seq_trace:reset_trace(),
+ start_tracer(),
+ Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% Make sure complex labels survive the trip.
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
+ set_token_flags([send,TsType]),
+
+ Receiver ! send,
+ Self = self(),
+ seq_trace:reset_trace(),
+ stop_node(Node),
+ [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
check_ts(TsType, Ts).
-
-recv(doc) -> [];
-recv(suite) -> [];
+
recv(Config) when is_list(Config) ->
lists:foreach(fun do_recv/1, ?TIMESTAMP_MODES).
do_recv(TsType) ->
- ?line seq_trace:reset_trace(),
- ?line start_tracer(),
- ?line Receiver = spawn(?MODULE,one_time_receiver,[]),
- ?line set_token_flags(['receive',TsType]),
- ?line Receiver ! 'receive',
+ seq_trace:reset_trace(),
+ start_tracer(),
+ Receiver = spawn(?MODULE,one_time_receiver,[]),
+ set_token_flags(['receive',TsType]),
+ Receiver ! 'receive',
%% let the other process receive the message:
- ?line receive after 1 -> ok end,
- ?line Self = self(),
- ?line seq_trace:reset_trace(),
- ?line [{0,{'receive',_,Self,Receiver,'receive'}, Ts}] = stop_tracer(1),
+ receive after 1 -> ok end,
+ Self = self(),
+ seq_trace:reset_trace(),
+ [{0,{'receive',_,Self,Receiver,'receive'}, Ts}] = stop_tracer(1),
check_ts(TsType, Ts).
-distributed_recv(doc) -> [];
-distributed_recv(suite) -> [];
distributed_recv(Config) when is_list(Config) ->
lists:foreach(fun do_distributed_recv/1, ?TIMESTAMP_MODES).
do_distributed_recv(TsType) ->
- ?line {ok,Node} = start_node(seq_trace_other,[]),
- ?line {_,Dir} = code:is_loaded(?MODULE),
- ?line Mdir = filename:dirname(Dir),
- ?line true = rpc:call(Node,code,add_patha,[Mdir]),
- ?line seq_trace:reset_trace(),
- ?line rpc:call(Node,?MODULE,start_tracer,[]),
- ?line Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
- ?line set_token_flags(['receive',TsType]),
- ?line Receiver ! 'receive',
+ {ok,Node} = start_node(seq_trace_other,[]),
+ {_,Dir} = code:is_loaded(?MODULE),
+ Mdir = filename:dirname(Dir),
+ true = rpc:call(Node,code,add_patha,[Mdir]),
+ seq_trace:reset_trace(),
+ rpc:call(Node,?MODULE,start_tracer,[]),
+ Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% Make sure complex labels survive the trip.
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
+ set_token_flags(['receive',TsType]),
+
+ Receiver ! 'receive',
%% let the other process receive the message:
- ?line receive after 1 -> ok end,
- ?line Self = self(),
- ?line seq_trace:reset_trace(),
- ?line Result = rpc:call(Node,?MODULE,stop_tracer,[1]),
- ?line stop_node(Node),
- ?line ok = io:format("~p~n",[Result]),
- ?line [{0,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result,
+ receive after 1 -> ok end,
+ Self = self(),
+ seq_trace:reset_trace(),
+ Result = rpc:call(Node,?MODULE,stop_tracer,[1]),
+ stop_node(Node),
+ ok = io:format("~p~n",[Result]),
+ [{Label,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result,
check_ts(TsType, Ts).
-trace_exit(doc) -> [];
-trace_exit(suite) -> [];
trace_exit(Config) when is_list(Config) ->
lists:foreach(fun do_trace_exit/1, ?TIMESTAMP_MODES).
do_trace_exit(TsType) ->
- ?line seq_trace:reset_trace(),
- ?line start_tracer(),
- ?line Receiver = spawn_link(?MODULE, one_time_receiver, [exit]),
- ?line process_flag(trap_exit, true),
- ?line set_token_flags([send, TsType]),
- ?line Receiver ! {before, exit},
+ seq_trace:reset_trace(),
+ start_tracer(),
+ Receiver = spawn_link(?MODULE, one_time_receiver, [exit]),
+ process_flag(trap_exit, true),
+
+ %% Make sure complex labels survive the trip.
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
+ set_token_flags([send, TsType]),
+
+ Receiver ! {before, exit},
%% let the other process receive the message:
- ?line receive
+ receive
{'EXIT', Receiver, {exit, {before, exit}}} ->
seq_trace:set_token([]);
Other ->
seq_trace:set_token([]),
- ?t:fail({received, Other})
+ ct:fail({received, Other})
end,
- ?line Self = self(),
- ?line Result = stop_tracer(2),
- ?line seq_trace:reset_trace(),
- ?line ok = io:format("~p~n", [Result]),
- ?line [{0, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0},
- {0, {send, {1,2}, Receiver, Self,
+ Self = self(),
+ Result = stop_tracer(2),
+ seq_trace:reset_trace(),
+ ok = io:format("~p~n", [Result]),
+ [{Label, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0},
+ {Label, {send, {1,2}, Receiver, Self,
{'EXIT', Receiver, {exit, {before, exit}}}}, Ts1}] = Result,
check_ts(TsType, Ts0),
check_ts(TsType, Ts1).
-distributed_exit(doc) -> [];
-distributed_exit(suite) -> [];
distributed_exit(Config) when is_list(Config) ->
lists:foreach(fun do_distributed_exit/1, ?TIMESTAMP_MODES).
do_distributed_exit(TsType) ->
- ?line {ok, Node} = start_node(seq_trace_other, []),
- ?line {_, Dir} = code:is_loaded(?MODULE),
- ?line Mdir = filename:dirname(Dir),
- ?line true = rpc:call(Node, code, add_patha, [Mdir]),
- ?line seq_trace:reset_trace(),
- ?line rpc:call(Node, ?MODULE, start_tracer,[]),
- ?line Receiver = spawn_link(Node, ?MODULE, one_time_receiver, [exit]),
- ?line process_flag(trap_exit, true),
- ?line set_token_flags([send, TsType]),
- ?line Receiver ! {before, exit},
+ {ok, Node} = start_node(seq_trace_other, []),
+ {_, Dir} = code:is_loaded(?MODULE),
+ Mdir = filename:dirname(Dir),
+ true = rpc:call(Node, code, add_patha, [Mdir]),
+ seq_trace:reset_trace(),
+ rpc:call(Node, ?MODULE, start_tracer,[]),
+ Receiver = spawn_link(Node, ?MODULE, one_time_receiver, [exit]),
+ process_flag(trap_exit, true),
+ set_token_flags([send, TsType]),
+ Receiver ! {before, exit},
%% let the other process receive the message:
- ?line receive
+ receive
{'EXIT', Receiver, {exit, {before, exit}}} ->
seq_trace:set_token([]);
Other ->
seq_trace:set_token([]),
- ?t:fail({received, Other})
+ ct:fail({received, Other})
end,
- ?line Self = self(),
- ?line Result = rpc:call(Node, ?MODULE, stop_tracer, [1]),
- ?line seq_trace:reset_trace(),
- ?line stop_node(Node),
- ?line ok = io:format("~p~n", [Result]),
- ?line [{0, {send, {1, 2}, Receiver, Self,
+ Self = self(),
+ Result = rpc:call(Node, ?MODULE, stop_tracer, [1]),
+ seq_trace:reset_trace(),
+ stop_node(Node),
+ ok = io:format("~p~n", [Result]),
+ [{0, {send, {1, 2}, Receiver, Self,
{'EXIT', Receiver, {exit, {before, exit}}}}, Ts}] = Result,
check_ts(TsType, Ts).
+label_capability_mismatch(Config) when is_list(Config) ->
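+    %% The releases listed below do not support arbitrary-term labels
+    %% (only small integers), so they are used to exercise both the
+    %% incompatible and the compatible case.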
+ Releases = ["20_latest"],
+ Available = [Rel || Rel <- Releases, test_server:is_release_available(Rel)],
+ case Available of
+ [] -> {skipped, "No incompatible releases available"};
+ _ ->
+ lists:foreach(fun do_incompatible_labels/1, Available),
+ lists:foreach(fun do_compatible_labels/1, Available),
+ ok
+ end.
+
+do_incompatible_labels(Rel) ->
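+    %% Start a peer node running the old release Rel.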
+ Cookie = atom_to_list(erlang:get_cookie()),
+ {ok, Node} = test_server:start_node(
+ list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer,
+ [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]),
+
+ {_,Dir} = code:is_loaded(?MODULE),
+ Mdir = filename:dirname(Dir),
+ true = rpc:call(Node,code,add_patha,[Mdir]),
+ seq_trace:reset_trace(),
+ rpc:call(Node,?MODULE,start_tracer,[]),
+ Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% This node does not support arbitrary labels, so it must fail with a
+ %% timeout as the token is dropped silently.
+ seq_trace:set_token(label,make_ref()),
+ seq_trace:set_token('receive',true),
+
+ Receiver ! 'receive',
+ %% let the other process receive the message:
+ receive after 10 -> ok end,
+ seq_trace:reset_trace(),
+
+ {error,timeout} = rpc:call(Node,?MODULE,stop_tracer,[1]),
+ stop_node(Node),
+ ok.
+
+do_compatible_labels(Rel) ->
+ Cookie = atom_to_list(erlang:get_cookie()),
+ {ok, Node} = test_server:start_node(
+ list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer,
+ [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]),
+
+ {_,Dir} = code:is_loaded(?MODULE),
+ Mdir = filename:dirname(Dir),
+ true = rpc:call(Node,code,add_patha,[Mdir]),
+ seq_trace:reset_trace(),
+ rpc:call(Node,?MODULE,start_tracer,[]),
+ Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% This node does not support arbitrary labels, but small integers should
+ %% still work.
+ Label = 1234,
+ seq_trace:set_token(label,Label),
+ seq_trace:set_token('receive',true),
+
+ Receiver ! 'receive',
+ %% let the other process receive the message:
+ receive after 10 -> ok end,
+ Self = self(),
+ seq_trace:reset_trace(),
+ Result = rpc:call(Node,?MODULE,stop_tracer,[1]),
+ stop_node(Node),
+ ok = io:format("~p~n",[Result]),
+ [{Label,{'receive',_,Self,Receiver,'receive'}, _}] = Result,
+ ok.
+
call(doc) ->
"Tests special forms {is_seq_trace} and {get_seq_token} "
"in trace match specs.";
-call(suite) ->
- [];
call(Config) when is_list(Config) ->
- ?line Self = self(),
- ?line seq_trace:reset_trace(),
- ?line TrA = transparent_tracer(),
- ?line 1 =
+ Self = self(),
+ seq_trace:reset_trace(),
+ TrA = transparent_tracer(),
+ 1 =
erlang:trace(Self, true,
[call, set_on_spawn, {tracer, TrA(pid)}]),
- ?line 1 =
+ 1 =
erlang:trace_pattern({?MODULE, call_tracee_1, 1},
[{'_',
[],
[{message, {{{self}, {get_seq_token}}}}]}],
[local]),
- ?line 1 =
+ 1 =
erlang:trace_pattern({?MODULE, call_tracee_2, 1},
[{'_',
[{is_seq_trace}],
[{message, {{{self}, {get_seq_token}}}}]}],
[local]),
- ?line RefA = make_ref(),
- ?line Pid2A = spawn_link(
+ RefA = make_ref(),
+ Pid2A = spawn_link(
fun() ->
receive {_, msg, RefA} -> ok end,
RefA = call_tracee_2(RefA),
Self ! {self(), msg, RefA}
end),
- ?line Pid1A = spawn_link(
+ Pid1A = spawn_link(
fun() ->
receive {_, msg, RefA} -> ok end,
RefA = call_tracee_1(RefA),
Pid2A ! {self(), msg, RefA}
end),
- ?line Pid1A ! {Self, msg, RefA},
+ Pid1A ! {Self, msg, RefA},
%% The message is passed Self -> Pid1B -> Pid2B -> Self.
%% Traced functions are called in Pid1B and Pid2B.
- ?line receive {Pid2A, msg, RefA} -> ok end,
+ receive {Pid2A, msg, RefA} -> ok end,
%% Only call_tracee1 will be traced since the guard for
%% call_tracee2 requires a sequential trace. The trace
%% token is undefined.
- ?line Token2A = [],
- ?line {ok, [{trace, Pid1A, call,
+ Token2A = [],
+ {ok, [{trace, Pid1A, call,
{?MODULE, call_tracee_1, [RefA]},
{Pid1A, Token2A}}]} =
TrA({stop, 1}),
- ?line seq_trace:reset_trace(),
+ seq_trace:reset_trace(),
- ?line TrB = transparent_tracer(),
- ?line 1 =
+ TrB = transparent_tracer(),
+ 1 =
erlang:trace(Self, true,
[call, set_on_spawn, {tracer, TrB(pid)}]),
- ?line Label = 17,
- ?line seq_trace:set_token(label, Label), % Token enters here!!
- ?line RefB = make_ref(),
- ?line Pid2B = spawn_link(
+ Label = 17,
+ seq_trace:set_token(label, Label), % Token enters here!!
+ RefB = make_ref(),
+ Pid2B = spawn_link(
fun() ->
receive {_, msg, RefB} -> ok end,
RefB = call_tracee_2(RefB),
Self ! {self(), msg, RefB}
end),
- ?line Pid1B = spawn_link(
+ Pid1B = spawn_link(
fun() ->
receive {_, msg, RefB} -> ok end,
RefB = call_tracee_1(RefB),
Pid2B ! {self(), msg, RefB}
end),
- ?line Pid1B ! {Self, msg, RefB},
+ Pid1B ! {Self, msg, RefB},
%% The message is passed Self -> Pid1B -> Pid2B -> Self, and the
%% seq_trace token follows invisibly. Traced functions are
%% called in Pid1B and Pid2B. Seq_trace flags == 0 so no
%% seq_trace messages are generated.
- ?line receive {Pid2B, msg, RefB} -> ok end,
+ receive {Pid2B, msg, RefB} -> ok end,
%% The values of these counters {.., 1, _, 0}, {.., 2, _, 1}
%% depend on that seq_trace has been reset just before this test.
- ?line Token1B = {0, Label, 1, Self, 0},
- ?line Token2B = {0, Label, 2, Pid1B, 1},
- ?line {ok, [{trace, Pid1B, call,
+ Token1B = {0, Label, 1, Self, 0},
+ Token2B = {0, Label, 2, Pid1B, 1},
+ {ok, [{trace, Pid1B, call,
{?MODULE, call_tracee_1, [RefB]},
{Pid1B, Token1B}},
{trace, Pid2B, call,
{?MODULE, call_tracee_2, [RefB]},
{Pid2B, Token2B}}]} =
TrB({stop,2}),
- ?line seq_trace:reset_trace(),
+ seq_trace:reset_trace(),
ok.
-port(doc) ->
- "Send trace messages to a port.";
-port(suite) -> [];
+%% Send trace messages to a port.
port(Config) when is_list(Config) ->
lists:foreach(fun (TsType) -> do_port(TsType, Config) end,
?TIMESTAMP_MODES).
do_port(TsType, Config) ->
io:format("Testing ~p~n",[TsType]),
- ?line Port = load_tracer(Config),
- ?line seq_trace:set_system_tracer(Port),
+ Port = load_tracer(Config),
+ seq_trace:set_system_tracer(Port),
- ?line set_token_flags([print, TsType]),
- ?line Small = [small,term],
- ?line seq_trace:print(0, Small),
- ?line case get_port_message(Port) of
+ set_token_flags([print, TsType]),
+ Small = [small,term],
+ seq_trace:print(0, Small),
+ case get_port_message(Port) of
{seq_trace,0,{print,_,_,[],Small}} when TsType == no_timestamp ->
ok;
{seq_trace,0,{print,_,_,[],Small},Ts0} when TsType /= no_timestamp ->
check_ts(TsType, Ts0),
ok;
Other ->
- ?line seq_trace:reset_trace(),
- ?line ?t:fail({unexpected,Other})
+ seq_trace:reset_trace(),
+ ct:fail({unexpected,Other})
end,
%% OTP-4218 Messages from ports should not affect seq trace token.
%%
%% Check if trace token still is active on this process after
%% the get_port_message/1 above that receives from a port.
- ?line OtherSmall = [other | Small],
- ?line seq_trace:print(0, OtherSmall),
- ?line seq_trace:reset_trace(),
- ?line case get_port_message(Port) of
+ OtherSmall = [other | Small],
+ seq_trace:print(0, OtherSmall),
+ seq_trace:reset_trace(),
+ case get_port_message(Port) of
{seq_trace,0,{print,_,_,[],OtherSmall}} when TsType == no_timestamp ->
ok;
{seq_trace,0,{print,_,_,[],OtherSmall}, Ts1} when TsType /= no_timestamp ->
check_ts(TsType, Ts1),
ok;
Other1 ->
- ?line ?t:fail({unexpected,Other1})
+ ct:fail({unexpected,Other1})
end,
- ?line seq_trace:set_token(print, true),
- ?line Huge = huge_data(),
- ?line seq_trace:print(0, Huge),
- ?line seq_trace:reset_trace(),
- ?line case get_port_message(Port) of
+ seq_trace:set_token(print, true),
+ Huge = huge_data(),
+ seq_trace:print(0, Huge),
+ seq_trace:reset_trace(),
+ case get_port_message(Port) of
{seq_trace,0,{print,_,_,[],Huge}} ->
ok;
Other2 ->
- ?line ?t:fail({unexpected,Other2})
+ ct:fail({unexpected,Other2})
end,
unlink(Port),
exit(Port,kill),
@@ -466,21 +525,19 @@ get_port_message(Port) ->
{Port,{data,Bin}} when is_binary(Bin) ->
binary_to_term(Bin);
Other ->
- ?t:fail({unexpected,Other})
+ ct:fail({unexpected,Other})
after 5000 ->
- ?t:fail(timeout)
+ ct:fail(timeout)
end.
-match_set_seq_token(suite) ->
- [];
match_set_seq_token(doc) ->
["Tests that match spec function set_seq_token does not "
"corrupt the heap"];
match_set_seq_token(Config) when is_list(Config) ->
- ?line Parent = self(),
- ?line Timetrap = test_server:timetrap(test_server:seconds(20)),
+ Parent = self(),
+
%% OTP-4222 Match spec 'set_seq_token' corrupts heap
%%
%% This test crashes the emulator if the bug in question is present,
@@ -488,13 +545,13 @@ match_set_seq_token(Config) when is_list(Config) ->
%%
%% All the timeout stuff is here to get decent accuracy of the error
%% return value, instead of just 'timeout'.
- %
- ?line {ok, Sandbox} = start_node(seq_trace_other, []),
- ?line true = rpc:call(Sandbox, code, add_patha,
+ %%
+ {ok, Sandbox} = start_node(seq_trace_other, []),
+ true = rpc:call(Sandbox, code, add_patha,
[filename:dirname(code:which(?MODULE))]),
- ?line Lbl = 4711,
+ Lbl = 4711,
%% Do the possibly crashing test
- ?line P1 =
+ P1 =
spawn(
fun () ->
Parent ! {self(),
@@ -502,16 +559,16 @@ match_set_seq_token(Config) when is_list(Config) ->
?MODULE, do_match_set_seq_token, [Lbl])}
end),
%% Probe the node with a simple rpc request, to see if it is alive.
- ?line P2 =
+ P2 =
spawn(
fun () ->
receive after 4000 -> ok end,
Parent ! {self(), rpc:call(Sandbox, erlang, abs, [-1])}
end),
%% If the test node hangs completely, this timer expires.
- ?line R3 = erlang:start_timer(8000, self(), void),
+ R3 = erlang:start_timer(8000, self(), void),
%%
- ?line {ok, Log} =
+ {ok, Log} =
receive
{P1, Result} ->
exit(P2, done),
@@ -526,10 +583,15 @@ match_set_seq_token(Config) when is_list(Config) ->
exit(P2, timeout),
{error, "Test node hung"}
end,
- ?line ok = check_match_set_seq_token_log(Lbl, Log),
+
+    %% Sort the log on Pid, as events from different processes are not
+    %% guaranteed to arrive at the tracer in any particular order.
+ SortedLog = lists:keysort(2, Log),
+
+ ok = check_match_set_seq_token_log(Lbl, SortedLog),
%%
- ?line stop_node(Sandbox),
- ?line test_server:timetrap_cancel(Timetrap),
+ stop_node(Sandbox),
ok.
%% OTP-4222 Match spec 'set_seq_token' corrupts heap
@@ -580,13 +642,13 @@ do_match_set_seq_token(Label) ->
check_match_set_seq_token_log(
Label,
- [{trace,C,call,{?MODULE,countdown,[B,Ref]}, {0,Label,0,C,0}},
+ [{trace,B,call,{?MODULE,bounce, [Ref]}, {0,Label,2,B,1}},
+ {trace,B,call,{?MODULE,bounce, [Ref]}, {0,Label,4,B,3}},
+ {trace,B,call,{?MODULE,bounce, [Ref]}, {0,Label,6,B,5}},
+ {trace,C,call,{?MODULE,countdown,[B,Ref]}, {0,Label,0,C,0}},
{trace,C,call,{?MODULE,countdown,[B,Ref,3]},{0,Label,0,C,0}},
- {trace,B,call,{?MODULE,bounce, [Ref]}, {0,Label,2,B,1}},
{trace,C,call,{?MODULE,countdown,[B,Ref,2]},{0,Label,2,B,1}},
- {trace,B,call,{?MODULE,bounce, [Ref]}, {0,Label,4,B,3}},
{trace,C,call,{?MODULE,countdown,[B,Ref,1]},{0,Label,4,B,3}},
- {trace,B,call,{?MODULE,bounce, [Ref]}, {0,Label,6,B,5}},
{trace,C,call,{?MODULE,countdown,[B,Ref,0]},{0,Label,6,B,5}}
]) ->
ok;
@@ -621,14 +683,12 @@ bounce(Ref) ->
-gc_seq_token(suite) ->
- [];
gc_seq_token(doc) ->
["Tests that a seq_trace token on a message in the inqueue ",
"can be garbage collected."];
gc_seq_token(Config) when is_list(Config) ->
- ?line Parent = self(),
- ?line Timetrap = test_server:timetrap(test_server:seconds(20)),
+ Parent = self(),
+
%% OTP-4555 Seq trace token causes free mem read in gc
%%
%% This test crashes the emulator if the bug in question is present,
@@ -636,13 +696,13 @@ gc_seq_token(Config) when is_list(Config) ->
%%
%% All the timeout stuff is here to get decent accuracy of the error
%% return value, instead of just 'timeout'.
- %
- ?line {ok, Sandbox} = start_node(seq_trace_other, []),
- ?line true = rpc:call(Sandbox, code, add_patha,
+ %%
+ {ok, Sandbox} = start_node(seq_trace_other, []),
+ true = rpc:call(Sandbox, code, add_patha,
[filename:dirname(code:which(?MODULE))]),
- ?line Label = 4711,
+ Label = 4711,
%% Do the possibly crashing test
- ?line P1 =
+ P1 =
spawn(
fun () ->
Parent ! {self(),
@@ -650,16 +710,16 @@ gc_seq_token(Config) when is_list(Config) ->
?MODULE, do_gc_seq_token, [Label])}
end),
%% Probe the node with a simple rpc request, to see if it is alive.
- ?line P2 =
+ P2 =
spawn(
fun () ->
receive after 4000 -> ok end,
Parent ! {self(), rpc:call(Sandbox, erlang, abs, [-1])}
end),
%% If the test node hangs completely, this timer expires.
- ?line R3 = erlang:start_timer(8000, self(), void),
+ R3 = erlang:start_timer(8000, self(), void),
%%
- ?line ok =
+ ok =
receive
{P1, Result} ->
exit(P2, done),
@@ -675,8 +735,7 @@ gc_seq_token(Config) when is_list(Config) ->
{error, "Test node hung"}
end,
%%
- ?line stop_node(Sandbox),
- ?line test_server:timetrap_cancel(Timetrap),
+ stop_node(Sandbox),
ok.
do_gc_seq_token(Label) ->
@@ -724,6 +783,24 @@ do_shrink(N) ->
erlang:garbage_collect(),
do_shrink(N-1).
+%% Test that messages from a port do not clear the token.
+port_clean_token(Config) when is_list(Config) ->
+ seq_trace:reset_trace(),
+ Label = make_ref(),
+ seq_trace:set_token(label, Label),
+ {label,Label} = seq_trace:get_token(label),
+
+    %% Create a port and receive messages from it. We use os:cmd/1 as a
+    %% convenience since it performs open_port, port_command and
+    %% port_close, and receives the replies. Relying on the internal
+    %% implementation of os:cmd/1 is not ideal, but it will have to do.
+ os:cmd("ls"),
+
+ %% Make sure that the seq_trace token is still there
+ {label,Label} = seq_trace:get_token(label),
+
+ ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -819,7 +896,6 @@ start_tracer() ->
seq_trace:set_system_tracer(Pid),
Pid.
-
set_token_flags([]) ->
ok;
set_token_flags([no_timestamp|Flags]) ->
@@ -836,7 +912,7 @@ check_ts(no_timestamp, Ts) ->
no_timestamp = Ts
catch
_ : _ ->
- ?t:fail({unexpected_timestamp, Ts})
+ ct:fail({unexpected_timestamp, Ts})
end,
ok;
check_ts(timestamp, Ts) ->
@@ -847,7 +923,7 @@ check_ts(timestamp, Ts) ->
true = is_integer(Us)
catch
_ : _ ->
- ?t:fail({unexpected_timestamp, Ts})
+ ct:fail({unexpected_timestamp, Ts})
end,
ok;
check_ts(monotonic_timestamp, Ts) ->
@@ -855,7 +931,7 @@ check_ts(monotonic_timestamp, Ts) ->
true = is_integer(Ts)
catch
_ : _ ->
- ?t:fail({unexpected_timestamp, Ts})
+ ct:fail({unexpected_timestamp, Ts})
end,
ok;
check_ts(strict_monotonic_timestamp, Ts) ->
@@ -865,10 +941,10 @@ check_ts(strict_monotonic_timestamp, Ts) ->
true = is_integer(UMI)
catch
_ : _ ->
- ?t:fail({unexpected_timestamp, Ts})
+ ct:fail({unexpected_timestamp, Ts})
end,
ok.
-
+
start_node(Name, Param) ->
test_server:start_node(Name, slave, [{args, Param}]).
@@ -876,7 +952,7 @@ stop_node(Node) ->
test_server:stop_node(Node).
load_tracer(Config) ->
- Path = ?config(data_dir, Config),
+ Path = proplists:get_value(data_dir, Config),
ok = erl_ddll:load_driver(Path, echo_drv),
open_port({spawn,echo_drv}, [eof,binary]).
diff --git a/lib/kernel/test/standard_error_SUITE.erl b/lib/kernel/test/standard_error_SUITE.erl
index 97ead9b9fd..1d9026dc58 100644
--- a/lib/kernel/test/standard_error_SUITE.erl
+++ b/lib/kernel/test/standard_error_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2014. All Rights Reserved.
+%% Copyright Ericsson AB 2014-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/topApp.erl b/lib/kernel/test/topApp.erl
index 597268a5a2..6ea957fdc1 100644
--- a/lib/kernel/test/topApp.erl
+++ b/lib/kernel/test/topApp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/topApp2.erl b/lib/kernel/test/topApp2.erl
index 69d5c047be..aeb024f7a7 100644
--- a/lib/kernel/test/topApp2.erl
+++ b/lib/kernel/test/topApp2.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/topApp3.erl b/lib/kernel/test/topApp3.erl
index e892aadcd0..917da3dad4 100644
--- a/lib/kernel/test/topApp3.erl
+++ b/lib/kernel/test/topApp3.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2010. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
diff --git a/lib/kernel/test/wrap_log_reader_SUITE.erl b/lib/kernel/test/wrap_log_reader_SUITE.erl
index 9a93b9037f..59b088ca73 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@
-module(wrap_log_reader_SUITE).
-%-define(debug, true).
+%%-define(debug, true).
-ifdef(debug).
-define(format(S, A), io:format(S, A)).
@@ -29,9 +29,9 @@
-define(config(X,Y), foo).
-define(t,test_server).
-else.
--include_lib("test_server/include/test_server.hrl").
+-include_lib("common_test/include/ct.hrl").
-define(format(S, A), ok).
--define(privdir(Conf), ?config(priv_dir, Conf)).
+-define(privdir(Conf), proplists:get_value(priv_dir, Conf)).
-endif.
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -47,7 +47,9 @@
-export([init_per_testcase/2, end_per_testcase/2]).
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
[no_file, {group, one}, {group, two}, {group, four},
@@ -71,75 +73,70 @@ end_per_group(_GroupName, Config) ->
Config.
-init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
- Dog=?t:timetrap(?t:seconds(60)),
- [{watchdog, Dog} | Config].
+init_per_testcase(_Func, Config) ->
+    Config.
end_per_testcase(_Func, _Config) ->
- Dog=?config(watchdog, _Config),
- ?t:timetrap_cancel(Dog).
+ ok.
-no_file(suite) -> [];
-no_file(doc) -> ["No log file exists"];
+%% No log file exists.
no_file(Conf) when is_list(Conf) ->
- ?line code:add_path(?config(data_dir,Conf)),
+ code:add_path(proplists:get_value(data_dir,Conf)),
Dir = ?privdir(Conf),
File = join(Dir, "sune.LOG"),
delete_files(File),
start(),
wlt ! {open, self(), File},
- ?line rec({error, {index_file_not_found, File}}, ?LINE),
+ rec({error, {index_file_not_found, File}}, ?LINE),
wlt ! {open, self(), File, 1},
- ?line rec({error, {index_file_not_found, File}}, ?LINE),
+ rec({error, {index_file_not_found, File}}, ?LINE),
wlt ! {open, self(), File, 4},
- ?line rec({error, {index_file_not_found, File}}, ?LINE),
+ rec({error, {index_file_not_found, File}}, ?LINE),
stop(),
delete_files(File),
ok.
-one_empty(suite) -> [];
-one_empty(doc) -> ["One empty index file"];
+%% One empty index file.
one_empty(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = join(Dir, "sune.LOG"),
delete_files(File),
start(),
- ?line open(sune, File, ?LINE),
+ open(sune, File, ?LINE),
%% open
- ?line do_chunk([{open,File}, eof], wlt, ?LINE),
- ?line do_chunk([{open,File,1}, eof], wlt, ?LINE),
+ do_chunk([{open,File}, eof], wlt, ?LINE),
+ do_chunk([{open,File,1}, eof], wlt, ?LINE),
wlt ! {open, self(), File, 2},
- ?line rec({error, {file_not_found, add_ext(File, 2)}}, ?LINE),
- ?line close(sune),
+ rec({error, {file_not_found, add_ext(File, 2)}}, ?LINE),
+ close(sune),
%% closed
- ?line do_chunk([{open,File}, eof], wlt, ?LINE),
- ?line do_chunk([{open,File,1}, eof], wlt, ?LINE),
+ do_chunk([{open,File}, eof], wlt, ?LINE),
+ do_chunk([{open,File,1}, eof], wlt, ?LINE),
wlt ! {open, self(), File, 2},
- ?line rec({error, {file_not_found, add_ext(File, 2)}}, ?LINE),
+ rec({error, {file_not_found, add_ext(File, 2)}}, ?LINE),
stop(),
delete_files(File),
ok.
-one_filled(suite) -> [];
-one_filled(doc) -> ["One filled index file"];
+%% One filled index file.
one_filled(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = join(Dir, "sune.LOG"),
delete_files(File),
start(),
- ?line open(sune, File, ?LINE),
- ?line log_terms(sune, ["first round, one", "first round, two"]),
- ?line sync(sune),
+ open(sune, File, ?LINE),
+ log_terms(sune, ["first round, one", "first round, two"]),
+ sync(sune),
%% open
test_one(File),
- ?line close(sune),
+ close(sune),
%% closed
test_one(File),
@@ -148,34 +145,33 @@ one_filled(Conf) when is_list(Conf) ->
ok.
test_one(File) ->
- ?line do_chunk([{open,File},
- {chunk, ["first round, one", "first round, two"]},
- eof], wlt, ?LINE),
- ?line do_chunk([{open,File,1},
- {chunk, ["first round, one", "first round, two"]},
- eof], wlt, ?LINE),
+ do_chunk([{open,File},
+ {chunk, ["first round, one", "first round, two"]},
+ eof], wlt, ?LINE),
+ do_chunk([{open,File,1},
+ {chunk, ["first round, one", "first round, two"]},
+ eof], wlt, ?LINE),
wlt ! {open, self(), File, 2},
- ?line rec({error, {file_not_found, add_ext(File, 2)}}, ?LINE),
- ?line do_chunk([{open,File,1}, {chunk, 1, ["first round, one"]},
- {chunk, 1, ["first round, two"]}, eof], wlt, ?LINE),
+ rec({error, {file_not_found, add_ext(File, 2)}}, ?LINE),
+ do_chunk([{open,File,1}, {chunk, 1, ["first round, one"]},
+ {chunk, 1, ["first round, two"]}, eof], wlt, ?LINE),
ok.
-two_filled(suite) -> [];
-two_filled(doc) -> ["Two filled index files"];
+%% Two filled index files.
two_filled(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = list_to_atom(join(Dir, "sune.LOG")),
delete_files(File),
start(),
- ?line open(sune, File, ?LINE),
- ?line log_terms(sune, ["first round, 11", "first round, 12"]),
- ?line log_terms(sune, ["first round, 21", "first round, 22"]),
- ?line sync(sune),
+ open(sune, File, ?LINE),
+ log_terms(sune, ["first round, 11", "first round, 12"]),
+ log_terms(sune, ["first round, 21", "first round, 22"]),
+ sync(sune),
%% open
test_two(File),
- ?line close(sune),
+ close(sune),
%% closed
test_two(File),
@@ -184,37 +180,36 @@ two_filled(Conf) when is_list(Conf) ->
ok.
test_two(File) ->
- ?line do_chunk([{open,File},
- {chunk, infinity, ["first round, 11", "first round, 12"]},
- {chunk, ["first round, 21", "first round, 22"]},
- eof], wlt, ?LINE),
- ?line do_chunk([{open,File,1},
- {chunk, ["first round, 11", "first round, 12"]},
- eof], wlt, ?LINE),
- ?line do_chunk([{open,File,2},
- {chunk, ["first round, 21", "first round, 22"]},
- eof], wlt, ?LINE),
+ do_chunk([{open,File},
+ {chunk, infinity, ["first round, 11", "first round, 12"]},
+ {chunk, ["first round, 21", "first round, 22"]},
+ eof], wlt, ?LINE),
+ do_chunk([{open,File,1},
+ {chunk, ["first round, 11", "first round, 12"]},
+ eof], wlt, ?LINE),
+ do_chunk([{open,File,2},
+ {chunk, ["first round, 21", "first round, 22"]},
+ eof], wlt, ?LINE),
wlt ! {open, self(), File, 3},
- ?line rec({error, {file_not_found, add_ext(File, 3)}}, ?LINE),
- ?line do_chunk([{open,File,1}, {chunk, 1, ["first round, 11"]},
- {chunk, 2, ["first round, 12"]}, eof], wlt, ?LINE),
+ rec({error, {file_not_found, add_ext(File, 3)}}, ?LINE),
+ do_chunk([{open,File,1}, {chunk, 1, ["first round, 11"]},
+ {chunk, 2, ["first round, 12"]}, eof], wlt, ?LINE),
ok.
-four_filled(suite) -> [];
-four_filled(doc) -> ["Four filled index files"];
+%% Four filled index files.
four_filled(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = join(Dir, "sune.LOG"),
delete_files(File),
start(),
- ?line open(sune, File, ?LINE),
- ?line init_files(0),
- ?line sync(sune),
+ open(sune, File, ?LINE),
+ init_files(0),
+ sync(sune),
%% open
test_four(File),
- ?line close(sune),
+ close(sune),
%% closed
test_four(File),
@@ -223,42 +218,41 @@ four_filled(Conf) when is_list(Conf) ->
ok.
test_four(File) ->
- ?line do_chunk([{open,File},
- {chunk, ["first round, 11", "first round, 12"]},
- {chunk, ["first round, 21", "first round, 22"]},
- {chunk, ["first round, 31", "first round, 32"]},
- {chunk, ["first round, 41", "first round, 42"]},
- eof], wlt, ?LINE),
- ?line do_chunk([{open,File,1},
- {chunk, ["first round, 11", "first round, 12"]},
- eof], wlt, ?LINE),
- ?line do_chunk([{open,File,4},
- {chunk, ["first round, 41", "first round, 42"]},
- eof], wlt, ?LINE),
+ do_chunk([{open,File},
+ {chunk, ["first round, 11", "first round, 12"]},
+ {chunk, ["first round, 21", "first round, 22"]},
+ {chunk, ["first round, 31", "first round, 32"]},
+ {chunk, ["first round, 41", "first round, 42"]},
+ eof], wlt, ?LINE),
+ do_chunk([{open,File,1},
+ {chunk, ["first round, 11", "first round, 12"]},
+ eof], wlt, ?LINE),
+ do_chunk([{open,File,4},
+ {chunk, ["first round, 41", "first round, 42"]},
+ eof], wlt, ?LINE),
wlt ! {open, self(), File, 5},
- ?line rec({error, {file_not_found, add_ext(File, 5)}}, ?LINE),
- ?line do_chunk([{open,File,1}, {chunk, 1, ["first round, 11"]},
- {chunk, 2, ["first round, 12"]}, eof], wlt, ?LINE),
- ?line do_chunk([{open,File,4}, {chunk, 1, ["first round, 41"]},
- {chunk, 2, ["first round, 42"]}, eof], wlt, ?LINE),
+ rec({error, {file_not_found, add_ext(File, 5)}}, ?LINE),
+ do_chunk([{open,File,1}, {chunk, 1, ["first round, 11"]},
+ {chunk, 2, ["first round, 12"]}, eof], wlt, ?LINE),
+ do_chunk([{open,File,4}, {chunk, 1, ["first round, 41"]},
+ {chunk, 2, ["first round, 42"]}, eof], wlt, ?LINE),
ok.
-wrap_filled(suite) -> [];
-wrap_filled(doc) -> ["First wrap, open, filled index file"];
+%% First wrap, open, filled index file.
wrap_filled(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = join(Dir, "sune.LOG"),
delete_files(File),
start(),
- ?line open(sune, File, ?LINE),
- ?line init_files(0),
- ?line log_terms(sune, ["second round, 11", "second round, 12"]),
- ?line sync(sune),
+ open(sune, File, ?LINE),
+ init_files(0),
+ log_terms(sune, ["second round, 11", "second round, 12"]),
+ sync(sune),
%% open
test_wrap(File),
- ?line close(sune),
+ close(sune),
%% closed
test_wrap(File),
@@ -267,103 +261,100 @@ wrap_filled(Conf) when is_list(Conf) ->
ok.
test_wrap(File) ->
- ?line do_chunk([{open,File},
- {chunk, ["first round, 21", "first round, 22"]},
- {chunk, ["first round, 31", "first round, 32"]},
- {chunk, ["first round, 41", "first round, 42"]},
- {chunk, ["second round, 11", "second round, 12"]},
- eof], wlt, ?LINE),
- ?line do_chunk([{open,File,1},
- {chunk, ["second round, 11", "second round, 12"]},
- eof], wlt, ?LINE),
- ?line do_chunk([{open,File,2},
- {chunk, ["first round, 21", "first round, 22"]},
- eof], wlt, ?LINE),
+ do_chunk([{open,File},
+ {chunk, ["first round, 21", "first round, 22"]},
+ {chunk, ["first round, 31", "first round, 32"]},
+ {chunk, ["first round, 41", "first round, 42"]},
+ {chunk, ["second round, 11", "second round, 12"]},
+ eof], wlt, ?LINE),
+ do_chunk([{open,File,1},
+ {chunk, ["second round, 11", "second round, 12"]},
+ eof], wlt, ?LINE),
+ do_chunk([{open,File,2},
+ {chunk, ["first round, 21", "first round, 22"]},
+ eof], wlt, ?LINE),
wlt ! {open, self(), File, 5},
- ?line rec({error, {file_not_found, add_ext(File, 5)}}, ?LINE),
- ?line do_chunk([{open,File,1}, {chunk, 1, ["second round, 11"]},
- {chunk, 2, ["second round, 12"]}, eof], wlt, ?LINE),
- ?line do_chunk([{open,File,4}, {chunk, 1, ["first round, 41"]},
- {chunk, 2, ["first round, 42"]}, eof], wlt, ?LINE),
+ rec({error, {file_not_found, add_ext(File, 5)}}, ?LINE),
+ do_chunk([{open,File,1}, {chunk, 1, ["second round, 11"]},
+ {chunk, 2, ["second round, 12"]}, eof], wlt, ?LINE),
+ do_chunk([{open,File,4}, {chunk, 1, ["first round, 41"]},
+ {chunk, 2, ["first round, 42"]}, eof], wlt, ?LINE),
ok.
-wrapping(suite) -> [];
-wrapping(doc) -> ["Wrapping at the same time as reading"];
+%% Wrapping at the same time as reading.
wrapping(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = join(Dir, "sune.LOG"),
delete_files(File),
start(),
- ?line open(sune, File, ?LINE),
- ?line init_files(1100),
- ?line sync(sune),
- ?line C1 =
+ open(sune, File, ?LINE),
+ init_files(1100),
+ sync(sune),
+ C1 =
do_chunk([{open,File}, {chunk, 1, ["first round, 11"]}], wlt, ?LINE),
- ?line log_terms(sune, ["second round, 11", "second round, 12"]),
- ?line sync(sune),
- ?line do_chunk([{chunk, 1, ["first round, 12"]},
- %% Here two bad bytes are found.
- {chunk, ["first round, 21", "first round, 22"]},
- {chunk, ["first round, 31", "first round, 32"]},
- {chunk, ["first round, 41", "first round, 42"]}, eof],
- wlt, ?LINE, C1),
+ log_terms(sune, ["second round, 11", "second round, 12"]),
+ sync(sune),
+ do_chunk([{chunk, 1, ["first round, 12"]},
+ %% Here two bad bytes are found.
+ {chunk, ["first round, 21", "first round, 22"]},
+ {chunk, ["first round, 31", "first round, 32"]},
+ {chunk, ["first round, 41", "first round, 42"]}, eof],
+ wlt, ?LINE, C1),
start(),
delete_files(File),
- ?line open(sune, File, ?LINE),
- ?line init_files(1100),
- ?line sync(sune),
- ?line C2 =
+ open(sune, File, ?LINE),
+ init_files(1100),
+ sync(sune),
+ C2 =
do_chunk([{open,File}, {chunk, 1, ["first round, 11"]}], wlt, ?LINE),
- ?line log_terms(sune, ["second round, 11", "second round, 12"]),
- ?line close(sune),
- ?line do_chunk([{chunk, 1, ["first round, 12"]},
- %% Here two bad bytes are found.
- {chunk, ["first round, 21", "first round, 22"]},
- {chunk, ["first round, 31", "first round, 32"]},
- {chunk, ["first round, 41", "first round, 42"]}, eof],
- wlt, ?LINE, C2),
+ log_terms(sune, ["second round, 11", "second round, 12"]),
+ close(sune),
+ do_chunk([{chunk, 1, ["first round, 12"]},
+ %% Here two bad bytes are found.
+ {chunk, ["first round, 21", "first round, 22"]},
+ {chunk, ["first round, 31", "first round, 32"]},
+ {chunk, ["first round, 41", "first round, 42"]}, eof],
+ wlt, ?LINE, C2),
start(),
delete_files(File),
- ?line open(sune, File, ?LINE),
- ?line init_files(1100),
- ?line sync(sune),
- ?line C3 = do_chunk([{open,File}], wlt, ?LINE),
- ?line log_terms(sune, ["second round, 11"]),
- ?line sync(sune),
- ?line do_chunk([{chunk, 1, ["second round, 11"]},
- {chunk, 1, ["first round, 21"]},
- {chunk, 1, ["first round, 22"]},
- {chunk, ["first round, 31", "first round, 32"]},
- {chunk, ["first round, 41", "first round, 42"]}, eof],
- wlt, ?LINE, C3),
+ open(sune, File, ?LINE),
+ init_files(1100),
+ sync(sune),
+ C3 = do_chunk([{open,File}], wlt, ?LINE),
+ log_terms(sune, ["second round, 11"]),
+ sync(sune),
+ do_chunk([{chunk, 1, ["second round, 11"]},
+ {chunk, 1, ["first round, 21"]},
+ {chunk, 1, ["first round, 22"]},
+ {chunk, ["first round, 31", "first round, 32"]},
+ {chunk, ["first round, 41", "first round, 42"]}, eof],
+ wlt, ?LINE, C3),
stop(),
delete_files(File),
ok.
-external(suite) -> [];
-external(doc) -> ["External format"];
+%% External format.
external(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = join(Dir, "sune.LOG"),
delete_files(File),
start(),
- ?line open_ext(sune, File, ?FILE),
- ?line init_files_ext(0),
- ?line close(sune),
+ open_ext(sune, File, ?FILE),
+ init_files_ext(0),
+ close(sune),
P0 = pps(),
wlt ! {open, self(), File},
- ?line rec({error, {not_a_log_file, add_ext(File, 1)}}, ?LINE),
- ?line true = (P0 == pps()),
+ rec({error, {not_a_log_file, add_ext(File, 1)}}, ?LINE),
+ true = (P0 == pps()),
stop(),
delete_files(File),
ok.
-error(suite) -> [];
-error(doc) -> ["Error situations"];
+%% Error situations.
error(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
File = join(Dir, "sune.LOG"),
@@ -372,78 +363,79 @@ error(Conf) when is_list(Conf) ->
P0 = pps(),
wlt ! {open, self(), File, 1},
- ?line rec({error, {index_file_not_found, File}}, ?LINE),
+ rec({error, {index_file_not_found, File}}, ?LINE),
wlt ! {open, self(), File},
- ?line rec({error, {index_file_not_found, File}}, ?LINE),
- ?line true = (P0 == pps()),
+ rec({error, {index_file_not_found, File}}, ?LINE),
+ true = (P0 == pps()),
- ?line open(sune, File, ?LINE),
- ?line close(sune),
+ open(sune, File, ?LINE),
+ close(sune),
P1 = pps(),
- ?line First = add_ext(File, 1),
- ?line ok = file:delete(First),
+ First = add_ext(File, 1),
+ ok = file:delete(First),
wlt ! {open, self(), File},
- ?line rec({error, {not_a_log_file, First}}, ?LINE),
- ?line true = (P1 == pps()),
+ rec({error, {not_a_log_file, First}}, ?LINE),
+ true = (P1 == pps()),
delete_files(File),
- ?line open(sune, File, ?LINE),
- ?line init_files(0),
- ?line close(sune),
+ open(sune, File, ?LINE),
+ init_files(0),
+ close(sune),
P2 = pps(),
- ?line C = do_chunk([{open,File},
- {chunk, ["first round, 11", "first round, 12"]}],
- wlt, ?LINE),
- ?line Second = add_ext(File, 2),
- ?line ok = file:delete(Second),
+ C = do_chunk([{open,File},
+ {chunk, ["first round, 11", "first round, 12"]}],
+ wlt, ?LINE),
+ Second = add_ext(File, 2),
+ ok = file:delete(Second),
wlt ! {chunk, self(), C},
- ?line rec({error, {file_error, Second, {error, enoent}}}, ?LINE),
- ?line ok = file:write_file(Second, <<17:(3*8)>>), % three bytes
+ rec({error, {file_error, Second, {error, enoent}}}, ?LINE),
+ ok = file:write_file(Second, <<17:(3*8)>>), % three bytes
wlt ! {chunk, self(), C},
- ?line rec({error, {not_a_log_file, Second}}, ?LINE),
- ?line do_chunk([close], wlt, ?LINE, C),
- ?line true = (P2 == pps()),
+ rec({error, {not_a_log_file, Second}}, ?LINE),
+ do_chunk([close], wlt, ?LINE, C),
+ true = (P2 == pps()),
delete_files(File),
- ?line open(sune, File, ?LINE),
- ?line init_files(0),
- ?line close(sune),
+ open(sune, File, ?LINE),
+ init_files(0),
+ close(sune),
P3 = pps(),
timer:sleep(1100),
Now = calendar:local_time(),
- ?line ok = file:change_time(First, Now),
- ?line C2 = do_chunk([{open,File},
- {chunk, ["first round, 11", "first round, 12"]}],
- wlt, ?LINE),
+ ok = file:change_time(First, Now),
+ C2 = do_chunk([{open,File},
+ {chunk, ["first round, 11", "first round, 12"]}],
+ wlt, ?LINE),
wlt ! {chunk, self(), C2},
- ?line rec({error,{is_wrapped,First}}, ?LINE),
- ?line do_chunk([close], wlt, ?LINE, C2),
+ rec({error,{is_wrapped,First}}, ?LINE),
+ do_chunk([close], wlt, ?LINE, C2),
IndexFile = add_ext(File, idx),
- ?line ok = file:write_file(IndexFile, <<17:(3*8)>>),
+ ok = file:write_file(IndexFile, <<17:(3*8)>>),
wlt ! {open, self(), File, 1},
- ?line rec({error, {index_file_not_found, File}}, ?LINE),
- ?line true = (P3 == pps()),
+ rec({error, {index_file_not_found, File}}, ?LINE),
+ true = (P3 == pps()),
stop(),
delete_files(File),
ok.
start() ->
- ?line ok = wrap_log_test:stop(),
+ ok = wrap_log_test:stop(),
dl_wait(),
- ?line ok = wrap_log_test:init().
+ ok = wrap_log_test:init().
stop() ->
- ?line ok = wrap_log_test:stop(),
+ ok = wrap_log_test:stop(),
dl_wait().
-%% Give disk logs opened by 'logger' and 'wlt' time to close after
+%% Give disk logs opened by 'wlr_logger' and 'wlt' time to close after
%% receiving EXIT signals.
dl_wait() ->
case disk_log:accessible_logs() of
{[], []} ->
ok;
- _ ->
+ _X ->
+ erlang:display(_X),
timer:sleep(100),
dl_wait()
end.
@@ -458,24 +450,24 @@ delete_files(File) ->
ok.
init_files(Delay) ->
- ?line log_terms(sune, ["first round, 11", "first round, 12"]),
+ log_terms(sune, ["first round, 11", "first round, 12"]),
timer:sleep(Delay),
- ?line log_terms(sune, ["first round, 21", "first round, 22"]),
+ log_terms(sune, ["first round, 21", "first round, 22"]),
timer:sleep(Delay),
- ?line log_terms(sune, ["first round, 31", "first round, 32"]),
+ log_terms(sune, ["first round, 31", "first round, 32"]),
timer:sleep(Delay),
- ?line log_terms(sune, ["first round, 41", "first round, 42"]),
+ log_terms(sune, ["first round, 41", "first round, 42"]),
timer:sleep(Delay),
ok.
init_files_ext(Delay) ->
- ?line blog_terms(sune, ["first round, 11", "first round, 12"]),
+ blog_terms(sune, ["first round, 11", "first round, 12"]),
timer:sleep(Delay),
- ?line blog_terms(sune, ["first round, 21", "first round, 22"]),
+ blog_terms(sune, ["first round, 21", "first round, 22"]),
timer:sleep(Delay),
- ?line blog_terms(sune, ["first round, 31", "first round, 32"]),
+ blog_terms(sune, ["first round, 31", "first round, 32"]),
timer:sleep(Delay),
- ?line blog_terms(sune, ["first round, 41", "first round, 42"]),
+ blog_terms(sune, ["first round, 41", "first round, 42"]),
timer:sleep(Delay),
ok.
@@ -487,27 +479,27 @@ do_chunk(Commands, Server, Where) ->
do_chunk([{open, File, One} | Cs], S, W, _C) ->
S ! {open, self(), File, One},
- ?line NC = rec1(ok, {W,?LINE}),
+ NC = rec1(ok, {W,?LINE}),
do_chunk(Cs, S, W, NC);
do_chunk([{open, File} | Cs], S, W, _C) ->
S ! {open, self(), File},
- ?line NC = rec1(ok, {W,?LINE}),
+ NC = rec1(ok, {W,?LINE}),
do_chunk(Cs, S, W, NC);
do_chunk([{chunk, Terms} | Cs], S, W, C) ->
S ! {chunk, self(), C},
- ?line NC = rec2(Terms, {W,?LINE}),
+ NC = rec2(Terms, {W,?LINE}),
do_chunk(Cs, S, W, NC);
do_chunk([{chunk, N, Terms} | Cs], S, W, C) ->
S ! {chunk, self(), C, N},
- ?line NC = rec2(Terms, {W,?LINE}),
+ NC = rec2(Terms, {W,?LINE}),
do_chunk(Cs, S, W, NC);
do_chunk([eof], S, W, C) ->
S ! {chunk, self(), C},
- ?line C1 = rec2(eof, {W,?LINE}),
+ C1 = rec2(eof, {W,?LINE}),
do_chunk([close], S, W, C1);
do_chunk([close], S, W, C) ->
S ! {close, self(), C},
- ?line rec(ok, {W,?LINE});
+ rec(ok, {W,?LINE});
do_chunk([], _S, _W, C) ->
C.
@@ -516,50 +508,50 @@ add_ext(Name, Ext) ->
%% disk_log.
open(Log, File, Where) ->
- logger ! {open, self(), Log, File},
+ wlr_logger ! {open, self(), Log, File},
rec1(ok, Where).
open_ext(Log, File, Where) ->
- logger ! {open_ext, self(), Log, File},
+ wlr_logger ! {open_ext, self(), Log, File},
rec1(ok, Where).
close(Log) ->
- logger ! {close, self(), Log},
+ wlr_logger ! {close, self(), Log},
rec(ok, ?LINE).
sync(Log) ->
- logger ! {sync, self(), Log},
+ wlr_logger ! {sync, self(), Log},
rec(ok, ?LINE).
log_terms(File, Terms) ->
- logger ! {log_terms, self(), File, Terms},
+ wlr_logger ! {log_terms, self(), File, Terms},
rec(ok, ?LINE).
blog_terms(File, Terms) ->
- logger ! {blog_terms, self(), File, Terms},
+ wlr_logger ! {blog_terms, self(), File, Terms},
rec(ok, ?LINE).
rec1(M, Where) ->
receive
{M, C} -> C;
- Else -> test_server:fail({error, {Where, Else}})
- after 1000 -> test_server:fail({error, {Where, time_out}})
+ Else -> ct:fail({error, {Where, Else}})
+ after 1000 -> ct:fail({error, {Where, time_out}})
end.
rec2(M, Where) ->
receive
{C, M} -> C;
- Else -> test_server:fail({error, {Where, Else}})
- after 1000 -> test_server:fail({error, {Where, time_out}})
+ Else -> ct:fail({error, {Where, Else}})
+ after 1000 -> ct:fail({error, {Where, time_out}})
end.
rec(M, Where) ->
receive
M ->
ok;
- Else -> ?t:fail({error, {Where, Else}})
- after 5000 -> ?t:fail({error, {Where, time_out}})
+ Else -> ct:fail({error, {Where, Else}})
+ after 5000 -> ct:fail({error, {Where, time_out}})
end.
-
+
pps() ->
{erlang:ports(), lists:filter(fun erlang:is_process_alive/1, processes())}.
diff --git a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
index 1cd1a4b0a4..d2bac40192 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2009. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -36,9 +36,9 @@
-endif.
init() ->
- spawn(fun() -> start(logger) end),
+ spawn(fun() -> start(wlr_logger) end),
spawn(fun() -> start2(wlt) end),
- wait_registered(logger),
+ wait_registered(wlr_logger),
wait_registered(wlt),
ok.
@@ -52,9 +52,9 @@ wait_registered(Name) ->
end.
stop() ->
- catch logger ! exit,
+ catch wlr_logger ! exit,
catch wlt ! exit,
- wait_unregistered(logger),
+ wait_unregistered(wlr_logger),
wait_unregistered(wlt),
ok.
@@ -82,47 +82,47 @@ loop() ->
{open, Pid, Name, File} ->
R = disk_log:open([{name, Name}, {type, wrap}, {file, File},
{size, {?fsize, ?fno}}]),
- ?format("logger: open ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: open ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{open_ext, Pid, Name, File} ->
R = disk_log:open([{name, Name}, {type, wrap}, {file, File},
{format, external}, {size, {?fsize, ?fno}}]),
- ?format("logger: open ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: open ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{close, Pid, Name} ->
R = disk_log:close(Name),
- ?format("logger: close ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: close ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{sync, Pid, Name} ->
R = disk_log:sync(Name),
- ?format("logger: sync ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: sync ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{log_terms, Pid, Name, Terms} ->
R = disk_log:log_terms(Name, Terms),
- ?format("logger: log_terms ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: log_terms ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{blog_terms, Pid, Name, Terms} ->
R = disk_log:blog_terms(Name, Terms),
- ?format("logger: blog_terms ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: blog_terms ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
exit ->
- ?format("Stopping logger~n", []),
+ ?format("Stopping wlr_logger~n", []),
exit(normal);
_Else ->
- ?format("logger: ignored: ~p~n", [_Else]),
+ ?format("wlr_logger: ignored: ~p~n", [_Else]),
loop()
end.
diff --git a/lib/kernel/test/zlib_SUITE.erl b/lib/kernel/test/zlib_SUITE.erl
index 6aaa024a82..52ae1b3ae6 100644
--- a/lib/kernel/test/zlib_SUITE.erl
+++ b/lib/kernel/test/zlib_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2013. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -20,60 +20,57 @@
-module(zlib_SUITE).
--include_lib("test_server/include/test_server.hrl").
-
--compile(export_all).
-
--define(error(Format,Args),
- put(test_server_loc,{?MODULE,?LINE}),
- error(Format,Args,?MODULE,?LINE)).
-
-%% Learn erts team how to really write tests ;-)
--define(m(ExpectedRes,Expr),
- fun() ->
- ACtual1 = (catch (Expr)),
- try case ACtual1 of
- ExpectedRes -> ACtual1
- end
- catch
- error:{case_clause,ACtuAl} ->
- ?error("Not Matching Actual result was:~n ~p ~n",
- [ACtuAl]),
- ACtuAl
- end
- end()).
-
--define(BARG, {'EXIT',{badarg,[{zlib,_,_,_}|_]}}).
--define(DATA_ERROR, {'EXIT',{data_error,[{zlib,_,_,_}|_]}}).
-
-init_per_testcase(_Func, Config) ->
- Dog = test_server:timetrap(test_server:seconds(60)),
- [{watchdog, Dog}|Config].
-end_per_testcase(_Func, Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog).
-
-error(Format, Args, File, Line) ->
- io:format("~p:~p: ERROR: " ++ Format, [File,Line|Args]),
- group_leader() ! {failed, File, Line}.
-
-%% Hopefully I don't need this to get it to work with the testserver..
-%% Fail = #'REASON'{file = filename:basename(File),
-%% line = Line,
-%% desc = Args},
-%% case global:whereis_name(mnesia_test_case_sup) of
-%% undefined ->
-%% ignore;
-%% Pid ->
-%% Pid ! Fail
-%% %% global:send(mnesia_test_case_sup, Fail),
-%% end,
-%% log("<>ERROR<>~n" ++ Format, Args, File, Line).
-
-suite() -> [{ct_hooks,[ts_install_cth]}].
+-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+
+-export([suite/0, all/0, groups/0]).
+
+%% API group
+-export([api_open_close/1]).
+-export([api_deflateInit/1, api_deflateSetDictionary/1, api_deflateReset/1,
+ api_deflateParams/1, api_deflate/1, api_deflateEnd/1]).
+-export([api_inflateInit/1, api_inflateReset/1, api_inflate2/1, api_inflate3/1,
+ api_inflateChunk/1, api_safeInflate/1, api_inflateEnd/1]).
+-export([api_inflateSetDictionary/1, api_inflateGetDictionary/1]).
+-export([api_crc32/1, api_adler32/1]).
+-export([api_un_compress/1, api_un_zip/1, api_g_un_zip/1]).
+
+%% Examples group
+-export([intro/1]).
+
+%% Usage group
+-export([zip_usage/1, gz_usage/1, gz_usage2/1, compress_usage/1,
+ dictionary_usage/1, large_deflate/1, crc/1, adler/1,
+ only_allow_owner/1, sub_heap_binaries/1]).
+
+%% Bench group
+-export([inflate_bench_zeroed/1, inflate_bench_rand/1,
+ deflate_bench_zeroed/1, deflate_bench_rand/1,
+ chunk_bench_zeroed/1, chunk_bench_rand/1]).
+
+%% Others
+-export([smp/1, otp_9981/1, otp_7359/1]).
+
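+%% Evaluate Expression and check that the result matches Guard;
+%% fail the test case with the actual result otherwise.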
+-define(m(Guard, Expression),
+ fun() ->
+ Actual = (catch (Expression)),
+ case Actual of
+ Guard -> Actual;
+ _Other ->
+ ct:fail("Failed to match ~p, actual result was ~p",
+ [??Guard, Actual])
+ end
+ end()).
+
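+%% Matches the exit of a zlib call that failed with reason Reason
+%% and left a stacktrace.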
+-define(EXIT(Reason), {'EXIT',{Reason,[{_,_,_,_}|_]}}).
+
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{minutes,1}}].
all() ->
- [{group, api}, {group, examples}, {group, func}, smp,
+ [{group, api}, {group, examples}, {group, func},
+ {group, bench}, smp,
otp_9981,
otp_7359].
@@ -82,149 +79,184 @@ groups() ->
[api_open_close, api_deflateInit,
api_deflateSetDictionary, api_deflateReset,
api_deflateParams, api_deflate, api_deflateEnd,
- api_inflateInit, api_inflateSetDictionary,
- api_inflateSync, api_inflateReset, api_inflate, api_inflateChunk,
- api_inflateEnd, api_setBufsz, api_getBufsz, api_crc32,
- api_adler32, api_getQSize, api_un_compress, api_un_zip,
+ api_inflateInit, api_inflateSetDictionary, api_inflateGetDictionary,
+ api_inflateReset, api_inflate2, api_inflate3, api_inflateChunk,
+ api_safeInflate, api_inflateEnd, api_crc32,
+ api_adler32, api_un_compress, api_un_zip,
api_g_un_zip]},
{examples, [], [intro]},
{func, [],
[zip_usage, gz_usage, gz_usage2, compress_usage,
- dictionary_usage, large_deflate, crc, adler]}].
-
-init_per_suite(Config) ->
- Config.
-
-end_per_suite(_Config) ->
- ok.
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-
-api_open_close(doc) -> "Test open/0 and close/1";
-api_open_close(suite) -> [];
+ dictionary_usage, large_deflate, crc, adler,
+ only_allow_owner, sub_heap_binaries]},
+ {bench,
+ [inflate_bench_zeroed, inflate_bench_rand,
+ deflate_bench_zeroed, deflate_bench_rand,
+ chunk_bench_zeroed, chunk_bench_rand]}].
+
+%% Test open/0 and close/1.
api_open_close(Config) when is_list(Config) ->
- ?line Fd1 = zlib:open(),
- ?line Fd2 = zlib:open(),
+ Fd1 = zlib:open(),
+ Fd2 = zlib:open(),
?m(false,Fd1 == Fd2),
?m(ok,zlib:close(Fd1)),
- ?m(?BARG, zlib:close(Fd1)),
+ ?m(?EXIT(not_initialized), zlib:close(Fd1)),
?m(ok,zlib:close(Fd2)),
-
+
%% Make sure that we don't get any EXIT messages if trap_exit is enabled.
- ?line process_flag(trap_exit, true),
- ?line Fd3 = zlib:open(),
+ process_flag(trap_exit, true),
+ Fd3 = zlib:open(),
?m(ok,zlib:close(Fd3)),
receive
- Any -> ?line ?t:fail({unexpected_message,Any})
+ Any -> ct:fail({unexpected_message,Any})
after 10 -> ok
end.
-api_deflateInit(doc) -> "Test deflateInit/2 and /6";
-api_deflateInit(suite) -> [];
+%% Test deflateInit/2 and /6.
api_deflateInit(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
- ?m(?BARG, zlib:deflateInit(gurka, none)),
- ?m(?BARG, zlib:deflateInit(gurka, gurka)),
- ?m(?BARG, zlib:deflateInit(Z1, gurka)),
+ Z1 = zlib:open(),
+
+ ?m(?EXIT(badarg), zlib:deflateInit(gurka, none)),
+
+ ?m(?EXIT(bad_compression_level), zlib:deflateInit(gurka, gurka)),
+ ?m(?EXIT(bad_compression_level), zlib:deflateInit(Z1, gurka)),
Levels = [none, default, best_speed, best_compression] ++ lists:seq(0,9),
lists:foreach(fun(Level) ->
- ?line Z = zlib:open(),
+ Z = zlib:open(),
?m(ok, zlib:deflateInit(Z, Level)),
?m(ok,zlib:close(Z))
end, Levels),
%% /6
- ?m(?BARG, zlib:deflateInit(Z1,gurka,deflated,-15,8,default)),
-
- ?m(?BARG, zlib:deflateInit(Z1,default,undefined,-15,8,default)),
-
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,48,8,default)),
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-20,8,default)),
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-7,8,default)),
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,7,8,default)),
-
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,0,default)),
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,10,default)),
-
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,8,0)),
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,8,undefined)),
-
+ ?m(?EXIT(bad_compression_level),
+ zlib:deflateInit(Z1,gurka,deflated,-15,8,default)),
+
+ ?m(?EXIT(bad_compression_method),
+ zlib:deflateInit(Z1,default,undefined,-15,8,default)),
+
+ ?m(?EXIT(bad_compression_strategy),
+ zlib:deflateInit(Z1,default,deflated,-15,8,0)),
+ ?m(?EXIT(bad_compression_strategy),
+ zlib:deflateInit(Z1,default,deflated,-15,8,undefined)),
+
+ ?m(?EXIT(bad_windowbits),
+ zlib:deflateInit(Z1,default,deflated,48,8,default)),
+ ?m(?EXIT(bad_windowbits),
+ zlib:deflateInit(Z1,default,deflated,-20,8,default)),
+ ?m(?EXIT(bad_windowbits),
+ zlib:deflateInit(Z1,default,deflated,-7,8,default)),
+ ?m(?EXIT(bad_windowbits),
+ zlib:deflateInit(Z1,default,deflated,7,8,default)),
+
+ ?m(?EXIT(bad_memlevel),
+ zlib:deflateInit(Z1,default,deflated,-15,0,default)),
+ ?m(?EXIT(bad_memlevel),
+ zlib:deflateInit(Z1,default,deflated,-15,10,default)),
+
lists:foreach(fun(Level) ->
- ?line Z = zlib:open(),
+ Z = zlib:open(),
?m(ok, zlib:deflateInit(Z, Level, deflated, -15, 8, default)),
?m(ok,zlib:close(Z))
end, Levels),
-
+
lists:foreach(fun(Wbits) ->
- ?line Z11 = zlib:open(),
+ Z11 = zlib:open(),
?m(ok, zlib:deflateInit(Z11,best_compression,deflated,
Wbits,8,default)),
- ?line Z12 = zlib:open(),
+ Z12 = zlib:open(),
?m(ok, zlib:deflateInit(Z12,default,deflated,-Wbits,8,default)),
?m(ok,zlib:close(Z11)),
?m(ok,zlib:close(Z12))
- end, lists:seq(8, 15)),
-
+ end, lists:seq(9, 15)),
+
lists:foreach(fun(MemLevel) ->
- ?line Z = zlib:open(),
+ Z = zlib:open(),
?m(ok, zlib:deflateInit(Z,default,deflated,-15,
MemLevel,default)),
?m(ok,zlib:close(Z))
end, lists:seq(1,8)),
-
+
Strategies = [filtered,huffman_only,rle,default],
lists:foreach(fun(Strategy) ->
- ?line Z = zlib:open(),
+ Z = zlib:open(),
?m(ok, zlib:deflateInit(Z,best_speed,deflated,-15,8,Strategy)),
?m(ok,zlib:close(Z))
end, Strategies),
?m(ok, zlib:deflateInit(Z1,default,deflated,-15,8,default)),
- ?m({'EXIT',_}, zlib:deflateInit(Z1,none,deflated,-15,8,default)), %% ??
+
+ %% Let it crash for any reason; we don't care about the order in which the
+ %% parameters are checked.
+ ?m(?EXIT(_), zlib:deflateInit(Z1,none,deflated,-15,8,default)),
+
?m(ok, zlib:close(Z1)).
-api_deflateSetDictionary(doc) -> "Test deflateSetDictionary";
-api_deflateSetDictionary(suite) -> [];
+%% Test deflateSetDictionary.
api_deflateSetDictionary(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
+ Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1, default)),
?m(Id when is_integer(Id), zlib:deflateSetDictionary(Z1, <<1,1,2,3,4,5,1>>)),
?m(Id when is_integer(Id), zlib:deflateSetDictionary(Z1, [1,1,2,3,4,5,1])),
- ?m(?BARG, zlib:deflateSetDictionary(Z1, gurka)),
- ?m(?BARG, zlib:deflateSetDictionary(Z1, 128)),
- ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)),
- ?m({'EXIT',{stream_error,_}},zlib:deflateSetDictionary(Z1,<<1,1,2,3,4,5,1>>)),
+ ?m(?EXIT(badarg), zlib:deflateSetDictionary(Z1, gurka)),
+ ?m(?EXIT(badarg), zlib:deflateSetDictionary(Z1, 128)),
+ ?m(L when is_list(L), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)),
+ ?m(?EXIT(stream_error), zlib:deflateSetDictionary(Z1,<<1,1,2,3,4,5,1>>)),
?m(ok, zlib:close(Z1)).
-api_deflateReset(doc) -> "Test deflateReset";
-api_deflateReset(suite) -> [];
+%% Test deflateReset.
api_deflateReset(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
+ Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1, default)),
- ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)),
+ ?m(L when is_list(L), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)),
?m(ok, zlib:deflateReset(Z1)),
?m(ok, zlib:deflateReset(Z1)),
%% FIXME how do I make this go wrong??
?m(ok, zlib:close(Z1)).
-api_deflateParams(doc) -> "Test deflateParams";
-api_deflateParams(suite) -> [];
+%% Test deflateParams.
api_deflateParams(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
+ Levels = [none, default, best_speed, best_compression] ++ lists:seq(0, 9),
+ Strategies = [filtered, huffman_only, rle, default],
+
+ Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1, default)),
- ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)),
- ?m(ok, zlib:deflateParams(Z1, best_compression, huffman_only)),
- ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, sync)),
- ?m(ok, zlib:close(Z1)).
-api_deflate(doc) -> "Test deflate";
-api_deflate(suite) -> [];
+ ApiTest =
+ fun(Level, Strategy) ->
+ ?m(ok, zlib:deflateParams(Z1, Level, Strategy)),
+ ?m(ok, zlib:deflateReset(Z1))
+ end,
+
+ [ ApiTest(Level, Strategy) || Level <- Levels, Strategy <- Strategies ],
+
+ ?m(ok, zlib:close(Z1)),
+
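+    %% Check that data buffered inside zlib is flushed and still
+    %% decompresses correctly when deflateParams/3 is called mid-stream.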
+ FlushTest =
+ fun FlushTest(Size, Level, Strategy) ->
+ Z = zlib:open(),
+ ok = zlib:deflateInit(Z, default),
+ Data = gen_determ_rand_bytes(Size),
+ case zlib:deflate(Z, Data, none) of
+ [<<120, 156>>] ->
+ %% All data is present in the internal zlib state, and will
+ %% be flushed on deflateParams.
+
+ ok = zlib:deflateParams(Z, Level, Strategy),
+ Compressed = [<<120, 156>>, zlib:deflate(Z, <<>>, finish)],
+ Data = zlib:uncompress(Compressed),
+ zlib:close(Z),
+
+ FlushTest(Size + (1 bsl 10), Level, Strategy);
+ _Other ->
+ ok
+ end
+ end,
+
+ [ FlushTest(1, Level, Strategy) || Level <- Levels, Strategy <- Strategies ],
+
+ ok.
+
+%% Test deflate.
api_deflate(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
+ Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1, default)),
?m([B] when is_binary(B), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, finish)),
?m(ok, zlib:deflateReset(Z1)),
@@ -235,324 +267,435 @@ api_deflate(Config) when is_list(Config) ->
?m(B when is_list(B), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, sync)),
?m(B when is_list(B), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, full)),
?m(B when is_list(B), zlib:deflate(Z1, <<>>, finish)),
-
- ?m(?BARG, zlib:deflate(gurka, <<1,1,1,1,1,1,1,1,1>>, full)),
- ?m(?BARG, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, asdj)),
- ?m(?BARG, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, 198)),
+
+ ?m(?EXIT(badarg), zlib:deflate(gurka, <<1,1,1,1,1,1,1,1,1>>, full)),
+
+ ?m(?EXIT(bad_flush_mode), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, asdj)),
+ ?m(?EXIT(bad_flush_mode), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, 198)),
+
%% Causes problems ERROR REPORT
- ?m(?BARG, zlib:deflate(Z1, [asdj,asd], none)),
-
+ ?m(?EXIT(badarg), zlib:deflate(Z1, [asdj,asd], none)),
+
?m(ok, zlib:close(Z1)).
-api_deflateEnd(doc) -> "Test deflateEnd";
-api_deflateEnd(suite) -> [];
+%% Test deflateEnd.
api_deflateEnd(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
+ Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1, default)),
?m(ok, zlib:deflateEnd(Z1)),
- ?m({'EXIT', {einval,_}}, zlib:deflateEnd(Z1)), %% ??
- ?m(?BARG, zlib:deflateEnd(gurka)),
+ ?m(?EXIT(not_initialized), zlib:deflateEnd(Z1)),
+ ?m(?EXIT(badarg), zlib:deflateEnd(gurka)),
?m(ok, zlib:deflateInit(Z1, default)),
?m(B when is_list(B), zlib:deflate(Z1, <<"Kilroy was here">>)),
- ?m({'EXIT', {data_error,_}}, zlib:deflateEnd(Z1)),
+ ?m(?EXIT(data_error), zlib:deflateEnd(Z1)),
?m(ok, zlib:deflateInit(Z1, default)),
?m(B when is_list(B), zlib:deflate(Z1, <<"Kilroy was here">>)),
?m(B when is_list(B), zlib:deflate(Z1, <<"Kilroy was here">>, finish)),
?m(ok, zlib:deflateEnd(Z1)),
-
+
?m(ok, zlib:close(Z1)).
-api_inflateInit(doc) -> "Test inflateInit /1 and /2";
-api_inflateInit(suite) -> [];
+%% Test inflateInit /1 and /2.
api_inflateInit(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
- ?m(?BARG, zlib:inflateInit(gurka)),
+ Z1 = zlib:open(),
+ ?m(?EXIT(badarg), zlib:inflateInit(gurka)),
?m(ok, zlib:inflateInit(Z1)),
- ?m({'EXIT',{einval,_}}, zlib:inflateInit(Z1, 15)), %% ??
+ ?m(?EXIT(already_initialized), zlib:inflateInit(Z1, 15)),
lists:foreach(fun(Wbits) ->
- ?line Z11 = zlib:open(),
+ Z11 = zlib:open(),
?m(ok, zlib:inflateInit(Z11,Wbits)),
- ?line Z12 = zlib:open(),
+ Z12 = zlib:open(),
?m(ok, zlib:inflateInit(Z12,-Wbits)),
?m(ok,zlib:close(Z11)),
?m(ok,zlib:close(Z12))
end, lists:seq(8,15)),
- ?m(?BARG, zlib:inflateInit(gurka, -15)),
- ?m(?BARG, zlib:inflateInit(Z1, 7)),
- ?m(?BARG, zlib:inflateInit(Z1, -7)),
- ?m(?BARG, zlib:inflateInit(Z1, 48)),
- ?m(?BARG, zlib:inflateInit(Z1, -16)),
+ ?m(?EXIT(badarg), zlib:inflateInit(gurka, -15)),
+ ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, 7)),
+ ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, -7)),
+ ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, 48)),
+ ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, -16)),
?m(ok, zlib:close(Z1)).
-api_inflateSetDictionary(doc) -> "Test inflateSetDictionary";
-api_inflateSetDictionary(suite) -> [];
+%% Test inflateSetDictionary.
api_inflateSetDictionary(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
+ Z1 = zlib:open(),
?m(ok, zlib:inflateInit(Z1)),
- ?m(?BARG, zlib:inflateSetDictionary(gurka,<<1,1,1,1,1>>)),
- ?m(?BARG, zlib:inflateSetDictionary(Z1,102)),
- ?m(?BARG, zlib:inflateSetDictionary(Z1,gurka)),
+ ?m(?EXIT(badarg), zlib:inflateSetDictionary(gurka,<<1,1,1,1,1>>)),
+ ?m(?EXIT(badarg), zlib:inflateSetDictionary(Z1,102)),
+ ?m(?EXIT(badarg), zlib:inflateSetDictionary(Z1,gurka)),
Dict = <<1,1,1,1,1>>,
- ?m({'EXIT',{stream_error,_}}, zlib:inflateSetDictionary(Z1,Dict)),
+ ?m(?EXIT(stream_error), zlib:inflateSetDictionary(Z1,Dict)),
?m(ok, zlib:close(Z1)).
-api_inflateSync(doc) -> "Test inflateSync";
-api_inflateSync(suite) -> [];
-api_inflateSync(Config) when is_list(Config) ->
- {skip,"inflateSync/1 sucks"}.
-%% ?line Z1 = zlib:open(),
-%% ?m(ok, zlib:deflateInit(Z1)),
-%% ?line B1list0 = zlib:deflate(Z1, "gurkan gurra ger galna tunnor", full),
-%% ?line B2 = zlib:deflate(Z1, "grodan boll", finish),
-%% io:format("~p\n", [B1list0]),
-%% io:format("~p\n", [B2]),
-%% ?m(ok, zlib:deflateEnd(Z1)),
-%% ?line B1 = clobber(14, list_to_binary(B1list0)),
-%% ?line Compressed = list_to_binary([B1,B2]),
-%% ?line io:format("~p\n", [Compressed]),
-
-%% ?m(ok, zlib:inflateInit(Z1)),
-%% ?m(?BARG, zlib:inflateSync(gurka)),
-%% ?m({'EXIT',{data_error,_}}, zlib:inflate(Z1, Compressed)),
-%% ?m(ok, zlib:inflateSync(Z1)),
-%% ?line Ubs = zlib:inflate(Z1, []),
-%% ?line <<"grodan boll">> = list_to_binary(Ubs),
-%% ?m(ok, zlib:close(Z1)).
-
-clobber(N, Bin) when is_binary(Bin) ->
- T = list_to_tuple(binary_to_list(Bin)),
- Byte = case element(N, T) of
- 255 -> 254;
- B -> B+1
- end,
- list_to_binary(tuple_to_list(setelement(N, T, Byte))).
+%% Test inflateGetDictionary.
+api_inflateGetDictionary(Config) when is_list(Config) ->
+ Z1 = zlib:open(),
+ zlib:inflateInit(Z1),
+ IsOperationSupported =
+ case catch zlib:inflateGetDictionary(Z1) of
+ ?EXIT(not_supported) -> false;
+ _ -> true
+ end,
+ zlib:close(Z1),
+ api_inflateGetDictionary_if_supported(IsOperationSupported).
+
+api_inflateGetDictionary_if_supported(false) ->
+ {skip, "inflateGetDictionary/1 unsupported in current setup"};
+api_inflateGetDictionary_if_supported(true) ->
+    %% Compress the payload using a custom dictionary
+ Z1 = zlib:open(),
+ ?m(ok, zlib:deflateInit(Z1)),
+ Dict = <<"foobar barfoo foo bar far boo">>,
+ Checksum = zlib:deflateSetDictionary(Z1, Dict),
+ Payload = <<"foobarbarbar">>,
+ Compressed = zlib:deflate(Z1, Payload, finish),
+ ?m(ok, zlib:close(Z1)),
+
+    %% Decompress and test dictionary extraction with inflate/2
+ Z2 = zlib:open(),
+ ?m(ok, zlib:inflateInit(Z2)),
+ ?m(<<>>, iolist_to_binary(zlib:inflateGetDictionary(Z2))),
+ ?m(?EXIT(stream_error), zlib:inflateSetDictionary(Z2, Dict)),
+ ?m(?EXIT({need_dictionary,Checksum}), zlib:inflate(Z2, Compressed)),
+ ?m(ok, zlib:inflateSetDictionary(Z2, Dict)),
+ ?m(Dict, iolist_to_binary(zlib:inflateGetDictionary(Z2))),
+ Payload = iolist_to_binary(zlib:inflate(Z2, [])),
+ ?m(ok, zlib:close(Z2)),
+ ?m(?EXIT(not_initialized), zlib:inflateSetDictionary(Z2, Dict)),
+
+ %% ... And do the same for inflate/3
+ Z3 = zlib:open(),
+ ?m(ok, zlib:inflateInit(Z3)),
+ ?m(<<>>, iolist_to_binary(zlib:inflateGetDictionary(Z3))),
+ ?m(?EXIT(stream_error), zlib:inflateSetDictionary(Z3, Dict)),
+
+ {need_dictionary, Checksum, _Output = []} =
+ zlib:inflate(Z3, Compressed, [{exception_on_need_dict, false}]),
+
+ ?m(ok, zlib:inflateSetDictionary(Z3, Dict)),
+ ?m(Dict, iolist_to_binary(zlib:inflateGetDictionary(Z3))),
+
+ Payload = iolist_to_binary(
+ zlib:inflate(Z3, [], [{exception_on_need_dict, false}])),
+
+ ?m(ok, zlib:close(Z3)),
+ ?m(?EXIT(not_initialized), zlib:inflateSetDictionary(Z3, Dict)),
-api_inflateReset(doc) -> "Test inflateReset";
-api_inflateReset(suite) -> [];
+ ok.
+
+%% Test inflateReset.
api_inflateReset(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
+ Z1 = zlib:open(),
?m(ok, zlib:inflateInit(Z1)),
- ?m(?BARG, zlib:inflateReset(gurka)),
+ ?m(?EXIT(badarg), zlib:inflateReset(gurka)),
?m(ok, zlib:inflateReset(Z1)),
?m(ok, zlib:close(Z1)).
-api_inflate(doc) -> "Test inflate";
-api_inflate(suite) -> [];
-api_inflate(Config) when is_list(Config) ->
+%% Test inflate/2
+api_inflate2(Config) when is_list(Config) ->
Data = [<<1,2,2,3,3,3,4,4,4,4>>],
- ?line Compressed = zlib:compress(Data),
- ?line Z1 = zlib:open(),
+ Compressed = zlib:compress(Data),
+ Z1 = zlib:open(),
?m(ok, zlib:inflateInit(Z1)),
?m([], zlib:inflate(Z1, <<>>)),
?m(Data, zlib:inflate(Z1, Compressed)),
?m(ok, zlib:inflateEnd(Z1)),
?m(ok, zlib:inflateInit(Z1)),
?m(Data, zlib:inflate(Z1, Compressed)),
- ?m(?BARG, zlib:inflate(gurka, Compressed)),
- ?m(?BARG, zlib:inflate(Z1, 4384)),
- ?m(?BARG, zlib:inflate(Z1, [atom_list])),
+ ?m(?EXIT(badarg), zlib:inflate(gurka, Compressed)),
+ ?m(?EXIT(badarg), zlib:inflate(Z1, 4384)),
+ ?m(?EXIT(badarg), zlib:inflate(Z1, [atom_list])),
?m(ok, zlib:inflateEnd(Z1)),
?m(ok, zlib:inflateInit(Z1)),
- ?m({'EXIT',{data_error,_}}, zlib:inflate(Z1, <<2,1,2,1,2>>)),
+ ?m(?EXIT(data_error), zlib:inflate(Z1, <<2,1,2,1,2>>)),
?m(ok, zlib:close(Z1)).
-api_inflateChunk(doc) -> "Test inflateChunk";
-api_inflateChunk(suite) -> [];
+%% Test inflate/3; same as inflate/2 but with the default options inverted.
+api_inflate3(Config) when is_list(Config) ->
+ Data = [<<1,2,2,3,3,3,4,4,4,4>>],
+ Options = [{exception_on_need_dict, false}],
+ Compressed = zlib:compress(Data),
+ Z1 = zlib:open(),
+ ?m(ok, zlib:inflateInit(Z1)),
+ ?m([], zlib:inflate(Z1, <<>>, Options)),
+ ?m(Data, zlib:inflate(Z1, Compressed)),
+ ?m(ok, zlib:inflateEnd(Z1)),
+ ?m(ok, zlib:inflateInit(Z1)),
+ ?m(Data, zlib:inflate(Z1, Compressed, Options)),
+ ?m(?EXIT(badarg), zlib:inflate(gurka, Compressed, Options)),
+ ?m(?EXIT(badarg), zlib:inflate(Z1, 4384, Options)),
+ ?m(?EXIT(badarg), zlib:inflate(Z1, [atom_list], Options)),
+ ?m(ok, zlib:inflateEnd(Z1)),
+ ?m(ok, zlib:inflateInit(Z1)),
+ ?m(?EXIT(data_error), zlib:inflate(Z1, <<2,1,2,1,2>>, Options)),
+ ?m(ok, zlib:close(Z1)).
+
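Where a payload may require a preset dictionary, the {exception_on_need_dict, false} option used above lets the caller handle the condition as a return value instead of an exit. A small sketch, assuming the caller knows the right dictionary; the helper name and Dict argument are illustrative:

    inflate_with_dict(Z, Compressed, Dict) ->
        case zlib:inflate(Z, Compressed, [{exception_on_need_dict, false}]) of
            {need_dictionary, _Adler32, SoFar} ->
                %% Provide the dictionary, then continue where inflation stopped.
                ok = zlib:inflateSetDictionary(Z, Dict),
                [SoFar | zlib:inflate(Z, [], [{exception_on_need_dict, false}])];
            Decompressed ->
                Decompressed
        end.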
+%% Test inflateChunk.
api_inflateChunk(Config) when is_list(Config) ->
ChunkSize = 1024,
Data = << <<(I rem 150)>> || I <- lists:seq(1, 3 * ChunkSize) >>,
Part1 = binary:part(Data, 0, ChunkSize),
Part2 = binary:part(Data, ChunkSize, ChunkSize),
Part3 = binary:part(Data, ChunkSize * 2, ChunkSize),
- ?line Compressed = zlib:compress(Data),
- ?line Z1 = zlib:open(),
- ?line zlib:setBufSize(Z1, ChunkSize),
+
+ Compressed = zlib:compress(Data),
+ Z1 = zlib:open(),
+
+ zlib:setBufSize(Z1, ChunkSize),
+
?m(ok, zlib:inflateInit(Z1)),
- ?m([], zlib:inflateChunk(Z1, <<>>)),
- ?m({more, Part1}, zlib:inflateChunk(Z1, Compressed)),
- ?m({more, Part2}, zlib:inflateChunk(Z1)),
- ?m(Part3, zlib:inflateChunk(Z1)),
- ?m(ok, zlib:inflateEnd(Z1)),
+ 0 = iolist_size(zlib:inflateChunk(Z1, <<>>)),
+
+ {more, Part1AsIOList} = zlib:inflateChunk(Z1, Compressed),
+ {more, Part2AsIOList} = zlib:inflateChunk(Z1),
+ {more, Part3AsIOList} = zlib:inflateChunk(Z1),
+
+ [] = zlib:inflateChunk(Z1),
+ [] = zlib:inflateChunk(Z1),
+ [] = zlib:inflateChunk(Z1),
+
+ ?m(Part1, iolist_to_binary(Part1AsIOList)),
+ ?m(Part2, iolist_to_binary(Part2AsIOList)),
+ ?m(Part3, iolist_to_binary(Part3AsIOList)),
+
+ ?m(ok, zlib:inflateEnd(Z1)),
?m(ok, zlib:inflateInit(Z1)),
- ?m({more, Part1}, zlib:inflateChunk(Z1, Compressed)),
+
+ ?m({more, Part1AsIOList}, zlib:inflateChunk(Z1, Compressed)),
?m(ok, zlib:inflateReset(Z1)),
- ?line zlib:setBufSize(Z1, size(Data)),
- ?m(Data, zlib:inflateChunk(Z1, Compressed)),
- ?m(ok, zlib:inflateEnd(Z1)),
+ zlib:setBufSize(Z1, byte_size(Data) + 1),
+ DataAsIOList = zlib:inflateChunk(Z1, Compressed),
+ ?m(Data, iolist_to_binary(DataAsIOList)),
+
+ ?m(ok, zlib:inflateEnd(Z1)),
?m(ok, zlib:inflateInit(Z1)),
- ?m(?BARG, zlib:inflateChunk(gurka, Compressed)),
- ?m(?BARG, zlib:inflateChunk(Z1, 4384)),
- ?m({'EXIT',{data_error,_}}, zlib:inflateEnd(Z1)),
+
+ ?m(?EXIT(badarg), zlib:inflateChunk(gurka, Compressed)),
+ ?m(?EXIT(badarg), zlib:inflateChunk(Z1, 4384)),
+
+ ?m(?EXIT(data_error), zlib:inflateEnd(Z1)),
+
?m(ok, zlib:close(Z1)).
-api_inflateEnd(doc) -> "Test inflateEnd";
-api_inflateEnd(suite) -> [];
-api_inflateEnd(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
- ?m({'EXIT',{einval,_}}, zlib:inflateEnd(Z1)),
- ?m(ok, zlib:inflateInit(Z1)),
- ?m(?BARG, zlib:inflateEnd(gurka)),
- ?m({'EXIT',{data_error,_}}, zlib:inflateEnd(Z1)),
- ?m({'EXIT',{einval,_}}, zlib:inflateEnd(Z1)),
+%% Test safeInflate as a mirror of inflateChunk, but without checking the
+%% exact chunk sizes.
+api_safeInflate(Config) when is_list(Config) ->
+ Data = << <<(I rem 150)>> || I <- lists:seq(1, 20 bsl 10) >>,
+ Compressed = zlib:compress(Data),
+ Z1 = zlib:open(),
+
?m(ok, zlib:inflateInit(Z1)),
- ?m(B when is_list(B), zlib:inflate(Z1, zlib:compress("abc"))),
+
+ SafeInflateLoop =
+ fun
+ Loop({continue, Chunk}, Output) ->
+ Loop(zlib:safeInflate(Z1, []), [Output, Chunk]);
+ Loop({finished, Chunk}, Output) ->
+ [Output, Chunk]
+ end,
+
+ Decompressed = SafeInflateLoop(zlib:safeInflate(Z1, Compressed), []),
+ Data = iolist_to_binary(Decompressed),
+
?m(ok, zlib:inflateEnd(Z1)),
- ?m(ok, zlib:close(Z1)).
+ ?m(ok, zlib:inflateInit(Z1)),
-api_getBufsz(doc) -> "Test getBufsz";
-api_getBufsz(suite) -> [];
-api_getBufsz(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
- ?m(Val when is_integer(Val), zlib:getBufSize(Z1)),
- ?m(?BARG, zlib:getBufSize(gurka)),
- ?m(ok, zlib:close(Z1)).
+ {continue, Partial} = zlib:safeInflate(Z1, Compressed),
+ PBin = iolist_to_binary(Partial),
+ PSize = byte_size(PBin),
+ <<PBin:PSize/binary, Rest/binary>> = Data,
-api_setBufsz(doc) -> "Test setBufsz";
-api_setBufsz(suite) -> [];
-api_setBufsz(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
- ?m(?BARG, zlib:setBufSize(Z1, gurka)),
- ?m(?BARG, zlib:setBufSize(gurka, 1232330)),
- Sz = ?m( Val when is_integer(Val), zlib:getBufSize(Z1)),
- ?m(ok, zlib:setBufSize(Z1, Sz*2)),
- DSz = Sz*2,
- ?m(DSz, zlib:getBufSize(Z1)),
+ ?m(ok, zlib:inflateReset(Z1)),
+
+ {continue, Partial} = zlib:safeInflate(Z1, Compressed),
+ PBin = iolist_to_binary(Partial),
+ PSize = byte_size(PBin),
+ <<PBin:PSize/binary, Rest/binary>> = Data,
+
+ ?m(ok, zlib:inflateReset(Z1)),
+
+ SafeInflateLoop(zlib:safeInflate(Z1, Compressed), []),
+
+ ?m({finished, []}, zlib:safeInflate(Z1, Compressed)),
+ ?m({finished, []}, zlib:safeInflate(Z1, Compressed)),
+
+ ?m(ok, zlib:inflateReset(Z1)),
+ ?m(?EXIT(badarg), zlib:safeInflate(gurka, Compressed)),
+ ?m(?EXIT(badarg), zlib:safeInflate(Z1, 4384)),
+ ?m(?EXIT(data_error), zlib:inflateEnd(Z1)),
?m(ok, zlib:close(Z1)).
-%%% Debug function ??
-api_getQSize(doc) -> "Test getQSize";
-api_getQSize(suite) -> [];
-api_getQSize(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
- Q = ?m(Val when is_integer(Val), zlib:getQSize(Z1)),
- io:format("QSize ~p ~n", [Q]),
- ?m(?BARG, zlib:getQSize(gurka)),
+%% Test inflateEnd.
+api_inflateEnd(Config) when is_list(Config) ->
+ Z1 = zlib:open(),
+ ?m(?EXIT(not_initialized), zlib:inflateEnd(Z1)),
+ ?m(ok, zlib:inflateInit(Z1)),
+ ?m(?EXIT(badarg), zlib:inflateEnd(gurka)),
+ ?m(?EXIT(data_error), zlib:inflateEnd(Z1)),
+ ?m(?EXIT(not_initialized), zlib:inflateEnd(Z1)),
+ ?m(ok, zlib:inflateInit(Z1)),
+ ?m(B when is_list(B), zlib:inflate(Z1, zlib:compress("abc"))),
+ ?m(ok, zlib:inflateEnd(Z1)),
?m(ok, zlib:close(Z1)).
-api_crc32(doc) -> "Test crc32";
-api_crc32(suite) -> [];
+%% Test crc32.
api_crc32(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
+ Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1,best_speed,deflated,-15,8,default)),
Bin = <<1,1,1,1,1,1,1,1,1>>,
- Compressed1 = ?m(_, zlib:deflate(Z1, Bin, none)),
- Compressed2 = ?m(_, zlib:deflate(Z1, <<>>, finish)),
+ Compressed1 = ?m(L when is_list(L), zlib:deflate(Z1, Bin, none)),
+ Compressed2 = ?m(L when is_list(L), zlib:deflate(Z1, <<>>, finish)),
Compressed = list_to_binary(Compressed1 ++ Compressed2),
- CRC1 = ?m( CRC1 when is_integer(CRC1), zlib:crc32(Z1)),
+ CRC1 = ?m( CRC1 when is_integer(CRC1), zlib:crc32(Z1)),
?m(CRC1 when is_integer(CRC1), zlib:crc32(Z1,Bin)),
?m(CRC1 when is_integer(CRC1), zlib:crc32(Z1,binary_to_list(Bin))),
?m(CRC2 when is_integer(CRC2), zlib:crc32(Z1,Compressed)),
CRC2 = ?m(CRC2 when is_integer(CRC2), zlib:crc32(Z1,0,Compressed)),
?m(CRC3 when CRC2 /= CRC3, zlib:crc32(Z1,234,Compressed)),
- ?m(?BARG, zlib:crc32(gurka)),
- ?m(?BARG, zlib:crc32(Z1, not_a_binary)),
- ?m(?BARG, zlib:crc32(gurka, <<1,1,2,4,4>>)),
- ?m(?BARG, zlib:crc32(Z1, 2298929, not_a_binary)),
- ?m(?BARG, zlib:crc32(Z1, not_an_int, <<123,123,123,35,231>>)),
- ?m(?BARG, zlib:crc32_combine(Z1, not_an_int, 123123, 123)),
- ?m(?BARG, zlib:crc32_combine(Z1, noint, 123123, 123)),
- ?m(?BARG, zlib:crc32_combine(Z1, 123123, noint, 123)),
- ?m(?BARG, zlib:crc32_combine(Z1, 123123, 123, noint)),
+ ?m(?EXIT(badarg), zlib:crc32(gurka)),
+ ?m(?EXIT(badarg), zlib:crc32(Z1, not_a_binary)),
+ ?m(?EXIT(badarg), zlib:crc32(gurka, <<1,1,2,4,4>>)),
+ ?m(?EXIT(badarg), zlib:crc32(Z1, 2298929, not_a_binary)),
+ ?m(?EXIT(badarg), zlib:crc32(Z1, not_an_int, <<123,123,123,35,231>>)),
+ ?m(?EXIT(badarg), zlib:crc32_combine(Z1, not_an_int, 123123, 123)),
+ ?m(?EXIT(badarg), zlib:crc32_combine(Z1, noint, 123123, 123)),
+ ?m(?EXIT(badarg), zlib:crc32_combine(Z1, 123123, noint, 123)),
+ ?m(?EXIT(badarg), zlib:crc32_combine(Z1, 123123, 123, noint)),
?m(ok, zlib:deflateEnd(Z1)),
?m(ok, zlib:close(Z1)).
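The crc32_combine/4 calls rejected above with bad arguments combine checksums of consecutive chunks; a sketch of the intended use (the literals are illustrative), mirroring the crc/1 case later in the suite:

    crc_combine_sketch() ->
        Z = zlib:open(),
        Whole = <<"combine me">>,
        {A, B} = split_binary(Whole, 4),
        Crc = zlib:crc32(Z, Whole),
        %% The CRC of A ++ B equals the combination of the two partial CRCs,
        %% given the byte size of the second part.
        Crc = zlib:crc32_combine(Z, zlib:crc32(Z, A), zlib:crc32(Z, B), byte_size(B)),
        ok = zlib:close(Z),
        Crc.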
-api_adler32(doc) -> "Test adler32";
-api_adler32(suite) -> [];
+%% Test adler32.
api_adler32(Config) when is_list(Config) ->
- ?line Z1 = zlib:open(),
+ Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1,best_speed,deflated,-15,8,default)),
Bin = <<1,1,1,1,1,1,1,1,1>>,
- Compressed1 = ?m(_, zlib:deflate(Z1, Bin, none)),
- Compressed2 = ?m(_, zlib:deflate(Z1, <<>>, finish)),
+ Compressed1 = ?m(L when is_list(L), zlib:deflate(Z1, Bin, none)),
+ Compressed2 = ?m(L when is_list(L), zlib:deflate(Z1, <<>>, finish)),
Compressed = list_to_binary(Compressed1 ++ Compressed2),
?m(ADLER1 when is_integer(ADLER1), zlib:adler32(Z1,Bin)),
?m(ADLER1 when is_integer(ADLER1), zlib:adler32(Z1,binary_to_list(Bin))),
ADLER2 = ?m(ADLER2 when is_integer(ADLER2), zlib:adler32(Z1,Compressed)),
?m(ADLER2 when is_integer(ADLER2), zlib:adler32(Z1,1,Compressed)),
?m(ADLER3 when ADLER2 /= ADLER3, zlib:adler32(Z1,234,Compressed)),
- ?m(?BARG, zlib:adler32(Z1, not_a_binary)),
- ?m(?BARG, zlib:adler32(gurka, <<1,1,2,4,4>>)),
- ?m(?BARG, zlib:adler32(Z1, 2298929, not_a_binary)),
- ?m(?BARG, zlib:adler32(Z1, not_an_int, <<123,123,123,35,231>>)),
- ?m(?BARG, zlib:adler32_combine(Z1, noint, 123123, 123)),
- ?m(?BARG, zlib:adler32_combine(Z1, 123123, noint, 123)),
- ?m(?BARG, zlib:adler32_combine(Z1, 123123, 123, noint)),
+ ?m(?EXIT(badarg), zlib:adler32(Z1, not_a_binary)),
+ ?m(?EXIT(badarg), zlib:adler32(gurka, <<1,1,2,4,4>>)),
+ ?m(?EXIT(badarg), zlib:adler32(Z1, 2298929, not_a_binary)),
+ ?m(?EXIT(badarg), zlib:adler32(Z1, not_an_int, <<123,123,123,35,231>>)),
+ ?m(?EXIT(badarg), zlib:adler32_combine(Z1, noint, 123123, 123)),
+ ?m(?EXIT(badarg), zlib:adler32_combine(Z1, 123123, noint, 123)),
+ ?m(?EXIT(badarg), zlib:adler32_combine(Z1, 123123, 123, noint)),
?m(ok, zlib:deflateEnd(Z1)),
?m(ok, zlib:close(Z1)).
-api_un_compress(doc) -> "Test compress";
-api_un_compress(suite) -> [];
+%% Test compress.
api_un_compress(Config) when is_list(Config) ->
- ?m(?BARG,zlib:compress(not_a_binary)),
+ ?m(?EXIT(badarg),zlib:compress(not_a_binary)),
Bin = <<1,11,1,23,45>>,
- ?line Comp = zlib:compress(Bin),
- ?m(?BARG,zlib:uncompress(not_a_binary)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<171,171,171,171,171>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120,156>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120,156,3>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120,156,3,0>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<0,156,3,0,0,0,0,1>>)),
+ Comp = zlib:compress(Bin),
+ ?m(?EXIT(badarg),zlib:uncompress(not_a_binary)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<171,171,171,171,171>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<120>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<120,156>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<120,156,3>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<120,156,3,0>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<0,156,3,0,0,0,0,1>>)),
?m(Bin, zlib:uncompress(binary_to_list(Comp))),
?m(Bin, zlib:uncompress(Comp)).
-api_un_zip(doc) -> "Test zip";
-api_un_zip(suite) -> [];
+%% Test zip.
api_un_zip(Config) when is_list(Config) ->
- ?m(?BARG,zlib:zip(not_a_binary)),
+ ?m(?EXIT(badarg),zlib:zip(not_a_binary)),
Bin = <<1,11,1,23,45>>,
- ?line Comp = zlib:zip(Bin),
+ Comp = zlib:zip(Bin),
?m(Comp, zlib:zip(binary_to_list(Bin))),
- ?m(?BARG,zlib:unzip(not_a_binary)),
- ?m({'EXIT',{data_error,_}}, zlib:unzip(<<171,171,171,171,171>>)),
- ?m({'EXIT',{data_error,_}}, zlib:unzip(<<>>)),
+ ?m(?EXIT(badarg),zlib:unzip(not_a_binary)),
+ ?m(?EXIT(data_error), zlib:unzip(<<171,171,171,171,171>>)),
+ ?m(?EXIT(data_error), zlib:unzip(<<>>)),
?m(Bin, zlib:unzip(Comp)),
?m(Bin, zlib:unzip(binary_to_list(Comp))),
-
+
%% OTP-6396
- B = <<131,104,19,100,0,13,99,95,99,105,100,95,99,115,103,115,110,95,50,97,1,107,0,4,208,161,246,29,107,0,3,237,166,224,107,0,6,66,240,153,0,2,10,1,0,8,97,116,116,97,99,104,101,100,104,2,100,0,22,117,112,100,97,116,101,95,112,100,112,95,99,111,110,116,101,120,116,95,114,101,113,107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104,12,3,197,31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64,104,116,73,64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,5,0,33,4,4,10,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,1,114,45,97,112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99,50,52,48,4,103,112,114,115,8,0,104,2,104,2,100,0,8,97,99,116,105,118,97,116,101,104,23,100,0,11,112,100,112,95,99,111,110,116,1,120,116,100,0,7,112,114,105,109,97,114,121,97,1,100,0,9,117,110,100,101,102,105,110,101,100,97,1,97,4,97,4,97,7,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,10100,100,0,9,117,110,100,101,102,105,110,101,100,100,0,5,102,97,108,115,101,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,1,101,100,97,0,100,0,9,117,110,100,101,102,105,110,101,100,107,0,4,16,0,1,144,107,0,4,61,139,186,181,107,0,4,10,8,201,49,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,0,101,100,100,0,9,117,110,100,101,102,105,110,101,100,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,16,97,21,106,108,0,0,0,3,104,2,97,1,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,167,20,104,2,97,4,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,16,97,21,104,2,97,10,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,16,97,26,106,100,0,5,118,101,114,57,57,100,0,9,117,110,0,101,102,105,110,101,100,107,0,2,0,244,107,0,4,10,6,102,195,107,0,4,10,6,102,195,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,101,100,107,0,125,248,143,0,203,25115,157,116,65,185,65,172,55,87,164,88,225,50,203,251,115,157,116,65,185,65,172,55,87,164,88,225,50,0,0,82,153,50,0,200,98,87,148,237,193,185,65,149,167,69,144,14,16,153,50,3,81,70,94,13,109,193,1,120,5,181,113,198,118,50,3,81,70,94,13,109,193,185,120,5,181,113,198,118,153,3,81,70,94,13,109,193,185,120,5,181,113,198,118,153,50,16,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,113,92,2,119,128,0,0,108,0,0,1,107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104,12,3,11,97,31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64,104,116,73,64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,0,33,4,4,10,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,101,114,45,97,112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99,50,52,48,4,103,112,114,115,8,0,106>>,
+ B =
+ <<131,104,19,100,0,13,99,95,99,105,100,95,99,115,103,115,110,95,50,97,
+ 1,107,0,4,208,161,246,29,107,0,3,237,166,224,107,0,6,66,240,153,0,2,
+ 10,1,0,8,97,116,116,97,99,104,101,100,104,2,100,0,22,117,112,100,97,
+ 116,101,95,112,100,112,95,99,111,110,116,101,120,116,95,114,101,113,
+ 107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104,12,3,197,
+ 31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64,104,116,73,
+ 64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,5,0,33,4,4,10
+ ,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,1,114,45,97,
+ 112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99,50,52,48,4,
+ 103,112,114,115,8,0,104,2,104,2,100,0,8,97,99,116,105,118,97,116,101,
+ 104,23,100,0,11,112,100,112,95,99,111,110,116,1,120,116,100,0,7,112,
+ 114,105,109,97,114,121,97,1,100,0,9,117,110,100,101,102,105,110,101,
+ 100,97,1,97,4,97,4,97,7,100,0,9,117,110,100,101,102,105,110,101,100,
+ 100,0,9,117,110,100,101,102,105,110,10100,100,0,9,117,110,100,101,
+ 102,105,110,101,100,100,0,5,102,97,108,115,101,100,0,9,117,110,100,
+ 101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,101,100,
+ 100,0,9,117,110,100,101,102,105,1,101,100,97,0,100,0,9,117,110,100,
+ 101,102,105,110,101,100,107,0,4,16,0,1,144,107,0,4,61,139,186,181,
+ 107,0,4,10,8,201,49,100,0,9,117,110,100,101,102,105,110,101,100,100,
+ 0,9,117,110,100,101,102,105,0,101,100,100,0,9,117,110,100,101,102,
+ 105,110,101,100,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,
+ 16,97,21,106,108,0,0,0,3,104,2,97,1,104,2,104,3,98,0,0,7,214,97,11,
+ 97,20,104,3,97,17,97,167,20,104,2,97,4,104,2,104,3,98,0,0,7,214,97,
+ 11,97,20,104,3,97,17,97,16,97,21,104,2,97,10,104,2,104,3,98,0,0,7,
+ 214,97,11,97,20,104,3,97,17,97,16,97,26,106,100,0,5,118,101,114,57,
+ 57,100,0,9,117,110,0,101,102,105,110,101,100,107,0,2,0,244,107,0,4,
+ 10,6,102,195,107,0,4,10,6,102,195,100,0,9,117,110,100,101,102,105,
+ 110,101,100,100,0,9,117,110,100,101,102,105,110,101,100,107,0,125,
+ 248,143,0,203,25115,157,116,65,185,65,172,55,87,164,88,225,50,203,
+ 251,115,157,116,65,185,65,172,55,87,164,88,225,50,0,0,82,153,50,0,
+ 200,98,87,148,237,193,185,65,149,167,69,144,14,16,153,50,3,81,70,94,
+ 13,109,193,1,120,5,181,113,198,118,50,3,81,70,94,13,109,193,185,120,
+ 5,181,113,198,118,153,3,81,70,94,13,109,193,185,120,5,181,113,198,
+ 118,153,50,16,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,113,92,2,119,128,0,0,
+ 108,0,0,1,107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104,
+ 12,3,11,97,31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64,
+ 104,116,73,64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,0,
+ 33,4,4,10,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,101,
+ 114,45,97,112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99,
+ 50,52,48,4,103,112,114,115,8,0,106>>,
+
Z = zlib:zip(B),
?m(B, zlib:unzip(Z)).
-%% api_g_un_zip_file(doc) -> "Test gunzip_file";
-%% api_g_un_zip_file(suite) -> [];
-%% api_g_un_zip_file(Config) when is_list(Config) ->
-%% ?line Out = conf(data_dir,Config),
-%% io:format("Using OutDir ~p ~n", [Out]),
-%% F = filename:join(Out,"testing1"),
-%% Data = <<1,1,255,255,255,1,1>>,
-%% ?m(ok, file:write_file(F,Data)),
-%% ?line Compressed = zlib:gzip_file(F),
-%% ?m(ok, file:write_file(F++".gz",Compressed)),
-%% ?m(Data, zlib:gunzip_file(F++".gz")),
-%% ?m({error,enoent}, zlib:gunzip_file(gurka)),
-%% ?m({error,enoent}, zlib:gzip_file(gurka)),
-%% ?m({error,what}, zlib:gunzip_file(F)),
-%% ?line ok.
-
-api_g_un_zip(doc) -> "Test gunzip";
-api_g_un_zip(suite) -> [];
+%% Test gunzip.
api_g_un_zip(Config) when is_list(Config) ->
- ?m(?BARG,zlib:gzip(not_a_binary)),
+ ?m(?EXIT(badarg),zlib:gzip(not_a_binary)),
Bin = <<1,11,1,23,45>>,
- ?line Comp = zlib:gzip(Bin),
+ Comp = zlib:gzip(Bin),
+
?m(Comp, zlib:gzip(binary_to_list(Bin))),
- ?m(?BARG, zlib:gunzip(not_a_binary)),
- ?m(?DATA_ERROR, zlib:gunzip(<<171,171,171,171,171>>)),
- ?m(?DATA_ERROR, zlib:gunzip(<<>>)),
+ ?m(?EXIT(badarg), zlib:gunzip(not_a_binary)),
+ ?m(?EXIT(data_error), zlib:gunzip(<<171,171,171,171,171>>)),
+ ?m(?EXIT(data_error), zlib:gunzip(<<>>)),
?m(Bin, zlib:gunzip(Comp)),
?m(Bin, zlib:gunzip(binary_to_list(Comp))),
-
+
+ %% RFC 1952:
+ %%
+ %% "A gzip file consists of a series of "members" (compressed data
+ %% sets). [...] The members simply appear one after another in the file,
+ %% with no additional information before, between, or after them."
+ Concatenated = <<Bin/binary, Bin/binary>>,
+ ?m(Concatenated, zlib:gunzip([Comp, Comp])),
+
+ %% Don't explode if the uncompressed size is a perfect multiple of the
+ %% internal inflate chunk size.
+ ChunkSizedData = <<0:16384/unit:8>>,
+ ?m(ChunkSizedData, zlib:gunzip(zlib:gzip(ChunkSizedData))),
+
%% Bad CRC; bad length.
BadCrc = bad_crc_data(),
- ?line ?m({'EXIT',{data_error,_}},(catch zlib:gunzip(BadCrc))),
+ ?m(?EXIT(data_error),(catch zlib:gunzip(BadCrc))),
BadLen = bad_len_data(),
- ?line ?m({'EXIT',{data_error,_}},(catch zlib:gunzip(BadLen))),
+ ?m(?EXIT(data_error),(catch zlib:gunzip(BadLen))),
ok.
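A concrete illustration of the multi-member behaviour asserted above; a sketch with illustrative literals:

    multi_member_gunzip_sketch() ->
        Gz1 = zlib:gzip(<<"abc">>),
        Gz2 = zlib:gzip(<<"def">>),
        %% Two gzip members back to back decode to the concatenated payloads.
        <<"abcdef">> = zlib:gunzip([Gz1, Gz2]),
        ok.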
bad_crc_data() ->
@@ -564,76 +707,57 @@ bad_len_data() ->
<<31,139,8,0,0,0,0,0,0,3,211,2,0,91,38,185,9,2,0,0,0>>.
-intro(suite) -> [];
-intro(doc) -> "";
intro(Config) when is_list(Config) ->
D = <<"This is a binary">>,
[put({ex, N}, <<"This is a binary">>) || N <- [0,1,2,3,4]],
put({ex, 5}, end_of_data),
put(ex,0),
- ?line Read = fun() ->
- N = get(ex),
- put(ex,N+1),
- get({ex,N})
- end,
-
- ?line Z = zlib:open(),
- ?line ok = zlib:deflateInit(Z,default),
-
- ?line Compress = fun(end_of_data, _Cont) -> [];
- (Data, Cont) ->
- [zlib:deflate(Z, Data)|Cont(Read(),Cont)]
- end,
- ?line Compressed = Compress(Read(),Compress),
- ?line Last = zlib:deflate(Z, [], finish),
- ?line ok = zlib:deflateEnd(Z),
- ?line zlib:close(Z),
- ?line Res = list_to_binary([Compressed|Last]),
+ Read = fun() ->
+ N = get(ex),
+ put(ex,N+1),
+ get({ex,N})
+ end,
+
+ Z = zlib:open(),
+ ok = zlib:deflateInit(Z,default),
+
+ Compress = fun(end_of_data, _Cont) -> [];
+ (Data, Cont) ->
+ [zlib:deflate(Z, Data)|Cont(Read(),Cont)]
+ end,
+ Compressed = Compress(Read(),Compress),
+ Last = zlib:deflate(Z, [], finish),
+ ok = zlib:deflateEnd(Z),
+ zlib:close(Z),
+ Res = list_to_binary([Compressed|Last]),
Orig = list_to_binary(lists:duplicate(5, D)),
?m(Orig, zlib:uncompress(Res)).
-large_deflate(doc) -> "Test deflate large file, which had a bug reported on erlang-bugs";
-large_deflate(suite) -> [];
+%% Test deflate large file, which had a bug reported on erlang-bugs.
large_deflate(Config) when is_list(Config) ->
large_deflate_do().
large_deflate_do() ->
- ?line Z = zlib:open(),
- ?line Plain = rand_bytes(zlib:getBufSize(Z)*5),
- ?line ok = zlib:deflateInit(Z),
- ?line _ZlibHeader = zlib:deflate(Z, [], full),
- ?line Deflated = zlib:deflate(Z, Plain, full),
- ?m(ok, zlib:close(Z)),
- ?m(Plain, zlib:unzip(list_to_binary([Deflated, 3, 0]))).
-
-rand_bytes(Sz) ->
- L = <<8,2,3,6,1,2,3,2,3,4,8,7,3,7,2,3,4,7,5,8,9,3>>,
- rand_bytes(erlang:md5(L),Sz).
-
-rand_bytes(Bin, Sz) when byte_size(Bin) >= Sz ->
- <<Res:Sz/binary, _/binary>> = Bin,
- Res;
-rand_bytes(Bin, Sz) ->
- rand_bytes(<<(erlang:md5(Bin))/binary, Bin/binary>>, Sz).
-
-
-zip_usage(doc) -> "Test a standard compressed zip file";
-zip_usage(suite) -> [];
+ Plain = gen_determ_rand_bytes(64 bsl 10),
+ Deflated = zlib:zip(Plain),
+ ?m(Plain, zlib:unzip(Deflated)).
+
+%% Test a standard compressed zip file.
zip_usage(Config) when is_list(Config) ->
zip_usage(zip_usage({get_arg,Config}));
zip_usage({get_arg,Config}) ->
- ?line Out = conf(data_dir,Config),
- ?line {ok,ZIP} = file:read_file(filename:join(Out,"zipdoc.zip")),
- ?line {ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")),
+ Out = get_data_dir(Config),
+ {ok,ZIP} = file:read_file(filename:join(Out,"zipdoc.zip")),
+ {ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")),
{run,ZIP,ORIG};
zip_usage({run,ZIP,ORIG}) ->
- ?line <<_:14/binary, CRC:32/little,
- CompSz:32/little, UnCompSz:32/little,_:31/binary,
- Compressed:CompSz/binary, _/binary>> = ZIP,
-
+ <<_:14/binary, CRC:32/little,
+ CompSz:32/little, UnCompSz:32/little,_:31/binary,
+ Compressed:CompSz/binary, _/binary>> = ZIP,
+
%%io:format("CRC ~p CSz ~p UnCSz ~p ~n", [CRC,CompSz,UnCompSz]),
- ?line Split = split_bin(Compressed,[]),
- ?line Z = zlib:open(),
+ Split = split_bin(Compressed,[]),
+ Z = zlib:open(),
?m(ok, zlib:inflateInit(Z, -15)),
Bs = [zlib:inflate(Z, Part) || Part <- Split],
@@ -643,86 +767,84 @@ zip_usage({run,ZIP,ORIG}) ->
?m(true, zlib:crc32(Z,UC0) == zlib:crc32(Z,ORIG)),
?m(ok, zlib:inflateEnd(Z)),
- ?line UC1 = zlib:unzip(Compressed),
+ UC1 = zlib:unzip(Compressed),
?m(UnCompSz, byte_size(UC1)),
?m(true, zlib:crc32(Z,UC1) == zlib:crc32(Z,ORIG)),
-
+
?m(ok, zlib:inflateInit(Z, -15)),
- ?line UC2 = zlib:inflate(Z, Compressed),
+ UC2 = zlib:inflate(Z, Compressed),
?m(UnCompSz, byte_size(list_to_binary(UC2))),
?m(CRC, zlib:crc32(Z)),
?m(true, zlib:crc32(Z,UC2) == zlib:crc32(Z,ORIG)),
?m(ok, zlib:inflateEnd(Z)),
-
+
?m(ok, zlib:inflateInit(Z, -15)),
- ?line UC3 = zlib:inflate(Z, Split), % Test multivec.
+ UC3 = zlib:inflate(Z, Split), % Test multivec.
?m(UnCompSz, byte_size(list_to_binary(UC3))),
?m(true, zlib:crc32(Z,UC3) == zlib:crc32(Z,ORIG)),
?m(CRC, zlib:crc32(Z)),
?m(ok, zlib:inflateEnd(Z)),
-
+
?m(ok, zlib:inflateInit(Z, -15)),
?m(ok, zlib:setBufSize(Z, UnCompSz *2)),
- ?line UC4 = zlib:inflate(Z, Compressed),
+ UC4 = zlib:inflate(Z, Compressed),
?m(UnCompSz, byte_size(list_to_binary(UC4))),
?m(CRC, zlib:crc32(Z)),
?m(CRC, zlib:crc32(Z,UC4)),
?m(true, zlib:crc32(Z,UC4) == zlib:crc32(Z,ORIG)),
?m(ok, zlib:inflateEnd(Z)),
-
- ?line C1 = zlib:zip(ORIG),
- ?line UC5 = zlib:unzip(C1),
+
+ C1 = zlib:zip(ORIG),
+ UC5 = zlib:unzip(C1),
?m(CRC, zlib:crc32(Z,UC5)),
?m(true,zlib:crc32(Z,UC5) == zlib:crc32(Z,ORIG)),
-
+
?m(ok, zlib:deflateInit(Z, default, deflated, -15, 8, default)),
- ?line C2 = zlib:deflate(Z, ORIG, finish),
- ?m(true, C1 == list_to_binary(C2)),
+ C2 = zlib:deflate(Z, ORIG, finish),
+ ?m(ORIG, zlib:unzip(C2)),
?m(ok, zlib:deflateEnd(Z)),
-
+
?m(ok, zlib:deflateInit(Z, none, deflated, -15, 8, filtered)),
?m(ok, zlib:deflateParams(Z, default, default)),
- ?line C3 = zlib:deflate(Z, ORIG, finish),
- ?m(true, C1 == list_to_binary(C3)),
+ C3 = zlib:deflate(Z, ORIG, finish),
+ ?m(ORIG, zlib:unzip(C3)),
?m(ok, zlib:deflateEnd(Z)),
- ?line ok = zlib:close(Z),
- ?line ok.
+ ok = zlib:close(Z),
+ ok.
-gz_usage(doc) -> "Test a standard compressed gzipped file";
-gz_usage(suite) -> [];
+%% Test a standard compressed gzipped file.
gz_usage(Config) when is_list(Config) ->
gz_usage(gz_usage({get_arg,Config}));
gz_usage({get_arg,Config}) ->
- ?line Out = conf(data_dir,Config),
- ?line {ok,GZIP} = file:read_file(filename:join(Out,"zipdoc.1.gz")),
- ?line {ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")),
- ?line {ok,GZIP2} = file:read_file(filename:join(Out,"zipdoc.txt.gz")),
+ Out = get_data_dir(Config),
+ {ok,GZIP} = file:read_file(filename:join(Out,"zipdoc.1.gz")),
+ {ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")),
+ {ok,GZIP2} = file:read_file(filename:join(Out,"zipdoc.txt.gz")),
{run,GZIP,ORIG,GZIP2};
gz_usage({run,GZIP,ORIG,GZIP2}) ->
- ?line Z = zlib:open(),
- ?line UC1 = zlib:gunzip(GZIP),
+ Z = zlib:open(),
+ UC1 = zlib:gunzip(GZIP),
?m(true,zlib:crc32(Z,UC1) == zlib:crc32(Z,ORIG)),
- ?line UC3 = zlib:gunzip(GZIP2),
+ UC3 = zlib:gunzip(GZIP2),
?m(true,zlib:crc32(Z,UC3) == zlib:crc32(Z,ORIG)),
- ?line Compressed = zlib:gzip(ORIG),
- ?line UC5 = zlib:gunzip(Compressed),
+ Compressed = zlib:gzip(ORIG),
+ UC5 = zlib:gunzip(Compressed),
?m(true,zlib:crc32(Z,UC5) == zlib:crc32(Z,ORIG)),
- ?line ok = zlib:close(Z).
+ ok = zlib:close(Z).
-gz_usage2(doc) -> "Test more of a standard compressed gzipped file";
-gz_usage2(suite) -> [];
+%% Test more of a standard compressed gzipped file.
gz_usage2(Config) ->
case os:find_executable("gzip") of
Name when is_list(Name) ->
- ?line Z = zlib:open(),
- ?line Out = conf(data_dir,Config),
- ?line {ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")),
- ?line Compressed = zlib:gzip(ORIG),
+ Z = zlib:open(),
+ Out = get_data_dir(Config),
+ {ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")),
+ Compressed = zlib:gzip(ORIG),
GzOutFile = filename:join(Out,"out.gz"),
OutFile = filename:join(Out,"out.txt"),
?m(ok, file:write_file(GzOutFile,Compressed)),
- ?line os:cmd("gzip -c -d " ++ GzOutFile ++ " > " ++ OutFile),
+ os:cmd("gzip -c -d " ++ GzOutFile ++ " > " ++ OutFile),
case file:read_file(OutFile) of
{ok,ExtDecompressed} ->
?m(true,
@@ -731,83 +853,80 @@ gz_usage2(Config) ->
io:format("Couldn't test external decompressor ~p\n",
[Error])
end,
- ?line ok = zlib:close(Z),
+ ok = zlib:close(Z),
ok;
false ->
{skipped,"No gzip in path"}
end.
-
-compress_usage(doc) ->
- "Test that (de)compress funcs work with"
- " standard tools, for example a chunk from a png file";
-compress_usage(suite) -> [];
+
+%% Test that (de)compress funcs work with standard tools, for example
+%% a chunk from a png file.
compress_usage(Config) when is_list(Config) ->
compress_usage(compress_usage({get_arg,Config}));
compress_usage({get_arg,Config}) ->
- ?line Out = conf(data_dir,Config),
- ?line {ok,C1} = file:read_file(filename:join(Out,"png-compressed.zlib")),
+ Out = get_data_dir(Config),
+ {ok,C1} = file:read_file(filename:join(Out,"png-compressed.zlib")),
{run,C1};
compress_usage({run,C1}) ->
- ?line Z = zlib:open(),
+ Z = zlib:open(),
%% See that we can uncompress a file generated with external prog.
- ?line UC1 = zlib:uncompress(C1),
+ UC1 = zlib:uncompress(C1),
%% Check that the crc are correct.
?m(4125865008,zlib:crc32(Z,UC1)),
- ?line C2 = zlib:compress(UC1),
- ?line UC2 = zlib:uncompress(C2),
+ C2 = zlib:compress(UC1),
+ UC2 = zlib:uncompress(C2),
%% Check that the crc are correct.
?m(4125865008,zlib:crc32(Z,UC2)),
-
- ?line ok = zlib:close(Z),
+
+ ok = zlib:close(Z),
D = [<<"We tests some partial">>,
<<"data, sent over">>,
<<"the stream">>,
<<"we check that we can unpack">>,
<<"every message we get">>],
-
- ?line ZC = zlib:open(),
- ?line ZU = zlib:open(),
+
+ ZC = zlib:open(),
+ ZU = zlib:open(),
Test = fun(finish, {_,Tot}) ->
- ?line Compressed = zlib:deflate(ZC, <<>>, finish),
+ Compressed = zlib:deflate(ZC, <<>>, finish),
Data = zlib:inflate(ZU, Compressed),
[Tot|Data];
(Data, {Op,Tot}) ->
- ?line Compressed = zlib:deflate(ZC, Data, Op),
+ Compressed = zlib:deflate(ZC, Data, Op),
Res1 = ?m([Data],zlib:inflate(ZU, Compressed)),
{Op, [Tot|Res1]}
end,
- ?line zlib:deflateInit(ZC),
- ?line zlib:inflateInit(ZU),
- ?line T1 = lists:foldl(Test,{sync,[]},D++[finish]),
+ zlib:deflateInit(ZC),
+ zlib:inflateInit(ZU),
+ T1 = lists:foldl(Test,{sync,[]},D++[finish]),
?m(true, list_to_binary(D) == list_to_binary(T1)),
- ?line zlib:deflateEnd(ZC),
- ?line zlib:inflateEnd(ZU),
-
- ?line zlib:deflateInit(ZC),
- ?line zlib:inflateInit(ZU),
- ?line T2 = lists:foldl(Test,{full,[]},D++[finish]),
+ zlib:deflateEnd(ZC),
+ zlib:inflateEnd(ZU),
+
+ zlib:deflateInit(ZC),
+ zlib:inflateInit(ZU),
+ T2 = lists:foldl(Test,{full,[]},D++[finish]),
?m(true, list_to_binary(D) == list_to_binary(T2)),
- ?line zlib:deflateEnd(ZC),
- ?line zlib:inflateEnd(ZU),
-
- ?line ok = zlib:close(ZC),
- ?line ok = zlib:close(ZU).
+ zlib:deflateEnd(ZC),
+ zlib:inflateEnd(ZU),
+ ok = zlib:close(ZC),
+ ok = zlib:close(ZU).
-crc(doc) -> "Check that crc works as expected";
-crc(suite) -> [];
+
+%% Check that crc works as expected.
crc(Config) when is_list(Config) ->
crc(crc({get_arg,Config}));
crc({get_arg,Config}) ->
- ?line Out = conf(data_dir,Config),
- ?line {ok,C1} = file:read_file(filename:join(Out,"zipdoc")),
+ Out = get_data_dir(Config),
+ {ok,C1} = file:read_file(filename:join(Out,"zipdoc")),
{run,C1};
crc({run,C1}) ->
- ?line Z = zlib:open(),
- ?line Crc = zlib:crc32(Z, C1),
+ Z = zlib:open(),
+ Crc = zlib:crc32(Z, C1),
Bins = split_bin(C1,[]),
%%io:format("Length ~p ~p ~n", [length(Bins), [size(Bin) || Bin <- Bins]]),
Last = lists:last(Bins),
@@ -817,29 +936,28 @@ crc({run,C1}) ->
Crc1
end, 0, Bins),
?m(Crc,SCrc),
- ?line [First|Rest] = Bins,
+ [First|Rest] = Bins,
Combine = fun(Bin, CS1) ->
CS2 = zlib:crc32(Z, Bin),
S2 = byte_size(Bin),
zlib:crc32_combine(Z,CS1,CS2,S2)
end,
- ?line Comb = lists:foldl(Combine, zlib:crc32(Z, First), Rest),
+ Comb = lists:foldl(Combine, zlib:crc32(Z, First), Rest),
?m(Crc,Comb),
- ?line ok = zlib:close(Z).
+ ok = zlib:close(Z).
-adler(doc) -> "Check that adler works as expected";
-adler(suite) -> [];
+%% Check that adler works as expected.
adler(Config) when is_list(Config) ->
adler(adler({get_arg,Config}));
adler({get_arg,Config}) ->
- ?line Out = conf(data_dir,Config),
+ Out = get_data_dir(Config),
File1 = filename:join(Out,"zipdoc"),
- ?line {ok,C1} = file:read_file(File1),
+ {ok,C1} = file:read_file(File1),
{run,C1};
adler({run,C1}) ->
- ?line Z = zlib:open(),
+ Z = zlib:open(),
?m(1, zlib:adler32(Z,<<>>)),
- ?line Crc = zlib:adler32(Z, C1),
+ Crc = zlib:adler32(Z, C1),
Bins = split_bin(C1,[]),
Last = lists:last(Bins),
SCrc = lists:foldl(fun(Bin,Crc0) ->
@@ -848,43 +966,46 @@ adler({run,C1}) ->
Crc1
end, zlib:adler32(Z,<<>>), Bins),
?m(Crc,SCrc),
- ?line [First|Rest] = Bins,
+ [First|Rest] = Bins,
Combine = fun(Bin, CS1) ->
CS2 = zlib:adler32(Z, Bin),
S2 = byte_size(Bin),
zlib:adler32_combine(Z,CS1,CS2,S2)
end,
- ?line Comb = lists:foldl(Combine, zlib:adler32(Z, First), Rest),
+ Comb = lists:foldl(Combine, zlib:adler32(Z, First), Rest),
?m(Crc,Comb),
- ?line ok = zlib:close(Z).
+ ok = zlib:close(Z).
-dictionary_usage(doc) -> "Test dictionary usage";
-dictionary_usage(suite) -> [];
+%% Test dictionary usage.
dictionary_usage(Config) when is_list(Config) ->
dictionary_usage(dictionary_usage({get_arg,Config}));
dictionary_usage({get_arg,_Config}) ->
{run}; % no args
dictionary_usage({run}) ->
- ?line Z1 = zlib:open(),
+ Z1 = zlib:open(),
Dict = <<"Anka">>,
Data = <<"Kalle Anka">>,
?m(ok, zlib:deflateInit(Z1)),
- ?line DictID = zlib:deflateSetDictionary(Z1, Dict),
- %% ?line io:format("DictID = ~p\n", [DictID]),
- ?line B1 = zlib:deflate(Z1, Data),
- ?line B2 = zlib:deflate(Z1, <<>>, finish),
+ DictID = zlib:deflateSetDictionary(Z1, Dict),
+ %% io:format("DictID = ~p\n", [DictID]),
+ B1 = zlib:deflate(Z1, Data),
+ B2 = zlib:deflate(Z1, <<>>, finish),
?m(ok, zlib:deflateEnd(Z1)),
?m(ok, zlib:close(Z1)),
Compressed = list_to_binary([B1,B2]),
%% io:format("~p\n", [Compressed]),
%% Now uncompress.
- ?line Z2 = zlib:open(),
+ Z2 = zlib:open(),
?m(ok, zlib:inflateInit(Z2)),
- ?line {'EXIT',{{need_dictionary,DictID},_}} = (catch zlib:inflate(Z2, Compressed)),
+
+ ?m(?EXIT({need_dictionary, DictID}), zlib:inflate(Z2, Compressed)),
+
?m(ok, zlib:inflateSetDictionary(Z2, Dict)),
?m(ok, zlib:inflateSetDictionary(Z2, binary_to_list(Dict))),
- ?line Uncompressed = ?m(B when is_list(B), zlib:inflate(Z2, [])),
+
+ Uncompressed = ?m(B when is_list(B), zlib:inflate(Z2, [])),
+
?m(ok, zlib:inflateEnd(Z2)),
?m(ok, zlib:close(Z2)),
?m(Data, list_to_binary(Uncompressed)).
@@ -894,38 +1015,77 @@ split_bin(<<Part:1997/binary,Rest/binary>>, Acc) ->
split_bin(Last,Acc) ->
lists:reverse([Last|Acc]).
+only_allow_owner(Config) when is_list(Config) ->
+ Z = zlib:open(),
+ Owner = self(),
+
+ ?m(ok, zlib:inflateInit(Z)),
+ ?m(ok, zlib:inflateReset(Z)),
+
+ {Pid, Ref} = spawn_monitor(
+ fun() ->
+ ?m(?EXIT(not_on_controlling_process), zlib:inflateReset(Z)),
+ Owner ! '$transfer_ownership',
+ receive
+ '$ownership_transferred' ->
+ ?m(ok, zlib:inflateReset(Z))
+ after 200 ->
+ ct:fail("Never received transfer signal.")
+ end
+ end),
+ ownership_transfer_check(Z, Pid, Ref).
+
+ownership_transfer_check(Z, WorkerPid, Ref) ->
+ receive
+ '$transfer_ownership' ->
+ zlib:set_controlling_process(Z, WorkerPid),
+ WorkerPid ! '$ownership_transferred',
+ ownership_transfer_check(Z, WorkerPid, Ref);
+ {'DOWN', Ref, process, WorkerPid, normal} ->
+ ok;
+ {'DOWN', Ref, process, WorkerPid, Reason} ->
+ ct:fail("Spawned worker crashed with reason ~p.", [Reason])
+ after 200 ->
+ ct:fail("Spawned worker timed out.")
+ end.
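The ownership hand-over exercised above follows the usual pattern: the current owner calls set_controlling_process/2 and then signals the new owner. A sketch; the message shape is illustrative:

    hand_over(Z, NewOwner) ->
        %% Must run in the process that currently owns Z; afterwards only
        %% NewOwner may operate on the stream.
        zlib:set_controlling_process(Z, NewOwner),
        NewOwner ! {zlib_stream, Z},
        ok.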
+
+sub_heap_binaries(Config) when is_list(Config) ->
+ Compressed = zlib:compress(<<"gurka">>),
+ ConfLen = erlang:length(Config),
+
+ HeapBin = <<ConfLen:8/integer, Compressed/binary>>,
+ <<_:8/integer, SubHeapBin/binary>> = HeapBin,
+
+ ?m(<<"gurka">>, zlib:uncompress(SubHeapBin)),
+ ok.
-smp(doc) -> "Check concurrent access to zlib driver";
-smp(suite) -> [];
+%% Check concurrent access to zlib driver.
smp(Config) ->
- case erlang:system_info(smp_support) of
- true ->
- NumOfProcs = lists:min([8,erlang:system_info(schedulers)]),
- io:format("smp starting ~p workers\n",[NumOfProcs]),
-
- %% Tests to run in parallel.
- Funcs = [zip_usage, gz_usage, compress_usage, dictionary_usage,
- crc, adler],
-
- %% We get all function arguments here to avoid repeated parallel
- %% file read access.
- FnAList = lists:map(fun(F) -> {F,?MODULE:F({get_arg,Config})}
- end, Funcs),
-
- Pids = [spawn_link(?MODULE, worker, [random:uniform(9999),
- list_to_tuple(FnAList),
- self()])
- || _ <- lists:seq(1,NumOfProcs)],
- wait_pids(Pids);
+ NumOfProcs = lists:min([8,erlang:system_info(schedulers)]),
+ io:format("smp starting ~p workers\n",[NumOfProcs]),
- false ->
- {skipped,"No smp support"}
- end.
-
+ %% Tests to run in parallel.
+ Funcs =
+ [zip_usage, gz_usage, compress_usage, dictionary_usage,
+ crc, adler],
+
+ %% We get all function arguments here to avoid repeated parallel
+ %% file read access.
+ UsageArgs =
+ list_to_tuple([{F, ?MODULE:F({get_arg,Config})} || F <- Funcs]),
+ Parent = self(),
+
+ WorkerFun =
+ fun() ->
+ worker(rand:uniform(9999), UsageArgs, Parent)
+ end,
+
+ Pids = [spawn_link(WorkerFun) || _ <- lists:seq(1, NumOfProcs)],
+ wait_pids(Pids).
worker(Seed, FnATpl, Parent) ->
io:format("smp worker ~p, seed=~p~n",[self(),Seed]),
- random:seed(Seed,Seed,Seed),
+ rand:seed(exsplus, {Seed,Seed,Seed}),
worker_loop(100, FnATpl),
Parent ! self().
@@ -933,33 +1093,32 @@ worker_loop(0, _FnATpl) ->
large_deflate_do(), % the time consuming one as finale
ok;
worker_loop(N, FnATpl) ->
- {F,A} = element(random:uniform(size(FnATpl)),FnATpl),
+ {F,A} = element(rand:uniform(tuple_size(FnATpl)), FnATpl),
?MODULE:F(A),
worker_loop(N-1, FnATpl).
-
+
wait_pids([]) ->
ok;
wait_pids(Pids) ->
receive
Pid ->
- ?line true = lists:member(Pid,Pids),
+ true = lists:member(Pid,Pids),
Others = lists:delete(Pid,Pids),
io:format("wait_pid got ~p, still waiting for ~p\n",[Pid,Others]),
wait_pids(Others)
end.
-otp_7359(doc) -> "Deflate/inflate data with size close to multiple of internal buffer size";
-otp_7359(suite) -> [];
+%% Deflate/inflate data with size close to multiple of internal buffer size.
otp_7359(_Config) ->
%% Find compressed size
ZTry = zlib:open(),
ok = zlib:deflateInit(ZTry),
ISize = zlib:getBufSize(ZTry),
IData = list_to_binary([Byte band 255 || Byte <- lists:seq(1,ISize)]),
- ?line ISize = byte_size(IData),
+ ISize = byte_size(IData),
- ?line DSize = iolist_size(zlib:deflate(ZTry, IData, sync)),
+ DSize = iolist_size(zlib:deflate(ZTry, IData, sync)),
zlib:close(ZTry),
io:format("Deflated try ~p -> ~p bytes~n", [ISize, DSize]),
@@ -981,19 +1140,19 @@ otp_7359(_Config) ->
otp_7359_def_inf(Data,{DefSize,InfSize}) ->
%%io:format("Try: DefSize=~p InfSize=~p~n", [DefSize,InfSize]),
- ?line ZDef = zlib:open(),
- ?line ok = zlib:deflateInit(ZDef),
- ?line ok = zlib:setBufSize(ZDef,DefSize),
- ?line DefData = iolist_to_binary(zlib:deflate(ZDef, Data, sync)),
+ ZDef = zlib:open(),
+ ok = zlib:deflateInit(ZDef),
+ ok = zlib:setBufSize(ZDef,DefSize),
+ DefData = iolist_to_binary(zlib:deflate(ZDef, Data, sync)),
%%io:format("Deflated ~p(~p) -> ~p(~p) bytes~n",
%% [byte_size(Data), InfSize, byte_size(DefData), DefSize]),
- ?line ok = zlib:close(ZDef),
+ ok = zlib:close(ZDef),
- ?line ZInf = zlib:open(),
- ?line ok = zlib:inflateInit(ZInf),
- ?line ok = zlib:setBufSize(ZInf,InfSize),
- ?line Data = iolist_to_binary(zlib:inflate(ZInf, DefData)),
- ?line ok = zlib:close(ZInf),
+ ZInf = zlib:open(),
+ ok = zlib:inflateInit(ZInf),
+ ok = zlib:setBufSize(ZInf,InfSize),
+ Data = iolist_to_binary(zlib:inflate(ZInf, DefData)),
+ ok = zlib:close(ZInf),
ok.
otp_9981(Config) when is_list(Config) ->
@@ -1013,43 +1172,98 @@ otp_9981(Config) when is_list(Config) ->
Ports = lists:sort(erlang:ports()),
ok.
-
+-define(BENCH_SIZE, (16 bsl 20)).
+
+-define(DECOMPRESS_BENCH(Name, What, Data),
+ Name(Config) when is_list(Config) ->
+ Uncompressed = Data,
+ Compressed = zlib:compress(Uncompressed),
+ What(Compressed, byte_size(Uncompressed))).
+
+-define(COMPRESS_BENCH(Name, What, Data),
+ Name(Config) when is_list(Config) ->
+ Compressed = Data,
+ What(Compressed, byte_size(Compressed))).
+
+?DECOMPRESS_BENCH(inflate_bench_zeroed, throughput_bench_inflate,
+ <<0:(8 * ?BENCH_SIZE)>>).
+?DECOMPRESS_BENCH(inflate_bench_rand, throughput_bench_inflate,
+ gen_determ_rand_bytes(?BENCH_SIZE)).
+
+?DECOMPRESS_BENCH(chunk_bench_zeroed, throughput_bench_chunk,
+ <<0:(8 * ?BENCH_SIZE)>>).
+?DECOMPRESS_BENCH(chunk_bench_rand, throughput_bench_chunk,
+ gen_determ_rand_bytes(?BENCH_SIZE)).
+
+?COMPRESS_BENCH(deflate_bench_zeroed, throughput_bench_deflate,
+ <<0:(8 * ?BENCH_SIZE)>>).
+?COMPRESS_BENCH(deflate_bench_rand, throughput_bench_deflate,
+ gen_determ_rand_bytes(?BENCH_SIZE)).
+
+throughput_bench_inflate(Compressed, Size) ->
+ Z = zlib:open(),
+ zlib:inflateInit(Z),
+
+ submit_throughput_results(Size,
+ fun() ->
+ zlib:inflate(Z, Compressed)
+ end).
+
+throughput_bench_deflate(Uncompressed, Size) ->
+ Z = zlib:open(),
+ zlib:deflateInit(Z),
+
+ submit_throughput_results(Size,
+ fun() ->
+ zlib:deflate(Z, Uncompressed, finish)
+ end).
+
+throughput_bench_chunk(Compressed, Size) ->
+ Z = zlib:open(),
+ zlib:inflateInit(Z),
+
+ ChunkLoop =
+ fun
+ Loop({more, _}) -> Loop(zlib:inflateChunk(Z));
+ Loop(_) -> ok
+ end,
+
+ submit_throughput_results(Size,
+ fun() ->
+ ChunkLoop(zlib:inflateChunk(Z, Compressed))
+ end).
+
+submit_throughput_results(Size, Fun) ->
+ TimeTaken = measure_perf_counter(Fun, millisecond),
+
+ KBPS = trunc((Size bsr 10) / (TimeTaken / 1000)),
+ ct_event:notify(#event{ name = benchmark_data, data = [{value,KBPS}] }),
+ {comment, io_lib:format("~p ms, ~p KBPS", [TimeTaken, KBPS])}.
+
+measure_perf_counter(Fun, Unit) ->
+ Start = os:perf_counter(Unit),
+ Fun(),
+ os:perf_counter(Unit) - Start.
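Using the helper above amounts to timing an arbitrary fun; for example (size and unit are illustrative):

    MillisTaken = measure_perf_counter(
                      fun() -> zlib:compress(<<0:(8 * ?BENCH_SIZE)>>) end,
                      millisecond).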
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Helps with testing directly %%%%%%%%%%%%%
-conf(What,Config) ->
- try ?config(What,Config) of
- undefined ->
- "./zlib_SUITE_data";
- Dir ->
- Dir
+get_data_dir(Config) ->
+ try proplists:get_value(data_dir,Config) of
+ undefined ->
+ "./zlib_SUITE_data";
+ Dir ->
+ Dir
catch
- _:_ -> "./zlib_SUITE_data"
+ _:_ -> "./zlib_SUITE_data"
end.
-t() -> t([all]).
-
-t(What) when not is_list(What) ->
- t([What]);
-t(What) ->
- lists:foreach(fun(T) ->
- try ?MODULE:T([])
- catch _E:_R ->
- Line = get(test_server_loc),
- io:format("Failed ~p:~p ~p ~p ~p~n",
- [T,Line,_E,_R, erlang:get_stacktrace()])
- end
- end, expand(What)).
-
-expand(All) ->
- lists:reverse(expand(All,[])).
-expand([H|T], Acc) ->
- case ?MODULE:H(suite) of
- [] -> expand(T,[H|Acc]);
- Cs ->
- R = expand(Cs, Acc),
- expand(T, R)
- end;
-expand([], Acc) -> Acc.
-
+%% Deterministically generates pseudo-random bytes, using the size as the seed.
+gen_determ_rand_bytes(Size) ->
+ gen_determ_rand_bytes(Size, erlang:md5_init(), <<>>).
+gen_determ_rand_bytes(Size, _Context, Acc) when Size =< 0 ->
+ Acc;
+gen_determ_rand_bytes(Size, Context0, Acc) when Size > 0 ->
+ Context = erlang:md5_update(Context0, <<Size/integer>>),
+ Checksum = erlang:md5_final(Context),
+ gen_determ_rand_bytes(Size - 16, Context, <<Acc/binary, Checksum/binary>>).
diff --git a/lib/kernel/test/zzz_SUITE.erl b/lib/kernel/test/zzz_SUITE.erl
new file mode 100644
index 0000000000..59c7fd7404
--- /dev/null
+++ b/lib/kernel/test/zzz_SUITE.erl
@@ -0,0 +1,37 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2006-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(zzz_SUITE).
+
+%% The sole purpose of this test suite is to hold tests that must run last,
+%% just before the VM terminates.
+
+-export([all/0]).
+
+-export([lc_graph/1]).
+
+
+all() ->
+ [lc_graph].
+
+lc_graph(_Config) ->
+    %% Create an "lc_graph" file in the current working directory
+    %% if the lock checker is enabled.
+ erts_debug:lc_graph(),
+ ok.
diff --git a/lib/kernel/vsn.mk b/lib/kernel/vsn.mk
index c8917ebc3c..df95174c9f 100644
--- a/lib/kernel/vsn.mk
+++ b/lib/kernel/vsn.mk
@@ -1 +1 @@
-KERNEL_VSN = 4.2
+KERNEL_VSN = 6.1