-rw-r--r--  .dir-locals.el | 9
-rw-r--r--  .travis.yml | 8
-rw-r--r--  CONTRIBUTING.md | 98
-rw-r--r--  OTP_VERSION | 1
-rw-r--r--  README.md | 2
-rw-r--r--  erts/aclocal.m4 | 19
-rw-r--r--  erts/doc/src/erl.xml | 19
-rw-r--r--  erts/doc/src/erlang.xml | 8
-rw-r--r--  erts/doc/src/notes.xml | 8
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 13
-rw-r--r--  erts/emulator/beam/beam_bp.c | 60
-rw-r--r--  erts/emulator/beam/beam_bp.h | 6
-rw-r--r--  erts/emulator/beam/beam_emu.c | 239
-rw-r--r--  erts/emulator/beam/break.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 4
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 7
-rw-r--r--  erts/emulator/beam/erl_gc.c | 206
-rw-r--r--  erts/emulator/beam/erl_gc.h | 5
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 5
-rw-r--r--  erts/emulator/beam/erl_init.c | 6
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 2
-rw-r--r--  erts/emulator/beam/erl_message.c | 2
-rw-r--r--  erts/emulator/beam/erl_nif.c | 4
-rw-r--r--  erts/emulator/beam/erl_process.c | 260
-rw-r--r--  erts/emulator/beam/erl_process.h | 2
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 3
-rw-r--r--  erts/emulator/beam/erl_trace.c | 5
-rw-r--r--  erts/emulator/beam/erl_vm.h | 17
-rw-r--r--  erts/emulator/beam/external.c | 25
-rw-r--r--  erts/emulator/beam/sys.h | 3
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.c | 10
-rw-r--r--  erts/emulator/sys/unix/sys.c | 43
-rw-r--r--  erts/emulator/test/bif_SUITE.erl | 173
-rw-r--r--  erts/emulator/test/call_trace_SUITE.erl | 6
-rw-r--r--  erts/emulator/test/code_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/emulator_smoke.spec | 12
-rw-r--r--  erts/emulator/test/port_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/port_SUITE_data/Makefile.src | 6
-rw-r--r--  erts/emulator/test/port_SUITE_data/port_test.c | 16
-rw-r--r--  erts/emulator/test/port_trace_SUITE_data/echo_drv.c | 39
-rw-r--r--  erts/emulator/test/process_SUITE.erl | 21
-rw-r--r--  erts/etc/common/heart.c | 40
-rw-r--r--  erts/etc/unix/etp-commands.in | 12
-rw-r--r--  erts/lib_src/common/ethr_aux.c | 2
-rw-r--r--  erts/test/install_SUITE.erl | 44
-rw-r--r--  lib/common_test/src/common_test.app.src | 1
-rw-r--r--  lib/common_test/test/Makefile | 3
-rw-r--r--  lib/common_test/test/ct_SUITE.erl | 53
-rw-r--r--  lib/common_test/test/ct_hooks_SUITE.erl | 112
-rw-r--r--  lib/compiler/src/compile.erl | 30
-rw-r--r--  lib/compiler/test/lc_SUITE.erl | 9
-rw-r--r--  lib/crypto/c_src/crypto.c | 18
-rw-r--r--  lib/crypto/doc/src/crypto.xml | 2
-rw-r--r--  lib/crypto/src/crypto.erl | 23
-rw-r--r--  lib/dialyzer/src/dialyzer_analysis_callgraph.erl | 113
-rw-r--r--  lib/dialyzer/src/dialyzer_behaviours.erl | 13
-rw-r--r--  lib/dialyzer/src/dialyzer_callgraph.erl | 94
-rw-r--r--  lib/dialyzer/src/dialyzer_cl.erl | 14
-rw-r--r--  lib/dialyzer/src/dialyzer_codeserver.erl | 204
-rw-r--r--  lib/dialyzer/src/dialyzer_contracts.erl | 52
-rw-r--r--  lib/dialyzer/src/dialyzer_dataflow.erl | 22
-rw-r--r--  lib/dialyzer/src/dialyzer_gui_wx.erl | 3
-rw-r--r--  lib/dialyzer/src/dialyzer_plt.erl | 130
-rw-r--r--  lib/dialyzer/src/dialyzer_succ_typings.erl | 33
-rw-r--r--  lib/dialyzer/src/dialyzer_typesig.erl | 101
-rw-r--r--  lib/dialyzer/src/dialyzer_utils.erl | 118
-rw-r--r--  lib/dialyzer/test/behaviour_SUITE_data/dialyzer_options | 2
-rw-r--r--  lib/dialyzer/test/map_SUITE_data/dialyzer_options | 1
-rw-r--r--  lib/dialyzer/test/opaque_SUITE_data/dialyzer_options | 2
-rw-r--r--  lib/dialyzer/test/small_SUITE_data/results/chars | 4
-rw-r--r--  lib/dialyzer/test/small_SUITE_data/src/anno.erl | 18
-rw-r--r--  lib/dialyzer/test/small_SUITE_data/src/chars.erl | 32
-rw-r--r--  lib/eldap/test/Makefile | 2
-rw-r--r--  lib/eldap/test/eldap.cover | 3
-rw-r--r--  lib/hipe/cerl/erl_types.erl | 206
-rw-r--r--  lib/hipe/test/basic_SUITE_data/basic_num_bif.erl | 217
-rw-r--r--  lib/hipe/test/hipe_SUITE.erl | 6
-rw-r--r--  lib/hipe/test/opt_verify_SUITE.erl | 39
-rw-r--r--  lib/inets/doc/src/httpc.xml | 2
-rw-r--r--  lib/inets/src/http_client/httpc.erl | 92
-rw-r--r--  lib/inets/src/http_client/httpc_handler.erl | 674
-rw-r--r--  lib/inets/src/http_client/httpc_response.erl | 2
-rw-r--r--  lib/inets/src/http_server/httpd_request_handler.erl | 4
-rw-r--r--  lib/inets/src/inets_app/inets.appup.src | 4
-rw-r--r--  lib/inets/test/httpc_SUITE.erl | 71
-rw-r--r--  lib/kernel/doc/src/heart.xml | 17
-rw-r--r--  lib/kernel/src/error_logger.erl | 8
-rw-r--r--  lib/kernel/src/file.erl | 2
-rw-r--r--  lib/kernel/src/heart.erl | 15
-rw-r--r--  lib/kernel/src/rpc.erl | 14
-rw-r--r--  lib/kernel/test/error_logger_SUITE.erl | 13
-rw-r--r--  lib/kernel/test/rpc_SUITE.erl | 12
-rw-r--r--  lib/observer/src/cdv_detail_wx.erl | 2
-rw-r--r--  lib/observer/src/crashdump_viewer.erl | 21
-rw-r--r--  lib/observer/src/etop.erl | 24
-rw-r--r--  lib/observer/src/etop_txt.erl | 24
-rw-r--r--  lib/observer/src/observer_app_wx.erl | 8
-rw-r--r--  lib/observer/src/observer_lib.erl | 26
-rw-r--r--  lib/observer/src/observer_port_wx.erl | 27
-rw-r--r--  lib/observer/src/observer_pro_wx.erl | 8
-rw-r--r--  lib/observer/src/observer_procinfo.erl | 2
-rw-r--r--  lib/observer/src/observer_tv_wx.erl | 9
-rw-r--r--  lib/observer/src/observer_wx.erl | 7
-rw-r--r--  lib/observer/test/observer_SUITE.erl | 17
-rw-r--r--  lib/os_mon/src/memsup.erl | 1
-rw-r--r--  lib/public_key/doc/src/public_key.xml | 33
-rw-r--r--  lib/public_key/doc/src/using_public_key.xml | 253
-rw-r--r--  lib/public_key/src/public_key.erl | 164
-rw-r--r--  lib/public_key/test/public_key_SUITE.erl | 111
-rw-r--r--  lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_cn.pem | 17
-rw-r--r--  lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_subjAltName.pem | 14
-rw-r--r--  lib/public_key/test/public_key_SUITE_data/verify_hostname.conf | 16
-rw-r--r--  lib/runtime_tools/doc/src/LTTng.xml | 2
-rw-r--r--  lib/runtime_tools/src/observer_backend.erl | 7
-rwxr-xr-x  lib/sasl/test/release_handler_SUITE_data/start | 3
-rwxr-xr-x  lib/sasl/test/release_handler_SUITE_data/start_client | 3
-rw-r--r--  lib/snmp/src/app/snmp.appup.src | 8
-rw-r--r--  lib/snmp/src/app/snmp.erl | 80
-rw-r--r--  lib/snmp/src/compile/snmpc_lib.erl | 4
-rw-r--r--  lib/snmp/src/compile/snmpc_mib_gram.yrl | 6
-rw-r--r--  lib/snmp/test/snmp_compiler_test.erl | 32
-rw-r--r--  lib/snmp/test/snmp_test_data/OTP14145-MIB.mib | 44
-rw-r--r--  lib/snmp/vsn.mk | 4
-rw-r--r--  lib/ssh/src/ssh.app.src | 1
-rw-r--r--  lib/ssh/src/ssh_auth.erl | 6
-rw-r--r--  lib/ssh/src/ssh_connection_handler.erl | 15
-rw-r--r--  lib/ssh/src/ssh_dbg.erl | 66
-rw-r--r--  lib/ssh/src/ssh_sftpd_file_api.erl | 2
-rw-r--r--  lib/ssh/src/ssh_transport.erl | 125
-rw-r--r--  lib/ssh/test/property_test/ssh_eqc_encode_decode.erl | 370
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE.erl | 8
-rw-r--r--  lib/ssh/test/ssh_benchmark_SUITE.erl | 2
-rw-r--r--  lib/ssh/test/ssh_options_SUITE.erl | 11
-rw-r--r--  lib/ssh/test/ssh_property_test_SUITE.erl | 3
-rw-r--r--  lib/ssh/test/ssh_sftp_SUITE.erl | 4
-rw-r--r--  lib/ssh/test/ssh_test_lib.erl | 47
-rw-r--r--  lib/ssh/test/ssh_upgrade_SUITE.erl | 4
-rw-r--r--  lib/ssl/doc/src/ssl.xml | 8
-rw-r--r--  lib/ssl/src/Makefile | 15
-rw-r--r--  lib/ssl/src/ssl.app.src | 13
-rw-r--r--  lib/ssl/src/ssl.erl | 10
-rw-r--r--  lib/ssl/src/ssl_admin_sup.erl | 95
-rw-r--r--  lib/ssl/src/ssl_certificate.erl | 16
-rw-r--r--  lib/ssl/src/ssl_config.erl | 52
-rw-r--r--  lib/ssl/src/ssl_connection.erl | 50
-rw-r--r--  lib/ssl/src/ssl_connection_sup.erl | 101
-rw-r--r--  lib/ssl/src/ssl_crl.erl | 76
-rw-r--r--  lib/ssl/src/ssl_dist_admin_sup.erl | 74
-rw-r--r--  lib/ssl/src/ssl_dist_connection_sup.erl | 79
-rw-r--r--  lib/ssl/src/ssl_dist_sup.erl | 42
-rw-r--r--  lib/ssl/src/ssl_handshake.erl | 58
-rw-r--r--  lib/ssl/src/ssl_handshake.hrl | 3
-rw-r--r--  lib/ssl/src/ssl_internal.hrl | 3
-rw-r--r--  lib/ssl/src/ssl_manager.erl | 209
-rw-r--r--  lib/ssl/src/ssl_pem_cache.erl | 266
-rw-r--r--  lib/ssl/src/ssl_pkix_db.erl | 72
-rw-r--r--  lib/ssl/src/ssl_sup.erl | 86
-rw-r--r--  lib/ssl/src/tls_connection.erl | 50
-rw-r--r--  lib/ssl/test/make_certs.erl | 4
-rw-r--r--  lib/ssl/test/ssl_basic_SUITE.erl | 30
-rw-r--r--  lib/ssl/test/ssl_pem_cache_SUITE.erl | 4
-rw-r--r--  lib/stdlib/src/erl_eval.erl | 1
-rw-r--r--  lib/stdlib/src/erl_parse.yrl | 11
-rw-r--r--  lib/stdlib/src/escript.erl | 83
-rw-r--r--  lib/stdlib/test/escript_SUITE.erl | 15
-rwxr-xr-x  lib/stdlib/test/escript_SUITE_data/two_lines | 2
-rw-r--r--  lib/stdlib/test/rand_SUITE.erl | 6
-rw-r--r--  lib/stdlib/test/shell_SUITE.erl | 2
-rw-r--r--  lib/typer/src/typer.erl | 22
-rw-r--r--  lib/xmerl/src/xmerl_scan.erl | 22
-rw-r--r--  lib/xmerl/test/xmerl_SUITE.erl | 18
-rwxr-xr-x  otp_build | 35
-rw-r--r--  otp_versions.table | 1
-rwxr-xr-x  scripts/run-smoke-tests | 10
-rw-r--r--  system/doc/design_principles/statem.xml | 4
-rw-r--r--  system/doc/efficiency_guide/advanced.xml | 12
-rw-r--r--  system/doc/reference_manual/typespec.xml | 89
-rw-r--r--  system/doc/tutorial/c_portdriver.xmlsrc | 4
178 files changed, 5005 insertions, 2475 deletions
diff --git a/.dir-locals.el b/.dir-locals.el
new file mode 100644
index 0000000000..17bf4b636c
--- /dev/null
+++ b/.dir-locals.el
@@ -0,0 +1,9 @@
+;; Project-wide Emacs settings
+(
+ ;; `nil' settings apply to all language modes
+ (nil
+ ;; Use only spaces for indentation
+ (indent-tabs-mode . nil))
+ (c-mode
+ ;; In C code, indentation is four spaces
+ (c-basic-offset . 4)))
diff --git a/.travis.yml b/.travis.yml
index 43bf0c7fb5..42151a16d2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -33,11 +33,9 @@ script:
after_success:
- $ERL_TOP/bin/dialyzer --build_plt --apps asn1 compiler crypto dialyzer edoc erts et hipe inets kernel mnesia observer public_key runtime_tools snmp ssh ssl stdlib syntax_tools wx xmerl --statistics
- - $ERL_TOP/bin/dialyzer -n -Wunknown -Wunmatched_returns --apps compiler erts kernel stdlib --statistics
- - $ERL_TOP/bin/dialyzer -n -Wunknown -Wunmatched_returns --apps asn1 crypto dialyzer --statistics
- - $ERL_TOP/bin/dialyzer -n -Wunknown -Wunmatched_returns --apps hipe parsetools public_key --statistics
- - $ERL_TOP/bin/dialyzer -n -Wunknown -Wunmatched_returns --apps runtime_tools sasl tools --statistics
+ - $ERL_TOP/bin/dialyzer -n -Wunknown -Wunmatched_returns --apps compiler erts kernel stdlib asn1 crypto dialyzer hipe parsetools public_key runtime_tools sasl tools --statistics
+ - $ERL_TOP/bin/dialyzer -n --apps common_test debugger edoc inets mnesia observer ssh ssl syntax_tools tools wx xmerl --statistics
- ./otp_build tests && make release_docs
after_script:
- - cd $ERL_TOP/release/tests/test_server && $ERL_TOP/bin/erl -s ts install -s ts smoke_test batch -s init stop
+ - ./scripts/run-smoke-tests
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..328b9f7859
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,98 @@
+# Contributing to Erlang/OTP
+
+## Reporting a bug
+
+Report bugs at https://bugs.erlang.org. See [Bug reports](https://github.com/erlang/otp/wiki/Bug-reports)
+for more information.
+
+## Submitting Pull Requests
+
+You can contribute to Erlang/OTP by opening a Pull Request.
+
+## Fixing a bug
+
+* In most cases, pull requests for bug fixes should be based on the `maint` branch.
+There are exceptions, for example corrections to bugs that have been introduced in the `master` branch.
+
+* Include a test case to ensure that the bug is fixed **and that it stays fixed**.
+
+* TIP: Write the test case **before** fixing the bug so that you know that it catches the bug.
+
+* For applications without a test suite in the git repository, it would be appreciated if you provide a
+small code sample in the commit message or email a module that will provoke the failure.
+
+## Adding a new feature
+
+* In most cases, pull requests for new features should be based on the `master` branch.
+
+* It is recommended to discuss new features on
+[the erlang-questions mailing list](http://erlang.org/mailman/listinfo/erlang-questions),
+especially for major new features or any new features in ERTS, Kernel, or STDLIB.
+
+* It is important to write a good commit message explaining **why** the feature is needed.
+We prefer that the information is in the commit message, so that anyone who wants to know
+two years later why a particular feature was added can easily find out. It does no harm to provide
+the same information in the pull request (if the pull request consists of a single commit,
+the commit message will be added to the pull request automatically).
+
+* With few exceptions, it is mandatory to write a new test case that tests the feature.
+The test case is needed to ensure that the feature does not stop working in the future.
+
+* Update the [Documentation](https://github.com/erlang/otp/wiki/Documentation) to describe the feature.
+
+* Make sure that the new feature builds and works on all major platforms. Exceptions are features
+that only make sense on some platforms, for example the `win32reg` module for accessing the Windows registry.
+
+* Make sure that your feature does not break backward compatibility. In general, we only break backward
+compatibility in major releases and only for a very good reason. Usually we first deprecate the
+feature one or two releases beforehand.
+
+* In general, language changes/extensions require an
+[EEP (Erlang Enhancement Proposal)](https://github.com/erlang/eep) to be written and approved before they
+can be included in OTP. Major changes or new features in ERTS, Kernel, or STDLIB will need an EEP or at least
+a discussion on the mailing list.
+
+## Before you submit your pull request
+
+* Make sure existing test cases don't fail. It is not necessary to run all tests (that would take many hours),
+but you should at least run the tests for the application you have changed.
+See [Running tests](https://github.com/erlang/otp/wiki/Running-tests).
+
+Make sure that your branch contains clean commits:
+
+* Don't make the first line in the commit message longer than 72 characters.
+**Don't end the first line with a period.**
+
+* Follow the guidelines for [Writing good commit messages](https://github.com/erlang/otp/wiki/Writing-good-commit-messages).
+
+* Don't merge `maint` or `master` into your branch. Use `git rebase` if you need to resolve merge
+conflicts or include the latest changes.
+
+* To make it possible to use the powerful `git bisect` command, make sure that each commit can be
+compiled and that it works.
+
+* Check for unnecessary whitespace before committing with `git diff --check`.
+
+Check your coding style:
+
+* Make sure your changes follow the coding and indentation style of the code surrounding your changes.
+
+* Do not commit commented-out code or files that are no longer needed. Remove the code or the files.
+
+* In most code (Erlang and C), indentation is 4 steps. Indentation using only spaces is **strongly recommended**.
+
+### Configuring Emacs
+
+If you use Emacs, use the Erlang mode, and add the following lines to `.emacs`:
+
+ (setq-default indent-tabs-mode nil)
+ (setq c-basic-offset 4)
+
+If you want to change the setting only for the Erlang mode, you can use a hook like this:
+
+```
+(add-hook 'erlang-mode-hook 'my-erlang-hook)
+
+(defun my-erlang-hook ()
+ (setq indent-tabs-mode nil))
+```
diff --git a/OTP_VERSION b/OTP_VERSION
index d16f3ea71e..8c4fa9e670 100644
--- a/OTP_VERSION
+++ b/OTP_VERSION
@@ -1 +1,2 @@
19.2.2
+
diff --git a/README.md b/README.md
index 127ac9cad7..8c77f0b39a 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,7 @@ In short:
* Go to the JIRA issue tracker at [bugs.erlang.org] [7] to
see reported issues which you can contribute to.
- Search for issues with the status *Contribution Needed*.
+ Search for issues with the status *Help Wanted*.
Bug Reports
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index 6c0544da31..5ea4c2ccf3 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -1704,6 +1704,25 @@ case "$THR_LIB_NAME" in
AC_DEFINE(ETHR_TIME_WITH_SYS_TIME, 1, \
[Define if you can safely include both <sys/time.h> and <time.h>.]))
+ AC_MSG_CHECKING([for usable PTHREAD_STACK_MIN])
+ pthread_stack_min=no
+ AC_TRY_COMPILE([
+#include <limits.h>
+#if defined(ETHR_NEED_NPTL_PTHREAD_H)
+#include <nptl/pthread.h>
+#elif defined(ETHR_HAVE_MIT_PTHREAD_H)
+#include <pthread/mit/pthread.h>
+#elif defined(ETHR_HAVE_PTHREAD_H)
+#include <pthread.h>
+#endif
+ ],
+ [return PTHREAD_STACK_MIN;],
+ [pthread_stack_min=yes])
+
+ AC_MSG_RESULT([$pthread_stack_min])
+ test $pthread_stack_min != yes || {
+ AC_DEFINE(ETHR_HAVE_USABLE_PTHREAD_STACK_MIN, 1, [Define if you can use PTHREAD_STACK_MIN])
+ }
dnl
dnl Check for functions
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index f2a55f6298..8da832ac37 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -1595,6 +1595,25 @@
</section>
<section>
+ <marker id="signals"></marker>
+ <title>Signals</title>
+ <p>On Unix systems, the Erlang runtime will interpret two types of signals.</p>
+ <taglist>
+ <tag><c>SIGUSR1</c></tag>
+ <item>
+ <p>A <c>SIGUSR1</c> signal forces a crash dump.</p>
+ </item>
+ <tag><c>SIGTERM</c></tag>
+ <item>
+ <p>A <c>SIGTERM</c> will produce a <c>stop</c> message to the <c>init</c> process.
+ This is equivalent to an <c>init:stop/0</c> call.</p>
+ <p>Introduced in ERTS 8.3 (Erlang/OTP 19.3)</p>
+ </item>
+ </taglist>
+ <p>The signal <c>SIGUSR2</c> is reserved for internal usage. No other signals are handled.</p>
+ </section>
+
+ <section>
<marker id="configuration"></marker>
<title>Configuration</title>
<p>The standard Erlang/OTP system can be reconfigured to change the default
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index 9646953518..112682d713 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -4899,7 +4899,9 @@ RealSystem = system + MissedSystem</code>
<p>Returns the current call stack back-trace (<em>stacktrace</em>)
of the process. The stack has the same format as returned by
<seealso marker="#get_stacktrace/0">
- <c>erlang:get_stacktrace/0</c></seealso>.</p>
+ <c>erlang:get_stacktrace/0</c></seealso>. The depth of the
+ stacktrace is truncated according to the <c>backtrace_depth</c>
+ system flag setting.</p>
</item>
<tag><c>{dictionary, <anno>Dictionary</anno>}</c></tag>
<item>
@@ -6611,7 +6613,9 @@ ok
<fsummary>Set system flag <c>backtrace_depth</c>.</fsummary>
<desc>
<p>Sets the maximum depth of call stack back-traces in the
- exit reason element of <c>'EXIT'</c> tuples.</p>
+ exit reason element of <c>'EXIT'</c> tuples. The flag
+ also limits the stacktrace depth returned by <c>process_info</c>
+ item <c>current_stacktrace</c>.</p>
<p>Returns the old value of the flag.</p>
</desc>
</func>
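
The documentation change above states that `backtrace_depth` now also caps the `process_info/2` item `current_stacktrace`. A small sketch of the expected behaviour after this change:

```
%% system_flag/2 returns the previous depth, which is restored afterwards.
Old = erlang:system_flag(backtrace_depth, 3),
{current_stacktrace, Frames} =
    erlang:process_info(self(), current_stacktrace),
true = length(Frames) =< 3,   %% truncated to at most backtrace_depth frames
erlang:system_flag(backtrace_depth, Old).
```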
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index d4bc819120..14fcdb7e0e 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -33,7 +33,6 @@
<section><title>Erts 8.2.1</title>
-
<section><title>Fixed Bugs and Malfunctions</title>
<list>
<item>
@@ -165,13 +164,6 @@
<p>
Own Id: OTP-14051</p>
</item>
- <item>
- <p>
- Fixed a number of bugs that caused faulty stack-traces to
- be created on exception when a process was traced.</p>
- <p>
- Own Id: OTP-14055</p>
- </item>
</list>
</section>
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index dbff005325..7f5e9ff691 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -658,7 +658,7 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1)
}
else if (modp->old.code_hdr) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Module %T must be purged before loading\n",
+ erts_dsprintf(dsbufp, "Module %T must be purged before deleting\n",
BIF_ARG_1);
erts_send_error_to_logger(BIF_P->group_leader, dsbufp);
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
@@ -997,6 +997,12 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
if (any_heap_refs(c_p->heap, c_p->htop, literals, lit_bsize))
goto literal_gc;
*redsp += 1;
+ if (c_p->abandoned_heap) {
+ if (any_heap_refs(c_p->abandoned_heap, c_p->abandoned_heap + c_p->heap_sz,
+ literals, lit_bsize))
+ goto literal_gc;
+ *redsp += 1;
+ }
if (any_heap_refs(c_p->old_heap, c_p->old_htop, literals, lit_bsize))
goto literal_gc;
@@ -1297,6 +1303,11 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls
#endif
if (any_heap_refs(rp->heap, rp->htop, literals, lit_bsize))
goto try_literal_gc;
+ if (rp->abandoned_heap) {
+ if (any_heap_refs(rp->abandoned_heap, rp->abandoned_heap + rp->heap_sz,
+ literals, lit_bsize))
+ goto try_literal_gc;
+ }
if (any_heap_refs(rp->old_heap, rp->old_htop, literals, lit_bsize))
goto try_literal_gc;
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index bbb2e4f34f..0df2df0eaa 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -74,6 +74,9 @@ extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
erts_smp_atomic32_t erts_active_bp_index;
erts_smp_atomic32_t erts_staging_bp_index;
+#ifdef ERTS_DIRTY_SCHEDULERS
+erts_smp_mtx_t erts_dirty_bp_ix_mtx;
+#endif
/*
* Inlined helpers
@@ -85,6 +88,31 @@ get_mtime(Process *c_p)
return erts_get_monotonic_time(erts_proc_sched_data(c_p));
}
+static ERTS_INLINE Uint32
+acquire_bp_sched_ix(Process *c_p)
+{
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
+ ASSERT(esdp);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx);
+ return (Uint32) erts_no_schedulers;
+ }
+#endif
+ return (Uint32) esdp->no - 1;
+}
+
+static ERTS_INLINE void
+release_bp_sched_ix(Uint32 ix)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ix == (Uint32) erts_no_schedulers)
+ erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx);
+#endif
+}
+
+
+
/* *************************************************************************
** Local prototypes
*/
@@ -135,6 +163,9 @@ void
erts_bp_init(void) {
erts_smp_atomic32_init_nob(&erts_active_bp_index, 0);
erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index");
+#endif
}
@@ -973,6 +1004,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(c_p);
ASSERT(c_p);
ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING
@@ -980,7 +1012,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
/* get previous timestamp and breakpoint
* from the process psd */
-
+
pbt = ERTS_PROC_GET_CALL_TIME(c_p);
time = get_mtime(c_p);
@@ -1006,7 +1038,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
/* if null then the breakpoint was removed */
if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(c_p)]);
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1027,7 +1059,7 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
/* this breakpoint */
ASSERT(bdt);
- h = &(bdt->hash[bp_sched2ix_proc(c_p)]);
+ h = &(bdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1041,6 +1073,8 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
pbt->pc = I;
pbt->time = time;
+
+ release_bp_sched_ix(six);
}
void
@@ -1051,6 +1085,7 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(p);
ASSERT(p);
ASSERT(erts_smp_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING
@@ -1071,6 +1106,7 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
*/
if (pbt) {
+
/* might have been removed due to
* trace_pattern(false)
*/
@@ -1085,7 +1121,8 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
/* beware, the trace_pattern might have been removed */
if (pbdt) {
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1096,11 +1133,15 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
} else {
BP_TIME_ADD(item, &sitem);
}
+
}
pbt->pc = pc;
pbt->time = time;
+
}
+
+ release_bp_sched_ix(six);
}
int
@@ -1353,6 +1394,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
bp_data_time_item_t sitem, *item = NULL;
bp_time_hash_t *h = NULL;
BpDataTime *pbdt = NULL;
+ Uint32 six = acquire_bp_sched_ix(p);
ASSERT(p);
@@ -1375,7 +1417,7 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
sitem.pid = p->common.id;
sitem.count = 0;
- h = &(pbdt->hash[bp_sched2ix_proc(p)]);
+ h = &(pbdt->hash[six]);
ASSERT(h);
ASSERT(h->item);
@@ -1401,6 +1443,8 @@ void erts_schedule_time_break(Process *p, Uint schedule) {
break;
}
} /* pbt */
+
+ release_bp_sched_ix(six);
}
/* *************************************************************************
@@ -1517,7 +1561,11 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
bdt = Alloc(sizeof(BpDataTime));
erts_refc_init(&bdt->refc, 1);
- bdt->n = erts_no_total_schedulers;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ bdt->n = erts_no_schedulers + 1;
+#else
+ bdt->n = erts_no_schedulers;
+#endif
bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
for (i = 0; i < bdt->n; i++) {
bp_hash_init(&(bdt->hash[i]), 32);
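
At the Erlang level, the `beam_bp.c` change above affects `call_time` tracing: dirty schedulers now get their own mutex-protected slot in the per-scheduler hash instead of reusing the normal schedulers' indexing. A hedged sketch of the tracing API this feeds (the traced function and the sleep are arbitrary choices for illustration):

```
%% Count time spent in lists:seq/2 by a traced worker process.
erlang:trace_pattern({lists, seq, 2}, true, [call_time]),
Worker = spawn(fun() -> receive go -> lists:seq(1, 1000) end end),
erlang:trace(Worker, true, [call]),
Worker ! go,
timer:sleep(100),
%% Returns {call_time, [{Pid, Count, Secs, MicroSecs}, ...]} with whatever
%% has been accumulated so far.
{call_time, _Times} = erlang:trace_info({lists, seq, 2}, call_time),
erlang:trace_pattern({lists, seq, 2}, false, [call_time]).
```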
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 7206ef471a..4743e4fc2f 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -79,10 +79,8 @@ typedef struct generic_bp {
#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
-#ifdef ERTS_SMP
-#define bp_sched2ix_proc(p) (erts_proc_sched_data(p)->thr_id - 1)
-#else
-#define bp_sched2ix_proc(p) (0)
+#ifdef ERTS_DIRTY_SCHEDULERS
+extern erts_smp_mtx_t erts_dirty_bp_ix_mtx;
#endif
enum erts_break_op{
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 59a9ea1417..f392feb06b 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1047,9 +1047,11 @@ static BeamInstr* handle_error(Process* c_p, BeamInstr* pc,
Eterm* reg, BifFunction bf) NOINLINE;
static BeamInstr* call_error_handler(Process* p, BeamInstr* ip,
Eterm* reg, Eterm func) NOINLINE;
-static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity) NOINLINE;
+static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint offs) NOINLINE;
static BeamInstr* apply(Process* p, Eterm module, Eterm function,
- Eterm args, Eterm* reg) NOINLINE;
+ Eterm args, Eterm* reg,
+ BeamInstr *I, Uint offs) NOINLINE;
static BeamInstr* call_fun(Process* p, int arity,
Eterm* reg, Eterm args) NOINLINE;
static BeamInstr* apply_fun(Process* p, Eterm fun,
@@ -3194,7 +3196,7 @@ do { \
OpCase(i_apply): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, NULL, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, I+1);
@@ -3208,7 +3210,7 @@ do { \
OpCase(i_apply_last_P): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, I, Arg(0));
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, (BeamInstr *) E[0]);
@@ -3223,7 +3225,7 @@ do { \
OpCase(i_apply_only): {
BeamInstr *next;
HEAVY_SWAPOUT;
- next = apply(c_p, r(0), x(1), x(2), reg);
+ next = apply(c_p, r(0), x(1), x(2), reg, I, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_I(next);
@@ -3237,7 +3239,7 @@ do { \
BeamInstr *next;
HEAVY_SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
+ next = fixed_apply(c_p, reg, Arg(0), NULL, 0);
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, I+2);
@@ -3252,7 +3254,7 @@ do { \
BeamInstr *next;
HEAVY_SWAPOUT;
- next = fixed_apply(c_p, reg, Arg(0));
+ next = fixed_apply(c_p, reg, Arg(0), I, Arg(1));
HEAVY_SWAPIN;
if (next != NULL) {
SET_CP(c_p, (BeamInstr *) E[0]);
@@ -3550,18 +3552,11 @@ do { \
BifFunction vbf;
ErlHeapFragment *live_hf_end;
- if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
- /* If we have run out of reductions, we do a context
- switch before calling the nif */
- goto context_switch;
- }
-
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
c_p->current = I-3; /* current and vbf set to please handle_error */
- SWAPOUT;
- c_p->fcalls = FCALLS - 1;
+ HEAVY_SWAPOUT;
PROCESS_MAIN_CHK_LOCKS(c_p);
bif_nif_arity = I[-1];
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
@@ -5844,12 +5839,13 @@ save_stacktrace(Process* c_p, BeamInstr* pc, Eterm* reg, BifFunction bf,
s->depth = 0;
/*
- * If the failure was in a BIF other than 'error', 'exit' or
- * 'throw', find the bif-table index and save the argument
- * registers by consing up an arglist.
+ * If the failure was in a BIF other than 'error/1', 'error/2',
+ * 'exit/1' or 'throw/1', find the bif-table index and save the
+ * argument registers by consing up an arglist.
*/
- if (bf != NULL && bf != error_1 && bf != error_2 &&
- bf != exit_1 && bf != throw_1) {
+ if (bf != NULL && bf != error_1 && bf != error_2 && bf != exit_1
+ && bf != throw_1 && bf != wrap_error_1 && bf != wrap_error_2
+ && bf != wrap_exit_1 && bf != wrap_throw_1) {
int i;
int a = 0;
for (i = 0; i < BIF_SIZE; i++) {
@@ -5945,12 +5941,30 @@ erts_save_stacktrace(Process* p, struct StackTrace* s, int depth)
p->cp) {
/* Cannot follow cp here - code may be unloaded */
BeamInstr *cpp = p->cp;
+ int trace_cp;
if (cpp == beam_exception_trace || cpp == beam_return_trace) {
/* Skip return_trace parameters */
ptr += 2;
+ trace_cp = 1;
} else if (cpp == beam_return_to_trace) {
/* Skip return_to_trace parameters */
ptr += 1;
+ trace_cp = 1;
+ }
+ else {
+ trace_cp = 0;
+ }
+ if (trace_cp && s->pc == cpp) {
+ /*
+ * If process 'cp' points to a return/exception trace
+ * instruction and 'cp' has been saved as 'pc' in
+ * stacktrace, we need to update 'pc' in stacktrace
+ * with the actual 'cp' located on the top of the
+ * stack; otherwise, we will lose the top stackframe
+ * when building the stack trace.
+ */
+ ASSERT(is_CP(p->stop[0]));
+ s->pc = cp_val(p->stop[0]);
}
}
while (ptr < STACK_START(p) && depth > 0) {
@@ -6070,12 +6084,15 @@ build_stacktrace(Process* c_p, Eterm exc) {
erts_set_current_function(&fi, s->current);
}
+ depth = s->depth;
/*
- * If fi.current is still NULL, default to the initial function
+ * If fi.current is still NULL, and we have no
+ * stack at all, default to the initial function
* (e.g. spawn_link(erlang, abs, [1])).
*/
if (fi.current == NULL) {
- erts_set_current_function(&fi, c_p->u.initial);
+ if (depth <= 0)
+ erts_set_current_function(&fi, c_p->u.initial);
args = am_true; /* Just in case */
} else {
args = get_args_from_exc(exc);
@@ -6085,10 +6102,9 @@ build_stacktrace(Process* c_p, Eterm exc) {
* Look up all saved continuation pointers and calculate
* needed heap space.
*/
- depth = s->depth;
stk = stkp = (FunctionInfo *) erts_alloc(ERTS_ALC_T_TMP,
depth*sizeof(FunctionInfo));
- heap_size = fi.needed + 2;
+ heap_size = fi.current ? fi.needed + 2 : 0;
for (i = 0; i < depth; i++) {
erts_lookup_function_info(stkp, s->trace[i], 1);
if (stkp->current) {
@@ -6107,8 +6123,10 @@ build_stacktrace(Process* c_p, Eterm exc) {
res = CONS(hp, mfa, res);
hp += 2;
}
- hp = erts_build_mfa_item(&fi, hp, args, &mfa);
- res = CONS(hp, mfa, res);
+ if (fi.current) {
+ hp = erts_build_mfa_item(&fi, hp, args, &mfa);
+ res = CONS(hp, mfa, res);
+ }
erts_free(ERTS_ALC_T_TMP, (void *) stk);
return res;
@@ -6205,8 +6223,107 @@ apply_setup_error_handler(Process* p, Eterm module, Eterm function, Uint arity,
return ep;
}
+static ERTS_INLINE void
+apply_bif_error_adjustment(Process *p, Export *ep,
+ Eterm *reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
+{
+ /*
+ * I is only set when the apply is a tail call, i.e.,
+ * from the instructions i_apply_only, i_apply_last_P,
+ * and apply_last_IP.
+ */
+ if (I
+ && ep->code[3] == (BeamInstr) em_apply_bif
+ && (ep == bif_export[BIF_error_1]
+ || ep == bif_export[BIF_error_2]
+ || ep == bif_export[BIF_exit_1]
+ || ep == bif_export[BIF_throw_1])) {
+ /*
+ * We are about to tail apply one of the BIFs
+ * erlang:error/1, erlang:error/2, erlang:exit/1,
+ * or erlang:throw/1. Error handling of these BIFs is
+ * special!
+ *
+ * We need 'p->cp' to point into the calling
+ * function when handling the error after the BIF has
+ * been applied. This in order to get the topmost
+ * stackframe correct. Without the following adjustment,
+ * 'p->cp' will point into the function that called
+ * current function when handling the error. We add a
+ * dummy stackframe in order to achieve this.
+ *
+ * Note that these BIFs unconditionally will cause
+ * an exception to be raised. That is, our modifications
+ * of 'p->cp' as well as the stack will be corrected by
+ * the error handling code.
+ *
+ * If we find an exception/return-to trace continuation
+ * pointer as the topmost continuation pointer, we do not
+ * need to do anything since the information already will
+ * be available for generation of the stacktrace.
+ */
+ int apply_only = stack_offset == 0;
+ BeamInstr *cpp;
+
+ if (apply_only) {
+ ASSERT(p->cp != NULL);
+ cpp = p->cp;
+ }
+ else {
+ ASSERT(is_CP(p->stop[0]));
+ cpp = cp_val(p->stop[0]);
+ }
+
+ if (cpp != beam_exception_trace
+ && cpp != beam_return_trace
+ && cpp != beam_return_to_trace) {
+ Uint need = stack_offset /* bytes */ / sizeof(Eterm);
+ if (need == 0)
+ need = 1; /* i_apply_only */
+ if (p->stop - p->htop < need)
+ erts_garbage_collect(p, (int) need, reg, arity+1);
+ p->stop -= need;
+
+ if (apply_only) {
+ /*
+ * Called from the i_apply_only instruction.
+ *
+ * 'p->cp' contains continuation pointer pointing
+ * into the function that called current function.
+ * We push that continuation pointer onto the stack,
+ * and set 'p->cp' to point into current function.
+ */
+
+ p->stop[0] = make_cp(p->cp);
+ p->cp = I;
+ }
+ else {
+ /*
+ * Called from an i_apply_last_p, or apply_last_IP,
+ * instruction.
+ *
+ * Calling instruction will after we return read
+ * a continuation pointer from the stack and write
+ * it to 'p->cp', and then remove the topmost
+ * stackframe of size 'stack_offset'.
+ *
+ * We have sized the dummy-stackframe so that it
+ * will be removed by the instruction we currently
+ * are executing, and leave the stackframe that
+ * normally would have been removed intact.
+ *
+ */
+ p->stop[0] = make_cp(I);
+ }
+ }
+ }
+}
+
static BeamInstr*
-apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
+apply(
+Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg,
+BeamInstr *I, Uint stack_offset)
{
int arity;
Export* ep;
@@ -6231,21 +6348,54 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
return 0;
}
- /* The module argument may be either an atom or an abstract module
- * (currently implemented using tuples, but this might change).
- */
- this = THE_NON_VALUE;
- if (is_not_atom(module)) {
- Eterm* tp;
+ while (1) {
+ Eterm m, f, a;
+ /* The module argument may be either an atom or an abstract module
+ * (currently implemented using tuples, but this might change).
+ */
+ this = THE_NON_VALUE;
+ if (is_not_atom(module)) {
+ Eterm* tp;
+
+ if (is_not_tuple(module)) goto error;
+ tp = tuple_val(module);
+ if (arityval(tp[0]) < 1) goto error;
+ this = module;
+ module = tp[1];
+ if (is_not_atom(module)) goto error;
+ }
- if (is_not_tuple(module)) goto error;
- tp = tuple_val(module);
- if (arityval(tp[0]) < 1) goto error;
- this = module;
- module = tp[1];
- if (is_not_atom(module)) goto error;
+ if (module != am_erlang || function != am_apply)
+ break;
+
+ /* Adjust for multiple apply of apply/3... */
+
+ a = args;
+ if (is_list(a)) {
+ Eterm *consp = list_val(a);
+ m = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ f = CAR(consp);
+ a = CDR(consp);
+ if (is_list(a)) {
+ consp = list_val(a);
+ a = CAR(consp);
+ if (is_nil(CDR(consp))) {
+ /* erlang:apply/3 */
+ module = m;
+ function = f;
+ args = a;
+ if (is_not_atom(f))
+ goto error;
+ continue;
+ }
+ }
+ }
+ }
+ break; /* != erlang:apply/3 */
}
-
/*
* Walk down the 3rd parameter of apply (the argument list) and copy
* the parameters to the x registers (reg[]). If the module argument
@@ -6283,12 +6433,14 @@ apply(Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
static BeamInstr*
-fixed_apply(Process* p, Eterm* reg, Uint arity)
+fixed_apply(Process* p, Eterm* reg, Uint arity,
+ BeamInstr *I, Uint stack_offset)
{
Export* ep;
Eterm module;
@@ -6318,6 +6470,10 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
if (is_not_atom(module)) goto error;
++arity;
}
+
+ /* Handle apply of apply/3... */
+ if (module == am_erlang && function == am_apply && arity == 3)
+ return apply(p, reg[0], reg[1], reg[2], reg, I, stack_offset);
/*
* Get the index into the export table, or failing that the export
@@ -6332,6 +6488,7 @@ fixed_apply(Process* p, Eterm* reg, Uint arity)
} else if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
save_calls(p, ep);
}
+ apply_bif_error_adjustment(p, ep, reg, arity, I, stack_offset);
DTRACE_GLOBAL_CALL_FROM_EXPORT(p, ep);
return ep->addressv[erts_active_code_ix()];
}
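
Two Erlang-level behaviours are targeted by the `beam_emu.c` changes above: nested `apply/3` calls are now unfolded in a loop, and a tail-applied `erlang:error/1` (or `exit/1`/`throw/1`) gets a dummy stack frame so the calling function stays visible in the stacktrace. A rough sketch of both, using arbitrary example terms:

```
%% apply of apply/3: erlang:apply(erlang, apply, [M, F, A]) is unfolded
%% and behaves like a direct apply(M, F, A).
[3, 2, 1] = apply(erlang, apply, [lists, reverse, [[1, 2, 3]]]),

%% A fun whose last call tail-applies erlang:error/1; with the adjustment
%% above, the frame for the fun itself is kept in the resulting stacktrace.
Raise = fun(Reason) -> apply(erlang, error, [Reason]) end,
try
    Raise(boom)
catch
    error:boom ->
        ok
end.
```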
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index dfbe1ced47..61907cc7ff 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -717,6 +717,8 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
* We have to be very very careful when doing this as the schedulers
* could be anywhere.
*/
+ sys_init_suspend_handler();
+
for (i = 0; i < erts_no_schedulers; i++) {
erts_tid_t tid = ERTS_SCHEDULER_IX(i)->tid;
if (!erts_equal_tids(tid,erts_thr_self()))
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 735aabbee3..88a052cad7 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1672,11 +1672,11 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
Eterm mfa;
Eterm res = NIL;
- depth = 8;
+ depth = erts_backtrace_depth;
sz = offsetof(struct StackTrace, trace) + sizeof(BeamInstr *)*depth;
s = (struct StackTrace *) erts_alloc(ERTS_ALC_T_TMP, sz);
s->depth = 0;
- if (rp->i) {
+ if (depth > 0 && rp->i) {
s->trace[s->depth++] = rp->i;
depth--;
}
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 96275eb228..0627526d7e 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -1062,9 +1062,16 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
erts_smp_thr_progress_block();
}
#endif
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx);
+#endif
+
r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx);
+#endif
#ifdef ERTS_SMP
if ( (key == am_call_time) || (key == am_all)) {
erts_smp_thr_progress_unblock();
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index af799d09da..edbdbb4b8f 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -59,6 +59,10 @@
# define ERTS_GC_ASSERT(B) ((void) 1)
#endif
+#if defined(DEBUG) && 0
+# define HARDDEBUG 1
+#endif
+
/*
* Returns number of elements in an array.
*/
@@ -110,7 +114,6 @@ typedef struct {
static Uint setup_rootset(Process*, Eterm*, int, Rootset*);
static void cleanup_rootset(Rootset *rootset);
-static void remove_message_buffers(Process* p);
static Eterm *full_sweep_heaps(Process *p,
int hibernate,
Eterm *n_heap, Eterm* n_htop,
@@ -153,7 +156,7 @@ static void init_gc_info(ErtsGCInfo *gcip);
static Uint64 next_vheap_size(Process* p, Uint64 vheap, Uint64 vheap_sz);
#ifdef HARDDEBUG
-static void disallow_heap_frag_ref_in_heap(Process* p);
+static void disallow_heap_frag_ref_in_heap(Process *p, Eterm *heap, Eterm *htop);
static void disallow_heap_frag_ref_in_old_heap(Process* p);
#endif
@@ -176,6 +179,13 @@ typedef struct {
erts_smp_atomic32_t refc;
} ErtsGCInfoReq;
+#ifdef ERTS_DIRTY_SCHEDULERS
+static struct {
+ erts_mtx_t mtx;
+ ErtsGCInfo info;
+} dirty_gc;
+#endif
+
static ERTS_INLINE int
gc_cost(Uint gc_moved_live_words, Uint resize_moved_words)
{
@@ -259,6 +269,11 @@ erts_init_gc(void)
init_gc_info(&esdp->gc_info);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info");
+ init_gc_info(&dirty_gc.info);
+#endif
+
init_gcireq_alloc();
}
@@ -477,24 +492,26 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int
hsz = ssz + need + ERTS_DELAY_GC_EXTRA_FREE;
hfrag = new_message_buffer(hsz);
- hfrag->next = p->mbuf;
- p->mbuf = hfrag;
- p->mbuf_sz += hsz;
p->heap = p->htop = &hfrag->mem[0];
p->hend = hend = &hfrag->mem[hsz];
p->stop = stop = hend - ssz;
sys_memcpy((void *) stop, (void *) orig_stop, ssz * sizeof(Eterm));
if (p->abandoned_heap) {
- /* Active heap already in a fragment; adjust it... */
- ErlHeapFragment *hfrag = ((ErlHeapFragment *)
- (((char *) orig_heap)
- - offsetof(ErlHeapFragment, mem)));
- Uint unused = orig_hend - orig_htop;
- ASSERT(hfrag->used_size == hfrag->alloc_size);
- ASSERT(hfrag->used_size >= unused);
- hfrag->used_size -= unused;
- p->mbuf_sz -= unused;
+ /*
+ * Active heap already in a fragment; adjust it and
+ * save it into mbuf list...
+ */
+ ErlHeapFragment *hfrag = ((ErlHeapFragment *)
+ (((char *) orig_heap)
+ - offsetof(ErlHeapFragment, mem)));
+ Uint used = orig_htop - orig_heap;
+ hfrag->used_size = used;
+ p->mbuf_sz += used;
+ ASSERT(hfrag->used_size <= hfrag->alloc_size);
+ ASSERT(!hfrag->off_heap.first && !hfrag->off_heap.overhead);
+ hfrag->next = p->mbuf;
+ p->mbuf = hfrag;
}
else {
/* Do not leave a hole in the abandoned heap... */
@@ -556,17 +573,14 @@ young_gen_usage(Process *p)
}
}
+ hsz += p->htop - p->heap;
aheap = p->abandoned_heap;
- if (!aheap)
- hsz += p->htop - p->heap;
- else {
+ if (aheap) {
/* used in orig heap */
if (p->flags & F_ABANDONED_HEAP_USE)
hsz += aheap[p->heap_sz-1];
else
hsz += p->heap_sz;
- /* Remove unused part in latest fragment */
- hsz -= p->hend - p->htop;
}
return hsz;
}
@@ -602,7 +616,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
Uint reclaimed_now = 0;
Eterm gc_trace_end_tag;
int reds;
- ErtsMonotonicTime start_time = 0; /* Shut up faulty warning... */
+ ErtsMonotonicTime start_time;
ErtsSchedulerData *esdp;
erts_aint32_t state;
ERTS_MSACC_PUSH_STATE_M();
@@ -610,6 +624,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
#endif
+ ERTS_UNDEF(start_time, 0);
ERTS_CHK_MBUF_SZ(p);
ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls)
@@ -735,8 +750,19 @@ do_major_collection:
monitor_large_heap(p);
}
- esdp->gc_info.garbage_cols++;
- esdp->gc_info.reclaimed += reclaimed_now;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ dirty_gc.info.garbage_cols++;
+ dirty_gc.info.reclaimed += reclaimed_now;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+ else
+#endif
+ {
+ esdp->gc_info.garbage_cols++;
+ esdp->gc_info.reclaimed += reclaimed_now;
+ }
FLAGS(p) &= ~F_FORCE_GC;
p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
@@ -765,6 +791,7 @@ do_major_collection:
ASSERT(!p->mbuf);
ASSERT(!ERTS_IS_GC_DESIRED(p));
+ ASSERT(need <= HEAP_LIMIT(p) - HEAP_TOP(p));
return reds;
}
@@ -789,6 +816,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
>= erts_proc_sched_data(p)->virtual_reds);
}
+
/*
* Place all living data on a the new heap; deallocate any old heap.
* Meant to be used by hibernate/3.
@@ -835,11 +863,11 @@ erts_garbage_collect_hibernate(Process* p)
p->arg_reg,
p->arity);
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : p->heap),
- p->heap_sz * sizeof(Eterm));
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, heap, htop);
+#endif
+
+ erts_deallocate_young_generation(p);
p->heap = heap;
p->high_water = htop;
@@ -874,8 +902,6 @@ erts_garbage_collect_hibernate(Process* p)
sys_memcpy((void *) heap, (void *) p->heap, actual_size*sizeof(Eterm));
ERTS_HEAP_FREE(ERTS_ALC_T_TMP_HEAP, p->heap, p->heap_sz*sizeof(Eterm));
- remove_message_buffers(p);
-
p->stop = p->hend = heap + heap_size;
offs = heap - p->heap;
@@ -1494,22 +1520,16 @@ do_minor(Process *p, ErlHeapFragment *live_hf_end,
}
#endif
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : HEAP_START(p)),
- HEAP_SIZE(p) * sizeof(Eterm));
- p->abandoned_heap = NULL;
- p->flags &= ~F_ABANDONED_HEAP_USE;
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, n_heap, n_htop);
+#endif
+
+ erts_deallocate_young_generation(p);
+
HEAP_START(p) = n_heap;
HEAP_TOP(p) = n_htop;
HEAP_SIZE(p) = new_sz;
HEAP_END(p) = n_heap + new_sz;
-
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
- remove_message_buffers(p);
}
/*
@@ -1598,13 +1618,12 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
}
#endif
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
- (p->abandoned_heap
- ? p->abandoned_heap
- : HEAP_START(p)),
- p->heap_sz * sizeof(Eterm));
- p->abandoned_heap = NULL;
- p->flags &= ~F_ABANDONED_HEAP_USE;
+#ifdef HARDDEBUG
+ disallow_heap_frag_ref_in_heap(p, n_heap, n_htop);
+#endif
+
+ erts_deallocate_young_generation(p);
+
HEAP_START(p) = n_heap;
HEAP_TOP(p) = n_htop;
HEAP_SIZE(p) = new_sz;
@@ -1613,11 +1632,6 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
HIGH_WATER(p) = HEAP_TOP(p);
-#ifdef HARDDEBUG
- disallow_heap_frag_ref_in_heap(p);
-#endif
- remove_message_buffers(p);
-
if (p->flags & F_ON_HEAP_MSGQ)
move_msgq_to_heap(p);
@@ -1770,22 +1784,56 @@ adjust_after_fullsweep(Process *p, int need, Eterm *objv, int nobj)
return adjusted;
}
-/*
- * Remove all message buffers.
- */
-static void
-remove_message_buffers(Process* p)
+void
+erts_deallocate_young_generation(Process *c_p)
{
- if (MBUF(p) != NULL) {
- free_message_buffer(MBUF(p));
- MBUF(p) = NULL;
+ Eterm *orig_heap;
+
+ if (!c_p->abandoned_heap) {
+ orig_heap = c_p->heap;
+ ASSERT(!(c_p->flags & F_ABANDONED_HEAP_USE));
+ }
+ else {
+ ErlHeapFragment *hfrag;
+
+ orig_heap = c_p->abandoned_heap;
+ c_p->abandoned_heap = NULL;
+ c_p->flags &= ~F_ABANDONED_HEAP_USE;
+
+ /*
+ * Temporary heap located in heap fragment
+ * only referred to by 'c_p->heap'. Add it to
+ * 'c_p->mbuf' list and deallocate it as any
+ * other heap fragment...
+ */
+ hfrag = ((ErlHeapFragment *)
+ (((char *) c_p->heap)
+ - offsetof(ErlHeapFragment, mem)));
+
+ ASSERT(!hfrag->off_heap.first);
+ ASSERT(!hfrag->off_heap.overhead);
+ ASSERT(!hfrag->next);
+ ASSERT(c_p->htop - c_p->heap <= hfrag->alloc_size);
+
+ hfrag->next = c_p->mbuf;
+ c_p->mbuf = hfrag;
+ }
+
+ if (c_p->mbuf) {
+ free_message_buffer(c_p->mbuf);
+ c_p->mbuf = NULL;
}
- if (p->msg_frag) {
- erts_cleanup_messages(p->msg_frag);
- p->msg_frag = NULL;
+ if (c_p->msg_frag) {
+ erts_cleanup_messages(c_p->msg_frag);
+ c_p->msg_frag = NULL;
}
- MBUF_SIZE(p) = 0;
+ c_p->mbuf_sz = 0;
+
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
+ orig_heap,
+ c_p->heap_sz * sizeof(Eterm));
}
+
#ifdef HARDDEBUG
/*
@@ -1797,19 +1845,15 @@ remove_message_buffers(Process* p)
*/
static void
-disallow_heap_frag_ref_in_heap(Process* p)
+disallow_heap_frag_ref_in_heap(Process *p, Eterm *heap, Eterm *htop)
{
Eterm* hp;
- Eterm* htop;
- Eterm* heap;
Uint heap_size;
if (p->mbuf == 0) {
return;
}
- htop = p->htop;
- heap = p->heap;
heap_size = (htop - heap)*sizeof(Eterm);
hp = heap;
@@ -3017,6 +3061,18 @@ reply_gc_info(void *vgcirp)
reclaimed = esdp->gc_info.reclaimed;
garbage_cols = esdp->gc_info.garbage_cols;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ /*
+ * Add dirty schedulers info on requesting
+ * schedulers info
+ */
+ if (gcirp->req_sched == esdp->no) {
+ erts_mtx_lock(&dirty_gc.mtx);
+ reclaimed += dirty_gc.info.reclaimed;
+ garbage_cols += dirty_gc.info.garbage_cols;
+ erts_mtx_unlock(&dirty_gc.mtx);
+ }
+#endif
sz = 0;
hpp = NULL;
@@ -3297,13 +3353,15 @@ within2(Eterm *ptr, Process *p, Eterm *real_htop)
ErtsMessage* mp;
Eterm *htop, *heap;
- if (p->abandoned_heap)
+ if (p->abandoned_heap) {
ERTS_GET_ORIG_HEAP(p, heap, htop);
- else {
- heap = p->heap;
- htop = real_htop ? real_htop : HEAP_TOP(p);
+ if (heap <= ptr && ptr < htop)
+ return 1;
}
+ heap = p->heap;
+ htop = real_htop ? real_htop : HEAP_TOP(p);
+
if (OLD_HEAP(p) && (OLD_HEAP(p) <= ptr && ptr < OLD_HEND(p))) {
return 1;
}
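
The `dirty_gc` counters introduced above surface through the ordinary statistics interface: when the requesting scheduler answers its own GC-info request, the dirty schedulers' totals are folded in. A short sketch of where that becomes visible:

```
%% statistics(garbage_collection) aggregates the per-scheduler counters,
%% which with this change also include collections done on dirty schedulers.
{NumGCs, WordsReclaimed, 0} = erlang:statistics(garbage_collection),
io:format("~p collections, ~p words reclaimed~n",
          [NumGCs, WordsReclaimed]).
```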
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 54ea9ca3c0..2521379664 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -27,10 +27,6 @@
#include "erl_map.h"
-#if defined(DEBUG) && !ERTS_GLB_INLINE_INCL_FUNC_DEF
-# define HARDDEBUG 1
-#endif
-
#define IS_MOVED_BOXED(x) (!is_header((x)))
#define IS_MOVED_CONS(x) (is_non_value((x)))
@@ -157,5 +153,6 @@ void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
void erts_free_heap_frags(struct process* p);
Eterm erts_max_heap_size_map(Sint, Uint, Eterm **, Uint *);
int erts_max_heap_size(Eterm, Uint *, Uint *);
+void erts_deallocate_young_generation(Process *c_p);
#endif /* __ERL_GC_H__ */
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index d29d079fc5..647fa26811 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -2666,7 +2666,7 @@ erts_start_timer_callback(ErtsMonotonicTime tmo,
tmo);
twt = tmo < ERTS_TIMER_WHEEL_MSEC;
- if (esdp)
+ if (esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp))
start_callback_timer(esdp,
twt,
timeout_pos,
@@ -2865,7 +2865,8 @@ btm_print(ErtsHLTimer *tmr, void *vbtmp)
if (tmr->timeout <= btmp->now)
left = 0;
- left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);
+ else
+ left = ERTS_CLKTCKS_TO_MSEC(tmr->timeout - btmp->now);
receiver = ((tmr->head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
? tmr->receiver.name
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 2fd97208cc..f65a06c85f 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -117,6 +117,11 @@ const int etp_big_endian = 1;
const int etp_big_endian = 0;
#endif
const Eterm etp_the_non_value = THE_NON_VALUE;
+#ifdef ERTS_HOLE_MARKER
+const Eterm etp_hole_marker = ERTS_HOLE_MARKER;
+#else
+const Eterm etp_hole_marker = 0;
+#endif
/*
* Note about VxWorks: All variables must be initialized by executable code,
@@ -2220,7 +2225,6 @@ erl_start(int argc, char **argv)
init_break_handler();
if (replace_intr)
erts_replace_intr();
- sys_init_suspend_handler();
#endif
boot_argc = argc - i; /* Number of arguments to init */
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 06266363b5..5d4823c077 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -129,6 +129,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "run_queue", "address" },
#ifdef ERTS_DIRTY_SCHEDULERS
{ "dirty_run_queue_sleep_list", "address" },
+ { "dirty_gc_info", NULL },
+ { "dirty_break_point_index", NULL },
#endif
{ "process_table", NULL },
{ "cpu_info", NULL },
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index e4c696ae3b..f45e6974cd 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -193,7 +193,7 @@ free_message_buffer(ErlHeapFragment* bp)
erts_cleanup_offheap(&bp->off_heap);
ERTS_HEAP_FREE(ERTS_ALC_T_HEAP_FRAG, (void *) bp,
- ERTS_HEAP_FRAG_SIZE(bp->size));
+ ERTS_HEAP_FRAG_SIZE(bp->alloc_size));
bp = next_bp;
}while (bp != NULL);
}
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 6b265a8b80..19ce0f6965 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -3561,8 +3561,8 @@ Eterm erts_nif_call_function(Process *p, Process *tracee,
#endif
if (p) {
/* This is almost a normal nif call like in beam_emu,
- except that any heap fragment created in the nif will be
- discarded without checking if anything in it is live.
+ except that any heap consumed by the nif will be
+ released without checking if anything in it is live.
This is because we cannot do a GC here as we don't know
the number of live registers that have to be preserved.
This means that any heap part of the returned term may
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index cfa2b62061..485949c84a 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -2060,6 +2060,7 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
#endif
for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
+
if (!erts_thr_progress_has_reached_this(current, lop->later))
return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
awdp->later_op.first = lop->next;
@@ -6234,6 +6235,12 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
int queue;
erts_aint32_t dact, max_qbit;
+ /* Do not enqueue free process... */
+ if (actual & ERTS_PSFLG_FREE) {
+ *newp &= ~ERTS_PSFLGS_DIRTY_WORK;
+ return ERTS_ENQUEUE_NOT;
+ }
+
/* Termination should be done on an ordinary scheduler */
if ((*newp) & ERTS_PSFLG_EXITING) {
*newp &= ~ERTS_PSFLGS_DIRTY_WORK;
@@ -6426,6 +6433,9 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
/*
* schedule_out_process() return with c_rq locked.
+ *
+ * Return non-zero value if caller should decrease
+ * reference count on the process when done with it...
*/
static ERTS_INLINE int
schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
@@ -6480,12 +6490,18 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 0;
-
+#if !defined(ERTS_SMP)
+ /* Decrement refc if process struct is free... */
+ return !!(n & ERTS_PSFLG_FREE);
+#else
+ /* Decrement refc if scheduled out from dirty scheduler... */
+ return !is_normal_sched;
+#endif
}
else {
Process* sched_p;
+ ASSERT(!(n & ERTS_PSFLG_FREE));
ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
@@ -6501,11 +6517,14 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(runq);
+ if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */
+
/* Enqueue the process */
enqueue_process(runq, (int) enq_prio, sched_p);
if (runq == c_rq)
- return 1;
+ return 0;
erts_smp_runq_unlock(runq);
@@ -6513,9 +6532,15 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 1;
+ /*
+ * Decrement refc if process is scheduled out by a
+ * dirty scheduler, and we have not just scheduled
+ * the process using the ordinary process struct
+ * on a dirty run-queue again...
+ */
+ return !is_normal_sched && (sched_p != p
+ || !ERTS_RUNQ_IX_IS_DIRTY(runq->ix));
}
-
}
static ERTS_INLINE void
@@ -6530,8 +6555,17 @@ add2runq(int enqueue, erts_aint32_t prio,
if (runq) {
Process *sched_p;
- if (enqueue > 0)
+ if (enqueue > 0) {
sched_p = proc;
+ /*
+ * Refc on process struct (i.e. true struct,
+ * not proxy-struct) increased while in a
+ * dirty run-queue or executing on a dirty
+ * scheduler.
+ */
+ if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(proc);
+ }
else {
Process *pxy;
@@ -7055,12 +7089,18 @@ typedef struct {
} ErtsSchdlrSspndResume;
static void
-schdlr_sspnd_resume_proc(Eterm pid)
+schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid)
{
- Process *p = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_STATUS);
+ Process *p;
+ p = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_STATUS,
+ (sched_type != ERTS_SCHED_NORMAL
+ ? ERTS_P2P_FLG_INC_REFC
+ : 0));
if (p) {
resume_process(p, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ erts_proc_dec_refc(p);
}
}
@@ -7069,17 +7109,20 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
ErtsSchdlrSspndResume *resume)
{
if (is_internal_pid(resume->onln.chngr)) {
- schdlr_sspnd_resume_proc(resume->onln.chngr);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.chngr);
resume->onln.chngr = NIL;
}
if (is_internal_pid(resume->onln.nxt)) {
- schdlr_sspnd_resume_proc(resume->onln.nxt);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.nxt);
resume->onln.nxt = NIL;
}
while (resume->msb.chngrs) {
ErtsProcList *plp = resume->msb.chngrs;
resume->msb.chngrs = plp->next;
- schdlr_sspnd_resume_proc(plp->pid);
+ schdlr_sspnd_resume_proc(sched_type,
+ plp->pid);
proclist_destroy(plp);
}
}
@@ -7131,16 +7174,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
ASSERT(sched_type != ERTS_SCHED_NORMAL || no != 1);
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- }
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
+ if (sched_type != ERTS_SCHED_NORMAL)
erts_smp_runq_unlock(esdp->run_queue);
- }
else {
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -7153,8 +7188,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
sched_wall_time_change(esdp, 0);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
@@ -7198,15 +7233,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
(void) erts_proclist_fetch(&msb[i]->chngq, &end_plp);
/* resume processes that initiated the multi scheduling block... */
plp = msb[i]->chngq;
- while (plp) {
- erts_proclist_store_last(&msb[i]->blckrs,
- proclist_copy(plp));
- plp = plp->next;
- }
- if (end_plp)
+ if (plp) {
+ ASSERT(end_plp);
+ ASSERT(msb[i]->ongoing);
+ do {
+ erts_proclist_store_last(&msb[i]->blckrs,
+ proclist_copy(plp));
+ plp = plp->next;
+ } while (plp);
end_plp->next = resume.msb.chngrs;
- resume.msb.chngrs = msb[i]->chngq;
- msb[i]->chngq = NULL;
+ resume.msb.chngrs = msb[i]->chngq;
+ msb[i]->chngq = NULL;
+ }
}
}
}
@@ -7270,24 +7308,14 @@ suspend_scheduler(ErtsSchedulerData *esdp)
while (1) {
ErtsMonotonicTime current_time;
- erts_aint32_t qmask;
erts_aint32_t flgs;
- qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
- & ERTS_RUNQ_FLGS_QMASK);
-
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (qmask) {
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
+ if (sched_type != ERTS_SCHED_NORMAL)
aux_work = 0;
- }
else {
+ erts_aint32_t qmask;
+ qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
+ & ERTS_RUNQ_FLGS_QMASK);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
@@ -7415,12 +7443,23 @@ suspend_scheduler(ErtsSchedulerData *esdp)
schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type);
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
- && schdlr_sspnd.online == schdlr_sspnd.active) {
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
- }
-
+ if (changing) {
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && !schdlr_sspnd.msb.ongoing
+ && schdlr_sspnd.online == schdlr_sspnd.active) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB)
+ && !schdlr_sspnd.nmsb.ongoing
+ && (schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL)
+ == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL))) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_NMSB);
+ }
+ }
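
The block above clears each "change in progress" bit only when its own precondition holds (no ongoing block and matching online/active counts), using an atomic AND so other bits and concurrent updates are untouched. A tiny sketch of that flag-clearing idiom, with illustrative flag names:

    #include <assert.h>
    #include <stdatomic.h>

    #define CHNG_MSB  (1u << 0)   /* illustrative flag names */
    #define CHNG_NMSB (1u << 1)

    static atomic_uint changing;

    /* Clear one "change in progress" bit only once its own condition holds;
     * the atomic AND leaves the other bits and concurrent updates intact. */
    static void maybe_clear(unsigned flag, int condition_holds) {
        if ((atomic_load(&changing) & flag) && condition_holds)
            atomic_fetch_and(&changing, ~flag);
    }

    int main(void) {
        atomic_store(&changing, CHNG_MSB | CHNG_NMSB);
        maybe_clear(CHNG_MSB, 1);     /* condition met: bit is cleared  */
        maybe_clear(CHNG_NMSB, 0);    /* condition not met: bit remains */
        assert(atomic_load(&changing) == CHNG_NMSB);
        return 0;
    }
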
ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type));
ASSERT((sched_type == ERTS_SCHED_NORMAL
? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
@@ -7553,7 +7592,7 @@ abort_sched_onln_chng_waitq(Process *p)
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
if (is_internal_pid(resume))
- schdlr_sspnd_resume_proc(resume);
+ schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume);
}
ErtsSchedSuspendResult
@@ -7951,21 +7990,20 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
else { /* ------ UNBLOCK ------ */
if (p->flags & have_blckd_flg) {
- ErtsProcList *plps[2];
+ ErtsProcList **plpps[3] = {0};
ErtsProcList *plp;
- int limit = 0;
- plps[limit++] = erts_proclist_peek_first(msbp->blckrs);
- if (all)
- plps[limit++] = erts_proclist_peek_first(msbp->chngq);
+ plpps[0] = &msbp->blckrs;
+ if (all)
+ plpps[1] = &msbp->chngq;
- for (ix = 0; ix < limit; ix++) {
- plp = plps[ix];
+ for (ix = 0; plpps[ix]; ix++) {
+ plp = erts_proclist_peek_first(*plpps[ix]);
while (plp) {
ErtsProcList *tmp_plp = plp;
- plp = erts_proclist_peek_next(msbp->blckrs, plp);
+ plp = erts_proclist_peek_next(*plpps[ix], plp);
if (erts_proclist_same(tmp_plp, p)) {
- erts_proclist_remove(&msbp->blckrs, tmp_plp);
+ erts_proclist_remove(plpps[ix], tmp_plp);
proclist_destroy(tmp_plp);
if (!all)
break;
@@ -8604,8 +8642,15 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
* from being selected for normal execution regardless
* of locks held or not held on it...
*/
- ASSERT(!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS)
- & erts_smp_atomic32_read_nob(&rp->state)));
+#ifdef DEBUG
+ {
+ erts_aint32_t state;
+ state = erts_smp_atomic32_read_nob(&rp->state);
+ ASSERT((state & ERTS_PSFLG_PENDING_EXIT)
+ || !(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+ }
+#endif
if (!suspend)
resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
@@ -9587,41 +9632,46 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
esdp->reductions += reds;
- /* schedule_out_process() returns with rq locked! */
- schedule_out_process(rq, state, p, proxy_p, is_normal_sched);
- proxy_p = NULL;
+ {
+ int dec_refc;
- ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
- (int) ERTS_PSFLGS_GET_USR_PRIO(state),
- reds,
- actual_reds);
+ /* schedule_out_process() returns with rq locked! */
+ dec_refc = schedule_out_process(rq, state, p,
+ proxy_p, is_normal_sched);
+ proxy_p = NULL;
- esdp->current_process = NULL;
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+ reds,
+ actual_reds);
+
+ esdp->current_process = NULL;
#ifdef ERTS_SMP
- if (is_normal_sched)
- p->scheduler_data = NULL;
+ if (is_normal_sched)
+ p->scheduler_data = NULL;
#endif
- erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_STATUS
- | ERTS_PROC_LOCK_TRACE));
+ erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_STATUS
+ | ERTS_PROC_LOCK_TRACE));
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
- if (state & ERTS_PSFLG_FREE) {
- if (!is_normal_sched) {
- ASSERT(p->flags & F_DELAYED_DEL_PROC);
- erts_proc_dec_refc(p);
- }
- else {
#ifdef ERTS_SMP
- ASSERT(esdp->free_process == p);
- esdp->free_process = NULL;
-#else
- erts_proc_dec_refc(p);
+ if (state & ERTS_PSFLG_FREE) {
+ if (!is_normal_sched) {
+ ASSERT(p->flags & F_DELAYED_DEL_PROC);
+ }
+ else {
+ ASSERT(esdp->free_process == p);
+ esdp->free_process = NULL;
+ }
+ }
#endif
- }
- }
+
+ if (dec_refc)
+ erts_proc_dec_refc(p);
+ }
#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
@@ -9897,8 +9947,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (!(state & ERTS_PSFLG_PROXY))
psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
else {
+ Eterm pid = p->common.id;
proxy_p = p;
- p = erts_proc_lookup_raw(proxy_p->common.id);
+ p = (is_normal_sched
+ ? erts_proc_lookup_raw(pid)
+ : erts_pid2proc_opt(NULL, 0, pid, 0,
+ ERTS_P2P_FLG_INC_REFC));
if (!p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
@@ -9930,6 +9984,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_FREE)))
#ifdef ERTS_DIRTY_SCHEDULERS
| (((state & (ERTS_PSFLG_RUNNING
+
| ERTS_PSFLG_FREE
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_EXITING))
@@ -9962,6 +10017,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
/* free and not queued by proxy */
erts_proc_dec_refc(p);
}
+ if (!is_normal_sched)
+ erts_proc_dec_refc(p);
goto pick_next_process;
}
state = new;
@@ -11914,7 +11971,6 @@ erts_cleanup_empty_process(Process* p)
static void
delete_process(Process* p)
{
- Eterm *heap;
ErtsPSD *psd;
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
@@ -11969,13 +12025,8 @@ delete_process(Process* p)
hipe_delete_process(&p->hipe);
#endif
- heap = p->abandoned_heap ? p->abandoned_heap : p->heap;
+ erts_deallocate_young_generation(p);
-#ifdef DEBUG
- sys_memset(heap, DEBUG_BAD_BYTE, p->heap_sz*sizeof(Eterm));
-#endif
-
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, (void*) heap, p->heap_sz*sizeof(Eterm));
if (p->old_heap != NULL) {
#ifdef DEBUG
@@ -11987,16 +12038,6 @@ delete_process(Process* p)
(p->old_hend-p->old_heap)*sizeof(Eterm));
}
- /*
- * Free all pending message buffers.
- */
- if (p->mbuf != NULL) {
- free_message_buffer(p->mbuf);
- }
-
- if (p->msg_frag)
- erts_cleanup_messages(p->msg_frag);
-
erts_erase_dicts(p);
/* free all pending messages */
@@ -13093,15 +13134,14 @@ erts_continue_exit_process(Process *p)
}
#ifdef ERTS_DIRTY_SCHEDULERS
- if (a & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ if (a & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
p->flags |= F_DELAYED_DEL_PROC;
delay_del_proc = 1;
/*
- * The dirty scheduler will also decrease
- * refc when done...
+ * The dirty scheduler decreases refc
+ * when done with the process...
*/
- erts_proc_inc_refc(p);
}
#endif
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 9f7084c127..68fbb10602 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1684,7 +1684,7 @@ ERTS_GLB_INLINE ErtsProcList *erts_proclist_fetch_first(ErtsProcList **list)
return NULL;
else {
ErtsProcList *res = *list;
- if (res == *list)
+ if (res->next == *list)
*list = NULL;
else
*list = res->next;
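
The one-line fix above matters because ErtsProcList is circular: the old test `res == *list` is always true for the element just fetched, so the whole list was dropped after one fetch. The list must become empty only when that element's next pointer wraps back to the head. A minimal sketch of the corrected fetch on a toy circular doubly linked list (node, push_last, fetch_first are illustrative names):

    #include <assert.h>
    #include <stddef.h>

    struct node { struct node *next; struct node *prev; int val; };

    /* Insert at the tail of a circular, doubly linked list. */
    static void push_last(struct node **list, struct node *n) {
        if (!*list) {
            n->next = n->prev = n;
            *list = n;
        } else {
            struct node *first = *list, *last = first->prev;
            n->next = first; n->prev = last;
            last->next = n;  first->prev = n;
        }
    }

    /* Fetch the first element. The list becomes empty only when the fetched
     * element was the only one, i.e. when its next pointer wraps to the head. */
    static struct node *fetch_first(struct node **list) {
        struct node *res = *list;
        if (!res)
            return NULL;
        if (res->next == *list)          /* single element: res->next == res */
            *list = NULL;
        else {
            *list = res->next;
            res->prev->next = res->next;
            res->next->prev = res->prev;
        }
        res->next = res->prev = NULL;
        return res;
    }

    int main(void) {
        struct node a = {0}, b = {0};
        struct node *list = NULL;
        push_last(&list, &a);
        push_last(&list, &b);
        assert(fetch_first(&list) == &a && list == &b);
        assert(fetch_first(&list) == &b && list == NULL);
        return 0;
    }
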
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index d8bb00e8c6..a19db74763 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -90,9 +90,12 @@ Uint erts_process_memory(Process *p, int incl_msg_inq) {
erts_doforall_links(ERTS_P_LINKS(p), &erts_one_link_size, &size);
erts_doforall_monitors(ERTS_P_MONITORS(p), &erts_one_mon_size, &size);
size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
+ if (p->abandoned_heap)
+ size += (p->hend - p->heap) * sizeof(Eterm);
if (p->old_hend && p->old_heap)
size += (p->old_hend - p->old_heap) * sizeof(Eterm);
+
size += p->msg.len * sizeof(ErtsMessage);
for (mp = p->msg.first; mp; mp = mp->next)
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 8c84303997..ac9e91e31f 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -1436,6 +1436,7 @@ void
trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
{
ErtsTracerNif *tnif = NULL;
+ Eterm* o_hp = NULL;
Eterm* hp;
Uint sz = 0;
Eterm tup;
@@ -1446,7 +1447,7 @@ trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
if (is_non_value(msg)) {
(void) erts_process_gc_info(p, &sz, NULL, 0, 0);
- hp = HAlloc(p, sz + 3 + 2);
+ o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, (sz + 3 + 2) * sizeof(Eterm));
msg = erts_process_gc_info(p, NULL, &hp, 0, 0);
tup = TUPLE2(hp, am_wordsize, make_small(size)); hp += 3;
@@ -1455,6 +1456,8 @@ trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_GC,
what, msg, THE_NON_VALUE, am_true);
+ if (o_hp)
+ erts_free(ERTS_ALC_T_TMP, o_hp);
}
}
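
The trace message is now built in a temporary buffer (o_hp, allocated as ERTS_ALC_T_TMP) and freed after it has been handed to the tracer, instead of being allocated on the traced process's heap. A small sketch of that build-in-scratch-then-free pattern, with an illustrative word layout that is not the real Eterm encoding:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef uint64_t word;

    /* Build a 2-tuple-like record into a scratch buffer and return a pointer
     * to it; *hpp is advanced past the words consumed (mirrors hp usage). */
    static word *build_pair(word **hpp, word a, word b) {
        word *p = *hpp;
        p[0] = 2;              /* arity word (illustrative layout only) */
        p[1] = a;
        p[2] = b;
        *hpp += 3;
        return p;
    }

    static void send_to_tracer(const word *msg) { (void)msg; /* deliver... */ }

    int main(void) {
        size_t need = 3 + 3;                        /* room for two pairs  */
        word *o_hp = malloc(need * sizeof(word));   /* scratch, not the heap */
        word *hp = o_hp;

        word *pair1 = build_pair(&hp, 1, 2);
        word *pair2 = build_pair(&hp, 3, 4);
        assert((size_t)(hp - o_hp) == need);

        send_to_tracer(pair1);
        send_to_tracer(pair2);
        free(o_hp);                                 /* freed after delivery */
        return 0;
    }
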
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index f97716d030..899e2a275a 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -110,8 +110,21 @@
#define HeapWordsLeft(p) (HEAP_LIMIT(p) - HEAP_TOP(p))
#if defined(DEBUG) || defined(CHECK_FOR_HOLES)
-# define ERTS_HOLE_MARKER (((0xdeadbeef << 24) << 8) | 0xdeadbeef)
-#endif /* egil: 32-bit ? */
+
+/*
+ * ERTS_HOLE_MARKER must *not* be mistaken for a valid term
+ * on the heap...
+ */
+# ifdef ARCH_64
+# define ERTS_HOLE_MARKER \
+ make_catch(UWORD_CONSTANT(0xdeadbeaf00000000) >> _TAG_IMMED2_SIZE)
+/* Will (at the time of writing) appear as 0xdeadbeaf0000001b */
+# else
+# define ERTS_HOLE_MARKER \
+ make_catch(UWORD_CONSTANT(0xdead0000) >> _TAG_IMMED2_SIZE)
+/* Will (at the time of writing) appear as 0xdead001b */
+# endif
+#endif
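
The widened marker only helps if debug code actually writes it into unused heap words and later checks for it. A minimal sketch of that idea, reusing the 64-bit value mentioned in the comment above as an illustrative constant and with fill_holes/check_holes helpers that are not the ERTS ones:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Illustrative marker: a value that must never look like live data. */
    #define HOLE_MARKER UINT64_C(0xdeadbeaf0000001b)

    /* Fill the unused gap between the allocation top and the limit. */
    static void fill_holes(uint64_t *top, uint64_t *limit) {
        while (top < limit)
            *top++ = HOLE_MARKER;
    }

    /* Debug check: every word above the allocation top must still carry
     * the marker, i.e. nothing was written into a supposed hole. */
    static int check_holes(const uint64_t *top, const uint64_t *limit) {
        while (top < limit)
            if (*top++ != HOLE_MARKER)
                return 0;
        return 1;
    }

    int main(void) {
        uint64_t heap[16];
        uint64_t *htop = heap + 4;              /* first 4 words are "live" */
        memset(heap, 0, sizeof heap);
        fill_holes(htop, heap + 16);
        assert(check_holes(htop, heap + 16));
        return 0;
    }
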
/*
* Allocate heap memory on the ordinary heap, NEVER in a heap
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index beed847578..587c29e3b5 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1222,6 +1222,7 @@ typedef struct B2TContext_t {
} u;
} B2TContext;
+static B2TContext* b2t_export_context(Process*, B2TContext* src);
static uLongf binary2term_uncomp_size(byte* data, Sint size)
{
@@ -1254,7 +1255,7 @@ static uLongf binary2term_uncomp_size(byte* data, Sint size)
static ERTS_INLINE int
binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
- B2TContext* ctx)
+ B2TContext** ctxp, Process* p)
{
byte *bytes = data;
Sint size = data_size;
@@ -1268,8 +1269,8 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
size--;
if (size < 5 || *bytes != COMPRESSED) {
state->extp = bytes;
- if (ctx)
- ctx->state = B2TSizeInit;
+ if (ctxp)
+ (*ctxp)->state = B2TSizeInit;
}
else {
uLongf dest_len = (Uint32) get_int32(bytes+1);
@@ -1286,16 +1287,26 @@ binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size,
return -1;
}
state->extp = erts_alloc(ERTS_ALC_T_EXT_TERM_DATA, dest_len);
- ctx->reds -= dest_len;
+ if (ctxp)
+ (*ctxp)->reds -= dest_len;
}
state->exttmp = 1;
- if (ctx) {
+ if (ctxp) {
+ /*
+ * Start decompression by exporting trap context
+ * so we don't have to deal with deep-copying z_stream.
+ */
+ B2TContext* ctx = b2t_export_context(p, *ctxp);
+ ASSERT(state == &(*ctxp)->b2ts);
+ state = &ctx->b2ts;
+
if (erl_zlib_inflate_start(&ctx->u.uc.stream, bytes, size) != Z_OK)
return -1;
ctx->u.uc.dbytes = state->extp;
ctx->u.uc.dleft = dest_len;
ctx->state = B2TUncompressChunk;
+ *ctxp = ctx;
}
else {
uLongf dlen = dest_len;
@@ -1339,7 +1350,7 @@ erts_binary2term_prepare(ErtsBinary2TermState *state, byte *data, Sint data_size
{
Sint res;
- if (binary2term_prepare(state, data, data_size, NULL) < 0 ||
+ if (binary2term_prepare(state, data, data_size, NULL, NULL) < 0 ||
(res=decoded_size(state->extp, state->extp + state->extsize, 0, NULL)) < 0) {
if (state->exttmp)
@@ -1485,7 +1496,7 @@ static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binar
if (ctx->aligned_alloc) {
ctx->reds -= bin_size / 8;
}
- if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, ctx) < 0) {
+ if (binary2term_prepare(&ctx->b2ts, bytes, bin_size, &ctx, p) < 0) {
ctx->state = B2TBadArg;
}
break;
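
The changed signature lets the prepare step switch from the caller's stack-allocated trap context to a heap ("exported") copy before any inflate state is created, because an initialized z_stream cannot be safely shallow-copied later. A minimal sketch of that ordering, using an illustrative ctx struct and export_context helper rather than the real B2TContext machinery:

    #include <stdlib.h>
    #include <string.h>

    struct ctx {
        int exported;          /* lives on the heap, survives across yields */
        int stateful_started;  /* stands in for an initialized z_stream     */
    };

    /* Copy a stack context to the heap once, before any state is created
     * that cannot be moved by a plain memcpy later. */
    static struct ctx *export_context(struct ctx *stack_ctx) {
        if (stack_ctx->exported)
            return stack_ctx;
        struct ctx *heap_ctx = malloc(sizeof *heap_ctx);
        memcpy(heap_ctx, stack_ctx, sizeof *heap_ctx);
        heap_ctx->exported = 1;
        return heap_ctx;
    }

    static int start_decompression(struct ctx **ctxp) {
        struct ctx *c = export_context(*ctxp);  /* export FIRST ...           */
        c->stateful_started = 1;                /* ... then create the state  */
        *ctxp = c;                              /* caller now uses heap copy  */
        return 0;
    }

    int main(void) {
        struct ctx stack_ctx = {0, 0};
        struct ctx *c = &stack_ctx;
        start_decompression(&c);
        int ok = (c != &stack_ctx) && c->exported && c->stateful_started;
        if (c != &stack_ctx)
            free(c);
        return ok ? 0 : 1;
    }
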
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 7740dd4373..41a7ff4c16 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -859,9 +859,12 @@ int erts_sys_unsetenv(char *key);
char *erts_read_env(char *key);
void erts_free_read_env(void *value);
+#if defined(ERTS_SMP)
#if defined(ERTS_THR_HAVE_SIG_FUNCS) && !defined(ETHR_UNUSABLE_SIGUSRX)
extern void sys_thr_resume(erts_tid_t tid);
extern void sys_thr_suspend(erts_tid_t tid);
+#define ERTS_SYS_SUSPEND_SIGNAL SIGUSR2
+#endif
#endif
/* utils.c */
diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c
index 9c03b3811c..801cef2e56 100644
--- a/erts/emulator/hipe/hipe_native_bif.c
+++ b/erts/emulator/hipe/hipe_native_bif.c
@@ -226,11 +226,6 @@ void hipe_handle_exception(Process *c_p)
ASSERT(c_p->freason != TRAP); /* Should have been handled earlier. */
- if (c_p->mbuf) {
- erts_printf("%s line %u: p==%p, p->mbuf==%p\n", __FUNCTION__, __LINE__, c_p, c_p->mbuf);
- /* erts_garbage_collect(c_p, 0, NULL, 0); */
- }
-
/*
* Check if we have an arglist for the top level call. If so, this
* is encoded in Value, so we have to dig out the real Value as well
@@ -259,11 +254,6 @@ void hipe_handle_exception(Process *c_p)
/* Synthesized to avoid having to generate code for it. */
c_p->def_arg_reg[0] = exception_tag[GET_EXC_CLASS(c_p->freason)];
- if (c_p->mbuf) {
- /* erts_printf("%s line %u: p==%p, p->mbuf==%p, p->lastbif==%p\n", __FUNCTION__, __LINE__, c_p, c_p->mbuf, c_p->hipe.lastbif); */
- erts_garbage_collect(c_p, 0, NULL, 0);
- }
-
hipe_find_handler(c_p);
}
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 4b2edace0a..e135dbff99 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -119,9 +119,8 @@ erts_smp_atomic_t sys_misc_mem_sz;
static void smp_sig_notify(char c);
static int sig_notify_fds[2] = {-1, -1};
-#if !defined(ETHR_UNUSABLE_SIGUSRX) && defined(ERTS_THR_HAVE_SIG_FUNCS)
+#ifdef ERTS_SYS_SUSPEND_SIGNAL
static int sig_suspend_fds[2] = {-1, -1};
-#define ERTS_SYS_SUSPEND_SIGNAL SIGUSR2
#endif
#endif
@@ -407,6 +406,7 @@ erts_sys_pre_init(void)
#ifdef ERTS_THR_HAVE_SIG_FUNCS
sigemptyset(&thr_create_sigmask);
sigaddset(&thr_create_sigmask, SIGINT); /* block interrupt */
+ sigaddset(&thr_create_sigmask, SIGTERM); /* block terminate signal */
sigaddset(&thr_create_sigmask, SIGUSR1); /* block user defined signal */
#endif
@@ -655,6 +655,40 @@ static RETSIGTYPE request_break(int signum)
#endif
}
+static void stop_requested(void) {
+ Process* p = NULL;
+ Eterm msg, *hp;
+ ErtsProcLocks locks = 0;
+ ErlOffHeap *ohp;
+ Eterm id = erts_whereis_name_to_id(NULL, am_init);
+
+ if ((p = (erts_pid2proc_opt(NULL, 0, id, 0, ERTS_P2P_FLG_INC_REFC))) != NULL) {
+ ErtsMessage *msgp = erts_alloc_message_heap(p, &locks, 3, &hp, &ohp);
+
+ /* init ! {stop,stop} */
+ msg = TUPLE2(hp, am_stop, am_stop);
+ erts_queue_message(p, locks, msgp, msg, am_system);
+
+ if (locks)
+ erts_smp_proc_unlock(p, locks);
+ erts_proc_dec_refc(p);
+ }
+}
+
+#if (defined(SIG_SIGSET) || defined(SIG_SIGNAL))
+static RETSIGTYPE request_stop(void)
+#else
+static RETSIGTYPE request_stop(int signum)
+#endif
+{
+#ifdef ERTS_SMP
+ smp_sig_notify('S');
+#else
+ stop_requested();
+#endif
+}
+
+
static ERTS_INLINE void
sigusr1_exit(void)
{
@@ -751,6 +785,7 @@ static RETSIGTYPE do_quit(int signum)
/* Disable break */
void erts_set_ignore_break(void) {
sys_signal(SIGINT, SIG_IGN);
+ sys_signal(SIGTERM, SIG_IGN);
sys_signal(SIGQUIT, SIG_IGN);
sys_signal(SIGTSTP, SIG_IGN);
}
@@ -776,6 +811,7 @@ void erts_replace_intr(void) {
void init_break_handler(void)
{
sys_signal(SIGINT, request_break);
+ sys_signal(SIGTERM, request_stop);
#ifndef ETHR_UNUSABLE_SIGUSRX
sys_signal(SIGUSR1, user_signal1);
#endif /* #ifndef ETHR_UNUSABLE_SIGUSRX */
@@ -1289,6 +1325,9 @@ signal_dispatcher_thread_func(void *unused)
switch (buf[i]) {
case 0: /* Emulator initialized */
break;
+ case 'S': /* SIGTERM */
+ stop_requested();
+ break;
case 'I': /* SIGINT */
break_requested();
break;
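
SIGTERM is routed the same way as the break signal: the async-signal handler only posts a one-byte note ('S'), and the signal dispatcher thread later does the real work of sending {stop,stop} to init. A minimal sketch of that self-pipe pattern, with illustrative names (notify_fds, on_sigterm) rather than the ERTS internals:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static int notify_fds[2];             /* [0] read end, [1] write end */

    /* Async-signal-safe handler: do nothing but write a single byte. */
    static void on_sigterm(int signum) {
        char c = 'S';
        (void)signum;
        (void)write(notify_fds[1], &c, 1);
    }

    int main(void) {
        if (pipe(notify_fds) != 0)
            return 1;
        signal(SIGTERM, on_sigterm);

        raise(SIGTERM);                   /* simulate the external signal */

        /* Dispatcher side: read notifications and act outside signal context. */
        char c;
        if (read(notify_fds[0], &c, 1) == 1 && c == 'S')
            puts("stop requested: would send {stop,stop} to init here");
        return 0;
    }
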
diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl
index b8d89126fe..f70fb0e501 100644
--- a/erts/emulator/test/bif_SUITE.erl
+++ b/erts/emulator/test/bif_SUITE.erl
@@ -32,7 +32,8 @@
binary_to_atom/1,binary_to_existing_atom/1,
atom_to_binary/1,min_max/1, erlang_halt/1,
erl_crash_dump_bytes/1,
- is_builtin/1]).
+ is_builtin/1, error_stacktrace/1,
+ error_stacktrace_during_call_trace/1]).
suite() ->
[{ct_hooks,[ts_install_cth]},
@@ -44,8 +45,8 @@ all() ->
t_list_to_existing_atom, os_env, otp_7526,
display,
atom_to_binary, binary_to_atom, binary_to_existing_atom,
- erl_crash_dump_bytes,
- min_max, erlang_halt, is_builtin].
+ erl_crash_dump_bytes, min_max, erlang_halt, is_builtin,
+ error_stacktrace, error_stacktrace_during_call_trace].
%% Uses erlang:display to test that erts_printf does not do deep recursion
display(Config) when is_list(Config) ->
@@ -727,6 +728,172 @@ is_builtin(_Config) ->
ok.
+error_stacktrace(Config) when is_list(Config) ->
+ error_stacktrace_test().
+
+error_stacktrace_during_call_trace(Config) when is_list(Config) ->
+ Tracer = spawn_link(fun () ->
+ receive after infinity -> ok end
+ end),
+ Mprog = [{'_',[],[{exception_trace}]}],
+ erlang:trace_pattern({?MODULE,'_','_'}, Mprog, [local]),
+ 1 = erlang:trace_pattern({erlang,error,2}, Mprog, [local]),
+ 1 = erlang:trace_pattern({erlang,error,1}, Mprog, [local]),
+ erlang:trace(all, true, [call,return_to,timestamp,{tracer, Tracer}]),
+ try
+ error_stacktrace_test()
+ after
+ erlang:trace(all, false, [call,return_to,timestamp,{tracer, Tracer}]),
+ erlang:trace_pattern({erlang,error,2}, false, [local]),
+ erlang:trace_pattern({erlang,error,1}, false, [local]),
+ erlang:trace_pattern({?MODULE,'_','_'}, false, [local]),
+ unlink(Tracer),
+ exit(Tracer, kill),
+ Mon = erlang:monitor(process, Tracer),
+ receive
+ {'DOWN', Mon, process, Tracer, _} -> ok
+ end
+ end,
+ ok.
+
+
+error_stacktrace_test() ->
+ Types = [apply_const_last, apply_const, apply_last,
+ apply, double_apply_const_last, double_apply_const,
+ double_apply_last, double_apply, multi_apply_const_last,
+ multi_apply_const, multi_apply_last, multi_apply,
+ call_const_last, call_last, call_const, call],
+ lists:foreach(fun (Type) ->
+ {Pid, Mon} = spawn_monitor(
+ fun () ->
+ stk([a,b,c,d], Type, error_2)
+ end),
+ receive
+ {'DOWN', Mon, process, Pid, Reason} ->
+ {oops, Stack} = Reason,
+%% io:format("Type: ~p Stack: ~p~n",
+%% [Type, Stack]),
+ [{?MODULE, do_error_2, [Type], _},
+ {?MODULE, stk, 3, _},
+ {?MODULE, stk, 3, _}] = Stack
+ end
+ end,
+ Types),
+ lists:foreach(fun (Type) ->
+ {Pid, Mon} = spawn_monitor(
+ fun () ->
+ stk([a,b,c,d], Type, error_1)
+ end),
+ receive
+ {'DOWN', Mon, process, Pid, Reason} ->
+ {oops, Stack} = Reason,
+%% io:format("Type: ~p Stack: ~p~n",
+%% [Type, Stack]),
+ [{?MODULE, do_error_1, 1, _},
+ {?MODULE, stk, 3, _},
+ {?MODULE, stk, 3, _}] = Stack
+ end
+ end,
+ Types),
+ ok.
+
+stk([], Type, Func) ->
+ tail(Type, Func, jump),
+ ok;
+stk([_|L], Type, Func) ->
+ stk(L, Type, Func),
+ ok.
+
+tail(Type, Func, jump) ->
+ tail(Type, Func, do);
+tail(Type, error_1, do) ->
+ do_error_1(Type);
+tail(Type, error_2, do) ->
+ do_error_2(Type).
+
+do_error_2(apply_const_last) ->
+ erlang:apply(erlang, error, [oops, [apply_const_last]]);
+do_error_2(apply_const) ->
+ erlang:apply(erlang, error, [oops, [apply_const]]),
+ ok;
+do_error_2(apply_last) ->
+ erlang:apply(id(erlang), id(error), id([oops, [apply_last]]));
+do_error_2(apply) ->
+ erlang:apply(id(erlang), id(error), id([oops, [apply]])),
+ ok;
+do_error_2(double_apply_const_last) ->
+ erlang:apply(erlang, apply, [erlang, error, [oops, [double_apply_const_last]]]);
+do_error_2(double_apply_const) ->
+ erlang:apply(erlang, apply, [erlang, error, [oops, [double_apply_const]]]),
+ ok;
+do_error_2(double_apply_last) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(error), id([oops, [double_apply_last]])]);
+do_error_2(double_apply) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(error), id([oops, [double_apply]])]),
+ ok;
+do_error_2(multi_apply_const_last) ->
+ erlang:apply(erlang, apply, [erlang, apply, [erlang, apply, [erlang, error, [oops, [multi_apply_const_last]]]]]);
+do_error_2(multi_apply_const) ->
+ erlang:apply(erlang, apply, [erlang, apply, [erlang, apply, [erlang, error, [oops, [multi_apply_const]]]]]),
+ ok;
+do_error_2(multi_apply_last) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(error), id([oops, [multi_apply_last]])]]]);
+do_error_2(multi_apply) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(error), id([oops, [multi_apply]])]]]),
+ ok;
+do_error_2(call_const_last) ->
+ erlang:error(oops, [call_const_last]);
+do_error_2(call_last) ->
+ erlang:error(id(oops), id([call_last]));
+do_error_2(call_const) ->
+ erlang:error(oops, [call_const]),
+ ok;
+do_error_2(call) ->
+ erlang:error(id(oops), id([call])).
+
+
+do_error_1(apply_const_last) ->
+ erlang:apply(erlang, error, [oops]);
+do_error_1(apply_const) ->
+ erlang:apply(erlang, error, [oops]),
+ ok;
+do_error_1(apply_last) ->
+ erlang:apply(id(erlang), id(error), id([oops]));
+do_error_1(apply) ->
+ erlang:apply(id(erlang), id(error), id([oops])),
+ ok;
+do_error_1(double_apply_const_last) ->
+ erlang:apply(erlang, apply, [erlang, error, [oops]]);
+do_error_1(double_apply_const) ->
+ erlang:apply(erlang, apply, [erlang, error, [oops]]),
+ ok;
+do_error_1(double_apply_last) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(error), id([oops])]);
+do_error_1(double_apply) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(error), id([oops])]),
+ ok;
+do_error_1(multi_apply_const_last) ->
+ erlang:apply(erlang, apply, [erlang, apply, [erlang, apply, [erlang, error, [oops]]]]);
+do_error_1(multi_apply_const) ->
+ erlang:apply(erlang, apply, [erlang, apply, [erlang, apply, [erlang, error, [oops]]]]),
+ ok;
+do_error_1(multi_apply_last) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(error), id([oops])]]]);
+do_error_1(multi_apply) ->
+ erlang:apply(id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(apply), [id(erlang), id(error), id([oops])]]]),
+ ok;
+do_error_1(call_const_last) ->
+ erlang:error(oops);
+do_error_1(call_last) ->
+ erlang:error(id(oops));
+do_error_1(call_const) ->
+ erlang:error(oops),
+ ok;
+do_error_1(call) ->
+ erlang:error(id(oops)).
+
+
+
%% Helpers
diff --git a/erts/emulator/test/call_trace_SUITE.erl b/erts/emulator/test/call_trace_SUITE.erl
index 6ba6301c7c..f7ff04430a 100644
--- a/erts/emulator/test/call_trace_SUITE.erl
+++ b/erts/emulator/test/call_trace_SUITE.erl
@@ -1090,8 +1090,7 @@ exception_nocatch() ->
{trace,t2,exception_from,{erlang,throw,1},
{error,{nocatch,Q2}}}],
exception_from, {error,{nocatch,Q2}}),
- expect({trace,T2,exit,{{nocatch,Q2},[{erlang,throw,[Q2],[]},
- {?MODULE,deep_4,1,
+ expect({trace,T2,exit,{{nocatch,Q2},[{?MODULE,deep_4,1,
Deep4LocThrow}]}}),
Q3 = {dump,[dump,{dump}]},
T3 =
@@ -1100,8 +1099,7 @@ exception_nocatch() ->
{trace,t3,exception_from,{erlang,error,1},
{error,Q3}}],
exception_from, {error,Q3}),
- expect({trace,T3,exit,{Q3,[{erlang,error,[Q3],[]},
- {?MODULE,deep_4,1,Deep4LocError}]}}),
+ expect({trace,T3,exit,{Q3,[{?MODULE,deep_4,1,Deep4LocError}]}}),
T4 =
exception_nocatch(?LINE, '=', [17,4711], 5, [],
exception_from, {error,{badmatch,4711}}),
diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl
index 34515efa3d..3ee14f2d1c 100644
--- a/erts/emulator/test/code_SUITE.erl
+++ b/erts/emulator/test/code_SUITE.erl
@@ -66,9 +66,9 @@ versions(Config) when is_list(Config) ->
2 = versions:version(),
%% Kill processes, unload code.
- P1 ! P2 ! done,
_ = monitor(process, P1),
_ = monitor(process, P2),
+ P1 ! P2 ! done,
receive
{'DOWN',_,process,P1,normal} -> ok
end,
diff --git a/erts/emulator/test/emulator_smoke.spec b/erts/emulator/test/emulator_smoke.spec
index 3219aeb823..b2d0de8835 100644
--- a/erts/emulator/test/emulator_smoke.spec
+++ b/erts/emulator/test/emulator_smoke.spec
@@ -1,3 +1,9 @@
-{suites,"../emulator_test",[smoke_test_SUITE,time_SUITE]}.
-{cases,"../emulator_test",crypto_SUITE,[t_md5]}.
-{cases,"../emulator_test",float_SUITE,[fpe,cmp_integer]}. \ No newline at end of file
+{define,'Dir',"../emulator_test"}.
+{suites,'Dir',[smoke_test_SUITE]}.
+{suites,'Dir',[time_SUITE]}.
+{skip_cases,'Dir',time_SUITE,
+ [univ_to_local,local_to_univ],"Depends on CET timezone"}.
+{skip_cases,'Dir',time_SUITE,
+ [consistency],"Not reliable in October and March"}.
+{cases,'Dir',crypto_SUITE,[t_md5]}.
+{cases,'Dir',float_SUITE,[fpe,cmp_integer]}.
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index d4e77d634a..23594aa8c4 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -2292,7 +2292,7 @@ maybe_to_list(List) ->
List.
format({Eol,List}) ->
- io_lib:format("tuple<~w,~s>",[Eol, maybe_to_list(List)]);
+ io_lib:format("tuple<~w,~w>",[Eol, maybe_to_list(List)]);
format(List) when is_list(List) ->
case list_at_least(50, List) of
true ->
diff --git a/erts/emulator/test/port_SUITE_data/Makefile.src b/erts/emulator/test/port_SUITE_data/Makefile.src
index fb7685c4b6..3a343e6d17 100644
--- a/erts/emulator/test/port_SUITE_data/Makefile.src
+++ b/erts/emulator/test/port_SUITE_data/Makefile.src
@@ -20,6 +20,12 @@ echo_args@exe@: echo_args@obj@
echo_args@obj@: echo_args.c
$(CC) -c -o echo_args@obj@ $(CFLAGS) echo_args.c
+dead_port@exe@: dead_port@obj@
+ $(LD) $(CROSSLDFLAGS) -o dead_port@exe@ dead_port@obj@ @LIBS@
+
+dead_port@obj@: dead_port.c
+ $(CC) -c -o dead_port@obj@ $(CFLAGS) dead_port.c
+
port_test.@EMULATOR@: port_test.erl
@erl_name@ -compile port_test
diff --git a/erts/emulator/test/port_SUITE_data/port_test.c b/erts/emulator/test/port_SUITE_data/port_test.c
index cc3ebdf0f8..e199a0fc13 100644
--- a/erts/emulator/test/port_SUITE_data/port_test.c
+++ b/erts/emulator/test/port_SUITE_data/port_test.c
@@ -10,6 +10,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
+#include <ctype.h>
#ifndef __WIN32__
#include <unistd.h>
@@ -33,7 +34,7 @@
exit(1); \
}
-#define MAIN(argc, argv) main(argc, argv)
+#define ASSERT(e) ((void) ((e) ? 1 : abort()))
extern int errno;
@@ -105,9 +106,7 @@ int err;
#endif
-MAIN(argc, argv)
-int argc;
-char *argv[];
+int main(int argc, char *argv[])
{
int ret, fd_count;
if((port_data = (PORT_TEST_DATA *) malloc(sizeof(PORT_TEST_DATA))) == NULL) {
@@ -377,9 +376,11 @@ write_reply(buf, size)
int size; /* Size of buffer to send. */
{
int n; /* Temporary to hold size. */
+ int rv;
if (port_data->slow_writes <= 0) { /* Normal, "fast", write. */
- write(port_data->fd_to_erl, buf, size);
+ rv = write(port_data->fd_to_erl, buf, size);
+ ASSERT(rv == size);
} else {
/*
* Write chunks with delays in between.
@@ -387,7 +388,8 @@ write_reply(buf, size)
while (size > 0) {
n = size > port_data->slow_writes ? port_data->slow_writes : size;
- write(port_data->fd_to_erl, buf, n);
+ rv = write(port_data->fd_to_erl, buf, n);
+ ASSERT(rv == n);
size -= n;
buf += n;
if (size)
@@ -558,7 +560,7 @@ char* spec; /* Specification for reply. */
buf = (char *) malloc(total_size);
if (buf == NULL) {
fprintf(stderr, "%s: insufficent memory for reply buffer of size %d\n",
- port_data->progname, total_size);
+ port_data->progname, (int)total_size);
exit(1);
}
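
The added ASSERTs make an unchecked write() fail loudly in the test program; production code would usually retry instead, since write() can also return a short count or be interrupted. A small illustrative write_all helper (not part of the port test program) showing the checked, retrying version:

    #include <errno.h>
    #include <stddef.h>
    #include <unistd.h>

    /* Write the whole buffer, retrying on EINTR and on short writes.
     * Returns 0 on success, -1 on a real error. */
    static int write_all(int fd, const char *buf, size_t size) {
        while (size > 0) {
            ssize_t n = write(fd, buf, size);
            if (n < 0) {
                if (errno == EINTR)
                    continue;          /* interrupted: retry          */
                return -1;             /* genuine failure             */
            }
            buf  += n;
            size -= (size_t)n;         /* short write: loop for rest  */
        }
        return 0;
    }

    int main(void) {
        static const char msg[] = "reply\n";
        return write_all(STDOUT_FILENO, msg, sizeof msg - 1);
    }
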
diff --git a/erts/emulator/test/port_trace_SUITE_data/echo_drv.c b/erts/emulator/test/port_trace_SUITE_data/echo_drv.c
index b545523192..20ec33a594 100644
--- a/erts/emulator/test/port_trace_SUITE_data/echo_drv.c
+++ b/erts/emulator/test/port_trace_SUITE_data/echo_drv.c
@@ -2,23 +2,30 @@
#include "erl_driver.h"
#include <errno.h>
#include <string.h>
+#include <assert.h>
/* -------------------------------------------------------------------------
** Data types
**/
+struct my_thread {
+ struct my_thread* next;
+ ErlDrvTid tid;
+};
typedef struct _erl_drv_data {
ErlDrvPort erlang_port;
ErlDrvTermData caller;
+ struct my_thread* threads;
} EchoDrvData;
struct remote_send_term {
- char *buf;
- int len;
+ struct my_thread thread;
ErlDrvTermData port;
ErlDrvTermData caller;
+ int len;
+ char buf[1]; /* buf[len] */
};
#define ECHO_DRV_NOOP 0
@@ -86,7 +93,7 @@ static ErlDrvEntry echo_drv_entry = {
NULL
};
-static void send_term_thread(void *);
+static void* send_term_thread(void *);
/* -------------------------------------------------------------------------
** Entry functions
@@ -111,10 +118,22 @@ static EchoDrvData *echo_drv_start(ErlDrvPort port, char *command)
EchoDrvData *echo_drv_data_p = driver_alloc(sizeof(EchoDrvData));
echo_drv_data_p->erlang_port = port;
echo_drv_data_p->caller = driver_caller(port);
+ echo_drv_data_p->threads = NULL;
return echo_drv_data_p;
}
-static void echo_drv_stop(EchoDrvData *data_p) {
+static void echo_drv_stop(EchoDrvData *data_p)
+{
+ struct my_thread* thr = data_p->threads;
+
+ while (thr) {
+ struct my_thread* next = thr->next;
+ void* exit_value;
+ int ret = erl_drv_thread_join(thr->tid, &exit_value);
+ assert(ret == 0 && exit_value == NULL);
+ driver_free(thr);
+ thr = next;
+ }
driver_free(data_p);
}
@@ -212,14 +231,14 @@ static void echo_drv_output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) {
}
case ECHO_DRV_REMOTE_SEND_TERM:
{
- ErlDrvTid tid;
- struct remote_send_term *t = malloc(sizeof(struct remote_send_term));
+ struct remote_send_term *t = driver_alloc(sizeof(struct remote_send_term) + len);
t->len = len-1;
- t->buf = malloc(len-1);
t->port = driver_mk_port(port);
t->caller = data_p->caller;
memcpy(t->buf, buf+1, t->len);
- erl_drv_thread_create("tmp_thread", &tid, send_term_thread, t, NULL);
+ erl_drv_thread_create("tmp_thread", &t->thread.tid, send_term_thread, t, NULL);
+ t->thread.next = data_p->threads;
+ data_p->threads = &t->thread;
break;
}
case ECHO_DRV_SAVE_CALLER:
@@ -262,7 +281,7 @@ static ErlDrvSSizeT echo_drv_call(ErlDrvData drv_data,
return len-command;
}
-static void send_term_thread(void *a)
+static void* send_term_thread(void *a)
{
struct remote_send_term *t = (struct remote_send_term*)a;
ErlDrvTermData term[] = {
@@ -273,5 +292,5 @@ static void send_term_thread(void *a)
ERL_DRV_TUPLE, 3};
erl_drv_send_term(t->port, t->caller,
term, sizeof(term) / sizeof(ErlDrvTermData));
- return;
+ return NULL;
}
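
The driver change keeps every spawned thread on a list hanging off the port state and joins them all in stop, so no thread can outlive the data it sends from. A minimal pthread sketch of the same bookkeeping (illustrative names; the real driver uses erl_drv_thread_create and erl_drv_thread_join):

    #include <assert.h>
    #include <pthread.h>
    #include <stdlib.h>

    struct worker {
        struct worker *next;
        pthread_t tid;
        int payload;               /* the data the thread works on */
    };

    struct port_state { struct worker *threads; };

    static void *work(void *arg) {
        struct worker *w = arg;
        w->payload *= 2;           /* stand-in for sending a term */
        return NULL;
    }

    /* Spawn a worker and remember it so stop_port() can join it later. */
    static void spawn_worker(struct port_state *st, int payload) {
        struct worker *w = malloc(sizeof *w);
        w->payload = payload;
        w->next = st->threads;
        st->threads = w;
        pthread_create(&w->tid, NULL, work, w);
    }

    /* Join and free every outstanding worker before the port state dies. */
    static void stop_port(struct port_state *st) {
        struct worker *w = st->threads;
        while (w) {
            struct worker *next = w->next;
            void *exit_value;
            int ret = pthread_join(w->tid, &exit_value);
            assert(ret == 0 && exit_value == NULL);
            free(w);
            w = next;
        }
        st->threads = NULL;
    }

    int main(void) {
        struct port_state st = { NULL };
        spawn_worker(&st, 21);
        stop_port(&st);
        return 0;
    }
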
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index 0f999e0efe..0a6eb7ffac 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -437,11 +437,22 @@ t_process_info(Config) when is_list(Config) ->
verify_loc(Line2, Res2),
pi_stacktrace([{?MODULE,t_process_info,1,?LINE}]),
+ verify_stacktrace_depth(),
+
Gleader = group_leader(),
{group_leader, Gleader} = process_info(self(), group_leader),
{'EXIT',{badarg,_Info}} = (catch process_info('not_a_pid')),
ok.
+verify_stacktrace_depth() ->
+ CS = current_stacktrace,
+ OldDepth = erlang:system_flag(backtrace_depth, 0),
+ {CS,[]} = erlang:process_info(self(), CS),
+ _ = erlang:system_flag(backtrace_depth, 8),
+ {CS,[{?MODULE,verify_stacktrace_depth,0,_},_|_]} =
+ erlang:process_info(self(), CS),
+ _ = erlang:system_flag(backtrace_depth, OldDepth).
+
pi_stacktrace(Expected0) ->
{Line,Res} = {?LINE,erlang:process_info(self(), current_stacktrace)},
{current_stacktrace,Stack} = Res,
@@ -1611,6 +1622,7 @@ spawn_initial_hangarounds(_Cleaner, NP, Max, Len, HAs) when NP > Max ->
{Len, HAs};
spawn_initial_hangarounds(Cleaner, NP, Max, Len, HAs) ->
Skip = 30,
+ wait_for_proc_slots(Skip+3),
HA1 = spawn_opt(?MODULE, hangaround, [Cleaner, initial_hangaround],
[{priority, low}]),
HA2 = spawn_opt(?MODULE, hangaround, [Cleaner, initial_hangaround],
@@ -1620,6 +1632,15 @@ spawn_initial_hangarounds(Cleaner, NP, Max, Len, HAs) ->
spawn_drop(Skip),
spawn_initial_hangarounds(Cleaner, NP+Skip, Max, Len+3, [HA1,HA2,HA3|HAs]).
+wait_for_proc_slots(MinFreeSlots) ->
+ case erlang:system_info(process_limit) - erlang:system_info(process_count) of
+ FreeSlots when FreeSlots < MinFreeSlots ->
+ receive after 10 -> ok end,
+ wait_for_proc_slots(MinFreeSlots);
+ _FreeSlots ->
+ ok
+ end.
+
spawn_drop(N) when N =< 0 ->
ok;
spawn_drop(N) ->
diff --git a/erts/etc/common/heart.c b/erts/etc/common/heart.c
index d67b997d6d..bc353e384e 100644
--- a/erts/etc/common/heart.c
+++ b/erts/etc/common/heart.c
@@ -48,13 +48,10 @@
*
* HEART_BEATING
*
- * This program expects a heart beat messages. If it does not receive a
- * heart beat message from Erlang within heart_beat_timeout seconds, it
- * reboots the system. The variable heart_beat_timeout is exported (so
- * that it can be set from the shell in VxWorks, as is the variable
- * heart_beat_report_delay). When using Solaris, the system is rebooted
- * by executing the command stored in the environment variable
- * HEART_COMMAND.
+ * This program expects a heart beat message. If it does not receive a
+ * heart beat message from Erlang within heart_beat_timeout seconds, it
+ * reboots the system. The system is rebooted by executing the command
+ * stored in the environment variable HEART_COMMAND.
*
* BLOCKING DESCRIPTORS
*
@@ -149,27 +146,17 @@ struct msg {
/* Maybe interesting to change */
/* Times in seconds */
-#define HEART_BEAT_BOOT_DELAY 60 /* 1 minute */
#define SELECT_TIMEOUT 5 /* Every 5 seconds we reset the
watchdog timer */
/* heart_beat_timeout is the maximum gap in seconds between two
- consecutive heart beat messages from Erlang, and HEART_BEAT_BOOT_DELAY
- is the the extra delay that wd_keeper allows for, to give heart a
- chance to reboot in the "normal" way before the hardware watchdog
- enters the scene. heart_beat_report_delay is the time allowed for reporting
- before rebooting under VxWorks. */
+ consecutive heart beat messages from Erlang. */
int heart_beat_timeout = 60;
-int heart_beat_report_delay = 30;
-int heart_beat_boot_delay = HEART_BEAT_BOOT_DELAY;
/* All current platforms have a process identifier that
fits in an unsigned long and where 0 is an impossible or invalid value */
unsigned long heart_beat_kill_pid = 0;
-#define VW_WD_TIMEOUT (heart_beat_timeout+heart_beat_report_delay+heart_beat_boot_delay)
-#define SOL_WD_TIMEOUT (heart_beat_timeout+heart_beat_boot_delay)
-
/* reasons for reboot */
#define R_TIMEOUT (1)
#define R_CLOSED (2)
@@ -297,7 +284,6 @@ free_env_val(char *value)
static void get_arguments(int argc, char** argv) {
int i = 1;
int h;
- int w;
unsigned long p;
while (i < argc) {
@@ -313,15 +299,6 @@ static void get_arguments(int argc, char** argv) {
i++;
}
break;
- case 'w':
- if (strcmp(argv[i], "-wt") == 0)
- if (sscanf(argv[i+1],"%i",&w) ==1)
- if ((w > 10) && (w <= 65535)) {
- heart_beat_boot_delay = w;
- fprintf(stderr,"heart_beat_boot_delay = %d\n",w);
- i++;
- }
- break;
case 'p':
if (strcmp(argv[i], "-pid") == 0)
if (sscanf(argv[i+1],"%lu",&p) ==1){
@@ -347,7 +324,7 @@ static void get_arguments(int argc, char** argv) {
}
i++;
}
- debugf("arguments -ht %d -wt %d -pid %lu\n",h,w,p);
+ debugf("arguments -ht %d -pid %lu\n",h,p);
}
int main(int argc, char **argv) {
@@ -674,11 +651,6 @@ void win_system(char *command)
*/
static void
do_terminate(int erlin_fd, int reason) {
- /*
- When we get here, we have HEART_BEAT_BOOT_DELAY secs to finish
- (plus heart_beat_report_delay if under VxWorks), so we don't need
- to call wd_reset().
- */
int ret = 0, tmo=0;
char *tmo_env;
diff --git a/erts/etc/unix/etp-commands.in b/erts/etc/unix/etp-commands.in
index fa68bd26ee..f2babc48d2 100644
--- a/erts/etc/unix/etp-commands.in
+++ b/erts/etc/unix/etp-commands.in
@@ -1322,7 +1322,11 @@ define etp-stacktrace
set $etp_stacktrace_p = ($arg0)->stop
set $etp_stacktrace_end = ($arg0)->hend
printf "%% Stacktrace (%u): ", $etp_stacktrace_end-$etp_stacktrace_p
- etp ($arg0)->cp
+ if ($arg0)->cp == 0x0
+ printf "NULL\n"
+ else
+ etp ($arg0)->cp
+ end
while $etp_stacktrace_p < $etp_stacktrace_end
if ($etp_stacktrace_p[0] & 0x3) == 0x0
# Continuation pointer
@@ -1350,7 +1354,11 @@ define etp-stackdump
set $etp_stackdump_p = ($arg0)->stop
set $etp_stackdump_end = ($arg0)->hend
printf "%% Stackdump (%u): ", $etp_stackdump_end-$etp_stackdump_p
- etp ($arg0)->cp
+ if ($arg0)->cp == 0x0
+ printf "NULL\n"
+ else
+ etp ($arg0)->cp
+ end
while $etp_stackdump_p < $etp_stackdump_end
etp $etp_stackdump_p[0]
set $etp_stackdump_p++
diff --git a/erts/lib_src/common/ethr_aux.c b/erts/lib_src/common/ethr_aux.c
index 420efd725f..3501fe335a 100644
--- a/erts/lib_src/common/ethr_aux.c
+++ b/erts/lib_src/common/ethr_aux.c
@@ -220,7 +220,7 @@ ethr_init_common__(ethr_init_data *id)
ethr_min_stack_size__ += ethr_pagesize__;
#endif
/* The system may think that we need more stack */
-#if defined(PTHREAD_STACK_MIN)
+#if defined(ETHR_HAVE_USABLE_PTHREAD_STACK_MIN)
if (ethr_min_stack_size__ < PTHREAD_STACK_MIN)
ethr_min_stack_size__ = PTHREAD_STACK_MIN;
#elif defined(_SC_THREAD_STACK_MIN)
diff --git a/erts/test/install_SUITE.erl b/erts/test/install_SUITE.erl
index 2c7e8972f6..f96dca9563 100644
--- a/erts/test/install_SUITE.erl
+++ b/erts/test/install_SUITE.erl
@@ -18,7 +18,6 @@
%% %CopyrightEnd%
%%
-
%%%-------------------------------------------------------------------
%%% File : install_SUITE.erl
%%% Author : Rickard Green
@@ -63,12 +62,12 @@
erlang_bindir = "",
bindir_symlinks = ""}).
-need_symlink_cases() ->
+need_symlink_cases() ->
[bin_unreachable_absolute, bin_unreachable_relative,
bin_same_dir, bin_ok_symlink, bin_dirname_fail,
bin_no_use_dirname_fail].
-dont_need_symlink_cases() ->
+dont_need_symlink_cases() ->
[bin_default, bin_default_dirty, bin_outside_eprfx,
bin_outside_eprfx_dirty, bin_not_abs,
bin_unreasonable_path, 'bin white space',
@@ -78,10 +77,9 @@ suite() ->
[{ct_hooks,[ts_install_cth]},
{timetrap, {minutes, 1}}].
-all() ->
+all() ->
dont_need_symlink_cases() ++ need_symlink_cases().
-
%%
%% The test cases
%%
@@ -533,21 +531,19 @@ bin_no_srcfile(Config) when is_list(Config) ->
ChkRes).
%%
-%%
%% Auxiliary functions
%%
-%%
expect(X, X) ->
- io:format("result: ~p~n", [X]),
+ io:format("result: ~tp~n", [X]),
io:format("-----------------------------------------------~n", []),
ok;
expect(X, Y) ->
- io:format("expected: ~p~n", [X]),
- io:format("got : ~p~n", [Y]),
+ io:format("expected: ~tp~n", [X]),
+ io:format("got : ~tp~n", [Y]),
io:format("-----------------------------------------------~n", []),
ct:fail({X,Y}).
-
+
init_per_suite(Config) ->
PD = proplists:get_value(priv_dir, Config),
SymLinks = case os:type() of
@@ -630,8 +626,8 @@ install_bin(Config, #inst{mkdirs = MkDirs,
true -> ok;
false -> {comment, "No symlink tests run, since symlinks not working"}
end.
-
-
+
+
install_bin2(Config, Inst, ChkRes) ->
install_bin3(Config, Inst#inst{symlinks = false,
ln_s = "ln"}, ChkRes),
@@ -662,8 +658,6 @@ install_bin2(Config, Inst, ChkRes) ->
false ->
ok
end.
-
-
install_bin3(Config,
#inst{cmd_prefix = CMD_PRFX,
@@ -690,20 +684,20 @@ install_bin3(Config,
++ "\" --exec-prefix \"" ++ EXEC_PREFIX
++ "\" --test-file \"" ++ ResFile ++ "\" erl erlc",
- io:format("CMD_PRFX = \"~s\"~n"
- "LN_S = \"~s\"~n"
- "BINDIR_SYMLINKS = \"~s\"~n"
- "exec_prefix = \"~s\"~n"
- "bindir = \"~s\"~n"
- "erlang_bindir = \"~s\"~n"
- "EXTRA_PREFIX = \"~s\"~n"
- "DESTDIR = \"~s\"~n",
+ io:format("CMD_PRFX = \"~ts\"~n"
+ "LN_S = \"~ts\"~n"
+ "BINDIR_SYMLINKS = \"~ts\"~n"
+ "exec_prefix = \"~ts\"~n"
+ "bindir = \"~ts\"~n"
+ "erlang_bindir = \"~ts\"~n"
+ "EXTRA_PREFIX = \"~ts\"~n"
+ "DESTDIR = \"~ts\"~n",
[CMD_PRFX, LN_S, BINDIR_SYMLINKS, EXEC_PREFIX, BINDIR,
ERLANG_BINDIR, EXTRA_PREFIX, DESTDIR]),
- io:format("$ ~s~n", [Cmd]),
+ io:format("$ ~ts~n", [Cmd]),
CmdOutput = os:cmd(Cmd),
- io:format("~s~n", [CmdOutput]),
+ io:format("~ts~n", [CmdOutput]),
ChkRes(case file:consult(ResFile) of
{ok, [Res]} -> Res;
Err -> exit({result, Err})
diff --git a/lib/common_test/src/common_test.app.src b/lib/common_test/src/common_test.app.src
index 77588af59b..dfa321c901 100644
--- a/lib/common_test/src/common_test.app.src
+++ b/lib/common_test/src/common_test.app.src
@@ -22,6 +22,7 @@
{vsn, "%VSN%"},
{modules, [ct_cover,
ct,
+ ct_default_gl,
ct_event,
ct_framework,
ct_ftp,
diff --git a/lib/common_test/test/Makefile b/lib/common_test/test/Makefile
index b1eddfedd7..2f0fc2e05a 100644
--- a/lib/common_test/test/Makefile
+++ b/lib/common_test/test/Makefile
@@ -70,7 +70,8 @@ MODULES= \
test_server_SUITE \
test_server_test_lib \
ct_release_test_SUITE \
- ct_log_SUITE
+ ct_log_SUITE \
+ ct_SUITE
ERL_FILES= $(MODULES:%=%.erl)
HRL_FILES= test_server_test_lib.hrl
diff --git a/lib/common_test/test/ct_SUITE.erl b/lib/common_test/test/ct_SUITE.erl
new file mode 100644
index 0000000000..eb98c2544f
--- /dev/null
+++ b/lib/common_test/test/ct_SUITE.erl
@@ -0,0 +1,53 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(ct_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+all() ->
+ [app_file, appup_file].
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+
+app_file(_Config) ->
+ ok = test_server:app_test(common_test),
+ ok.
+
+appup_file(_Config) ->
+ ok = test_server:appup_test(common_test).
+
diff --git a/lib/common_test/test/ct_hooks_SUITE.erl b/lib/common_test/test/ct_hooks_SUITE.erl
index 690d0af1bb..bc716fb5e3 100644
--- a/lib/common_test/test/ct_hooks_SUITE.erl
+++ b/lib/common_test/test/ct_hooks_SUITE.erl
@@ -70,20 +70,20 @@ suite() ->
all() ->
all(suite).
-all(suite) ->
+all(suite) ->
lists:reverse(
[
one_cth, two_cth, faulty_cth_no_init, faulty_cth_id_no_init,
faulty_cth_exit_in_init, faulty_cth_exit_in_id,
- faulty_cth_exit_in_init_scope_suite, minimal_cth,
- minimal_and_maximal_cth, faulty_cth_undef,
+ faulty_cth_exit_in_init_scope_suite, minimal_cth,
+ minimal_and_maximal_cth, faulty_cth_undef,
scope_per_suite_cth, scope_per_group_cth, scope_suite_cth,
- scope_per_suite_state_cth, scope_per_group_state_cth,
+ scope_per_suite_state_cth, scope_per_group_state_cth,
scope_suite_state_cth,
fail_pre_suite_cth, double_fail_pre_suite_cth,
fail_post_suite_cth, skip_pre_suite_cth, skip_pre_end_cth,
skip_post_suite_cth, recover_post_suite_cth, update_config_cth,
- state_update_cth, options_cth, same_id_cth,
+ state_update_cth, options_cth, same_id_cth,
fail_n_skip_with_minimal_cth, prio_cth, no_config,
data_dir, cth_log
]
@@ -96,10 +96,10 @@ all(suite) ->
%%%-----------------------------------------------------------------
%%%
-one_cth(Config) when is_list(Config) ->
+one_cth(Config) when is_list(Config) ->
do_test(one_empty_cth, "ct_cth_empty_SUITE.erl",[empty_cth], Config).
-two_cth(Config) when is_list(Config) ->
+two_cth(Config) when is_list(Config) ->
do_test(two_empty_cth, "ct_cth_empty_SUITE.erl",[empty_cth,empty_cth],
Config).
@@ -119,13 +119,13 @@ minimal_cth(Config) when is_list(Config) ->
minimal_and_maximal_cth(Config) when is_list(Config) ->
do_test(minimal_and_maximal_cth, "ct_cth_empty_SUITE.erl",
[minimal_cth, empty_cth],Config).
-
+
faulty_cth_undef(Config) when is_list(Config) ->
do_test(faulty_cth_undef, "ct_cth_empty_SUITE.erl",
[undef_cth],Config).
faulty_cth_exit_in_init_scope_suite(Config) when is_list(Config) ->
- do_test(faulty_cth_exit_in_init_scope_suite,
+ do_test(faulty_cth_exit_in_init_scope_suite,
"ct_exit_in_init_scope_suite_cth_SUITE.erl",
[],Config).
@@ -205,7 +205,7 @@ state_update_cth(Config) when is_list(Config) ->
options_cth(Config) when is_list(Config) ->
do_test(options_cth, "ct_cth_empty_SUITE.erl",
[{empty_cth,[test]}],Config).
-
+
same_id_cth(Config) when is_list(Config) ->
do_test(same_id_cth, "ct_cth_empty_SUITE.erl",
[same_id_cth,same_id_cth],Config).
@@ -227,9 +227,10 @@ data_dir(Config) when is_list(Config) ->
do_test(data_dir, "ct_data_dir_SUITE.erl",
[verify_data_dir_cth],Config).
-cth_log(Config) when is_list(Config) ->
+cth_log(Config) when is_list(Config) ->
%% test that cth_log_redirect writes properly to
%% unexpected I/O log
+ ct:timetrap({minutes,10}),
StartOpts = do_test(cth_log, "cth_log_SUITE.erl", [], Config),
Logdir = proplists:get_value(logdir, StartOpts),
UnexpIoLogs =
@@ -266,7 +267,6 @@ do_test(Tag, SWC, CTHs, Config, Res) ->
do_test(Tag, SWC, CTHs, Config, Res, 2).
do_test(Tag, SuiteWildCard, CTHs, Config, Res, EC) ->
-
DataDir = ?config(data_dir, Config),
Suites = filelib:wildcard(
filename:join([DataDir,"cth/tests",SuiteWildCard])),
@@ -275,7 +275,7 @@ do_test(Tag, SuiteWildCard, CTHs, Config, Res, EC) ->
Res = ct_test_support:run(Opts, Config),
Events = ct_test_support:get_events(ERPid, Config),
- ct_test_support:log_events(Tag,
+ ct_test_support:log_events(Tag,
reformat(Events, ?eh),
?config(priv_dir, Config),
Opts),
@@ -328,7 +328,7 @@ test_events(one_empty_cth) ->
{?eh,cth,{empty_cth,pre_end_per_testcase,[test_case,'$proplist',[]]}},
{?eh,cth,{empty_cth,post_end_per_testcase,[test_case,'$proplist','_',[]]}},
{?eh,tc_done,{ct_cth_empty_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_cth_empty_SUITE,end_per_suite}},
{?eh,cth,{empty_cth,pre_end_per_suite,
[ct_cth_empty_SUITE,'$proplist',[]]}},
@@ -360,7 +360,7 @@ test_events(two_empty_cth) ->
{?eh,cth,{'_',post_end_per_testcase,[test_case,'$proplist',ok,[]]}},
{?eh,cth,{'_',post_end_per_testcase,[test_case,'$proplist',ok,[]]}},
{?eh,tc_done,{ct_cth_empty_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_cth_empty_SUITE,end_per_suite}},
{?eh,cth,{'_',pre_end_per_suite,[ct_cth_empty_SUITE,'$proplist',[]]}},
{?eh,cth,{'_',pre_end_per_suite,[ct_cth_empty_SUITE,'$proplist',[]]}},
@@ -402,7 +402,7 @@ test_events(minimal_cth) ->
{?eh,tc_start,{ct_cth_empty_SUITE,test_case}},
{?eh,tc_done,{ct_cth_empty_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_cth_empty_SUITE,end_per_suite}},
{?eh,tc_done,{ct_cth_empty_SUITE,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
@@ -426,7 +426,7 @@ test_events(minimal_and_maximal_cth) ->
{?eh,cth,{'_',pre_init_per_testcase,[test_case,'$proplist',[]]}},
{?eh,cth,{'_',post_end_per_testcase,[test_case,'$proplist',ok,[]]}},
{?eh,tc_done,{ct_cth_empty_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_cth_empty_SUITE,end_per_suite}},
{?eh,cth,{'_',pre_end_per_suite,[ct_cth_empty_SUITE,'$proplist',[]]}},
{?eh,cth,{'_',post_end_per_suite,[ct_cth_empty_SUITE,'$proplist','_',[]]}},
@@ -452,11 +452,11 @@ test_events(faulty_cth_undef) ->
{?eh,tc_auto_skip,{ct_cth_empty_SUITE,test_case,
{failed, FailReason}}},
{?eh,cth,{'_',on_tc_skip,'_'}},
-
+
{?eh,tc_auto_skip,{ct_cth_empty_SUITE,end_per_suite,
{failed, FailReason}}},
{?eh,cth,{'_',on_tc_skip,'_'}},
-
+
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
];
@@ -515,7 +515,7 @@ test_events(scope_per_suite_cth) ->
{?eh,cth,{'_',pre_init_per_testcase,[test_case,'$proplist',[]]}},
{?eh,cth,{'_',post_end_per_testcase,[test_case,'$proplist',ok,[]]}},
{?eh,tc_done,{ct_scope_per_suite_cth_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_scope_per_suite_cth_SUITE,end_per_suite}},
{?eh,cth,{'_',pre_end_per_suite,
[ct_scope_per_suite_cth_SUITE,'$proplist',[]]}},
@@ -541,7 +541,7 @@ test_events(scope_suite_cth) ->
{?eh,cth,{'_',pre_init_per_testcase,[test_case,'$proplist',[]]}},
{?eh,cth,{'_',post_end_per_testcase,[test_case,'$proplist',ok,[]]}},
{?eh,tc_done,{ct_scope_suite_cth_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_scope_suite_cth_SUITE,end_per_suite}},
{?eh,cth,{'_',pre_end_per_suite,[ct_scope_suite_cth_SUITE,'$proplist',[]]}},
{?eh,cth,{'_',post_end_per_suite,[ct_scope_suite_cth_SUITE,'$proplist','_',[]]}},
@@ -563,18 +563,18 @@ test_events(scope_per_group_cth) ->
{?eh,cth,{'_',init,['_',[]]}},
{?eh,cth,{'_',post_init_per_group,[group1,'$proplist','$proplist',[]]}},
{?eh,tc_done,{ct_scope_per_group_cth_SUITE,{init_per_group,group1,[]},ok}},
-
+
{?eh,tc_start,{ct_scope_per_group_cth_SUITE,test_case}},
{?eh,cth,{'_',pre_init_per_testcase,[test_case,'$proplist',[]]}},
{?eh,cth,{'_',post_end_per_testcase,[test_case,'$proplist',ok,[]]}},
{?eh,tc_done,{ct_scope_per_group_cth_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_scope_per_group_cth_SUITE,{end_per_group,group1,[]}}},
{?eh,cth,{'_',pre_end_per_group,[group1,'$proplist',[]]}},
{?eh,cth,{'_',post_end_per_group,[group1,'$proplist','_',[]]}},
{?eh,cth,{'_',terminate,[[]]}},
{?eh,tc_done,{ct_scope_per_group_cth_SUITE,{end_per_group,group1,[]},ok}}],
-
+
{?eh,tc_start,{ct_scope_per_group_cth_SUITE,end_per_suite}},
{?eh,tc_done,{ct_scope_per_group_cth_SUITE,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
@@ -595,7 +595,7 @@ test_events(scope_per_suite_state_cth) ->
{?eh,cth,{'_',pre_init_per_testcase,[test_case,'$proplist',[test]]}},
{?eh,cth,{'_',post_end_per_testcase,[test_case,'$proplist',ok,[test]]}},
{?eh,tc_done,{ct_scope_per_suite_state_cth_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_scope_per_suite_state_cth_SUITE,end_per_suite}},
{?eh,cth,{'_',pre_end_per_suite,
[ct_scope_per_suite_state_cth_SUITE,'$proplist',[test]]}},
@@ -621,7 +621,7 @@ test_events(scope_suite_state_cth) ->
{?eh,cth,{'_',pre_init_per_testcase,[test_case,'$proplist',[test]]}},
{?eh,cth,{'_',post_end_per_testcase,[test_case,'$proplist',ok,[test]]}},
{?eh,tc_done,{ct_scope_suite_state_cth_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_scope_suite_state_cth_SUITE,end_per_suite}},
{?eh,cth,{'_',pre_end_per_suite,[ct_scope_suite_state_cth_SUITE,'$proplist',[test]]}},
{?eh,cth,{'_',post_end_per_suite,[ct_scope_suite_state_cth_SUITE,'$proplist','_',[test]]}},
@@ -643,18 +643,18 @@ test_events(scope_per_group_state_cth) ->
{?eh,cth,{'_',init,['_',[test]]}},
{?eh,cth,{'_',post_init_per_group,[group1,'$proplist','$proplist',[test]]}},
{?eh,tc_done,{ct_scope_per_group_state_cth_SUITE,{init_per_group,group1,[]},ok}},
-
+
{?eh,tc_start,{ct_scope_per_group_state_cth_SUITE,test_case}},
{?eh,cth,{'_',pre_init_per_testcase,[test_case,'$proplist',[test]]}},
{?eh,cth,{'_',post_end_per_testcase,[test_case,'$proplist',ok,[test]]}},
{?eh,tc_done,{ct_scope_per_group_state_cth_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_scope_per_group_state_cth_SUITE,{end_per_group,group1,[]}}},
{?eh,cth,{'_',pre_end_per_group,[group1,'$proplist',[test]]}},
{?eh,cth,{'_',post_end_per_group,[group1,'$proplist','_',[test]]}},
{?eh,cth,{'_',terminate,[[test]]}},
{?eh,tc_done,{ct_scope_per_group_state_cth_SUITE,{end_per_group,group1,[]},ok}}],
-
+
{?eh,tc_start,{ct_scope_per_group_state_cth_SUITE,end_per_suite}},
{?eh,tc_done,{ct_scope_per_group_state_cth_SUITE,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
@@ -666,7 +666,7 @@ test_events(fail_pre_suite_cth) ->
{?eh,start_logging,{'DEF','RUNDIR'}},
{?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
{?eh,cth,{'_',init,['_',[]]}},
-
+
{?eh,tc_start,{ct_cth_empty_SUITE,init_per_suite}},
{?eh,cth,{'_',pre_init_per_suite,[ct_cth_empty_SUITE,'$proplist',[]]}},
{?eh,cth,{'_',post_init_per_suite,[ct_cth_empty_SUITE,'$proplist',
@@ -676,7 +676,7 @@ test_events(fail_pre_suite_cth) ->
{?eh,cth,{'_',on_tc_fail,
[init_per_suite,{failed,"Test failure"},[]]}},
-
+
{?eh,tc_auto_skip,{ct_cth_empty_SUITE,test_case,
{failed,{ct_cth_empty_SUITE,init_per_suite,
{failed,"Test failure"}}}}},
@@ -685,7 +685,7 @@ test_events(fail_pre_suite_cth) ->
{failed, {ct_cth_empty_SUITE, init_per_suite,
{failed, "Test failure"}}}},[]]}},
-
+
{?eh,tc_auto_skip, {ct_cth_empty_SUITE, end_per_suite,
{failed, {ct_cth_empty_SUITE, init_per_suite,
{failed, "Test failure"}}}}},
@@ -694,7 +694,7 @@ test_events(fail_pre_suite_cth) ->
{failed, {ct_cth_empty_SUITE, init_per_suite,
{failed, "Test failure"}}}},[]]}},
-
+
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,cth, {'_',terminate,[[]]}},
{?eh,stop_logging,[]}
@@ -733,7 +733,7 @@ test_events(fail_post_suite_cth) ->
{failed,{ct_cth_empty_SUITE,init_per_suite,
{failed,"Test failure"}}}}},
{?eh,cth,{'_',on_tc_skip,[test_case,{tc_auto_skip,'_'},[]]}},
-
+
{?eh,tc_auto_skip, {ct_cth_empty_SUITE, end_per_suite,
{failed, {ct_cth_empty_SUITE, init_per_suite,
{failed, "Test failure"}}}}},
@@ -758,7 +758,7 @@ test_events(skip_pre_suite_cth) ->
{?eh,tc_user_skip,{ct_cth_empty_SUITE,test_case,"Test skip"}},
{?eh,cth,{'_',on_tc_skip,[test_case,{tc_user_skip,"Test skip"},[]]}},
-
+
{?eh,tc_user_skip, {ct_cth_empty_SUITE, end_per_suite,"Test skip"}},
{?eh,test_done,{'DEF','STOP_TIME'}},
@@ -772,18 +772,18 @@ test_events(skip_pre_end_cth) ->
{?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
{?eh,tc_start,{ct_scope_per_group_cth_SUITE,init_per_suite}},
{?eh,tc_done,{ct_scope_per_group_cth_SUITE,init_per_suite,ok}},
-
+
[{?eh,tc_start,{ct_scope_per_group_cth_SUITE,{init_per_group,group1,[]}}},
{?eh,cth,{'_',id,[[]]}},
{?eh,cth,{'_',init,['_',[]]}},
{?eh,cth,{'_',post_init_per_group,[group1,'$proplist','$proplist',[]]}},
{?eh,tc_done,{ct_scope_per_group_cth_SUITE,{init_per_group,group1,[]},ok}},
-
+
{?eh,tc_start,{ct_scope_per_group_cth_SUITE,test_case}},
{?eh,cth,{'_',pre_init_per_testcase,[test_case,'$proplist',[]]}},
{?eh,cth,{'_',post_end_per_testcase,[test_case,'$proplist',ok,[]]}},
{?eh,tc_done,{ct_scope_per_group_cth_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_scope_per_group_cth_SUITE,{end_per_group,group1,[]}}},
{?eh,cth,{'_',pre_end_per_group,[group1,'$proplist',[]]}},
{?eh,cth,{'_',post_end_per_group,[group1,'$proplist','_',[]]}},
@@ -808,7 +808,7 @@ test_events(skip_post_suite_cth) ->
{?eh,start_logging,{'DEF','RUNDIR'}},
{?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
{?eh,cth,{'_',init,['_',[]]}},
-
+
{?eh,tc_start,{ct_cth_empty_SUITE,init_per_suite}},
{?eh,cth,{'_',pre_init_per_suite,[ct_cth_empty_SUITE,'$proplist',[]]}},
{?eh,cth,{'_',post_init_per_suite,[ct_cth_empty_SUITE,'$proplist','$proplist',[]]}},
@@ -818,9 +818,9 @@ test_events(skip_post_suite_cth) ->
{?eh,tc_user_skip,{ct_cth_empty_SUITE,test_case,"Test skip"}},
{?eh,cth,{'_',on_tc_skip,[test_case,{tc_user_skip,"Test skip"},[]]}},
-
+
{?eh,tc_user_skip, {ct_cth_empty_SUITE, end_per_suite,"Test skip"}},
-
+
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,cth,{'_',terminate,[[]]}},
{?eh,stop_logging,[]}
@@ -844,7 +844,7 @@ test_events(recover_post_suite_cth) ->
{?eh,cth,{'_',post_end_per_testcase,
[test_case, contains([tc_status]),'_',[]]}},
{?eh,tc_done,{Suite,test_case,ok}},
-
+
{?eh,tc_start,{Suite,end_per_suite}},
{?eh,cth,{'_',pre_end_per_suite,
[Suite,not_contains([tc_status]),[]]}},
@@ -861,7 +861,7 @@ test_events(update_config_cth) ->
{?eh,start_logging,{'DEF','RUNDIR'}},
{?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
{?eh,cth,{'_',init,['_',[]]}},
-
+
{?eh,tc_start,{ct_update_config_SUITE,init_per_suite}},
{?eh,cth,{'_',pre_init_per_suite,
[ct_update_config_SUITE,contains([]),[]]}},
@@ -941,7 +941,7 @@ test_events(update_config_cth) ->
pre_init_per_suite]),
ok,[]]}},
{?eh,tc_done,{ct_update_config_SUITE,{end_per_group,group1,[]},ok}},
-
+
{?eh,tc_start,{ct_update_config_SUITE,end_per_suite}},
{?eh,cth,{'_',pre_end_per_suite,
[ct_update_config_SUITE,contains(
@@ -974,7 +974,7 @@ test_events(state_update_cth) ->
{?eh,cth,{'_',init,['_',[]]}},
{?eh,cth,{'_',init,['_',[]]}},
{?eh,tc_start,{'_',init_per_suite}},
-
+
{?eh,tc_done,{'_',end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,cth,{'_',terminate,[contains(
@@ -1021,7 +1021,7 @@ test_events(options_cth) ->
{?eh,cth,{empty_cth,pre_init_per_testcase,[test_case,'$proplist',[test]]}},
{?eh,cth,{empty_cth,post_end_per_testcase,[test_case,'$proplist','_',[test]]}},
{?eh,tc_done,{ct_cth_empty_SUITE,test_case,ok}},
-
+
{?eh,tc_start,{ct_cth_empty_SUITE,end_per_suite}},
{?eh,cth,{empty_cth,pre_end_per_suite,
[ct_cth_empty_SUITE,'$proplist',[test]]}},
@@ -1058,7 +1058,7 @@ test_events(same_id_cth) ->
{negative,
{?eh,cth,{'_',post_end_per_testcase,[test_case,'$proplist',ok,[]]}},
{?eh,tc_done,{ct_cth_empty_SUITE,test_case,ok}}},
-
+
{?eh,tc_start,{ct_cth_empty_SUITE,end_per_suite}},
{?eh,cth,{'_',pre_end_per_suite,[ct_cth_empty_SUITE,'$proplist',[]]}},
{negative,
@@ -1115,17 +1115,14 @@ test_events(fail_n_skip_with_minimal_cth) ->
];
test_events(prio_cth) ->
-
GenPre = fun(Func,States) ->
- [{?eh,cth,{'_',Func,['_','_',State]}} ||
- State <- States]
+ [{?eh,cth,{'_',Func,['_','_',State]}} || State <- States]
end,
GenPost = fun(Func,States) ->
- [{?eh,cth,{'_',Func,['_','_','_',State]}} ||
- State <- States]
+ [{?eh,cth,{'_',Func,['_','_','_',State]}} || State <- States]
end,
-
+
[{?eh,start_logging,{'DEF','RUNDIR'}},
{?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}}] ++
@@ -1136,7 +1133,7 @@ test_events(prio_cth) ->
[[1100,100],[600,200],[600,600],[700],[800],[900],[1000],
[1200,1050],[1100],[1200]]) ++
[{?eh,tc_done,{ct_cth_prio_SUITE,init_per_suite,ok}},
-
+
[{?eh,tc_start,{ct_cth_prio_SUITE,{init_per_group,'_',[]}}}] ++
GenPre(pre_init_per_group,
@@ -1147,7 +1144,7 @@ test_events(prio_cth) ->
[900],[900,900],[500,900],[1000],[1200,1050],
[1100],[1200]]) ++
[{?eh,tc_done,{ct_cth_prio_SUITE,{init_per_group,'_',[]},ok}}] ++
-
+
[{?eh,tc_start,{ct_cth_prio_SUITE,test_case}}] ++
GenPre(pre_init_per_testcase,
[[1100,100],[600,200],[600,600],[600],[700],[800],
@@ -1161,7 +1158,7 @@ test_events(prio_cth) ->
[{?eh,tc_done,{ct_cth_prio_SUITE,test_case,ok}},
{?eh,tc_start,{ct_cth_prio_SUITE,{end_per_group,'_',[]}}}] ++
- GenPre(pre_end_per_group,
+ GenPre(pre_end_per_group,
lists:reverse(
[[1100,100],[600,200],[600,600],[600],[700],[800],
[900],[900,900],[500,900],[1000],[1200,1050],
@@ -1300,7 +1297,7 @@ test_events(cth_log) ->
[{suite,cth_log_SUITE},parallel]}}},
{?eh,tc_done,{ct_framework,{end_per_group,g1,
[{suite,cth_log_SUITE},parallel]},ok}}]},
-
+
{?eh,tc_done,{cth_log_SUITE,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
@@ -1309,7 +1306,6 @@ test_events(cth_log) ->
test_events(ok) ->
ok.
-
%% test events help functions
contains(List) ->
fun(Proplist) when is_list(Proplist) ->
diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl
index 434360d294..e37ca31704 100644
--- a/lib/compiler/src/compile.erl
+++ b/lib/compiler/src/compile.erl
@@ -173,17 +173,25 @@ env_default_opts() ->
do_compile(Input, Opts0) ->
Opts = expand_opts(Opts0),
- {Pid,Ref} =
- spawn_monitor(fun() ->
- exit(try
- internal(Input, Opts)
- catch
- error:Reason ->
- {error,Reason}
- end)
- end),
- receive
- {'DOWN',Ref,process,Pid,Rep} -> Rep
+ IntFun = fun() -> try
+ internal(Input, Opts)
+ catch
+ error:Reason ->
+ {error,Reason}
+ end
+ end,
+ %% Dialyzer has already spawned workers.
+ case lists:member(dialyzer, Opts) of
+ true ->
+ IntFun();
+ false ->
+ {Pid,Ref} =
+ spawn_monitor(fun() ->
+ exit(IntFun())
+ end),
+ receive
+ {'DOWN',Ref,process,Pid,Rep} -> Rep
+ end
end.
expand_opts(Opts0) ->
diff --git a/lib/compiler/test/lc_SUITE.erl b/lib/compiler/test/lc_SUITE.erl
index 3cb49433ce..adb96fb87d 100644
--- a/lib/compiler/test/lc_SUITE.erl
+++ b/lib/compiler/test/lc_SUITE.erl
@@ -19,7 +19,7 @@
%%
-module(lc_SUITE).
--export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
+-export([all/0, suite/0, groups/0, init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2,
basic/1,deeply_nested/1,no_generator/1,
@@ -32,11 +32,11 @@ suite() ->
[{ct_hooks,[ts_install_cth]},
{timetrap,{minutes,1}}].
-all() ->
+all() ->
test_lib:recompile(?MODULE),
[{group,p}].
-groups() ->
+groups() ->
[{p,test_lib:parallel(),
[basic,
deeply_nested,
@@ -214,6 +214,7 @@ shadow(Config) when is_list(Config) ->
ok.
effect(Config) when is_list(Config) ->
+ ct:timetrap({minutes,10}),
[{42,{a,b,c}}] =
do_effect(fun(F, L) ->
[F({V1,V2}) ||
@@ -240,7 +241,7 @@ do_effect(Lc, L) ->
lists:reverse(erase(?MODULE)).
id(I) -> I.
-
+
fc(Args, {'EXIT',{function_clause,[{?MODULE,_,Args,_}|_]}}) -> ok;
fc(Args, {'EXIT',{function_clause,[{?MODULE,_,Arity,_}|_]}})
when length(Args) =:= Arity ->
diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c
index c100fc8ee2..ffa51bcfae 100644
--- a/lib/crypto/c_src/crypto.c
+++ b/lib/crypto/c_src/crypto.c
@@ -61,7 +61,6 @@
#include <openssl/evp.h>
#include <openssl/hmac.h>
-
/* Helper macro to construct a OPENSSL_VERSION_NUMBER.
* See openssl/opensslv.h
*/
@@ -326,7 +325,7 @@ static ErlNifFunc nif_funcs[] = {
{"rsa_private_crypt", 4, rsa_private_crypt},
{"dh_generate_parameters_nif", 2, dh_generate_parameters_nif},
{"dh_check", 1, dh_check},
- {"dh_generate_key_nif", 3, dh_generate_key_nif},
+ {"dh_generate_key_nif", 4, dh_generate_key_nif},
{"dh_compute_key_nif", 3, dh_compute_key_nif},
{"srp_value_B_nif", 5, srp_value_B_nif},
{"srp_user_secret_nif", 7, srp_user_secret_nif},
@@ -2727,12 +2726,13 @@ static ERL_NIF_TERM dh_check(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]
}
static ERL_NIF_TERM dh_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{/* (PrivKey, DHParams=[P,G], Mpint) */
+{/* (PrivKey|undefined, DHParams=[P,G], Mpint, Len|0) */
DH* dh_params;
int pub_len, prv_len;
unsigned char *pub_ptr, *prv_ptr;
ERL_NIF_TERM ret, ret_pub, ret_prv, head, tail;
int mpint; /* 0 or 4 */
+ unsigned long len = 0;
dh_params = DH_new();
@@ -2743,11 +2743,21 @@ static ERL_NIF_TERM dh_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_bin(env, head, &dh_params->g)
|| !enif_is_empty_list(env, tail)
- || !enif_get_int(env, argv[2], &mpint) || (mpint & ~4)) {
+ || !enif_get_int(env, argv[2], &mpint) || (mpint & ~4)
+ || !enif_get_ulong(env, argv[3], &len) ) {
DH_free(dh_params);
return enif_make_badarg(env);
}
+ if (len) {
+ if (len < BN_num_bits(dh_params->p))
+ dh_params->length = len;
+ else {
+ DH_free(dh_params);
+ return enif_make_badarg(env);
+ }
+ }
+
if (DH_generate_key(dh_params)) {
pub_len = BN_num_bytes(dh_params->pub_key);
prv_len = BN_num_bytes(dh_params->priv_key);
diff --git a/lib/crypto/doc/src/crypto.xml b/lib/crypto/doc/src/crypto.xml
index eda0f7af51..b6a1371154 100644
--- a/lib/crypto/doc/src/crypto.xml
+++ b/lib/crypto/doc/src/crypto.xml
@@ -100,7 +100,7 @@
<code>dh_private() = key_value() </code>
- <code>dh_params() = [key_value()] = [P, G] </code>
+ <code>dh_params() = [key_value()] = [P, G] | [P, G, PrivateKeyBitLength]</code>
<code>ecdh_public() = key_value() </code>
diff --git a/lib/crypto/src/crypto.erl b/lib/crypto/src/crypto.erl
index da8626e38a..deeb763145 100644
--- a/lib/crypto/src/crypto.erl
+++ b/lib/crypto/src/crypto.erl
@@ -277,10 +277,11 @@ hmac_final_n(Context, HashLen) ->
des3_cbc | des3_cbf | des3_cfb | des_ede3 |
blowfish_cbc | blowfish_cfb64 | blowfish_ofb64 |
aes_cbc128 | aes_cfb8 | aes_cfb128 | aes_cbc256 | aes_ige256 |
- aes_cbc |
+ aes_cbc |
rc2_cbc,
- Key::iodata(), Ivec::binary(), Data::iodata()) -> binary();
- (aes_gcm | chacha20_poly1305, Key::iodata(), Ivec::binary(), {AAD::binary(), Data::iodata()}) -> {binary(), binary()}.
+ Key::iodata(), Ivec::binary(), Data::iodata()) -> binary();
+ (aes_gcm | chacha20_poly1305, Key::iodata(), Ivec::binary(), {AAD::binary(), Data::iodata()}) -> {binary(), binary()};
+ (aes_gcm, Key::iodata(), Ivec::binary(), {AAD::binary(), Data::iodata(), TagLength::1..16}) -> {binary(), binary()}.
block_encrypt(Type, Key, Ivec, Data) when Type =:= des_cbc;
Type =:= des_cfb;
@@ -546,9 +547,15 @@ exor(Bin1, Bin2) ->
generate_key(Type, Params) ->
generate_key(Type, Params, undefined).
-generate_key(dh, DHParameters, PrivateKey) ->
+generate_key(dh, DHParameters0, PrivateKey) ->
+ {DHParameters, Len} =
+ case DHParameters0 of
+ [P,G,L] -> {[P,G], L};
+ [P,G] -> {[P,G], 0}
+ end,
dh_generate_key_nif(ensure_int_as_bin(PrivateKey),
- map_ensure_int_as_bin(DHParameters), 0);
+ map_ensure_int_as_bin(DHParameters),
+ 0, Len);
generate_key(srp, {host, [Verifier, Generator, Prime, Version]}, PrivArg)
when is_binary(Verifier), is_binary(Generator), is_binary(Prime), is_atom(Version) ->
@@ -1200,11 +1207,11 @@ dh_check([_Prime,_Gen]) -> ?nif_stub.
{binary(),binary()}.
dh_generate_key(DHParameters) ->
- dh_generate_key_nif(undefined, map_mpint_to_bin(DHParameters), 4).
+ dh_generate_key_nif(undefined, map_mpint_to_bin(DHParameters), 4, 0).
dh_generate_key(PrivateKey, DHParameters) ->
- dh_generate_key_nif(mpint_to_bin(PrivateKey), map_mpint_to_bin(DHParameters), 4).
+ dh_generate_key_nif(mpint_to_bin(PrivateKey), map_mpint_to_bin(DHParameters), 4, 0).
-dh_generate_key_nif(_PrivateKey, _DHParameters, _Mpint) -> ?nif_stub.
+dh_generate_key_nif(_PrivateKey, _DHParameters, _Mpint, _Length) -> ?nif_stub.
%% DHParameters = [P (Prime)= mpint(), G(Generator) = mpint()]
%% MyPrivKey, OthersPublicKey = mpint()
diff --git a/lib/dialyzer/src/dialyzer_analysis_callgraph.erl b/lib/dialyzer/src/dialyzer_analysis_callgraph.erl
index 08e55a78bd..4e18058993 100644
--- a/lib/dialyzer/src/dialyzer_analysis_callgraph.erl
+++ b/lib/dialyzer/src/dialyzer_analysis_callgraph.erl
@@ -101,9 +101,9 @@ loop(#server_state{parent = Parent} = State,
{AnalPid, cserver, CServer, Plt} ->
send_codeserver_plt(Parent, CServer, Plt),
loop(State, Analysis, ExtCalls);
- {AnalPid, done, Plt, DocPlt} ->
+ {AnalPid, done, MiniPlt, DocPlt} ->
send_ext_calls(Parent, ExtCalls),
- send_analysis_done(Parent, Plt, DocPlt);
+ send_analysis_done(Parent, MiniPlt, DocPlt);
{AnalPid, ext_calls, NewExtCalls} ->
loop(State, Analysis, NewExtCalls);
{AnalPid, ext_types, ExtTypes} ->
@@ -121,6 +121,7 @@ loop(#server_state{parent = Parent} = State,
%% The Analysis
%%--------------------------------------------------------------------
+%% Calls to erlang:garbage_collect() help to reduce the heap size.
analysis_start(Parent, Analysis, LegalWarnings) ->
CServer = dialyzer_codeserver:new(),
Plt = Analysis#analysis.plt,
@@ -157,12 +158,9 @@ analysis_start(Parent, Analysis, LegalWarnings) ->
TmpCServer1 = dialyzer_codeserver:set_temp_records(MergedRecords, TmpCServer0),
TmpCServer2 =
dialyzer_codeserver:finalize_exported_types(MergedExpTypes, TmpCServer1),
+ erlang:garbage_collect(),
?timing(State#analysis_state.timing_server, "remote",
- begin
- TmpCServer3 =
- dialyzer_utils:process_record_remote_types(TmpCServer2),
- dialyzer_contracts:process_contract_remote_types(TmpCServer3)
- end)
+ contracts_and_records(TmpCServer2))
catch
throw:{error, _ErrorMsg} = Error -> exit(Error)
end,
@@ -171,48 +169,75 @@ analysis_start(Parent, Analysis, LegalWarnings) ->
NewPlt1 = dialyzer_plt:insert_exported_types(NewPlt0, ExpTypes),
State0 = State#analysis_state{plt = NewPlt1},
dump_callgraph(Callgraph, State0, Analysis),
- State1 = State0#analysis_state{codeserver = NewCServer},
%% Remove all old versions of the files being analyzed
AllNodes = dialyzer_callgraph:all_nodes(Callgraph),
- Plt1 = dialyzer_plt:delete_list(NewPlt1, AllNodes),
+ Plt1_a = dialyzer_plt:delete_list(NewPlt1, AllNodes),
+ Plt1 = dialyzer_plt:insert_callbacks(Plt1_a, NewCServer),
+ State1 = State0#analysis_state{codeserver = NewCServer, plt = Plt1},
Exports = dialyzer_codeserver:get_exports(NewCServer),
+ NonExports = sets:subtract(sets:from_list(AllNodes), Exports),
+ NonExportsList = sets:to_list(NonExports),
NewCallgraph =
case Analysis#analysis.race_detection of
true -> dialyzer_callgraph:put_race_detection(true, Callgraph);
false -> Callgraph
end,
- State2 = analyze_callgraph(NewCallgraph, State1#analysis_state{plt = Plt1}),
+ State2 = analyze_callgraph(NewCallgraph, State1),
+ #analysis_state{plt = MiniPlt2, doc_plt = DocPlt} = State2,
dialyzer_callgraph:dispose_race_server(NewCallgraph),
rcv_and_send_ext_types(Parent),
- NonExports = sets:subtract(sets:from_list(AllNodes), Exports),
- NonExportsList = sets:to_list(NonExports),
- Plt2 = dialyzer_plt:delete_list(State2#analysis_state.plt, NonExportsList),
- send_codeserver_plt(Parent, CServer, State2#analysis_state.plt),
- send_analysis_done(Parent, Plt2, State2#analysis_state.doc_plt).
+ %% Since the PLT is never used, a dummy is sent:
+ DummyPlt = dialyzer_plt:new(),
+ send_codeserver_plt(Parent, CServer, DummyPlt),
+ MiniPlt3 = dialyzer_plt:delete_list(MiniPlt2, NonExportsList),
+ send_analysis_done(Parent, MiniPlt3, DocPlt).
+
+contracts_and_records(CodeServer) ->
+ Fun = contrs_and_recs(CodeServer),
+ {Pid, Ref} = erlang:spawn_monitor(Fun),
+ dialyzer_codeserver:give_away(CodeServer, Pid),
+ Pid ! {self(), go},
+ receive {'DOWN', Ref, process, Pid, Return} ->
+ Return
+ end.
+
+-spec contrs_and_recs(dialyzer_codeserver:codeserver()) ->
+ fun(() -> no_return()).
+
+contrs_and_recs(TmpCServer2) ->
+ fun() ->
+ Parent = receive {Pid, go} -> Pid end,
+ {TmpCServer3, RecordDict} =
+ dialyzer_utils:process_record_remote_types(TmpCServer2),
+ TmpServer4 =
+ dialyzer_contracts:process_contract_remote_types(TmpCServer3,
+ RecordDict),
+ dialyzer_codeserver:give_away(TmpServer4, Parent),
+ exit(TmpServer4)
+ end.
analyze_callgraph(Callgraph, #analysis_state{codeserver = Codeserver,
doc_plt = DocPlt,
+ plt = Plt,
timing_server = TimingServer,
parent = Parent,
solvers = Solvers} = State) ->
- Plt = dialyzer_plt:insert_callbacks(State#analysis_state.plt, Codeserver),
- {NewPlt, NewDocPlt} =
- case State#analysis_state.analysis_type of
- plt_build ->
- NewPlt0 =
- dialyzer_succ_typings:analyze_callgraph(Callgraph, Plt, Codeserver,
- TimingServer, Solvers, Parent),
- {NewPlt0, DocPlt};
- succ_typings ->
- {Warnings, NewPlt0, NewDocPlt0} =
- dialyzer_succ_typings:get_warnings(Callgraph, Plt, DocPlt, Codeserver,
- TimingServer, Solvers, Parent),
- Warnings1 = filter_warnings(Warnings, Codeserver),
- send_warnings(State#analysis_state.parent, Warnings1),
- {NewPlt0, NewDocPlt0}
- end,
- dialyzer_callgraph:delete(Callgraph),
- State#analysis_state{plt = NewPlt, doc_plt = NewDocPlt}.
+ case State#analysis_state.analysis_type of
+ plt_build ->
+ NewMiniPlt =
+ dialyzer_succ_typings:analyze_callgraph(Callgraph, Plt, Codeserver,
+ TimingServer, Solvers, Parent),
+ dialyzer_callgraph:delete(Callgraph),
+ State#analysis_state{plt = NewMiniPlt, doc_plt = DocPlt};
+ succ_typings ->
+ {Warnings, NewMiniPlt, NewDocPlt} =
+ dialyzer_succ_typings:get_warnings(Callgraph, Plt, DocPlt, Codeserver,
+ TimingServer, Solvers, Parent),
+ dialyzer_callgraph:delete(Callgraph),
+ Warnings1 = filter_warnings(Warnings, Codeserver),
+ send_warnings(State#analysis_state.parent, Warnings1),
+ State#analysis_state{plt = NewMiniPlt, doc_plt = NewDocPlt}
+ end.
%%--------------------------------------------------------------------
%% Build the callgraph and fill the codeserver.
@@ -569,8 +594,9 @@ is_ok_fun({_Filename, _Line, {_M, _F, _A} = MFA}, Codeserver) ->
is_ok_tag(Tag, {_F, _L, MorMFA}, Codeserver) ->
not dialyzer_utils:is_suppressed_tag(MorMFA, Tag, Codeserver).
-send_analysis_done(Parent, Plt, DocPlt) ->
- Parent ! {self(), done, Plt, DocPlt},
+send_analysis_done(Parent, MiniPlt, DocPlt) ->
+ ok = dialyzer_plt:give_away(MiniPlt, Parent),
+ Parent ! {self(), done, MiniPlt, DocPlt},
ok.
send_ext_calls(_Parent, none) ->
@@ -583,7 +609,7 @@ send_ext_types(Parent, ExtTypes) ->
Parent ! {self(), ext_types, ExtTypes},
ok.
-send_codeserver_plt(Parent, CServer, Plt ) ->
+send_codeserver_plt(Parent, CServer, Plt) ->
Parent ! {self(), cserver, CServer, Plt},
ok.
@@ -602,14 +628,14 @@ format_bad_calls([{{_, _, _}, {_, module_info, A}}|Left], CodeServer, Acc)
format_bad_calls([{FromMFA, {M, F, A} = To}|Left], CodeServer, Acc) ->
{_Var, FunCode} = dialyzer_codeserver:lookup_mfa_code(FromMFA, CodeServer),
Msg = {call_to_missing, [M, F, A]},
- {File, Line} = find_call_file_and_line(FunCode, To),
+ {File, Line} = find_call_file_and_line(FromMFA, FunCode, To, CodeServer),
WarningInfo = {File, Line, FromMFA},
NewAcc = [{?WARN_CALLGRAPH, WarningInfo, Msg}|Acc],
format_bad_calls(Left, CodeServer, NewAcc);
format_bad_calls([], _CodeServer, Acc) ->
Acc.
-find_call_file_and_line(Tree, MFA) ->
+find_call_file_and_line({Module, _, _}, Tree, MFA, CodeServer) ->
Fun =
fun(SubTree, Acc) ->
case cerl:is_c_call(SubTree) of
@@ -622,7 +648,7 @@ find_call_file_and_line(Tree, MFA) ->
case {cerl:concrete(M), cerl:concrete(F), A} of
MFA ->
Ann = cerl:get_ann(SubTree),
- [{get_file(Ann), get_line(Ann)}|Acc];
+ [{get_file(CodeServer, Module, Ann), get_line(Ann)}|Acc];
{erlang, make_fun, 3} ->
[CA1, CA2, CA3] = cerl:call_args(SubTree),
case
@@ -638,7 +664,8 @@ find_call_file_and_line(Tree, MFA) ->
of
MFA ->
Ann = cerl:get_ann(SubTree),
- [{get_file(Ann), get_line(Ann)}|Acc];
+ [{get_file(CodeServer, Module, Ann),
+ get_line(Ann)}|Acc];
_ ->
Acc
end;
@@ -658,8 +685,10 @@ get_line([Line|_]) when is_integer(Line) -> Line;
get_line([_|Tail]) -> get_line(Tail);
get_line([]) -> -1.
-get_file([{file, File}|_]) -> File;
-get_file([_|Tail]) -> get_file(Tail).
+get_file(Codeserver, Module, [{file, FakeFile}|_]) ->
+ dialyzer_codeserver:translate_fake_file(Codeserver, Module, FakeFile);
+get_file(Codeserver, Module, [_|Tail]) ->
+ get_file(Codeserver, Module, Tail).
-spec dump_callgraph(dialyzer_callgraph:callgraph(), #analysis_state{}, #analysis{}) ->
'ok'.
diff --git a/lib/dialyzer/src/dialyzer_behaviours.erl b/lib/dialyzer/src/dialyzer_behaviours.erl
index 5623929a43..524ae047e2 100644
--- a/lib/dialyzer/src/dialyzer_behaviours.erl
+++ b/lib/dialyzer/src/dialyzer_behaviours.erl
@@ -62,9 +62,9 @@ check_callbacks(Module, Attrs, Records, Plt, Codeserver) ->
_ ->
MFA = {Module,module_info,0},
{_Var,Code} = dialyzer_codeserver:lookup_mfa_code(MFA, Codeserver),
- File = get_file(cerl:get_ann(Code)),
+ File = get_file(Codeserver, Module, cerl:get_ann(Code)),
State = #state{plt = Plt, filename = File, behlines = BehLines,
- codeserver = Codeserver, records = Records},
+ codeserver = Codeserver, records = Records},
Warnings = get_warnings(Module, Behaviours, State),
[add_tag_warning_info(Module, W, State) || W <- Warnings]
end.
@@ -213,12 +213,15 @@ add_tag_warning_info(Module, {_Tag, [_B, Fun, Arity|_R]} = Warn, State) ->
dialyzer_codeserver:lookup_mfa_code({Module, Fun, Arity},
State#state.codeserver),
Anns = cerl:get_ann(FunCode),
- WarningInfo = {get_file(Anns), get_line(Anns), {Module, Fun, Arity}},
+ File = get_file(State#state.codeserver, Module, Anns),
+ WarningInfo = {File, get_line(Anns), {Module, Fun, Arity}},
{?WARN_BEHAVIOUR, WarningInfo, Warn}.
get_line([Line|_]) when is_integer(Line) -> Line;
get_line([_|Tail]) -> get_line(Tail);
get_line([]) -> -1.
-get_file([{file, File}|_]) -> File;
-get_file([_|Tail]) -> get_file(Tail).
+get_file(Codeserver, Module, [{file, FakeFile}|_]) ->
+ dialyzer_codeserver:translate_fake_file(Codeserver, Module, FakeFile);
+get_file(Codeserver, Module, [_|Tail]) ->
+ get_file(Codeserver, Module, Tail).
diff --git a/lib/dialyzer/src/dialyzer_callgraph.erl b/lib/dialyzer/src/dialyzer_callgraph.erl
index 50abb22009..5e02e7a2cc 100644
--- a/lib/dialyzer/src/dialyzer_callgraph.erl
+++ b/lib/dialyzer/src/dialyzer_callgraph.erl
@@ -119,7 +119,11 @@
-opaque callgraph() :: #callgraph{}.
--type active_digraph() :: {'d', digraph:graph()} | {'e', ets:tid(), ets:tid()}.
+-type active_digraph() :: {'d', digraph:graph()}
+ | {'e',
+ Out :: ets:tid(),
+ In :: ets:tid(),
+ Map :: ets:tid()}.
%%----------------------------------------------------------------------
@@ -248,24 +252,30 @@ find_non_local_calls([], Set) ->
-spec get_depends_on(scc() | module(), callgraph()) -> [scc()].
-get_depends_on(SCC, #callgraph{active_digraph = {'e', Out, _In}}) ->
- case ets_lookup_dict(SCC, Out) of
- {ok, Value} -> Value;
- error -> []
- end;
+get_depends_on(SCC, #callgraph{active_digraph = {'e', Out, _In, Maps}}) ->
+ lookup_scc(SCC, Out, Maps);
get_depends_on(SCC, #callgraph{active_digraph = {'d', DG}}) ->
digraph:out_neighbours(DG, SCC).
-spec get_required_by(scc() | module(), callgraph()) -> [scc()].
-get_required_by(SCC, #callgraph{active_digraph = {'e', _Out, In}}) ->
- case ets_lookup_dict(SCC, In) of
- {ok, Value} -> Value;
- error -> []
- end;
+get_required_by(SCC, #callgraph{active_digraph = {'e', _Out, In, Maps}}) ->
+ lookup_scc(SCC, In, Maps);
get_required_by(SCC, #callgraph{active_digraph = {'d', DG}}) ->
digraph:in_neighbours(DG, SCC).
+lookup_scc(SCC, Table, Maps) ->
+ case ets_lookup_dict({'scc', SCC}, Maps) of
+ {ok, SCCInt} ->
+ case ets_lookup_dict(SCCInt, Table) of
+ {ok, Ints} ->
+ [ets:lookup_element(Maps, Int, 2) || Int <- Ints];
+ error ->
+ []
+ end;
+ error -> []
+ end.
+
%%----------------------------------------------------------------------
%% Handling of modules & SCCs
%%----------------------------------------------------------------------
@@ -582,9 +592,10 @@ digraph_delete(DG) ->
active_digraph_delete({'d', DG}) ->
digraph:delete(DG);
-active_digraph_delete({'e', Out, In}) ->
+active_digraph_delete({'e', Out, In, Maps}) ->
ets:delete(Out),
- ets:delete(In).
+ ets:delete(In),
+ ets:delete(Maps).
digraph_edges(DG) ->
digraph:edges(DG).
@@ -758,37 +769,28 @@ to_ps(#callgraph{} = CG, File, Args) ->
ok.
condensation(G) ->
- SCs = digraph_utils:strong_components(G),
- V2I = ets:new(condensation_v2i, []),
- I2C = ets:new(condensation_i2c, []),
- I2I = ets:new(condensation_i2i, [bag]),
- CFun =
- fun(SC, N) ->
- lists:foreach(fun(V) -> true = ets:insert(V2I, {V,N}) end, SC),
- true = ets:insert(I2C, {N, SC}),
- N + 1
- end,
- lists:foldl(CFun, 1, SCs),
- Fun1 =
- fun({V1, V2}) ->
- I1 = ets:lookup_element(V2I, V1, 2),
- I2 = ets:lookup_element(V2I, V2, 2),
- I1 =:= I2 orelse ets:insert(I2I, {I1, I2})
- end,
- lists:foreach(Fun1, digraph:edges(G)),
- Fun3 =
- fun({I1, I2}, {Out, In}) ->
- SC1 = ets:lookup_element(I2C, I1, 2),
- SC2 = ets:lookup_element(I2C, I2, 2),
- {dict:append(SC1, SC2, Out), dict:append(SC2, SC1, In)}
- end,
- {OutDict, InDict} = ets:foldl(Fun3, {dict:new(), dict:new()}, I2I),
- [OutETS, InETS] =
+ SCCs = digraph_utils:strong_components(G),
+ %% Assign unique numbers to SCCs:
+ Ints = lists:seq(1, length(SCCs)),
+ IntToSCC = lists:zip(Ints, SCCs),
+ IntScc = sofs:relation(IntToSCC, [{int, scc}]),
+ %% Substitute strong components for vertices in edges using the
+ %% unique numbers:
+ C2V = sofs:relation([{SC, V} || SC <- SCCs, V <- SC], [{scc, v}]),
+ I2V = sofs:relative_product(IntScc, C2V), % [{v, int}]
+ Es = sofs:relation(digraph:edges(G), [{v, v}]),
+ R1 = sofs:relative_product(I2V, Es),
+ R2 = sofs:relative_product(I2V, sofs:converse(R1)),
+ %% Create in- and out-neighbours:
+ In = sofs:relation_to_family(sofs:strict_relation(R2)),
+ R3 = sofs:converse(R2),
+ Out = sofs:relation_to_family(sofs:strict_relation(R3)),
+ [OutETS, InETS, MapsETS] =
[ets:new(Name,[{read_concurrency, true}]) ||
- Name <- [callgraph_deps_out, callgraph_deps_in]],
- ets:insert(OutETS, dict:to_list(OutDict)),
- ets:insert(InETS, dict:to_list(InDict)),
- ets:delete(V2I),
- ets:delete(I2C),
- ets:delete(I2I),
- {{'e', OutETS, InETS}, SCs}.
+ Name <- [callgraph_deps_out, callgraph_deps_in, callgraph_scc_map]],
+ ets:insert(OutETS, sofs:to_external(Out)),
+ ets:insert(InETS, sofs:to_external(In)),
+ %% Create mappings from SCCs to unique integers, and the inverse:
+ ets:insert(MapsETS, lists:zip([{'scc', SCC} || SCC<- SCCs], Ints)),
+ ets:insert(MapsETS, IntToSCC),
+ {{'e', OutETS, InETS, MapsETS}, SCCs}.
diff --git a/lib/dialyzer/src/dialyzer_cl.erl b/lib/dialyzer/src/dialyzer_cl.erl
index fc56693ea3..e8c1613a33 100644
--- a/lib/dialyzer/src/dialyzer_cl.erl
+++ b/lib/dialyzer/src/dialyzer_cl.erl
@@ -637,8 +637,8 @@ cl_loop(State, LogCache) ->
{BackendPid, warnings, Warnings} ->
NewState = store_warnings(State, Warnings),
cl_loop(NewState, LogCache);
- {BackendPid, done, NewPlt, _NewDocPlt} ->
- return_value(State, NewPlt);
+ {BackendPid, done, NewMiniPlt, _NewDocPlt} ->
+ return_value(State, NewMiniPlt);
{BackendPid, ext_calls, ExtCalls} ->
cl_loop(State#cl_state{external_calls = ExtCalls}, LogCache);
{BackendPid, ext_types, ExtTypes} ->
@@ -654,6 +654,7 @@ cl_loop(State, LogCache) ->
cl_error(State, Msg);
_Other ->
%% io:format("Received ~p\n", [_Other]),
+ %% Note: {BackendPid, cserver, CodeServer, Plt} is ignored.
cl_loop(State, LogCache)
end.
@@ -699,10 +700,13 @@ return_value(State = #cl_state{erlang_mode = ErlangMode,
output_plt = OutputPlt,
plt_info = PltInfo,
stored_warnings = StoredWarnings},
- Plt) ->
+ MiniPlt) ->
case OutputPlt =:= none of
- true -> ok;
- false -> dialyzer_plt:to_file(OutputPlt, Plt, ModDeps, PltInfo)
+ true ->
+ dialyzer_plt:delete(MiniPlt);
+ false ->
+ Plt = dialyzer_plt:restore_full_plt(MiniPlt),
+ dialyzer_plt:to_file(OutputPlt, Plt, ModDeps, PltInfo)
end,
UnknownWarnings = unknown_warnings(State),
RetValue =
diff --git a/lib/dialyzer/src/dialyzer_codeserver.erl b/lib/dialyzer/src/dialyzer_codeserver.erl
index 03cd9671af..a5bb4e209c 100644
--- a/lib/dialyzer/src/dialyzer_codeserver.erl
+++ b/lib/dialyzer/src/dialyzer_codeserver.erl
@@ -29,7 +29,9 @@
-module(dialyzer_codeserver).
-export([delete/1,
- finalize_contracts/3,
+ store_temp_contracts/4,
+ give_away/2,
+ finalize_contracts/1,
finalize_exported_types/2,
finalize_records/2,
get_contracts/1,
@@ -38,7 +40,9 @@
get_exports/1,
get_records/1,
get_next_core_label/1,
- get_temp_contracts/1,
+ get_temp_contracts/2,
+ contracts_modules/1,
+ store_contracts/4,
get_temp_exported_types/1,
get_temp_records/1,
insert/3,
@@ -48,6 +52,7 @@
is_exported/2,
lookup_mod_code/2,
lookup_mfa_code/2,
+ lookup_mfa_var_label/2,
lookup_mod_records/2,
lookup_mod_contracts/2,
lookup_mfa_contract/2,
@@ -56,21 +61,22 @@
set_next_core_label/2,
set_temp_records/2,
store_temp_records/3,
- store_temp_contracts/4]).
+ translate_fake_file/3]).
--export_type([codeserver/0, fun_meta_info/0]).
+-export_type([codeserver/0, fun_meta_info/0, contracts/0]).
-include("dialyzer.hrl").
%%--------------------------------------------------------------------
-type dict_ets() :: ets:tid().
+-type map_ets() :: ets:tid().
-type set_ets() :: ets:tid().
-type types() :: erl_types:type_table().
--type mod_records() :: dict:dict(module(), types()).
+-type mod_records() :: erl_types:mod_records().
--type contracts() :: dict:dict(mfa(),dialyzer_contracts:file_contract()).
+-type contracts() :: #{mfa() => dialyzer_contracts:file_contract()}.
-type mod_contracts() :: dict:dict(module(), contracts()).
%% A property-list of data compiled from -compile and -dialyzer attributes.
@@ -81,16 +87,16 @@
-record(codeserver, {next_core_label = 0 :: label(),
code :: dict_ets(),
- exported_types :: set_ets() | 'undefined', % set(mfa())
- records :: dict_ets() | 'undefined',
- contracts :: dict_ets() | 'undefined',
- callbacks :: dict_ets() | 'undefined',
+ exported_types :: set_ets(), % set(mfa())
+ records :: map_ets(),
+ contracts :: map_ets(),
+ callbacks :: map_ets(),
fun_meta_info :: dict_ets(), % {mfa(), meta_info()}
exports :: 'clean' | set_ets(), % set(mfa())
temp_exported_types :: 'clean' | set_ets(), % set(mfa())
- temp_records :: 'clean' | dict_ets(),
- temp_contracts :: 'clean' | dict_ets(),
- temp_callbacks :: 'clean' | dict_ets()
+ temp_records :: 'clean' | map_ets(),
+ temp_contracts :: 'clean' | map_ets(),
+ temp_callbacks :: 'clean' | map_ets()
}).
-opaque codeserver() :: #codeserver{}.
@@ -104,7 +110,7 @@ ets_dict_find(Key, Table) ->
_:_ -> error
end.
-ets_dict_store(Key, Element, Table) ->
+ets_map_store(Key, Element, Table) ->
true = ets:insert(Table, {Key, Element}),
Table.
@@ -128,9 +134,6 @@ ets_set_to_set(Table) ->
Fold = fun({E}, Set) -> sets:add_element(E, Set) end,
ets:foldl(Fold, sets:new(), Table).
-ets_read_concurrent_table(Name) ->
- ets:new(Name, [{read_concurrency, true}]).
-
%%--------------------------------------------------------------------
-spec new() -> codeserver().
@@ -138,6 +141,13 @@ ets_read_concurrent_table(Name) ->
new() ->
CodeOptions = [compressed, public, {read_concurrency, true}],
Code = ets:new(dialyzer_codeserver_code, CodeOptions),
+ ReadOptions = [compressed, {read_concurrency, true}],
+ [Contracts, Callbacks, Records, ExportedTypes] =
+ [ets:new(Name, ReadOptions) ||
+ Name <- [dialyzer_codeserver_contracts,
+ dialyzer_codeserver_callbacks,
+ dialyzer_codeserver_records,
+ dialyzer_codeserver_exported_types]],
TempOptions = [public, {write_concurrency, true}],
[Exports, FunMetaInfo, TempExportedTypes, TempRecords, TempContracts,
TempCallbacks] =
@@ -150,6 +160,10 @@ new() ->
#codeserver{code = Code,
exports = Exports,
fun_meta_info = FunMetaInfo,
+ exported_types = ExportedTypes,
+ records = Records,
+ contracts = Contracts,
+ callbacks = Callbacks,
temp_exported_types = TempExportedTypes,
temp_records = TempRecords,
temp_contracts = TempContracts,
@@ -170,13 +184,15 @@ insert(Mod, ModCode, CS) ->
Exports = cerl:module_exports(ModCode),
Attrs = cerl:module_attrs(ModCode),
Defs = cerl:module_defs(ModCode),
+ {Files, SmallDefs} = compress_file_anno(Defs),
As = cerl:get_ann(ModCode),
Funs =
[{{Mod, cerl:fname_id(Var), cerl:fname_arity(Var)},
- Val} || Val = {Var, _Fun} <- Defs],
- Keys = [Key || {Key, _Value} <- Funs],
+ Val, {Var, cerl_trees:get_label(Fun)}} || Val = {Var, Fun} <- SmallDefs],
+ Keys = [Key || {Key, _Value, _Label} <- Funs],
ModEntry = {Mod, {Name, Exports, Attrs, Keys, As}},
- true = ets:insert(CS#codeserver.code, [ModEntry|Funs]),
+ ModFileEntry = {{mod, Mod}, Files},
+ true = ets:insert(CS#codeserver.code, [ModEntry, ModFileEntry|Funs]),
CS.
-spec get_temp_exported_types(codeserver()) -> sets:set(mfa()).
@@ -220,12 +236,12 @@ get_exports(#codeserver{exports = Exports}) ->
-spec finalize_exported_types(sets:set(mfa()), codeserver()) -> codeserver().
-finalize_exported_types(Set, CS) ->
- ExportedTypes = ets_read_concurrent_table(dialyzer_codeserver_exported_types),
+finalize_exported_types(Set,
+ #codeserver{exported_types = ExportedTypes,
+ temp_exported_types = TempETypes} = CS) ->
true = ets_set_insert_set(Set, ExportedTypes),
- TempExpTypes = CS#codeserver.temp_exported_types,
- true = ets:delete(TempExpTypes),
- CS#codeserver{exported_types = ExportedTypes, temp_exported_types = clean}.
+ true = ets:delete(TempETypes),
+ CS#codeserver{temp_exported_types = clean}.
-spec lookup_mod_code(atom(), codeserver()) -> cerl:c_module().
@@ -237,6 +253,11 @@ lookup_mod_code(Mod, CS) when is_atom(Mod) ->
lookup_mfa_code({_M, _F, _A} = MFA, CS) ->
table__lookup(CS#codeserver.code, MFA).
+-spec lookup_mfa_var_label(mfa(), codeserver()) -> {cerl:c_var(), label()}.
+
+lookup_mfa_var_label({_M, _F, _A} = MFA, CS) ->
+ ets:lookup_element(CS#codeserver.code, MFA, 3).
+
-spec get_next_core_label(codeserver()) -> label().
get_next_core_label(#codeserver{next_core_label = NCL}) ->
@@ -251,8 +272,8 @@ set_next_core_label(NCL, CS) ->
lookup_mod_records(Mod, #codeserver{records = RecDict}) when is_atom(Mod) ->
case ets_dict_find(Mod, RecDict) of
- error -> dict:new();
- {ok, Dict} -> Dict
+ error -> maps:new();
+ {ok, Map} -> Map
end.
-spec get_records(codeserver()) -> mod_records().
@@ -262,11 +283,11 @@ get_records(#codeserver{records = RecDict}) ->
-spec store_temp_records(module(), types(), codeserver()) -> codeserver().
-store_temp_records(Mod, Dict, #codeserver{temp_records = TempRecDict} = CS)
+store_temp_records(Mod, Map, #codeserver{temp_records = TempRecDict} = CS)
when is_atom(Mod) ->
- case dict:size(Dict) =:= 0 of
+ case maps:size(Map) =:= 0 of
true -> CS;
- false -> CS#codeserver{temp_records = ets_dict_store(Mod, Dict, TempRecDict)}
+ false -> CS#codeserver{temp_records = ets_map_store(Mod, Map, TempRecDict)}
end.
-spec get_temp_records(codeserver()) -> mod_records().
@@ -284,20 +305,20 @@ set_temp_records(Dict, CS) ->
-spec finalize_records(mod_records(), codeserver()) -> codeserver().
-finalize_records(Dict, CS) ->
- true = ets:delete(CS#codeserver.temp_records),
- Records = ets_read_concurrent_table(dialyzer_codeserver_records),
+finalize_records(Dict, #codeserver{temp_records = TmpRecords,
+ records = Records} = CS) ->
+ true = ets:delete(TmpRecords),
true = ets_dict_store_dict(Dict, Records),
- CS#codeserver{records = Records, temp_records = clean}.
+ CS#codeserver{temp_records = clean}.
-spec lookup_mod_contracts(atom(), codeserver()) -> contracts().
lookup_mod_contracts(Mod, #codeserver{contracts = ContDict})
when is_atom(Mod) ->
case ets_dict_find(Mod, ContDict) of
- error -> dict:new();
+ error -> maps:new();
{ok, Keys} ->
- dict:from_list([get_file_contract(Key, ContDict)|| Key <- Keys])
+ maps:from_list([get_file_contract(Key, ContDict)|| Key <- Keys])
end.
get_file_contract(Key, ContDict) ->
@@ -330,48 +351,69 @@ get_callbacks(#codeserver{callbacks = CallbDict}) ->
-spec store_temp_contracts(module(), contracts(), contracts(), codeserver()) ->
codeserver().
-store_temp_contracts(Mod, SpecDict, CallbackDict,
+store_temp_contracts(Mod, SpecMap, CallbackMap,
#codeserver{temp_contracts = Cn,
temp_callbacks = Cb} = CS)
when is_atom(Mod) ->
- CS1 =
- case dict:size(SpecDict) =:= 0 of
- true -> CS;
- false ->
- CS#codeserver{temp_contracts = ets_dict_store(Mod, SpecDict, Cn)}
- end,
- case dict:size(CallbackDict) =:= 0 of
- true -> CS1;
- false ->
- CS1#codeserver{temp_callbacks = ets_dict_store(Mod, CallbackDict, Cb)}
- end.
-
--spec get_temp_contracts(codeserver()) -> {mod_contracts(), mod_contracts()}.
+ CS1 = CS#codeserver{temp_contracts = ets_map_store(Mod, SpecMap, Cn)},
+ CS1#codeserver{temp_callbacks = ets_map_store(Mod, CallbackMap, Cb)}.
-get_temp_contracts(#codeserver{temp_contracts = TempContDict,
- temp_callbacks = TempCallDict}) ->
- {ets_dict_to_dict(TempContDict), ets_dict_to_dict(TempCallDict)}.
+-spec contracts_modules(codeserver()) -> [module()].
--spec finalize_contracts(mod_contracts(), mod_contracts(), codeserver()) ->
- codeserver().
+contracts_modules(#codeserver{temp_contracts = TempContTable}) ->
+ ets:select(TempContTable, [{{'$1', '$2'}, [], ['$1']}]).
-finalize_contracts(SpecDict, CallbackDict, CS) ->
- Contracts = ets_read_concurrent_table(dialyzer_codeserver_contracts),
- Callbacks = ets_read_concurrent_table(dialyzer_codeserver_callbacks),
- Contracts = dict:fold(fun decompose_spec_dict/3, Contracts, SpecDict),
- Callbacks = dict:fold(fun decompose_cb_dict/3, Callbacks, CallbackDict),
- CS#codeserver{contracts = Contracts, callbacks = Callbacks,
- temp_contracts = clean, temp_callbacks = clean}.
+-spec store_contracts(module(), contracts(), contracts(), codeserver()) ->
+ codeserver().
-decompose_spec_dict(Mod, Dict, Table) ->
- Keys = dict:fetch_keys(Dict),
- true = ets:insert(Table, dict:to_list(Dict)),
- true = ets:insert(Table, {Mod, Keys}),
- Table.
+store_contracts(Mod, SpecMap, CallbackMap, CS) ->
+ #codeserver{contracts = SpecDict, callbacks = CallbackDict} = CS,
+ Keys = maps:keys(SpecMap),
+ true = ets:insert(SpecDict, maps:to_list(SpecMap)),
+ true = ets:insert(SpecDict, {Mod, Keys}),
+ true = ets:insert(CallbackDict, maps:to_list(CallbackMap)),
+ CS.
-decompose_cb_dict(_Mod, Dict, Table) ->
- true = ets:insert(Table, dict:to_list(Dict)),
- Table.
+-spec get_temp_contracts(module(), codeserver()) ->
+ {contracts(), contracts()}.
+
+get_temp_contracts(Mod, #codeserver{temp_contracts = TempContDict,
+ temp_callbacks = TempCallDict}) ->
+ [{Mod, Contracts}] = ets:lookup(TempContDict, Mod),
+ true = ets:delete(TempContDict, Mod),
+ [{Mod, Callbacks}] = ets:lookup(TempCallDict, Mod),
+ true = ets:delete(TempCallDict, Mod),
+ {Contracts, Callbacks}.
+
+-spec give_away(codeserver(), pid()) -> 'ok'.
+
+give_away(#codeserver{temp_records = TempRecords,
+ temp_contracts = TempContracts,
+ temp_callbacks = TempCallbacks,
+ records = Records,
+ contracts = Contracts,
+ callbacks = Callbacks}, Pid) ->
+ _ = [true = ets:give_away(Table, Pid, any) ||
+ Table <- [TempRecords, TempContracts, TempCallbacks,
+ Records, Contracts, Callbacks],
+ Table =/= clean],
+ ok.
+
+-spec finalize_contracts(codeserver()) -> codeserver().
+
+finalize_contracts(#codeserver{temp_contracts = TempContDict,
+ temp_callbacks = TempCallDict} = CS) ->
+ true = ets:delete(TempContDict),
+ true = ets:delete(TempCallDict),
+ CS#codeserver{temp_contracts = clean, temp_callbacks = clean}.
+
+-spec translate_fake_file(codeserver(), module(), file:filename()) ->
+ file:filename().
+
+translate_fake_file(#codeserver{code = Code}, Module, FakeFile) ->
+ Files = ets:lookup_element(Code, {mod, Module}, 2),
+ {FakeFile, File} = lists:keyfind(FakeFile, 1, Files),
+ File.
table__lookup(TablePid, M) when is_atom(M) ->
{Name, Exports, Attrs, Keys, As} = ets:lookup_element(TablePid, M, 2),
@@ -379,3 +421,25 @@ table__lookup(TablePid, M) when is_atom(M) ->
cerl:ann_c_module(As, Name, Exports, Attrs, Defs);
table__lookup(TablePid, MFA) ->
ets:lookup_element(TablePid, MFA, 2).
+
+compress_file_anno(Term) ->
+ {Files, SmallTerm} = compress_file_anno(Term, []),
+ {[{FakeFile, File} || {File, {file, FakeFile}} <- Files], SmallTerm}.
+
+compress_file_anno({file, F}, Fs) when is_list(F) ->
+ case lists:keyfind(F, 1, Fs) of
+ false ->
+ I = integer_to_list(length(Fs)),
+ FileI = {file, I},
+ NFs = [{F, FileI}|Fs],
+ {NFs, FileI};
+ {F, FileI} -> {Fs, FileI}
+ end;
+compress_file_anno(T, Fs) when is_tuple(T) ->
+ {NFs, NL} = compress_file_anno(tuple_to_list(T), Fs),
+ {NFs, list_to_tuple(NL)};
+compress_file_anno([E|L], Fs) ->
+ {Fs1, NE} = compress_file_anno(E, Fs),
+ {NFs, NL} = compress_file_anno(L, Fs1),
+ {NFs, [NE|NL]};
+compress_file_anno(T, Fs) -> {Fs, T}.
diff --git a/lib/dialyzer/src/dialyzer_contracts.erl b/lib/dialyzer/src/dialyzer_contracts.erl
index 73b04b305b..f3fba68e84 100644
--- a/lib/dialyzer/src/dialyzer_contracts.erl
+++ b/lib/dialyzer/src/dialyzer_contracts.erl
@@ -31,7 +31,7 @@
get_contract_return/2,
%% get_contract_signature/1,
is_overloaded/1,
- process_contract_remote_types/1,
+ process_contract_remote_types/2,
store_tmp_contract/5]).
-export_type([file_contract/0, plt_contracts/0]).
@@ -146,14 +146,13 @@ sequence([], _Delimiter) -> "";
sequence([H], _Delimiter) -> H;
sequence([H|T], Delimiter) -> H ++ Delimiter ++ sequence(T, Delimiter).
--spec process_contract_remote_types(dialyzer_codeserver:codeserver()) ->
- dialyzer_codeserver:codeserver().
+-spec process_contract_remote_types(dialyzer_codeserver:codeserver(),
+ erl_types:mod_records()) ->
+ dialyzer_codeserver:codeserver().
-process_contract_remote_types(CodeServer) ->
- {TmpContractDict, TmpCallbackDict} =
- dialyzer_codeserver:get_temp_contracts(CodeServer),
+process_contract_remote_types(CodeServer, RecordDict) ->
+ Mods = dialyzer_codeserver:contracts_modules(CodeServer),
ExpTypes = dialyzer_codeserver:get_exported_types(CodeServer),
- RecordDict = dialyzer_codeserver:get_records(CodeServer),
ContractFun =
fun({{_M, _F, _A}=MFA, {File, TmpContract, Xtra}}, C0) ->
#tmp_contract{contract_funs = CFuns, forms = Forms} = TmpContract,
@@ -165,20 +164,21 @@ process_contract_remote_types(CodeServer) ->
{{MFA, {File, Contract, Xtra}}, C2}
end,
ModuleFun =
- fun({ModuleName, ContractDict}, C3) ->
- {NewContractList, C4} =
- lists:mapfoldl(ContractFun, C3, dict:to_list(ContractDict)),
- {{ModuleName, dict:from_list(NewContractList)}, C4}
+ fun(ModuleName) ->
+ Cache = erl_types:cache__new(),
+ {ContractMap, CallbackMap} =
+ dialyzer_codeserver:get_temp_contracts(ModuleName, CodeServer),
+ {NewContractList, Cache1} =
+ lists:mapfoldl(ContractFun, Cache, maps:to_list(ContractMap)),
+ {NewCallbackList, _NewCache} =
+ lists:mapfoldl(ContractFun, Cache1, maps:to_list(CallbackMap)),
+ dialyzer_codeserver:store_contracts(ModuleName,
+ maps:from_list(NewContractList),
+ maps:from_list(NewCallbackList),
+ CodeServer)
end,
- Cache = erl_types:cache__new(),
- {NewContractList, C5} =
- lists:mapfoldl(ModuleFun, Cache, dict:to_list(TmpContractDict)),
- {NewCallbackList, _C6} =
- lists:mapfoldl(ModuleFun, C5, dict:to_list(TmpCallbackDict)),
- NewContractDict = dict:from_list(NewContractList),
- NewCallbackDict = dict:from_list(NewCallbackList),
- dialyzer_codeserver:finalize_contracts(NewContractDict, NewCallbackDict,
- CodeServer).
+ lists:foreach(ModuleFun, Mods),
+ dialyzer_codeserver:finalize_contracts(CodeServer).
-type opaques_fun() :: fun((module()) -> [erl_types:erl_type()]).
@@ -397,7 +397,7 @@ solve_constraints(Contract, Call, Constraints) ->
%% ?debug("Inf: ~s\n", [erl_types:t_to_string(Inf)]),
%% erl_types:t_assign_variables_to_subtype(Contract, Inf).
--type contracts() :: dict:dict(mfa(),dialyzer_contracts:file_contract()).
+-type contracts() :: dialyzer_codeserver:contracts().
%% Checks the contracts for functions that are not implemented
-spec contracts_without_fun(contracts(), [_], dialyzer_callgraph:callgraph()) ->
@@ -407,12 +407,12 @@ contracts_without_fun(Contracts, AllFuns0, Callgraph) ->
AllFuns1 = [{dialyzer_callgraph:lookup_name(Label, Callgraph), Arity}
|| {Label, Arity} <- AllFuns0],
AllFuns2 = [{M, F, A} || {{ok, {M, F, _}}, A} <- AllFuns1],
- AllContractMFAs = dict:fetch_keys(Contracts),
+ AllContractMFAs = maps:keys(Contracts),
ErrorContractMFAs = AllContractMFAs -- AllFuns2,
[warn_spec_missing_fun(MFA, Contracts) || MFA <- ErrorContractMFAs].
warn_spec_missing_fun({M, F, A} = MFA, Contracts) ->
- {{File, Line}, _Contract, _Xtra} = dict:fetch(MFA, Contracts),
+ {{File, Line}, _Contract, _Xtra} = maps:get(MFA, Contracts),
WarningInfo = {File, Line, MFA},
{?WARN_CONTRACT_SYNTAX, WarningInfo, {spec_missing_fun, [M, F, A]}}.
@@ -445,11 +445,11 @@ insert_constraints([], Map) -> Map.
-spec store_tmp_contract(mfa(), file_line(), spec_data(), contracts(), types()) ->
contracts().
-store_tmp_contract(MFA, FileLine, {TypeSpec, Xtra}, SpecDict, RecordsDict) ->
+store_tmp_contract(MFA, FileLine, {TypeSpec, Xtra}, SpecMap, RecordsDict) ->
%% io:format("contract from form: ~p\n", [TypeSpec]),
TmpContract = contract_from_form(TypeSpec, MFA, RecordsDict, FileLine),
%% io:format("contract: ~p\n", [TmpContract]),
- dict:store(MFA, {FileLine, TmpContract, Xtra}, SpecDict).
+ maps:put(MFA, {FileLine, TmpContract, Xtra}, SpecMap).
contract_from_form(Forms, MFA, RecDict, FileLine) ->
{CFuns, Forms1} = contract_from_form(Forms, MFA, RecDict, FileLine, [], []),
@@ -677,7 +677,7 @@ get_invalid_contract_warnings(Modules, CodeServer, Plt, FindOpaques) ->
get_invalid_contract_warnings_modules([Mod|Mods], CodeServer, Plt, FindOpaques, Acc) ->
Contracts1 = dialyzer_codeserver:lookup_mod_contracts(Mod, CodeServer),
- Contracts2 = dict:to_list(Contracts1),
+ Contracts2 = maps:to_list(Contracts1),
Records = dialyzer_codeserver:lookup_mod_records(Mod, CodeServer),
NewAcc = get_invalid_contract_warnings_funs(Contracts2, Plt, Records, FindOpaques, Acc),
get_invalid_contract_warnings_modules(Mods, CodeServer, Plt, FindOpaques, NewAcc);
diff --git a/lib/dialyzer/src/dialyzer_dataflow.erl b/lib/dialyzer/src/dialyzer_dataflow.erl
index 639ed426df..ce292e1140 100644
--- a/lib/dialyzer/src/dialyzer_dataflow.erl
+++ b/lib/dialyzer/src/dialyzer_dataflow.erl
@@ -529,7 +529,7 @@ handle_apply_or_call([{TypeOfApply, {Fun, Sig, Contr, LocalRet}}|Left],
case is_race_analysis_enabled(State) of
true ->
Ann = cerl:get_ann(Tree),
- File = get_file(Ann),
+ File = get_file(Ann, State),
Line = abs(get_line(Ann)),
dialyzer_races:store_race_call(Fun, ArgTypes, Args,
{File, Line}, State);
@@ -3090,7 +3090,7 @@ state__add_warning(#state{warnings = Warnings, warning_mode = true} = State,
Ann = cerl:get_ann(Tree),
case Force of
true ->
- WarningInfo = {get_file(Ann),
+ WarningInfo = {get_file(Ann, State),
abs(get_line(Ann)),
State#state.curr_fun},
Warn = {Tag, WarningInfo, Msg},
@@ -3100,7 +3100,9 @@ state__add_warning(#state{warnings = Warnings, warning_mode = true} = State,
case is_compiler_generated(Ann) of
true -> State;
false ->
- WarningInfo = {get_file(Ann), get_line(Ann), State#state.curr_fun},
+ WarningInfo = {get_file(Ann, State),
+ get_line(Ann),
+ State#state.curr_fun},
Warn = {Tag, WarningInfo, Msg},
case Tag of
?WARN_CONTRACT_RANGE -> ok;
@@ -3499,6 +3501,12 @@ state__put_races(Races, State) ->
state__records_only(#state{records = Records}) ->
#state{records = Records}.
+-spec state__translate_file(file:filename(), state()) -> file:filename().
+
+state__translate_file(FakeFile, State) ->
+ #state{codeserver = CodeServer, module = Module} = State,
+ dialyzer_codeserver:translate_fake_file(CodeServer, Module, FakeFile).
+
%%% ===========================================================================
%%%
%%% Races
@@ -3570,9 +3578,11 @@ get_line([Line|_]) when is_integer(Line) -> Line;
get_line([_|Tail]) -> get_line(Tail);
get_line([]) -> -1.
-get_file([]) -> [];
-get_file([{file, File}|_]) -> File;
-get_file([_|Tail]) -> get_file(Tail).
+get_file([], _State) -> [];
+get_file([{file, FakeFile}|_], State) ->
+ state__translate_file(FakeFile, State);
+get_file([_|Tail], State) ->
+ get_file(Tail, State).
is_compiler_generated(Ann) ->
lists:member(compiler_generated, Ann) orelse (get_line(Ann) < 1).
diff --git a/lib/dialyzer/src/dialyzer_gui_wx.erl b/lib/dialyzer/src/dialyzer_gui_wx.erl
index 4caf64d007..1701aff2f2 100644
--- a/lib/dialyzer/src/dialyzer_gui_wx.erl
+++ b/lib/dialyzer/src/dialyzer_gui_wx.erl
@@ -505,8 +505,9 @@ gui_loop(#gui_state{backend_pid = BackendPid, doc_plt = DocPlt,
end,
ExplanationPid = spawn_link(Fun),
gui_loop(State#gui_state{expl_pid = ExplanationPid});
- {BackendPid, done, _NewPlt, NewDocPlt} ->
+ {BackendPid, done, NewMiniPlt, NewDocPlt} ->
message(State, "Analysis done"),
+ dialyzer_plt:delete(NewMiniPlt),
config_gui_stop(State),
gui_loop(State#gui_state{doc_plt = NewDocPlt});
{'EXIT', BackendPid, {error, Reason}} ->
diff --git a/lib/dialyzer/src/dialyzer_plt.erl b/lib/dialyzer/src/dialyzer_plt.erl
index cf2f0e919e..0eda73a208 100644
--- a/lib/dialyzer/src/dialyzer_plt.erl
+++ b/lib/dialyzer/src/dialyzer_plt.erl
@@ -58,7 +58,9 @@
get_specs/4,
to_file/4,
get_mini_plt/1,
- restore_full_plt/2
+ restore_full_plt/1,
+ delete/1,
+ give_away/2
]).
%% Debug utilities
@@ -82,14 +84,16 @@
%%----------------------------------------------------------------------
-record(plt, {info = table_new() :: dict:dict(),
- types = table_new() :: dict:dict(),
+ types = table_new() :: erl_types:mod_records(),
contracts = table_new() :: dict:dict(),
callbacks = table_new() :: dict:dict(),
exported_types = sets:new() :: sets:set()}).
-record(mini_plt, {info :: ets:tid(),
+ types :: ets:tid(),
contracts :: ets:tid(),
- callbacks :: ets:tid()
+ callbacks :: ets:tid(),
+ exported_types :: ets:tid()
}).
-opaque plt() :: #plt{} | #mini_plt{}.
@@ -130,6 +134,10 @@ delete_module(#plt{info = Info, types = Types,
-spec delete_list(plt(), [mfa() | integer()]) -> plt().
+delete_list(#mini_plt{info = Info,
+ contracts = Contracts}=Plt, List) ->
+ Plt#mini_plt{info = ets_table_delete_list(Info, List),
+ contracts = ets_table_delete_list(Contracts, List)};
delete_list(#plt{info = Info, types = Types,
contracts = Contracts,
callbacks = Callbacks,
@@ -183,7 +191,7 @@ lookup(Plt, Label) when is_integer(Label) ->
lookup_1(#mini_plt{info = Info}, MFAorLabel) ->
ets_table_lookup(Info, MFAorLabel).
--spec insert_types(plt(), dict:dict()) -> plt().
+-spec insert_types(plt(), erl_types:mod_records()) -> plt().
insert_types(PLT, Rec) ->
PLT#plt{types = Rec}.
@@ -193,7 +201,7 @@ insert_types(PLT, Rec) ->
insert_exported_types(PLT, Set) ->
PLT#plt{exported_types = Set}.
--spec get_types(plt()) -> dict:dict().
+-spec get_types(plt()) -> erl_types:mod_records().
get_types(#plt{types = Types}) ->
Types.
@@ -253,8 +261,10 @@ from_file(FileName, ReturnInfo) ->
Msg = io_lib:format("Old PLT file ~s\n", [FileName]),
plt_error(Msg);
ok ->
+ Types = [{Mod, maps:from_list(dict:to_list(Types))} ||
+ {Mod, Types} <- dict:to_list(Rec#file_plt.types)],
Plt = #plt{info = Rec#file_plt.info,
- types = Rec#file_plt.types,
+ types = dict:from_list(Types),
contracts = Rec#file_plt.contracts,
callbacks = Rec#file_plt.callbacks,
exported_types = Rec#file_plt.exported_types},
@@ -371,12 +381,14 @@ to_file(FileName,
end,
OldModDeps, ModDeps),
ImplMd5 = compute_implementation_md5(),
+ FileTypes = dict:from_list([{Mod, dict:from_list(maps:to_list(MTypes))} ||
+ {Mod, MTypes} <- dict:to_list(Types)]),
Record = #file_plt{version = ?VSN,
file_md5_list = MD5,
info = Info,
contracts = Contracts,
callbacks = Callbacks,
- types = Types,
+ types = FileTypes,
exported_types = ExpTypes,
mod_deps = NewModDeps,
implementation_md5 = ImplMd5},
@@ -510,32 +522,100 @@ init_md5_list_1(Md5List, [], Acc) ->
-spec get_mini_plt(plt()) -> plt().
-get_mini_plt(#plt{info = Info, contracts = Contracts, callbacks = Callbacks}) ->
- [ETSInfo, ETSContracts, ETSCallbacks] =
- [ets:new(Name, [public]) || Name <- [plt_info, plt_contracts, plt_callbacks]],
+get_mini_plt(#plt{info = Info,
+ types = Types,
+ contracts = Contracts,
+ callbacks = Callbacks,
+ exported_types = ExpTypes}) ->
+ [ETSInfo, ETSTypes, ETSContracts, ETSCallbacks, ETSExpTypes] =
+ [ets:new(Name, [public]) ||
+ Name <- [plt_info, plt_types, plt_contracts, plt_callbacks,
+ plt_exported_types]],
CallbackList = dict:to_list(Callbacks),
CallbacksByModule =
[{M, [Cb || {{M1,_,_},_} = Cb <- CallbackList, M1 =:= M]} ||
M <- lists:usort([M || {{M,_,_},_} <- CallbackList])],
- [true, true] =
+ [true, true, true] =
[ets:insert(ETS, dict:to_list(Data)) ||
- {ETS, Data} <- [{ETSInfo, Info}, {ETSContracts, Contracts}]],
+ {ETS, Data} <- [{ETSInfo, Info},
+ {ETSTypes, Types},
+ {ETSContracts, Contracts}]],
true = ets:insert(ETSCallbacks, CallbacksByModule),
- #mini_plt{info = ETSInfo, contracts = ETSContracts, callbacks = ETSCallbacks};
+ true = ets:insert(ETSExpTypes, [{ET} || ET <- sets:to_list(ExpTypes)]),
+ #mini_plt{info = ETSInfo,
+ types = ETSTypes,
+ contracts = ETSContracts,
+ callbacks = ETSCallbacks,
+ exported_types = ETSExpTypes};
get_mini_plt(undefined) ->
undefined.
--spec restore_full_plt(plt(), plt()) -> plt().
-
-restore_full_plt(#mini_plt{info = ETSInfo, contracts = ETSContracts}, Plt) ->
- Info = dict:from_list(ets:tab2list(ETSInfo)),
- Contracts = dict:from_list(ets:tab2list(ETSContracts)),
- ets:delete(ETSContracts),
- ets:delete(ETSInfo),
- Plt#plt{info = Info, contracts = Contracts};
-restore_full_plt(undefined, undefined) ->
+-spec restore_full_plt(plt()) -> plt().
+
+restore_full_plt(#mini_plt{info = ETSInfo,
+ types = ETSTypes,
+ contracts = ETSContracts,
+ callbacks = ETSCallbacks,
+ exported_types = ETSExpTypes} = MiniPlt) ->
+ Info = dict:from_list(tab2list(ETSInfo)),
+ Contracts = dict:from_list(tab2list(ETSContracts)),
+ Types = dict:from_list(tab2list(ETSTypes)),
+ Callbacks =
+ dict:from_list([Cb || {_M, Cbs} <- tab2list(ETSCallbacks), Cb <- Cbs]),
+ ExpTypes = sets:from_list([E || {E} <- tab2list(ETSExpTypes)]),
+ ok = delete(MiniPlt),
+ #plt{info = Info,
+ types = Types,
+ contracts = Contracts,
+ callbacks = Callbacks,
+ exported_types = ExpTypes};
+restore_full_plt(undefined) ->
undefined.
+-spec delete(plt()) -> 'ok'.
+
+delete(#mini_plt{info = ETSInfo,
+ types = ETSTypes,
+ contracts = ETSContracts,
+ callbacks = ETSCallbacks,
+ exported_types = ETSExpTypes}) ->
+ true = ets:delete(ETSContracts),
+ true = ets:delete(ETSTypes),
+ true = ets:delete(ETSInfo),
+ true = ets:delete(ETSCallbacks),
+ true = ets:delete(ETSExpTypes),
+ ok.
+
+-spec give_away(plt(), pid()) -> 'ok'.
+
+give_away(#mini_plt{info = ETSInfo,
+ types = ETSTypes,
+ contracts = ETSContracts,
+ callbacks = ETSCallbacks,
+ exported_types = ETSExpTypes},
+ Pid) ->
+ true = ets:give_away(ETSContracts, Pid, any),
+ true = ets:give_away(ETSTypes, Pid, any),
+ true = ets:give_away(ETSInfo, Pid, any),
+ true = ets:give_away(ETSCallbacks, Pid, any),
+ true = ets:give_away(ETSExpTypes, Pid, any),
+ ok.
+
+%% Somewhat slower than ets:tab2list(), but uses less memory.
+tab2list(T) ->
+ tab2list(ets:first(T), T, []).
+
+tab2list('$end_of_table', T, A) ->
+ case ets:first(T) of % no safe_fixtable()...
+ '$end_of_table' -> A;
+ Key -> tab2list(Key, T, A)
+ end;
+tab2list(Key, T, A) ->
+ Vs = ets:lookup(T, Key),
+ Key1 = ets:next(T, Key),
+ ets:delete(T, Key),
+ tab2list(Key1, T, Vs ++ A).
+
%%---------------------------------------------------------------------------
%% Edoc
@@ -607,6 +687,12 @@ table_delete_module1(Plt, Mod) ->
table_delete_module2(Plt, Mod) ->
dict:filter(fun(M, _Val) -> M =/= Mod end, Plt).
+ets_table_delete_list(Tab, [H|T]) ->
+ ets:delete(Tab, H),
+ ets_table_delete_list(Tab, T);
+ets_table_delete_list(Tab, []) ->
+ Tab.
+
table_delete_list(Plt, [H|T]) ->
table_delete_list(dict:erase(H, Plt), T);
table_delete_list(Plt, []) ->
diff --git a/lib/dialyzer/src/dialyzer_succ_typings.erl b/lib/dialyzer/src/dialyzer_succ_typings.erl
index 987da3aecf..0e44a5223f 100644
--- a/lib/dialyzer/src/dialyzer_succ_typings.erl
+++ b/lib/dialyzer/src/dialyzer_succ_typings.erl
@@ -96,7 +96,7 @@ analyze_callgraph(Callgraph, Plt, Codeserver, TimingServer, Solvers, Parent) ->
NewState =
init_state_and_get_success_typings(Callgraph, Plt, Codeserver,
TimingServer, Solvers, Parent),
- dialyzer_plt:restore_full_plt(NewState#st.plt, Plt).
+ NewState#st.plt.
%%--------------------------------------------------------------------
@@ -111,6 +111,7 @@ init_state_and_get_success_typings(Callgraph, Plt, Codeserver,
get_refined_success_typings(SCCs, #st{callgraph = Callgraph,
timing_server = TimingServer} = State) ->
+ erlang:garbage_collect(),
case find_succ_typings(SCCs, State) of
{fixpoint, State1} -> State1;
{not_fixpoint, NotFixpoint1, State1} ->
@@ -155,8 +156,8 @@ get_warnings(Callgraph, Plt, DocPlt, Codeserver,
?timing(TimingServer, "warning",
get_warnings_from_modules(Mods, InitState, MiniDocPlt)),
{postprocess_warnings(CWarns ++ ModWarns, Codeserver),
- dialyzer_plt:restore_full_plt(MiniPlt, Plt),
- dialyzer_plt:restore_full_plt(MiniDocPlt, DocPlt)}.
+ MiniPlt,
+ dialyzer_plt:restore_full_plt(MiniDocPlt)}.
get_warnings_from_modules(Mods, State, DocPlt) ->
#st{callgraph = Callgraph, codeserver = Codeserver,
@@ -174,10 +175,10 @@ collect_warnings(M, {Codeserver, Callgraph, Plt, DocPlt}) ->
%% Check if there are contracts for functions that do not exist
Warnings1 =
dialyzer_contracts:contracts_without_fun(Contracts, AllFuns, Callgraph),
+ Attrs = cerl:module_attrs(ModCode),
{Warnings2, FunTypes} =
dialyzer_dataflow:get_warnings(ModCode, Plt, Callgraph, Codeserver,
Records),
- Attrs = cerl:module_attrs(ModCode),
Warnings3 =
dialyzer_behaviours:check_callbacks(M, Attrs, Records, Plt, Codeserver),
DocPlt = insert_into_doc_plt(FunTypes, Callgraph, DocPlt),
@@ -262,7 +263,7 @@ refine_one_module(M, {CodeServer, Callgraph, Plt, _Solvers}) ->
NewFunTypes =
dialyzer_dataflow:get_fun_types(ModCode, Plt, Callgraph, CodeServer, Records),
Contracts1 = dialyzer_codeserver:lookup_mod_contracts(M, CodeServer),
- Contracts = orddict:from_list(dict:to_list(Contracts1)),
+ Contracts = orddict:from_list(maps:to_list(Contracts1)),
FindOpaques = find_opaques_fun(Records),
DecoratedFunTypes =
decorate_succ_typings(Contracts, Callgraph, NewFunTypes, FindOpaques),
@@ -348,21 +349,25 @@ find_succ_typings(SCCs, #st{codeserver = Codeserver, callgraph = Callgraph,
-spec find_succ_types_for_scc(scc(), typesig_init_data()) -> [mfa_or_funlbl()].
-find_succ_types_for_scc(SCC, {Codeserver, Callgraph, Plt, Solvers}) ->
- SCC_Info = [{MFA,
- dialyzer_codeserver:lookup_mfa_code(MFA, Codeserver),
- dialyzer_codeserver:lookup_mod_records(M, Codeserver)}
- || {M, _, _} = MFA <- SCC],
+find_succ_types_for_scc(SCC0, {Codeserver, Callgraph, Plt, Solvers}) ->
+ SCC = [MFA || {_, _, _} = MFA <- SCC0],
Contracts1 = [{MFA, dialyzer_codeserver:lookup_mfa_contract(MFA, Codeserver)}
- || {_, _, _} = MFA <- SCC],
+ || MFA <- SCC],
Contracts2 = [{MFA, Contract} || {MFA, {ok, Contract}} <- Contracts1],
Contracts3 = orddict:from_list(Contracts2),
Label = dialyzer_codeserver:get_next_core_label(Codeserver),
- AllFuns = collect_fun_info([Fun || {_MFA, {_Var, Fun}, _Rec} <- SCC_Info]),
+ AllFuns = lists:append(
+ [begin
+ {_Var, Fun} =
+ dialyzer_codeserver:lookup_mfa_code(MFA, Codeserver),
+ collect_fun_info([Fun])
+ end || MFA <- SCC]),
+ erlang:garbage_collect(),
PropTypes = get_fun_types_from_plt(AllFuns, Callgraph, Plt),
%% Assume that the PLT contains the current propagated types
- FunTypes = dialyzer_typesig:analyze_scc(SCC_Info, Label, Callgraph,
- Plt, PropTypes, Solvers),
+ FunTypes = dialyzer_typesig:analyze_scc(SCC, Label, Callgraph,
+ Codeserver, Plt, PropTypes,
+ Solvers),
AllFunSet = sets:from_list([X || {X, _} <- AllFuns]),
FilteredFunTypes =
dict:filter(fun(X, _) -> sets:is_element(X, AllFunSet) end, FunTypes),
diff --git a/lib/dialyzer/src/dialyzer_typesig.erl b/lib/dialyzer/src/dialyzer_typesig.erl
index 1787b66192..e8d9c06799 100644
--- a/lib/dialyzer/src/dialyzer_typesig.erl
+++ b/lib/dialyzer/src/dialyzer_typesig.erl
@@ -2,7 +2,7 @@
%%-----------------------------------------------------------------------
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2006-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2006-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@
-module(dialyzer_typesig).
--export([analyze_scc/6]).
+-export([analyze_scc/7]).
-export([get_safe_underapprox/2]).
%%-import(helper, %% 'helper' could be any module doing sanity checks...
@@ -101,10 +101,9 @@
-type types() :: erl_types:type_table().
--type typesig_scc() :: [{mfa(), {cerl:c_var(), cerl:c_fun()}, types()}].
-type typesig_funmap() :: #{type_var() => type_var()}.
--type prop_types() :: dict:dict(label(), types()).
+-type prop_types() :: dict:dict(label(), erl_types:erl_type()).
-record(state, {callgraph :: dialyzer_callgraph:callgraph()
| 'undefined',
@@ -121,7 +120,7 @@
plt :: dialyzer_plt:plt()
| 'undefined',
prop_types = dict:new() :: prop_types(),
- records = dict:new() :: types(),
+ records = maps:new() :: types(),
scc = [] :: ordsets:ordset(type_var()),
mfas :: [mfa()],
solvers = [] :: [solver()]
@@ -160,11 +159,10 @@
%%-----------------------------------------------------------------------------
%% Analysis of strongly connected components.
%%
-%% analyze_scc(SCC, NextLabel, CallGraph, PLT, PropTypes, Solvers) -> FunTypes
+%% analyze_scc(SCC, NextLabel, CallGraph, CodeServer,
+%% PLT, PropTypes, Solvers) -> FunTypes
%%
-%% SCC - [{MFA, Def, Records}]
-%% where Def = {Var, Fun} as in the Core Erlang module definitions.
-%% Records = dict(RecName, {Arity, [{FieldName, FieldType}]})
+%% SCC - [MFA] (a list of {Module, Function, Arity} tuples)
%% NextLabel - An integer that is higher than any label in the code.
%% CallGraph - A callgraph as produced by dialyzer_callgraph.erl
%% Note: The callgraph must have been built with all the
@@ -176,28 +174,27 @@
%% Solvers - User specified solvers.
%%-----------------------------------------------------------------------------
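%% A minimal call sketch consistent with the description above (module and
%% function names are hypothetical; an empty dict stands in for PropTypes):
%%
%%   SCC       = [{mymod, f, 1}, {mymod, g, 2}],   % plain MFAs, no Def/Records
%%   NextLabel = dialyzer_codeserver:get_next_core_label(CServer),
%%   FunTypes  = dialyzer_typesig:analyze_scc(SCC, NextLabel, Callgraph,
%%                                            CServer, Plt, dict:new(), [v2]),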
--spec analyze_scc(typesig_scc(), label(),
+-spec analyze_scc([mfa()], label(),
dialyzer_callgraph:callgraph(),
+ dialyzer_codeserver:codeserver(),
dialyzer_plt:plt(), prop_types(), [solver()]) -> prop_types().
-analyze_scc(SCC, NextLabel, CallGraph, Plt, PropTypes, Solvers0) ->
+analyze_scc(SCC, NextLabel, CallGraph, CServer, Plt, PropTypes, Solvers0) ->
Solvers = solvers(Solvers0),
- assert_format_of_scc(SCC),
- State1 = new_state(SCC, NextLabel, CallGraph, Plt, PropTypes, Solvers),
- DefSet = add_def_list([Var || {_MFA, {Var, _Fun}, _Rec} <- SCC], sets:new()),
- State2 = traverse_scc(SCC, DefSet, State1),
+ State1 = new_state(SCC, NextLabel, CallGraph, CServer, Plt, PropTypes,
+ Solvers),
+ DefSet = add_def_list(maps:values(State1#state.name_map), sets:new()),
+ ModRecs = [{M, dialyzer_codeserver:lookup_mod_records(M, CServer)} ||
+ M <- lists:usort([M || {M, _, _} <- SCC])],
+ State2 = traverse_scc(SCC, CServer, DefSet, ModRecs, State1),
State3 = state__finalize(State2),
+ erlang:garbage_collect(),
Funs = state__scc(State3),
pp_constrs_scc(Funs, State3),
constraints_to_dot_scc(Funs, State3),
T = solve(Funs, State3),
dict:from_list(maps:to_list(T)).
-assert_format_of_scc([{_MFA, {_Var, _Fun}, _Records}|Left]) ->
- assert_format_of_scc(Left);
-assert_format_of_scc([]) ->
- ok.
-
solvers([]) -> [v2];
solvers(Solvers) -> Solvers.
@@ -207,12 +204,15 @@ solvers(Solvers) -> Solvers.
%%
%% ============================================================================
-traverse_scc([{_MFA, Def, Rec}|Left], DefSet, AccState) ->
+traverse_scc([{M,_,_}=MFA|Left], Codeserver, DefSet, ModRecs, AccState) ->
+ Def = dialyzer_codeserver:lookup_mfa_code(MFA, Codeserver),
+ {M, Rec} = lists:keyfind(M, 1, ModRecs),
TmpState1 = state__set_rec_dict(AccState, Rec),
DummyLetrec = cerl:c_letrec([Def], cerl:c_atom(foo)),
- {NewAccState, _} = traverse(DummyLetrec, DefSet, TmpState1),
- traverse_scc(Left, DefSet, NewAccState);
-traverse_scc([], _DefSet, AccState) ->
+ TmpState2 = state__new_constraint_context(TmpState1),
+ {NewAccState, _} = traverse(DummyLetrec, DefSet, TmpState2),
+ traverse_scc(Left, Codeserver, DefSet, ModRecs, NewAccState);
+traverse_scc([], _Codeserver, _DefSet, _ModRecs, AccState) ->
AccState.
traverse(Tree, DefinedVars, State) ->
@@ -2088,6 +2088,8 @@ v2_solve_disjunct(Disj, Map, V2State0) ->
var_occurs_everywhere(V, Masks, NotFailed) ->
ordsets:is_subset(NotFailed, get_mask(V, Masks)).
+-dialyzer({no_improper_lists, [v2_solve_disj/10, v2_solve_conj/12]}).
+
v2_solve_disj([I|Is], [C|Cs], I, Map0, V2State0, UL, MapL, Eval, Uneval,
Failed0) ->
Id = C#constraint_list.id,
@@ -2106,6 +2108,12 @@ v2_solve_disj([I|Is], [C|Cs], I, Map0, V2State0, UL, MapL, Eval, Uneval,
end;
v2_solve_disj([], [], _I, _Map, V2State, UL, MapL, Eval, Uneval, Failed) ->
{ok, V2State, lists:reverse(Eval), UL, MapL, lists:reverse(Uneval), Failed};
+v2_solve_disj(every_i, Cs, I, Map, V2State, UL, MapL, Eval, Uneval, Failed) ->
+ NewIs = case Cs of
+ [] -> [];
+ _ -> [I|every_i]
+ end,
+ v2_solve_disj(NewIs, Cs, I, Map, V2State, UL, MapL, Eval, Uneval, Failed);
v2_solve_disj(Is, [C|Cs], I, Map, V2State, UL, MapL, Eval, Uneval0, Failed) ->
Uneval = [{I,C#constraint_list.id} ||
not is_failed_list(C, V2State)] ++ Uneval0,
@@ -2177,7 +2185,7 @@ v2_solve_conj([I|Is], [Cs|Tail], I, Map0, Conj, IsFlat, V2State0,
M = lists:keydelete(I, 1, vars_per_child(U, Masks)),
{V2State2, NewF0} = save_updated_vars_list(AllCs, M, V2State1),
{NewF, F} = lists:splitwith(fun(J) -> J < I end, NewF0),
- Is1 = lists:umerge(Is, F),
+ Is1 = umerge_mask(Is, F),
NewFs = [NewF|NewFs0],
v2_solve_conj(Is1, Tail, I+1, Map, Conj, IsFlat, V2State2,
[U|UL], NewFs, VarsUp, LastMap, LastFlags)
@@ -2199,6 +2207,14 @@ v2_solve_conj([], _Cs, _I, Map, Conj, IsFlat, V2State, UL, NewFs, VarsUp,
v2_solve_conj(NewFlags, Cs, 1, Map, Conj, IsFlat, V2State,
[], [], [U|VarsUp], Map, NewFlags)
end;
+v2_solve_conj(every_i, Cs, I, Map, Conj, IsFlat, V2State, UL, NewFs, VarsUp,
+ LastMap, LastFlags) ->
+ NewIs = case Cs of
+ [] -> [];
+ _ -> [I|every_i]
+ end,
+ v2_solve_conj(NewIs, Cs, I, Map, Conj, IsFlat, V2State, UL, NewFs, VarsUp,
+ LastMap, LastFlags);
v2_solve_conj(Is, [_|Tail], I, Map, Conj, IsFlat, V2State, UL, NewFs, VarsUp,
LastMap, LastFlags) ->
v2_solve_conj(Is, Tail, I+1, Map, Conj, IsFlat, V2State, UL, NewFs, VarsUp,
@@ -2215,7 +2231,12 @@ report_detected_loop(_) ->
add_mask_to_flags(Flags, [Im|M], I, L) when I > Im ->
add_mask_to_flags(Flags, M, I, [Im|L]);
add_mask_to_flags(Flags, [_|M], _I, L) ->
- {lists:umerge(M, Flags), lists:reverse(L)}.
+ {umerge_mask(Flags, M), lists:reverse(L)}.
+
+umerge_mask(every_i, _F) ->
+ every_i;
+umerge_mask(Is, F) ->
+ lists:umerge(Is, F).
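%% Illustration of the every_i shortcut used by v2_solve_disj/10 and
%% v2_solve_conj/12: it stands for the full index list 1..length(Cs) without
%% materialising it, peeling off one index per step. With three constraints
%% the index argument expands to [1|every_i], then [2|every_i], then
%% [3|every_i], and finally to [] once the constraint list is exhausted.
%% umerge_mask/2 keeps the shortcut intact:
%%   umerge_mask(every_i, [2,5]) -> every_i
%%   umerge_mask([1,4],   [2,5]) -> [1,2,4,5]      % plain lists:umerge/2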
get_mask(V, Masks) ->
case maps:find(V, Masks) of
@@ -2229,7 +2250,7 @@ get_flags(#v2_state{constr_data = ConData}=V2State0, C) ->
error ->
?debug("get_flags Id=~w Flags=all ~w\n", [Id, length(Cs)]),
V2State = V2State0#v2_state{constr_data = maps:put(Id, {[],[]}, ConData)},
- {V2State, lists:seq(1, length(Cs))};
+ {V2State, every_i};
{ok, failed} ->
{V2State0, failed_list};
{ok, {Part,U}} when U =/= [] ->
@@ -2702,11 +2723,14 @@ pp_map(_S, _Map) ->
%%
%% ============================================================================
-new_state(SCC0, NextLabel, CallGraph, Plt, PropTypes, Solvers) ->
- List = [{MFA, Var} || {MFA, {Var, _Fun}, _Rec} <- SCC0],
+new_state(MFAs, NextLabel, CallGraph, CServer, Plt, PropTypes, Solvers) ->
+ List_SCC =
+ [begin
+ {Var, Label} = dialyzer_codeserver:lookup_mfa_var_label(MFA, CServer),
+ {{MFA, Var}, t_var(Label)}
+ end || MFA <- MFAs],
+ {List, SCC} = lists:unzip(List_SCC),
NameMap = maps:from_list(List),
- MFAs = [MFA || {MFA, _Var} <- List],
- SCC = [mk_var(Fun) || {_MFA, {_Var, Fun}, _Rec} <- SCC0],
SelfRec =
case SCC of
[OneF] ->
@@ -2906,8 +2930,9 @@ state__get_rec_var(Fun, #state{fun_map = Map}) ->
maps:find(Fun, Map).
state__finalize(State) ->
- State1 = enumerate_constraints(State),
- order_fun_constraints(State1).
+ State1 = state__new_constraint_context(State),
+ State2 = enumerate_constraints(State1),
+ order_fun_constraints(State2).
%% ============================================================================
%%
@@ -2987,7 +3012,7 @@ find_constraint_deps([Type|Tail], Acc) ->
NewAcc = [[t_var_name(D) || D <- t_collect_vars(Type)]|Acc],
find_constraint_deps(Tail, NewAcc);
find_constraint_deps([], Acc) ->
- lists:flatten(Acc).
+ lists:append(Acc).
mk_constraint_1(Lhs, eq, Rhs, Deps) when Lhs < Rhs ->
#constraint{lhs = Lhs, op = eq, rhs = Rhs, deps = Deps};
@@ -3095,8 +3120,8 @@ expand_to_conjunctions(#constraint_list{type = disj, list = List}) ->
List1 = [C || C <- List, is_simple_constraint(C)],
%% Just an assert.
[] = [C || #constraint{} = C <- List1],
- Expanded = lists:flatten([expand_to_conjunctions(C)
- || #constraint_list{} = C <- List]),
+ Expanded = lists:append([expand_to_conjunctions(C)
+ || #constraint_list{} = C <- List]),
ReturnList = Expanded ++ List1,
if length(ReturnList) > ?DISJ_NORM_FORM_LIMIT -> throw(too_many_disj);
true -> ReturnList
@@ -3121,8 +3146,10 @@ calculate_deps(List) ->
calculate_deps([H|Tail], Acc) ->
Deps = get_deps(H),
calculate_deps(Tail, [Deps|Acc]);
+calculate_deps([], []) -> [];
+calculate_deps([], [L]) -> L;
calculate_deps([], Acc) ->
- ordsets:from_list(lists:flatten(Acc)).
+ lists:umerge(Acc).
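%% lists:umerge/1 assumes each deps list in Acc is already an ordset (sorted
%% and duplicate-free); under that assumption it yields the same result as the
%% old flatten-and-sort, e.g. lists:umerge([[1,3], [2,3], [3,4]]) =:= [1,2,3,4].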
mk_conj_constraint_list(List) ->
mk_constraint_list(conj, List).
diff --git a/lib/dialyzer/src/dialyzer_utils.erl b/lib/dialyzer/src/dialyzer_utils.erl
index 1f2d3e3aaa..e71a953279 100644
--- a/lib/dialyzer/src/dialyzer_utils.erl
+++ b/lib/dialyzer/src/dialyzer_utils.erl
@@ -202,7 +202,7 @@ get_core_from_abstract_code(AbstrCode, Opts) ->
get_record_and_type_info(AbstractCode) ->
Module = get_module(AbstractCode),
- get_record_and_type_info(AbstractCode, Module, dict:new()).
+ get_record_and_type_info(AbstractCode, Module, maps:new()).
-spec get_record_and_type_info(abstract_code(), module(), type_table()) ->
{'ok', type_table()} | {'error', string()}.
@@ -215,7 +215,7 @@ get_record_and_type_info([{attribute, A, record, {Name, Fields0}}|Left],
{ok, Fields} = get_record_fields(Fields0, RecDict),
Arity = length(Fields),
FN = {File, erl_anno:line(A)},
- NewRecDict = dict:store({record, Name}, {FN, [{Arity,Fields}]}, RecDict),
+ NewRecDict = maps:put({record, Name}, {FN, [{Arity,Fields}]}, RecDict),
get_record_and_type_info(Left, Module, NewRecDict, File);
get_record_and_type_info([{attribute, A, type, {{record, Name}, Fields0, []}}
|Left], Module, RecDict, File) ->
@@ -223,7 +223,7 @@ get_record_and_type_info([{attribute, A, type, {{record, Name}, Fields0, []}}
{ok, Fields} = get_record_fields(Fields0, RecDict),
Arity = length(Fields),
FN = {File, erl_anno:line(A)},
- NewRecDict = dict:store({record, Name}, {FN, [{Arity, Fields}]}, RecDict),
+ NewRecDict = maps:put({record, Name}, {FN, [{Arity, Fields}]}, RecDict),
get_record_and_type_info(Left, Module, NewRecDict, File);
get_record_and_type_info([{attribute, A, Attr, {Name, TypeForm}}|Left],
Module, RecDict, File)
@@ -263,9 +263,9 @@ add_new_type(TypeOrOpaque, Name, TypeForm, ArgForms, Module, FN,
false ->
try erl_types:t_var_names(ArgForms) of
ArgNames ->
- dict:store({TypeOrOpaque, Name, Arity},
- {{Module, FN, TypeForm, ArgNames},
- erl_types:t_any()}, RecDict)
+ maps:put({TypeOrOpaque, Name, Arity},
+ {{Module, FN, TypeForm, ArgNames},
+ erl_types:t_any()}, RecDict)
catch
_:_ ->
throw({error, flat_format("Type declaration for ~w does not "
@@ -296,19 +296,18 @@ get_record_fields([{record_field, _Line, Name, _Init}|Left], RecDict, Acc) ->
get_record_fields([], _RecDict, Acc) ->
lists:reverse(Acc).
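%% With the dict-to-map switch above, the table threaded through these
%% functions is an ordinary map; the key/value shapes built here and in
%% add_new_type below are roughly:
%%
%%   #{{record, Name}        => {{File, Line}, [{Arity, Fields}]},
%%     {type,   Name, Arity} => {{Module, {File, Line}, Form, ArgNames}, t_any()},
%%     {opaque, Name, Arity} => {{Module, {File, Line}, Form, ArgNames}, t_any()}}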
--spec process_record_remote_types(codeserver()) -> codeserver().
+-spec process_record_remote_types(codeserver()) ->
+ {codeserver(), mod_records()}.
%% The field types are cached. Used during analysis when handling records.
process_record_remote_types(CServer) ->
TempRecords = dialyzer_codeserver:get_temp_records(CServer),
ExpTypes = dialyzer_codeserver:get_exported_types(CServer),
- Cache = erl_types:cache__new(),
- {TempRecords1, Cache1} =
- process_opaque_types0(TempRecords, ExpTypes, Cache),
+ TempRecords1 = process_opaque_types0(TempRecords, ExpTypes),
%% A cache (not the field type cache) is used for speeding things up a bit.
VarTable = erl_types:var_table__new(),
ModuleFun =
- fun({Module, Record}, C0) ->
+ fun({Module, Record}) ->
RecordFun =
fun({Key, Value}, C2) ->
case Key of
@@ -334,24 +333,27 @@ process_record_remote_types(CServer) ->
_Other -> {{Key, Value}, C2}
end
end,
- {RecordList, C1} =
- lists:mapfoldl(RecordFun, C0, dict:to_list(Record)),
- {{Module, dict:from_list(RecordList)}, C1}
+ Cache = erl_types:cache__new(),
+ {RecordList, _NewCache} =
+ lists:mapfoldl(RecordFun, Cache, maps:to_list(Record)),
+ {Module, maps:from_list(RecordList)}
end,
- {NewRecordsList, C1} =
- lists:mapfoldl(ModuleFun, Cache1, dict:to_list(TempRecords1)),
+ NewRecordsList = lists:map(ModuleFun, dict:to_list(TempRecords1)),
NewRecords = dict:from_list(NewRecordsList),
- _C8 = check_record_fields(NewRecords, ExpTypes, C1),
- dialyzer_codeserver:finalize_records(NewRecords, CServer).
+ check_record_fields(NewRecords, ExpTypes),
+ {dialyzer_codeserver:finalize_records(NewRecords, CServer), NewRecords}.
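%% Given the new return type, callers are expected to destructure the pair;
%% a minimal caller-side sketch (variable names are assumptions):
%%
%%   {CServer1, Records} = dialyzer_utils:process_record_remote_types(CServer0),
%%   %% Records :: erl_types:mod_records(), keyed by module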
%% erl_types:t_from_form() substitutes the declaration of opaque types
%% for the expanded type in some cases. To make sure the initial type,
%% any(), is not used, the expansion is done twice.
%% XXX: Recursive opaque types are not handled well.
-process_opaque_types0(TempRecords0, TempExpTypes, Cache) ->
- {TempRecords1, NewCache} =
+process_opaque_types0(TempRecords0, TempExpTypes) ->
+ Cache = erl_types:cache__new(),
+ {TempRecords1, Cache1} =
process_opaque_types(TempRecords0, TempExpTypes, Cache),
- process_opaque_types(TempRecords1, TempExpTypes, NewCache).
+ {TempRecords, _NewCache} =
+ process_opaque_types(TempRecords1, TempExpTypes, Cache1),
+ TempRecords.
process_opaque_types(TempRecords, TempExpTypes, Cache) ->
VarTable = erl_types:var_table__new(),
@@ -371,8 +373,8 @@ process_opaque_types(TempRecords, TempExpTypes, Cache) ->
end
end,
{RecordList, C1} =
- lists:mapfoldl(RecordFun, C0, dict:to_list(Record)),
- {{Module, dict:from_list(RecordList)}, C1}
+ lists:mapfoldl(RecordFun, C0, maps:to_list(Record)),
+ {{Module, maps:from_list(RecordList)}, C1}
%% dict:map(RecordFun, Record)
end,
{TempRecordList, NewCache} =
@@ -380,7 +382,8 @@ process_opaque_types(TempRecords, TempExpTypes, Cache) ->
{dict:from_list(TempRecordList), NewCache}.
%% dict:map(ModuleFun, TempRecords).
-check_record_fields(Records, TempExpTypes, Cache) ->
+check_record_fields(Records, TempExpTypes) ->
+ Cache = erl_types:cache__new(),
VarTable = erl_types:var_table__new(),
CheckFun =
fun({Module, Element}, C0) ->
@@ -410,9 +413,10 @@ check_record_fields(Records, TempExpTypes, Cache) ->
msg_with_position(Fun, FileLine)
end
end,
- lists:foldl(ElemFun, C0, dict:to_list(Element))
+ lists:foldl(ElemFun, C0, maps:to_list(Element))
end,
- lists:foldl(CheckFun, Cache, dict:to_list(Records)).
+ _NewCache = lists:foldl(CheckFun, Cache, dict:to_list(Records)),
+ ok.
msg_with_position(Fun, FileLine) ->
try Fun()
@@ -435,17 +439,17 @@ merge_records(NewRecords, OldRecords) ->
%%
%% ============================================================================
--type spec_dict() :: dict:dict().
--type callback_dict() :: dict:dict().
+-type spec_map() :: dialyzer_codeserver:contracts().
+-type callback_map() :: dialyzer_codeserver:contracts().
-spec get_spec_info(module(), abstract_code(), type_table()) ->
- {'ok', spec_dict(), callback_dict()} | {'error', string()}.
+ {'ok', spec_map(), callback_map()} | {'error', string()}.
-get_spec_info(ModName, AbstractCode, RecordsDict) ->
+get_spec_info(ModName, AbstractCode, RecordsMap) ->
OptionalCallbacks0 = get_optional_callbacks(AbstractCode, ModName),
OptionalCallbacks = gb_sets:from_list(OptionalCallbacks0),
- get_spec_info(AbstractCode, dict:new(), dict:new(),
- RecordsDict, ModName, OptionalCallbacks, "nofile").
+ get_spec_info(AbstractCode, maps:new(), maps:new(),
+ RecordsMap, ModName, OptionalCallbacks, "nofile").
get_optional_callbacks(Abs, ModName) ->
[{ModName, F, A} || {F, A} <- get_optional_callbacks(Abs)].
@@ -463,7 +467,7 @@ get_optional_callbacks(Abs) ->
%% are erl_types:erl_type()
get_spec_info([{attribute, Anno, Contract, {Id, TypeSpec}}|Left],
- SpecDict, CallbackDict, RecordsDict, ModName, OptCb, File)
+ SpecMap, CallbackMap, RecordsMap, ModName, OptCb, File)
when ((Contract =:= 'spec') or (Contract =:= 'callback')),
is_list(TypeSpec) ->
Ln = erl_anno:line(Anno),
@@ -472,24 +476,24 @@ get_spec_info([{attribute, Anno, Contract, {Id, TypeSpec}}|Left],
{F, A} -> {ModName, F, A}
end,
Xtra = [optional_callback || gb_sets:is_member(MFA, OptCb)],
- ActiveDict =
+ ActiveMap =
case Contract of
- spec -> SpecDict;
- callback -> CallbackDict
+ spec -> SpecMap;
+ callback -> CallbackMap
end,
- try dict:find(MFA, ActiveDict) of
+ try maps:find(MFA, ActiveMap) of
error ->
SpecData = {TypeSpec, Xtra},
- NewActiveDict =
+ NewActiveMap =
dialyzer_contracts:store_tmp_contract(MFA, {File, Ln}, SpecData,
- ActiveDict, RecordsDict),
- {NewSpecDict, NewCallbackDict} =
+ ActiveMap, RecordsMap),
+ {NewSpecMap, NewCallbackMap} =
case Contract of
- spec -> {NewActiveDict, CallbackDict};
- callback -> {SpecDict, NewActiveDict}
+ spec -> {NewActiveMap, CallbackMap};
+ callback -> {SpecMap, NewActiveMap}
end,
- get_spec_info(Left, NewSpecDict, NewCallbackDict,
- RecordsDict, ModName, OptCb, File);
+ get_spec_info(Left, NewSpecMap, NewCallbackMap,
+ RecordsMap, ModName, OptCb, File);
{ok, {{OtherFile, L}, _D}} ->
{Mod, Fun, Arity} = MFA,
Msg = flat_format(" Contract/callback for function ~w:~w/~w "
@@ -502,16 +506,16 @@ get_spec_info([{attribute, Anno, Contract, {Id, TypeSpec}}|Left],
[Ln, Error])}
end;
get_spec_info([{attribute, _, file, {IncludeFile, _}}|Left],
- SpecDict, CallbackDict, RecordsDict, ModName, OptCb, _File) ->
- get_spec_info(Left, SpecDict, CallbackDict,
- RecordsDict, ModName, OptCb, IncludeFile);
-get_spec_info([_Other|Left], SpecDict, CallbackDict,
- RecordsDict, ModName, OptCb, File) ->
- get_spec_info(Left, SpecDict, CallbackDict,
- RecordsDict, ModName, OptCb, File);
-get_spec_info([], SpecDict, CallbackDict,
- _RecordsDict, _ModName, _OptCb, _File) ->
- {ok, SpecDict, CallbackDict}.
+ SpecMap, CallbackMap, RecordsMap, ModName, OptCb, _File) ->
+ get_spec_info(Left, SpecMap, CallbackMap,
+ RecordsMap, ModName, OptCb, IncludeFile);
+get_spec_info([_Other|Left], SpecMap, CallbackMap,
+ RecordsMap, ModName, OptCb, File) ->
+ get_spec_info(Left, SpecMap, CallbackMap,
+ RecordsMap, ModName, OptCb, File);
+get_spec_info([], SpecMap, CallbackMap,
+ _RecordsMap, _ModName, _OptCb, _File) ->
+ {ok, SpecMap, CallbackMap}.
-spec get_fun_meta_info(module(), abstract_code(), [dial_warn_tag()]) ->
dialyzer_codeserver:fun_meta_info() | {'error', string()}.
@@ -707,7 +711,7 @@ format_errors([]) ->
-spec format_sig(erl_types:erl_type()) -> string().
format_sig(Type) ->
- format_sig(Type, dict:new()).
+ format_sig(Type, maps:new()).
-spec format_sig(erl_types:erl_type(), type_table()) -> string().
@@ -959,9 +963,7 @@ label(Tree) ->
-spec parallelism() -> integer().
parallelism() ->
- CPUs = erlang:system_info(logical_processors_available),
- Schedulers = erlang:system_info(schedulers),
- min(CPUs, Schedulers).
+ erlang:system_info(schedulers_online).
-spec family([{K,V}]) -> [{K,[V]}].
diff --git a/lib/dialyzer/test/behaviour_SUITE_data/dialyzer_options b/lib/dialyzer/test/behaviour_SUITE_data/dialyzer_options
index cb6a88786e..365b4798c5 100644
--- a/lib/dialyzer/test/behaviour_SUITE_data/dialyzer_options
+++ b/lib/dialyzer/test/behaviour_SUITE_data/dialyzer_options
@@ -1,2 +1,2 @@
{dialyzer_options, []}.
-{time_limit, 2}.
+{time_limit, 5}.
diff --git a/lib/dialyzer/test/map_SUITE_data/dialyzer_options b/lib/dialyzer/test/map_SUITE_data/dialyzer_options
index 50991c9bc5..02425c33f2 100644
--- a/lib/dialyzer/test/map_SUITE_data/dialyzer_options
+++ b/lib/dialyzer/test/map_SUITE_data/dialyzer_options
@@ -1 +1,2 @@
{dialyzer_options, []}.
+{time_limit, 30}.
diff --git a/lib/dialyzer/test/opaque_SUITE_data/dialyzer_options b/lib/dialyzer/test/opaque_SUITE_data/dialyzer_options
index 06ed52043a..cb301ff6a1 100644
--- a/lib/dialyzer/test/opaque_SUITE_data/dialyzer_options
+++ b/lib/dialyzer/test/opaque_SUITE_data/dialyzer_options
@@ -1,2 +1,2 @@
{dialyzer_options, [{warnings, [no_unused, no_return]}]}.
-{time_limit, 20}.
+{time_limit, 40}.
diff --git a/lib/dialyzer/test/small_SUITE_data/results/chars b/lib/dialyzer/test/small_SUITE_data/results/chars
new file mode 100644
index 0000000000..2c1f8f8d17
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/results/chars
@@ -0,0 +1,4 @@
+
+chars.erl:29: Invalid type specification for function chars:f/1. The success typing is (#{'b':=50}) -> 'ok'
+chars.erl:32: Function t1/0 has no local return
+chars.erl:32: The call chars:f(#{'b':=50}) breaks the contract (#{'a':=49,'b'=>50,'c'=>51}) -> 'ok'
diff --git a/lib/dialyzer/test/small_SUITE_data/src/anno.erl b/lib/dialyzer/test/small_SUITE_data/src/anno.erl
new file mode 100644
index 0000000000..70f1d42141
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/src/anno.erl
@@ -0,0 +1,18 @@
+-module(anno).
+
+%% OTP-14131
+
+-export([t1/0, t2/0, t3/0]).
+
+t1() ->
+ A = erl_parse:anno_from_term({attribute, 1, module, my_test}),
+ compile:forms([A], []).
+
+t2() ->
+ A = erl_parse:new_anno({attribute, 1, module, my_test}),
+ compile:forms([A], []).
+
+t3() ->
+ A = erl_parse:new_anno({attribute, 1, module, my_test}),
+ T = erl_parse:anno_to_term(A),
+ {attribute, 1, module, my_test} = T.
diff --git a/lib/dialyzer/test/small_SUITE_data/src/chars.erl b/lib/dialyzer/test/small_SUITE_data/src/chars.erl
new file mode 100644
index 0000000000..1e9c8ab6b9
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/src/chars.erl
@@ -0,0 +1,32 @@
+-module(chars).
+
+%% ERL-313
+
+-export([t/0]).
+-export([t1/0]).
+
+-record(r, {f :: $A .. $Z}).
+
+-type cs() :: $A..$Z | $a .. $z | $/.
+
+-spec t() -> $0-$0..$9-$0| $?.
+
+t() ->
+ c(#r{f = $z - 3}),
+ c($z - 3),
+ c($B).
+
+-spec c(cs()) -> $3-$0..$9-$0.
+
+c($A + 1) -> 2;
+c(C) ->
+ case C of
+ $z - 3 -> 3;
+ #r{f = $z - 3} -> 7
+ end.
+
+%% Display contract with character in warning:
+-spec f(#{a := $1, b => $2, c => $3}) -> ok. % invalid type spec
+f(_) -> ok.
+
+t1() -> f(#{b => $2}). % breaks the contract
diff --git a/lib/eldap/test/Makefile b/lib/eldap/test/Makefile
index 21a0da926f..81fa8f187a 100644
--- a/lib/eldap/test/Makefile
+++ b/lib/eldap/test/Makefile
@@ -42,7 +42,7 @@ TARGET_FILES= \
SPEC_FILES = eldap.spec
-# COVER_FILE = eldap.cover
+COVER_FILE = eldap.cover
# ----------------------------------------------------
diff --git a/lib/eldap/test/eldap.cover b/lib/eldap/test/eldap.cover
new file mode 100644
index 0000000000..8c15956e72
--- /dev/null
+++ b/lib/eldap/test/eldap.cover
@@ -0,0 +1,3 @@
+{incl_app,eldap,details}.
+
+{excl_mods, eldap, ['ELDAPv3']}.
diff --git a/lib/hipe/cerl/erl_types.erl b/lib/hipe/cerl/erl_types.erl
index 226a5d0f61..10e97ff54d 100644
--- a/lib/hipe/cerl/erl_types.erl
+++ b/lib/hipe/cerl/erl_types.erl
@@ -2,7 +2,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -236,7 +236,8 @@
-export([t_is_identifier/1]).
-endif.
--export_type([erl_type/0, opaques/0, type_table/0, var_table/0, cache/0]).
+-export_type([erl_type/0, opaques/0, type_table/0, mod_records/0,
+ var_table/0, cache/0]).
%%-define(DEBUG, true).
@@ -379,8 +380,9 @@
-type type_value() :: {{module(), {file:name(), erl_anno:line()},
erl_parse:abstract_type(), ArgNames :: [atom()]},
erl_type()}.
--type type_table() :: dict:dict(record_key() | type_key(),
- record_value() | type_value()).
+-type type_table() :: #{record_key() | type_key() =>
+ record_value() | type_value()}.
+-type mod_records() :: dict:dict(module(), type_table()).
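%% mod_records() keeps the old outer dict, keyed by module, while each
%% per-module table is now a map; a minimal lookup sketch with made-up names:
%%
%%   {ok, Table} = dict:find(mymod, ModRecords),   % Table :: type_table()
%%   maps:find({record, my_rec}, Table)            % {ok, {FileLine, SizedFields}} | error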
-opaque var_table() :: #{atom() => erl_type()}.
@@ -749,16 +751,16 @@ decorate_tuples_in_sets([], _L, _Opaques, Acc) ->
-spec t_opaque_from_records(type_table()) -> [erl_type()].
-t_opaque_from_records(RecDict) ->
- OpaqueRecDict =
- dict:filter(fun(Key, _Value) ->
+t_opaque_from_records(RecMap) ->
+ OpaqueRecMap =
+ maps:filter(fun(Key, _Value) ->
case Key of
{opaque, _Name, _Arity} -> true;
_ -> false
end
- end, RecDict),
- OpaqueTypeDict =
- dict:map(fun({opaque, Name, _Arity},
+ end, RecMap),
+ OpaqueTypeMap =
+ maps:map(fun({opaque, Name, _Arity},
{{Module, _FileLine, _Form, ArgNames}, _Type}) ->
%% Args = args_to_types(ArgNames),
%% List = lists:zip(ArgNames, Args),
@@ -767,8 +769,8 @@ t_opaque_from_records(RecDict) ->
Rep = t_any(), % not used for anything right now
Args = [t_any() || _ <- ArgNames],
t_opaque(Module, Name, Args, Rep)
- end, OpaqueRecDict),
- [OpaqueType || {_Key, OpaqueType} <- dict:to_list(OpaqueTypeDict)].
+ end, OpaqueRecMap),
+ [OpaqueType || {_Key, OpaqueType} <- maps:to_list(OpaqueTypeMap)].
%% Decompose opaque instances of type arg2 to structured types, in arg1
%% XXX: Same as t_unopaque
@@ -802,10 +804,6 @@ list_struct_from_opaque(Types, Opaques) ->
[t_struct_from_opaque(Type, Opaques) || Type <- Types].
%%-----------------------------------------------------------------------------
-
--type mod_records() :: dict:dict(module(), type_table()).
-
-%%-----------------------------------------------------------------------------
%% Unit type. Signals non termination.
%%
@@ -2245,16 +2243,21 @@ t_has_var_list([]) -> false.
-spec t_collect_vars(erl_type()) -> [erl_type()].
t_collect_vars(T) ->
- t_collect_vars(T, []).
+ Vs = t_collect_vars(T, maps:new()),
+ [V || {V, _} <- maps:to_list(Vs)].
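%% The accumulator is now a map used as a set, so duplicate variables collapse
%% on insertion; a small stand-alone check (var_a/var_b stand in for ?var(..)
%% terms):
%%
%%   Acc = maps:put(var_a, any, maps:put(var_b, any, maps:put(var_a, any, #{}))),
%%   [var_a, var_b] = lists:sort(maps:keys(Acc))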
--spec t_collect_vars(erl_type(), [erl_type()]) -> [erl_type()].
+-type ctab() :: #{erl_type() => 'any'}.
+
+-spec t_collect_vars(erl_type(), ctab()) -> ctab().
t_collect_vars(?var(_) = Var, Acc) ->
- ordsets:add_element(Var, Acc);
+ maps:put(Var, any, Acc);
t_collect_vars(?function(Domain, Range), Acc) ->
- ordsets:union(t_collect_vars(Domain, Acc), t_collect_vars(Range, []));
+ Acc1 = t_collect_vars(Domain, Acc),
+ t_collect_vars(Range, Acc1);
t_collect_vars(?list(Contents, Termination, _), Acc) ->
- ordsets:union(t_collect_vars(Contents, Acc), t_collect_vars(Termination, []));
+ Acc1 = t_collect_vars(Contents, Acc),
+ t_collect_vars(Termination, Acc1);
t_collect_vars(?product(Types), Acc) ->
t_collect_vars_list(Types, Acc);
t_collect_vars(?tuple(?any, ?any, ?any), Acc) ->
@@ -3067,88 +3070,91 @@ is_compat_args([A1|Args1], [A2|Args2]) ->
is_compat_args([], []) -> true;
is_compat_args(_, _) -> false.
-is_compat_arg(A1, A2) ->
- is_specialization(A1, A2) orelse is_specialization(A2, A1).
-
--spec is_specialization(erl_type(), erl_type()) -> boolean().
-
-%% Returns true if the first argument is a specialization of the
-%% second argument in the sense that every type is a specialization of
-%% any(). For example, {_,_} is a specialization of any(), but not of
-%% tuple(). Does not handle variables, but any() and unions (sort of).
-
-is_specialization(T, T) -> true;
-is_specialization(_, ?any) -> true;
-is_specialization(?any, _) -> false;
-is_specialization(?function(Domain1, Range1), ?function(Domain2, Range2)) ->
- (is_specialization(Domain1, Domain2) andalso
- is_specialization(Range1, Range2));
-is_specialization(?list(Contents1, Termination1, Size1),
- ?list(Contents2, Termination2, Size2)) ->
+-spec is_compat_arg(erl_type(), erl_type()) -> boolean().
+
+%% The intention is that 'true' is to be returned iff one of the
+%% arguments is a specialization of the other argument in the sense
+%% that every type is a specialization of any(). For example, {_,_} is
+%% a specialization of any(), but not of tuple(). Does not handle
+%% variables, but any() and unions (sort of). However, the
+%% implementation is more relaxed, as any() is compatible with anything.
+
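%% A few concrete cases implied by the description above (types written
%% informally; these mirror the clauses below, not extra behaviour):
%%   is_compat_arg(any(),   {_,_})   -> true    % any() is accepted both ways now
%%   is_compat_arg({_,_},   any())   -> true
%%   is_compat_arg(tuple(), {_,_})   -> false   % unsized vs. sized tuple
%%   is_compat_arg({_,_},   tuple()) -> false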
+is_compat_arg(T, T) -> true;
+is_compat_arg(_, ?any) -> true;
+is_compat_arg(?any, _) -> true;
+is_compat_arg(?function(Domain1, Range1), ?function(Domain2, Range2)) ->
+ (is_compat_arg(Domain1, Domain2) andalso
+ is_compat_arg(Range1, Range2));
+is_compat_arg(?list(Contents1, Termination1, Size1),
+ ?list(Contents2, Termination2, Size2)) ->
(Size1 =:= Size2 andalso
- is_specialization(Contents1, Contents2) andalso
- is_specialization(Termination1, Termination2));
-is_specialization(?product(Types1), ?product(Types2)) ->
- specialization_list(Types1, Types2);
-is_specialization(?tuple(?any, ?any, ?any), ?tuple(_, _, _)) -> false;
-is_specialization(?tuple(_, _, _), ?tuple(?any, ?any, ?any)) -> false;
-is_specialization(?tuple(Elements1, Arity, _),
- ?tuple(Elements2, Arity, _)) when Arity =/= ?any ->
- specialization_list(Elements1, Elements2);
-is_specialization(?tuple_set([{Arity, List}]),
- ?tuple(Elements2, Arity, _)) when Arity =/= ?any ->
- specialization_list(sup_tuple_elements(List), Elements2);
-is_specialization(?tuple(Elements1, Arity, _),
- ?tuple_set([{Arity, List}])) when Arity =/= ?any ->
- specialization_list(Elements1, sup_tuple_elements(List));
-is_specialization(?tuple_set(List1), ?tuple_set(List2)) ->
+ is_compat_arg(Contents1, Contents2) andalso
+ is_compat_arg(Termination1, Termination2));
+is_compat_arg(?product(Types1), ?product(Types2)) ->
+ is_compat_list(Types1, Types2);
+is_compat_arg(?map(Pairs1, DefK1, DefV1), ?map(Pairs2, DefK2, DefV2)) ->
+ (is_compat_list(Pairs1, Pairs2) andalso
+ is_compat_arg(DefK1, DefK2) andalso
+ is_compat_arg(DefV1, DefV2));
+is_compat_arg(?tuple(?any, ?any, ?any), ?tuple(_, _, _)) -> false;
+is_compat_arg(?tuple(_, _, _), ?tuple(?any, ?any, ?any)) -> false;
+is_compat_arg(?tuple(Elements1, Arity, _),
+ ?tuple(Elements2, Arity, _)) when Arity =/= ?any ->
+ is_compat_list(Elements1, Elements2);
+is_compat_arg(?tuple_set([{Arity, List}]),
+ ?tuple(Elements2, Arity, _)) when Arity =/= ?any ->
+ is_compat_list(sup_tuple_elements(List), Elements2);
+is_compat_arg(?tuple(Elements1, Arity, _),
+ ?tuple_set([{Arity, List}])) when Arity =/= ?any ->
+ is_compat_list(Elements1, sup_tuple_elements(List));
+is_compat_arg(?tuple_set(List1), ?tuple_set(List2)) ->
try
- specialization_list_list([sup_tuple_elements(T) || {_Arity, T} <- List1],
- [sup_tuple_elements(T) || {_Arity, T} <- List2])
+ is_compat_list_list([sup_tuple_elements(T) || {_Arity, T} <- List1],
+ [sup_tuple_elements(T) || {_Arity, T} <- List2])
catch _:_ -> false
end;
-is_specialization(?opaque(_) = T1, T2) ->
- is_specialization(t_opaque_structure(T1), T2);
-is_specialization(T1, ?opaque(_) = T2) ->
- is_specialization(T1, t_opaque_structure(T2));
-is_specialization(?union(List1)=T1, ?union(List2)=T2) ->
- case specialization_union2(T1, T2) of
- {yes, Type1, Type2} -> is_specialization(Type1, Type2);
- no -> specialization_list(List1, List2)
+is_compat_arg(?opaque(_) = T1, T2) ->
+ is_compat_arg(t_opaque_structure(T1), T2);
+is_compat_arg(T1, ?opaque(_) = T2) ->
+ is_compat_arg(T1, t_opaque_structure(T2));
+is_compat_arg(?union(List1)=T1, ?union(List2)=T2) ->
+ case is_compat_union2(T1, T2) of
+ {yes, Type1, Type2} -> is_compat_arg(Type1, Type2);
+ no -> is_compat_list(List1, List2)
end;
-is_specialization(?union(List), T2) ->
+is_compat_arg(?union(List), T2) ->
case unify_union(List) of
- {yes, Type} -> is_specialization(Type, T2);
+ {yes, Type} -> is_compat_arg(Type, T2);
no -> false
end;
-is_specialization(T1, ?union(List)) ->
+is_compat_arg(T1, ?union(List)) ->
case unify_union(List) of
- {yes, Type} -> is_specialization(T1, Type);
+ {yes, Type} -> is_compat_arg(T1, Type);
no -> false
end;
-is_specialization(?var(_), _) -> exit(error);
-is_specialization(_, ?var(_)) -> exit(error);
-is_specialization(?none, _) -> false;
-is_specialization(_, ?none) -> false;
-is_specialization(?unit, _) -> false;
-is_specialization(_, ?unit) -> false;
-is_specialization(#c{}, #c{}) -> false.
+is_compat_arg(?var(_), _) -> exit(error);
+is_compat_arg(_, ?var(_)) -> exit(error);
+is_compat_arg(?none, _) -> false;
+is_compat_arg(_, ?none) -> false;
+is_compat_arg(?unit, _) -> false;
+is_compat_arg(_, ?unit) -> false;
+is_compat_arg(#c{}, #c{}) -> false.
-specialization_list_list(LL1, LL2) ->
- length(LL1) =:= length(LL2) andalso specialization_list_list1(LL1, LL2).
+is_compat_list_list(LL1, LL2) ->
+ length(LL1) =:= length(LL2) andalso is_compat_list_list1(LL1, LL2).
-specialization_list_list1([], []) -> true;
-specialization_list_list1([L1|LL1], [L2|LL2]) ->
- specialization_list(L1, L2) andalso specialization_list_list1(LL1, LL2).
+is_compat_list_list1([], []) -> true;
+is_compat_list_list1([L1|LL1], [L2|LL2]) ->
+ is_compat_list(L1, L2) andalso is_compat_list_list1(LL1, LL2).
-specialization_list(L1, L2) ->
- length(L1) =:= length(L2) andalso specialization_list1(L1, L2).
+is_compat_list(L1, L2) ->
+ length(L1) =:= length(L2) andalso is_compat_list1(L1, L2).
-specialization_list1([], []) -> true;
-specialization_list1([T1|L1], [T2|L2]) ->
- is_specialization(T1, T2) andalso specialization_list1(L1, L2).
+is_compat_list1([], []) -> true;
+is_compat_list1([T1|L1], [T2|L2]) ->
+ is_compat_arg(T1, T2) andalso is_compat_list1(L1, L2).
-specialization_union2(?union(List1)=T1, ?union(List2)=T2) ->
+is_compat_union2(?union(List1)=T1, ?union(List2)=T2) ->
case {unify_union(List1), unify_union(List2)} of
{{yes, Type1}, {yes, Type2}} -> {yes, Type1, Type2};
{{yes, Type1}, no} -> {yes, Type1, T2};
@@ -4181,7 +4187,7 @@ t_map(Fun, T) ->
-spec t_to_string(erl_type()) -> string().
t_to_string(T) ->
- t_to_string(T, dict:new()).
+ t_to_string(T, maps:new()).
-spec t_to_string(erl_type(), type_table()) -> string().
@@ -4542,6 +4548,8 @@ from_form({atom, _L, Atom}, _S, _D, L, C) ->
{t_atom(Atom), L, C};
from_form({integer, _L, Int}, _S, _D, L, C) ->
{t_integer(Int), L, C};
+from_form({char, _L, Char}, _S, _D, L, C) ->
+ {t_integer(Char), L, C};
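%% A character literal in a type, e.g. $A, arrives here as {char, Line, 65}
%% and gets the same singleton integer type as {integer, Line, 65}.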
from_form({op, _L, _Op, _Arg} = Op, _S, _D, L, C) ->
case erl_eval:partial_eval(Op) of
{integer, _, Val} ->
@@ -5056,6 +5064,7 @@ check_record_fields({remote_type, _L, [{atom, _, _}, {atom, _, _}, Args]},
list_check_record_fields(Args, S, C);
check_record_fields({atom, _L, _}, _S, C) -> C;
check_record_fields({integer, _L, _}, _S, C) -> C;
+check_record_fields({char, _L, _}, _S, C) -> C;
check_record_fields({op, _L, _Op, _Arg}, _S, C) -> C;
check_record_fields({op, _L, _Op, _Arg1, _Arg2}, _S, C) -> C;
check_record_fields({type, _L, tuple, any}, _S, C) -> C;
@@ -5157,6 +5166,7 @@ t_form_to_string({var, _L, Name}) -> atom_to_list(Name);
t_form_to_string({atom, _L, Atom}) ->
io_lib:write_string(atom_to_list(Atom), $'); % To quote or not to quote... '
t_form_to_string({integer, _L, Int}) -> integer_to_list(Int);
+t_form_to_string({char, _L, Char}) -> integer_to_list(Char);
t_form_to_string({op, _L, _Op, _Arg} = Op) ->
case erl_eval:partial_eval(Op) of
{integer, _, _} = Int -> t_form_to_string(Int);
@@ -5239,7 +5249,7 @@ t_form_to_string({type, _L, union, Args}) ->
t_form_to_string({type, _L, Name, []} = T) ->
try
M = mod,
- D0 = dict:new(),
+ D0 = maps:new(),
MR = dict:from_list([{M, D0}]),
Site = {type, {M,Name,0}},
V = var_table__new(),
@@ -5303,8 +5313,8 @@ is_erl_type(_) -> false.
-spec lookup_record(atom(), type_table()) ->
'error' | {'ok', [{atom(), parse_form(), erl_type()}]}.
-lookup_record(Tag, RecDict) when is_atom(Tag) ->
- case dict:find({record, Tag}, RecDict) of
+lookup_record(Tag, Table) when is_atom(Tag) ->
+ case maps:find({record, Tag}, Table) of
{ok, {_FileLine, [{_Arity, Fields}]}} ->
{ok, Fields};
{ok, {_FileLine, List}} when is_list(List) ->
@@ -5318,18 +5328,18 @@ lookup_record(Tag, RecDict) when is_atom(Tag) ->
-spec lookup_record(atom(), arity(), type_table()) ->
'error' | {'ok', [{atom(), parse_form(), erl_type()}]}.
-lookup_record(Tag, Arity, RecDict) when is_atom(Tag) ->
- case dict:find({record, Tag}, RecDict) of
+lookup_record(Tag, Arity, Table) when is_atom(Tag) ->
+ case maps:find({record, Tag}, Table) of
{ok, {_FileLine, [{Arity, Fields}]}} -> {ok, Fields};
{ok, {_FileLine, OrdDict}} -> orddict:find(Arity, OrdDict);
error -> error
end.
-spec lookup_type(_, _, _) -> {'type' | 'opaque', type_value()} | 'error'.
-lookup_type(Name, Arity, RecDict) ->
- case dict:find({type, Name, Arity}, RecDict) of
+lookup_type(Name, Arity, Table) ->
+ case maps:find({type, Name, Arity}, Table) of
error ->
- case dict:find({opaque, Name, Arity}, RecDict) of
+ case maps:find({opaque, Name, Arity}, Table) of
error -> error;
{ok, Found} -> {opaque, Found}
end;
@@ -5339,8 +5349,8 @@ lookup_type(Name, Arity, RecDict) ->
-spec type_is_defined('type' | 'opaque', atom(), arity(), type_table()) ->
boolean().
-type_is_defined(TypeOrOpaque, Name, Arity, RecDict) ->
- dict:is_key({TypeOrOpaque, Name, Arity}, RecDict).
+type_is_defined(TypeOrOpaque, Name, Arity, Table) ->
+ maps:is_key({TypeOrOpaque, Name, Arity}, Table).
cannot_have_opaque(Type, TypeName, TypeNames) ->
t_is_none(Type) orelse is_recursive(TypeName, TypeNames).
diff --git a/lib/hipe/test/basic_SUITE_data/basic_num_bif.erl b/lib/hipe/test/basic_SUITE_data/basic_num_bif.erl
new file mode 100644
index 0000000000..807c4b0d0d
--- /dev/null
+++ b/lib/hipe/test/basic_SUITE_data/basic_num_bif.erl
@@ -0,0 +1,217 @@
+%%% -*- erlang-indent-level: 2 -*-
+%%%-------------------------------------------------------------------
+%%% File : basic_num_bif.erl
+%%% Description : Taken from the compiler test suite
+%%%-------------------------------------------------------------------
+-module(basic_num_bif).
+
+-export([test/0]).
+
+%% Tests optimization of the BIFs:
+%% abs/1
+%% float/1
+%% float_to_list/1
+%% integer_to_list/1
+%% list_to_float/1
+%% list_to_integer/1
+%% round/1
+%% trunc/1
+
+test() ->
+ Funs = [fun t_abs/0, fun t_float/0,
+ fun t_float_to_list/0, fun t_integer_to_list/0,
+ fun t_list_to_float_safe/0, fun t_list_to_float_risky/0,
+ fun t_list_to_integer/0, fun t_round/0, fun t_trunc/0],
+ lists:foreach(fun (F) -> ok = F() end, Funs).
+
+t_abs() ->
+ %% Floats.
+ 5.5 = abs(5.5),
+ 0.0 = abs(0.0),
+ 100.0 = abs(-100.0),
+ %% Integers.
+ 5 = abs(5),
+ 0 = abs(0),
+ 100 = abs(-100),
+ %% The largest smallnum. OTP-3190.
+ X = (1 bsl 27) - 1,
+ X = abs(X),
+ X = abs(X-1)+1,
+ X = abs(X+1)-1,
+ X = abs(-X),
+ X = abs(-X-1)-1,
+ X = abs(-X+1)+1,
+ %% Bignums.
+ BigNum = 13984792374983749,
+ BigNum = abs(BigNum),
+ BigNum = abs(-BigNum),
+ ok.
+
+t_float() ->
+ 0.0 = float(0),
+ 2.5 = float(2.5),
+ 0.0 = float(0.0),
+ -100.55 = float(-100.55),
+ 42.0 = float(42),
+ -100.0 = float(-100),
+ %% Bignums.
+ 4294967305.0 = float(4294967305),
+ -4294967305.0 = float(-4294967305),
+ %% Extremely big bignums.
+ Big = list_to_integer(lists:duplicate(2000, $1)),
+ {'EXIT', {badarg, _}} = (catch float(Big)),
+ ok.
+
+%% Tests float_to_list/1.
+
+t_float_to_list() ->
+ test_ftl("0.0e+0", 0.0),
+ test_ftl("2.5e+1", 25.0),
+ test_ftl("2.5e+0", 2.5),
+ test_ftl("2.5e-1", 0.25),
+ test_ftl("-3.5e+17", -350.0e15),
+ ok.
+
+test_ftl(Expect, Float) ->
+ %% No ?line macro on the next line -- we want the line number from t_float_to_list.
+ Expect = remove_zeros(lists:reverse(float_to_list(Float)), []).
+
+%% Removes any non-significant zeros in a floating point number.
+%% Example: 2.500000e+01 -> 2.5e+1
+
+remove_zeros([$+, $e|Rest], [$0, X|Result]) ->
+ remove_zeros([$+, $e|Rest], [X|Result]);
+remove_zeros([$-, $e|Rest], [$0, X|Result]) ->
+ remove_zeros([$-, $e|Rest], [X|Result]);
+remove_zeros([$0, $.|Rest], [$e|Result]) ->
+ remove_zeros(Rest, [$., $0, $e|Result]);
+remove_zeros([$0|Rest], [$e|Result]) ->
+ remove_zeros(Rest, [$e|Result]);
+remove_zeros([Char|Rest], Result) ->
+ remove_zeros(Rest, [Char|Result]);
+remove_zeros([], Result) ->
+ Result.
+
+%% Tests integer_to_list/1.
+
+t_integer_to_list() ->
+ "0" = integer_to_list(0),
+ "42" = integer_to_list(42),
+ "-42" = integer_to_list(-42),
+ "-42" = integer_to_list(-42),
+ "32768" = integer_to_list(32768),
+ "268435455" = integer_to_list(268435455),
+ "-268435455" = integer_to_list(-268435455),
+ "123456932798748738738" = integer_to_list(123456932798748738738),
+ Big_List = lists:duplicate(2000, $1),
+ Big = list_to_integer(Big_List),
+ Big_List = integer_to_list(Big),
+ ok.
+
+%% Tests list_to_float/1.
+
+t_list_to_float_safe() ->
+ 0.0 = list_to_float("0.0"),
+ 0.0 = list_to_float("-0.0"),
+ 0.5 = list_to_float("0.5"),
+ -0.5 = list_to_float("-0.5"),
+ 100.0 = list_to_float("1.0e2"),
+ 127.5 = list_to_float("127.5"),
+ -199.5 = list_to_float("-199.5"),
+ ok.
+
+%% This might crash the emulator...
+%% (Known to crash the Unix version of Erlang 4.4.1)
+
+t_list_to_float_risky() ->
+ Many_Ones = lists:duplicate(25000, $1),
+ _ = list_to_float("2."++Many_Ones),
+ {'EXIT', {badarg, _}} = (catch list_to_float("2"++Many_Ones)),
+ ok.
+
+%% Tests list_to_integer/1.
+
+t_list_to_integer() ->
+ 0 = list_to_integer("0"),
+ 0 = list_to_integer("00"),
+ 0 = list_to_integer("-0"),
+ 1 = list_to_integer("1"),
+ -1 = list_to_integer("-1"),
+ 42 = list_to_integer("42"),
+ -12 = list_to_integer("-12"),
+ 32768 = list_to_integer("32768"),
+ 268435455 = list_to_integer("268435455"),
+ -268435455 = list_to_integer("-268435455"),
+ %% Bignums.
+ 123456932798748738738 = list_to_integer("123456932798748738738"),
+ _ = list_to_integer(lists:duplicate(2000, $1)),
+ ok.
+
+%% Tests round/1.
+
+t_round() ->
+ 0 = round(0.0),
+ 0 = round(0.4),
+ 1 = round(0.5),
+ 0 = round(-0.4),
+ -1 = round(-0.5),
+ 255 = round(255.3),
+ 256 = round(255.6),
+ -1033 = round(-1033.3),
+ -1034 = round(-1033.6),
+ %% OTP-3722:
+ X = (1 bsl 27) - 1,
+ MX = -X,
+ MXm1 = -X-1,
+ MXp1 = -X+1,
+ F = X + 0.0,
+ X = round(F),
+ X = round(F+1)-1,
+ X = round(F-1)+1,
+ MX = round(-F),
+ MXm1 = round(-F-1),
+ MXp1 = round(-F+1),
+ X = round(F+0.1),
+ X = round(F+1+0.1)-1,
+ X = round(F-1+0.1)+1,
+ MX = round(-F+0.1),
+ MXm1 = round(-F-1+0.1),
+ MXp1 = round(-F+1+0.1),
+ X = round(F-0.1),
+ X = round(F+1-0.1)-1,
+ X = round(F-1-0.1)+1,
+ MX = round(-F-0.1),
+ MXm1 = round(-F-1-0.1),
+ MXp1 = round(-F+1-0.1),
+ 0.5 = abs(round(F+0.5)-(F+0.5)),
+ 0.5 = abs(round(F-0.5)-(F-0.5)),
+ 0.5 = abs(round(-F-0.5)-(-F-0.5)),
+ 0.5 = abs(round(-F+0.5)-(-F+0.5)),
+ %% Bignums.
+ 4294967296 = round(4294967296.1),
+ 4294967297 = round(4294967296.9),
+ -4294967296 = -round(4294967296.1),
+ -4294967297 = -round(4294967296.9),
+ ok.
+
+t_trunc() ->
+ 0 = trunc(0.0),
+ 5 = trunc(5.3333),
+ -10 = trunc(-10.978987),
+ %% The largest smallnum, converted to float (OTP-3722):
+ X = (1 bsl 27) - 1,
+ F = X + 0.0,
+ %% io:format("X = ~p/~w/~w, F = ~p/~w/~w, trunc(F) = ~p/~w/~w~n",
+ %% [X, X, binary_to_list(term_to_binary(X)),
+ %% F, F, binary_to_list(term_to_binary(F)),
+ %% trunc(F), trunc(F), binary_to_list(term_to_binary(trunc(F)))]),
+ X = trunc(F),
+ X = trunc(F+1)-1,
+ X = trunc(F-1)+1,
+ X = -trunc(-F),
+ X = -trunc(-F-1)-1,
+ X = -trunc(-F+1)+1,
+ %% Bignums.
+ 4294967305 = trunc(4294967305.7),
+ -4294967305 = trunc(-4294967305.7),
+ ok.
diff --git a/lib/hipe/test/hipe_SUITE.erl b/lib/hipe/test/hipe_SUITE.erl
index a5b3924aa8..b9adb660f2 100644
--- a/lib/hipe/test/hipe_SUITE.erl
+++ b/lib/hipe/test/hipe_SUITE.erl
@@ -16,7 +16,11 @@
%%
-module(hipe_SUITE).
--compile([export_all]).
+-export([all/0, groups/0,
+ init_per_suite/1, end_per_suite/1,
+ init_per_group/2, end_per_group/2,
+ app/0, app/1, appup/0, appup/1]).
+
-include_lib("common_test/include/ct.hrl").
all() ->
diff --git a/lib/hipe/test/opt_verify_SUITE.erl b/lib/hipe/test/opt_verify_SUITE.erl
index 61952e81d7..86083fa02b 100644
--- a/lib/hipe/test/opt_verify_SUITE.erl
+++ b/lib/hipe/test/opt_verify_SUITE.erl
@@ -1,6 +1,9 @@
-module(opt_verify_SUITE).
--compile([export_all]).
+-export([all/0, groups/0,
+ init_per_suite/1, end_per_suite/1,
+ init_per_group/2, end_per_group/2,
+ call_elim/0, call_elim/1]).
all() ->
[call_elim].
@@ -23,23 +26,6 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
-call_elim_test_file(Config, FileName, Option) ->
- PrivDir = test_server:lookup_config(priv_dir, Config),
- TempOut = test_server:temp_name(filename:join(PrivDir, "call_elim_out")),
- {ok, TestCase} = compile:file(FileName),
- {ok, TestCase} = hipe:c(TestCase, [Option, {pp_range_icode, {file, TempOut}}]),
- {ok, Icode} = file:read_file(TempOut),
- ok = file:delete(TempOut),
- Icode.
-
-substring_count(Icode, Substring) ->
- substring_count(Icode, Substring, 0).
-substring_count(Icode, Substring, N) ->
- case string:str(Icode, Substring) of
- 0 -> N;
- I -> substring_count(lists:nthtail(I, Icode), Substring, N+1)
- end.
-
call_elim() ->
[{doc, "Test that the call elimination optimization pass is ok"}].
call_elim(Config) ->
@@ -60,3 +46,20 @@ call_elim(Config) ->
Icode6 = call_elim_test_file(Config, F3, no_icode_call_elim),
3 = substring_count(binary:bin_to_list(Icode6), "is_key"),
ok.
+
+call_elim_test_file(Config, FileName, Option) ->
+ PrivDir = test_server:lookup_config(priv_dir, Config),
+ TempOut = test_server:temp_name(filename:join(PrivDir, "call_elim_out")),
+ {ok, TestCase} = compile:file(FileName),
+ {ok, TestCase} = hipe:c(TestCase, [Option, {pp_range_icode, {file, TempOut}}]),
+ {ok, Icode} = file:read_file(TempOut),
+ ok = file:delete(TempOut),
+ Icode.
+
+substring_count(Icode, Substring) ->
+ substring_count(Icode, Substring, 0).
+substring_count(Icode, Substring, N) ->
+ case string:str(Icode, Substring) of
+ 0 -> N;
+ I -> substring_count(lists:nthtail(I, Icode), Substring, N+1)
+ end.
diff --git a/lib/inets/doc/src/httpc.xml b/lib/inets/doc/src/httpc.xml
index 705afec022..4217b3c4fb 100644
--- a/lib/inets/doc/src/httpc.xml
+++ b/lib/inets/doc/src/httpc.xml
@@ -83,7 +83,7 @@
<title>HTTP DATA TYPES</title>
<p>Type definitions related to HTTP:</p>
- <p><c>method() = head | get | put | post | trace | options | delete</c></p>
+ <p><c>method() = head | get | put | post | trace | options | delete | patch</c></p>
<taglist>
<tag><c>request()</c></tag>
<item><p>= <c>{url(), headers()}</c></p>
diff --git a/lib/inets/src/http_client/httpc.erl b/lib/inets/src/http_client/httpc.erl
index 91d87289a2..bd5f6df39e 100644
--- a/lib/inets/src/http_client/httpc.erl
+++ b/lib/inets/src/http_client/httpc.erl
@@ -147,6 +147,26 @@ request(Method, Request, HttpOptions, Options) ->
request(Method, Request, HttpOptions, Options, default_profile()).
request(Method,
+ {Url, Headers, ContentType, TupleBody},
+ HTTPOptions, Options, Profile)
+ when ((Method =:= post) orelse (Method =:= patch) orelse (Method =:= put) orelse (Method =:= delete))
+ andalso (is_atom(Profile) orelse is_pid(Profile)) andalso
+ is_list(ContentType) andalso is_tuple(TupleBody)->
+ case check_body_gen(TupleBody) of
+ ok ->
+ do_request(Method, {Url, Headers, ContentType, TupleBody}, HTTPOptions, Options, Profile);
+ Error ->
+ Error
+ end;
+request(Method,
+ {Url, Headers, ContentType, Body},
+ HTTPOptions, Options, Profile)
+ when ((Method =:= post) orelse (Method =:= patch) orelse (Method =:= put) orelse (Method =:= delete))
+ andalso (is_atom(Profile) orelse is_pid(Profile)) andalso
+ is_list(ContentType) andalso (is_list(Body) orelse is_binary(Body)) ->
+ do_request(Method, {Url, Headers, ContentType, Body}, HTTPOptions, Options, Profile);
+
+request(Method,
{Url, Headers},
HTTPOptions, Options, Profile)
when (Method =:= options) orelse
@@ -155,12 +175,6 @@ request(Method,
(Method =:= delete) orelse
(Method =:= trace) andalso
(is_atom(Profile) orelse is_pid(Profile)) ->
- ?hcrt("request", [{method, Method},
- {url, Url},
- {headers, Headers},
- {http_options, HTTPOptions},
- {options, Options},
- {profile, Profile}]),
case uri_parse(Url, Options) of
{error, Reason} ->
{error, Reason};
@@ -172,21 +186,9 @@ request(Method,
handle_request(Method, Url, ParsedUrl, Headers, [], [],
HTTPOptions, Options, Profile)
end
- end;
-
-request(Method,
- {Url, Headers, ContentType, Body},
- HTTPOptions, Options, Profile)
- when ((Method =:= post) orelse (Method =:= patch) orelse (Method =:= put) orelse
- (Method =:= delete)) andalso (is_atom(Profile) orelse is_pid(Profile)) ->
- ?hcrt("request", [{method, Method},
- {url, Url},
- {headers, Headers},
- {content_type, ContentType},
- {body, Body},
- {http_options, HTTPOptions},
- {options, Options},
- {profile, Profile}]),
+ end.
+
+do_request(Method, {Url, Headers, ContentType, Body}, HTTPOptions, Options, Profile) ->
case uri_parse(Url, Options) of
{error, Reason} ->
{error, Reason};
@@ -196,7 +198,6 @@ request(Method,
HTTPOptions, Options, Profile)
end.
-
%%--------------------------------------------------------------------------
%% cancel_request(RequestId) -> ok
%% cancel_request(RequestId, Profile) -> ok
@@ -209,7 +210,6 @@ cancel_request(RequestId) ->
cancel_request(RequestId, Profile)
when is_atom(Profile) orelse is_pid(Profile) ->
- ?hcrt("cancel request", [{request_id, RequestId}, {profile, Profile}]),
httpc_manager:cancel_request(RequestId, profile_name(Profile)).
@@ -232,7 +232,6 @@ cancel_request(RequestId, Profile)
set_options(Options) ->
set_options(Options, default_profile()).
set_options(Options, Profile) when is_atom(Profile) orelse is_pid(Profile) ->
- ?hcrt("set options", [{options, Options}, {profile, Profile}]),
case validate_options(Options) of
{ok, Opts} ->
httpc_manager:set_options(Opts, profile_name(Profile));
@@ -272,7 +271,6 @@ get_options(all = _Options, Profile) ->
get_options(Options, Profile)
when (is_list(Options) andalso
(is_atom(Profile) orelse is_pid(Profile))) ->
- ?hcrt("get options", [{options, Options}, {profile, Profile}]),
case Options -- get_options() of
[] ->
try
@@ -314,9 +312,6 @@ store_cookies(SetCookieHeaders, Url) ->
store_cookies(SetCookieHeaders, Url, Profile)
when is_atom(Profile) orelse is_pid(Profile) ->
- ?hcrt("store cookies", [{set_cookie_headers, SetCookieHeaders},
- {url, Url},
- {profile, Profile}]),
try
begin
%% Since the Address part is not actually used
@@ -353,9 +348,6 @@ cookie_header(Url, Opts) when is_list(Opts) ->
cookie_header(Url, Opts, Profile)
when (is_list(Opts) andalso (is_atom(Profile) orelse is_pid(Profile))) ->
- ?hcrt("cookie header", [{url, Url},
- {opts, Opts},
- {profile, Profile}]),
try
begin
httpc_manager:which_cookies(Url, Opts, profile_name(Profile))
@@ -398,7 +390,6 @@ which_sessions() ->
which_sessions(default_profile()).
which_sessions(Profile) ->
- ?hcrt("which sessions", [{profile, Profile}]),
try
begin
httpc_manager:which_sessions(profile_name(Profile))
@@ -419,7 +410,6 @@ info() ->
info(default_profile()).
info(Profile) ->
- ?hcrt("info", [{profile, Profile}]),
try
begin
httpc_manager:info(profile_name(Profile))
@@ -440,7 +430,6 @@ reset_cookies() ->
reset_cookies(default_profile()).
reset_cookies(Profile) ->
- ?hcrt("reset cookies", [{profile, Profile}]),
try
begin
httpc_manager:reset_cookies(profile_name(Profile))
@@ -458,7 +447,6 @@ reset_cookies(Profile) ->
%% same behavior as active once for sockets.
%%-------------------------------------------------------------------------
stream_next(Pid) ->
- ?hcrt("stream next", [{handler, Pid}]),
httpc_handler:stream_next(Pid).
@@ -466,7 +454,6 @@ stream_next(Pid) ->
%%% Behaviour callbacks
%%%========================================================================
start_standalone(PropList) ->
- ?hcrt("start standalone", [{proplist, PropList}]),
case proplists:get_value(profile, PropList) of
undefined ->
{error, no_profile};
@@ -477,14 +464,11 @@ start_standalone(PropList) ->
end.
start_service(Config) ->
- ?hcrt("start service", [{config, Config}]),
httpc_profile_sup:start_child(Config).
stop_service(Profile) when is_atom(Profile) ->
- ?hcrt("stop service", [{profile, Profile}]),
httpc_profile_sup:stop_child(Profile);
stop_service(Pid) when is_pid(Pid) ->
- ?hcrt("stop service", [{pid, Pid}]),
case service_info(Pid) of
{ok, [{profile, Profile}]} ->
stop_service(Profile);
@@ -510,7 +494,6 @@ service_info(Pid) ->
%%%========================================================================
%%% Internal functions
%%%========================================================================
-
handle_request(Method, Url,
{Scheme, UserInfo, Host, Port, Path, Query},
Headers0, ContentType, Body0,
@@ -521,9 +504,6 @@ handle_request(Method, Url,
try
begin
- ?hcrt("begin processing", [{started, Started},
- {new_headers, NewHeaders0}]),
-
{NewHeaders, Body} =
case Body0 of
{chunkify, ProcessBody, Acc}
@@ -575,16 +555,13 @@ handle_request(Method, Url,
{ok, RequestId} ->
handle_answer(RequestId, Sync, Options);
{error, Reason} ->
- ?hcrd("request failed", [{reason, Reason}]),
{error, Reason}
end
end
catch
error:{noproc, _} ->
- ?hcrv("noproc", [{profile, Profile}]),
{error, {not_started, Profile}};
throw:Error ->
- ?hcrv("throw", [{error, Error}]),
Error
end.
@@ -620,15 +597,10 @@ handle_answer(RequestId, false, _) ->
handle_answer(RequestId, true, Options) ->
receive
{http, {RequestId, saved_to_file}} ->
- ?hcrt("received saved-to-file", [{request_id, RequestId}]),
{ok, saved_to_file};
{http, {RequestId, {_,_,_} = Result}} ->
- ?hcrt("received answer", [{request_id, RequestId},
- {result, Result}]),
return_answer(Options, Result);
{http, {RequestId, {error, Reason}}} ->
- ?hcrt("received error", [{request_id, RequestId},
- {reason, Reason}]),
{error, Reason}
end.
@@ -1257,18 +1229,14 @@ child_name(Pid, [{Name, Pid} | _]) ->
child_name(Pid, [_ | Children]) ->
child_name(Pid, Children).
-%% d(F) ->
-%% d(F, []).
-
-%% d(F, A) ->
-%% d(get(dbg), F, A).
-
-%% d(true, F, A) ->
-%% io:format(user, "~w:~w:" ++ F ++ "~n", [self(), ?MODULE | A]);
-%% d(_, _, _) ->
-%% ok.
-
host_address(Host, false) ->
Host;
host_address(Host, true) ->
string:strip(string:strip(Host, right, $]), left, $[).
+
+check_body_gen({Fun, _}) when is_function(Fun) ->
+ ok;
+check_body_gen({chunkify, Fun, _}) when is_function(Fun) ->
+ ok;
+check_body_gen(Gen) ->
+ {error, {bad_body_generator, Gen}}.
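%% A usage sketch for the tuple-body clause above; the URL and producer are
%% made up, and the generator is assumed to follow the usual ProcessBody
%% convention (eof | {ok, Data, NewAcc}) handled in handle_request:
%%
%%   produce(0) -> eof;
%%   produce(N) -> {ok, integer_to_list(N) ++ "\n", N - 1}.
%%
%%   send_counts() ->
%%       httpc:request(post,
%%                     {"http://localhost:8080/upload", [], "text/plain",
%%                      {chunkify, fun produce/1, 3}},
%%                     [], []).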
diff --git a/lib/inets/src/http_client/httpc_handler.erl b/lib/inets/src/http_client/httpc_handler.erl
index 2e7df8e424..c99200777b 100644
--- a/lib/inets/src/http_client/httpc_handler.erl
+++ b/lib/inets/src/http_client/httpc_handler.erl
@@ -32,7 +32,6 @@
%% Internal Application API
-export([
start_link/4,
- %% connect_and_send/2,
send/2,
cancel/2,
stream_next/1,
@@ -165,14 +164,12 @@ info(Pid) ->
%%--------------------------------------------------------------------
%% Request should not be streamed
stream(BodyPart, #request{stream = none} = Request, _) ->
- ?hcrt("stream - none", []),
{false, BodyPart, Request};
%% Stream to caller
stream(BodyPart, #request{stream = Self} = Request, Code)
when ?IS_STREAMED(Code) andalso
((Self =:= self) orelse (Self =:= {self, once})) ->
- ?hcrt("stream - self", [{stream, Self}, {code, Code}]),
httpc_response:send(Request#request.from,
{Request#request.id, stream, BodyPart}),
{true, <<>>, Request};
@@ -182,10 +179,8 @@ stream(BodyPart, #request{stream = Self} = Request, Code)
%% We keep this for backward compatibility...
stream(BodyPart, #request{stream = Filename} = Request, Code)
when ?IS_STREAMED(Code) andalso is_list(Filename) ->
- ?hcrt("stream - filename", [{stream, Filename}, {code, Code}]),
case file:open(Filename, [write, raw, append, delayed_write]) of
{ok, Fd} ->
- ?hcrt("stream - file open ok", [{fd, Fd}]),
stream(BodyPart, Request#request{stream = Fd}, 200);
{error, Reason} ->
exit({stream_to_file_failed, Reason})
@@ -194,7 +189,6 @@ stream(BodyPart, #request{stream = Filename} = Request, Code)
%% Stream to file
stream(BodyPart, #request{stream = Fd} = Request, Code)
when ?IS_STREAMED(Code) ->
- ?hcrt("stream to file", [{stream, Fd}, {code, Code}]),
case file:write(Fd, BodyPart) of
ok ->
{true, <<>>, Request};
@@ -203,7 +197,6 @@ stream(BodyPart, #request{stream = Fd} = Request, Code)
end;
stream(BodyPart, Request,_) -> % only 200 and 206 responses can be streamed
- ?hcrt("stream - ignore", [{request, Request}]),
{false, BodyPart, Request}.
@@ -257,22 +250,148 @@ init([Parent, Request, Options, ProfileName]) ->
%% {stop, Reason, State} (terminate/2 is called)
%% Description: Handling call messages
%%--------------------------------------------------------------------
-handle_call(#request{address = Addr} = Request, _,
+handle_call(Request, From, State) ->
+ try do_handle_call(Request, From, State) of
+ Result ->
+ Result
+ catch
+ _:Reason ->
+ {stop, {shutdown, Reason} , State}
+ end.
+
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast(Msg, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%% Description: Handling cast messages
+%%--------------------------------------------------------------------
+handle_cast(Msg, State) ->
+ try do_handle_cast(Msg, State) of
+ Result ->
+ Result
+ catch
+ _:Reason ->
+ {stop, {shutdown, Reason} , State}
+ end.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info(Info, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State} (terminate/2 is called)
+%% Description: Handling all non call/cast messages
+%%--------------------------------------------------------------------
+handle_info(Info, State) ->
+ try do_handle_info(Info, State) of
+ Result ->
+ Result
+ catch
+ _:Reason ->
+ {stop, {shutdown, Reason}, State}
+ end.
+
+%%--------------------------------------------------------------------
+%% Function: terminate(Reason, State) -> _ (ignored by gen_server)
+%% Description: Shutdown the httpc_handler
+%%--------------------------------------------------------------------
+
+%% Init error: there is no socket to be closed.
+terminate(normal,
+ #state{request = Request,
+ session = {send_failed, _} = Reason} = State) ->
+ maybe_send_answer(Request,
+ httpc_response:error(Request, Reason),
+ State),
+ ok;
+
+terminate(normal,
+ #state{request = Request,
+ session = {connect_failed, _} = Reason} = State) ->
+ maybe_send_answer(Request,
+ httpc_response:error(Request, Reason),
+ State),
+ ok;
+
+terminate(normal, #state{session = undefined}) ->
+ ok;
+
+%% Init error sending, no session information has been set up but
+%% there is a socket that needs closing.
+terminate(normal,
+ #state{session = #session{id = undefined} = Session}) ->
+ close_socket(Session);
+
+%% Socket closed remotely
+terminate(normal,
+ #state{session = #session{socket = {remote_close, Socket},
+ socket_type = SocketType,
+ id = Id},
+ profile_name = ProfileName,
+ request = Request,
+ timers = Timers,
+ pipeline = Pipeline,
+ keep_alive = KeepAlive} = State) ->
+ %% Clobber session
+ (catch httpc_manager:delete_session(Id, ProfileName)),
+
+ maybe_retry_queue(Pipeline, State),
+ maybe_retry_queue(KeepAlive, State),
+
+ %% Cancel timers
+ cancel_timers(Timers),
+
+ %% Maybe deliver answers to requests
+ deliver_answer(Request),
+
+ %% And, just in case, close our side (**really** overkill)
+ http_transport:close(SocketType, Socket);
+
+terminate(_Reason, #state{session = #session{id = Id,
+ socket = Socket,
+ socket_type = SocketType},
+ request = undefined,
+ profile_name = ProfileName,
+ timers = Timers,
+ pipeline = Pipeline,
+ keep_alive = KeepAlive} = State) ->
+
+ %% Clobber session
+ (catch httpc_manager:delete_session(Id, ProfileName)),
+
+ maybe_retry_queue(Pipeline, State),
+ maybe_retry_queue(KeepAlive, State),
+
+ cancel_timer(Timers#timers.queue_timer, timeout_queue),
+ http_transport:close(SocketType, Socket);
+
+terminate(_Reason, #state{request = undefined}) ->
+ ok;
+
+terminate(Reason, #state{request = Request} = State) ->
+ NewState = maybe_send_answer(Request,
+ httpc_response:error(Request, Reason),
+ State),
+ terminate(Reason, NewState#state{request = undefined}).
+
+%%--------------------------------------------------------------------
+%% Func: code_change(_OldVsn, State, Extra) -> {ok, NewState}
+%% Purpose: Convert process state when code is changed
+%%--------------------------------------------------------------------
+
+code_change(_, State, _) ->
+ {ok, State}.
+
+%%%--------------------------------------------------------------------
+%%% Internal functions
+%%%--------------------------------------------------------------------
+do_handle_call(#request{address = Addr} = Request, _,
#state{status = Status,
session = #session{type = pipeline} = Session,
timers = Timers,
options = #options{proxy = Proxy} = _Options,
profile_name = ProfileName} = State0)
when Status =/= undefined ->
-
- ?hcrv("new request on a pipeline session",
- [{request, Request},
- {profile, ProfileName},
- {status, Status},
- {timers, Timers}]),
-
Address = handle_proxy(Addr, Proxy),
-
case httpc_request:send(Address, Session, Request) of
ok ->
@@ -287,9 +406,8 @@ handle_call(#request{address = Addr} = Request, _,
case State0#state.request of
#request{} = OldRequest -> %% Old request not yet finished
- ?hcrd("old request still not finished", []),
%% Make sure to use the new value of timers in state
- NewTimers = State1#state.timers,
+ NewTimers = State1#state.timers,
NewPipeline = queue:in(Request, State1#state.pipeline),
NewSession =
Session#session{queue_length =
@@ -297,7 +415,6 @@ handle_call(#request{address = Addr} = Request, _,
queue:len(NewPipeline) + 1,
client_close = ClientClose},
insert_session(NewSession, ProfileName),
- ?hcrd("session updated", []),
{reply, ok, State1#state{
request = OldRequest,
pipeline = NewPipeline,
@@ -306,7 +423,6 @@ handle_call(#request{address = Addr} = Request, _,
undefined ->
%% Note: tcp-message receiving has already been
%% activated by handle_pipeline/2.
- ?hcrd("no current request", []),
cancel_timer(Timers#timers.queue_timer,
timeout_queue),
NewSession =
@@ -314,18 +430,16 @@ handle_call(#request{address = Addr} = Request, _,
client_close = ClientClose},
httpc_manager:insert_session(NewSession, ProfileName),
NewTimers = Timers#timers{queue_timer = undefined},
- ?hcrd("session created", []),
State = init_wait_for_response_state(Request, State1#state{session = NewSession,
timers = NewTimers}),
{reply, ok, State}
end;
{error, Reason} ->
- ?hcri("failed sending request", [{reason, Reason}]),
NewPipeline = queue:in(Request, State0#state.pipeline),
- {stop, shutdown, {pipeline_failed, Reason}, State0#state{pipeline = NewPipeline}}
+ {stop, {shutdown, {pipeline_failed, Reason}}, State0#state{pipeline = NewPipeline}}
end;
-handle_call(#request{address = Addr} = Request, _,
+do_handle_call(#request{address = Addr} = Request, _,
#state{status = Status,
session = #session{type = keep_alive} = Session,
timers = Timers,
@@ -333,17 +447,11 @@ handle_call(#request{address = Addr} = Request, _,
profile_name = ProfileName} = State0)
when Status =/= undefined ->
- ?hcrv("new request on a keep-alive session",
- [{request, Request},
- {profile, ProfileName},
- {status, Status}]),
-
ClientClose = httpc_request:is_client_closing(Request#request.headers),
case State0#state.request of
#request{} -> %% Old request not yet finished
%% Make sure to use the new value of timers in state
- ?hcrd("old request still not finished", []),
NewKeepAlive = queue:in(Request, State0#state.keep_alive),
NewSession =
Session#session{queue_length =
@@ -351,13 +459,11 @@ handle_call(#request{address = Addr} = Request, _,
queue:len(NewKeepAlive) + 1,
client_close = ClientClose},
insert_session(NewSession, ProfileName),
- ?hcrd("session updated", []),
{reply, ok, State0#state{keep_alive = NewKeepAlive,
session = NewSession}};
undefined ->
%% Note: tcp-message receiving has already been
%% activated by handle_pipeline/2.
- ?hcrd("no current request", []),
cancel_timer(Timers#timers.queue_timer,
timeout_queue),
NewTimers = Timers#timers{queue_timer = undefined},
@@ -365,8 +471,6 @@ handle_call(#request{address = Addr} = Request, _,
Address = handle_proxy(Addr, Proxy),
case httpc_request:send(Address, Session, Request) of
ok ->
- ?hcrd("request sent", []),
-
%% Activate the request time out for the new request
State2 =
activate_request_timeout(State1#state{request = Request}),
@@ -377,22 +481,13 @@ handle_call(#request{address = Addr} = Request, _,
State = init_wait_for_response_state(Request, State2#state{session = NewSession}),
{reply, ok, State};
{error, Reason} ->
- ?hcri("failed sending request", [{reason, Reason}]),
- {stop, shutdown, {keepalive_failed, Reason}, State1}
+ {stop, {shutdown, {keepalive_failed, Reason}}, State1}
end
end;
-
-handle_call(info, _, State) ->
+do_handle_call(info, _, State) ->
Info = handler_info(State),
{reply, Info, State}.
-%%--------------------------------------------------------------------
-%% Function: handle_cast(Msg, State) -> {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, State} (terminate/2 is called)
-%% Description: Handling cast messages
-%%--------------------------------------------------------------------
-
%% When the request in process has been canceled the handler process is
%% stopped and the pipelined requests will be reissued or remaining
%% requests will be sent on a new connection. This is
@@ -405,145 +500,102 @@ handle_call(info, _, State) ->
%% handle_keep_alive_queue/2 on the other hand will just skip the
%% request as if it was never issued as in this case the request will
%% not have been sent.
-handle_cast({cancel, RequestId},
+do_handle_cast({cancel, RequestId},
#state{request = #request{id = RequestId} = Request,
- profile_name = ProfileName,
canceled = Canceled} = State) ->
- ?hcrv("cancel current request", [{request_id, RequestId},
- {profile, ProfileName},
- {canceled, Canceled}]),
{stop, normal,
State#state{canceled = [RequestId | Canceled],
request = Request#request{from = answer_sent}}};
-handle_cast({cancel, RequestId},
- #state{profile_name = ProfileName,
- request = #request{id = CurrId},
- canceled = Canceled} = State) ->
- ?hcrv("cancel", [{request_id, RequestId},
- {curr_req_id, CurrId},
- {profile, ProfileName},
- {canceled, Canceled}]),
+do_handle_cast({cancel, RequestId},
+ #state{request = #request{},
+ canceled = Canceled} = State) ->
{noreply, State#state{canceled = [RequestId | Canceled]}};
-handle_cast({cancel, RequestId},
- #state{profile_name = ProfileName,
- request = undefined,
- canceled = Canceled} = State) ->
- ?hcrv("cancel", [{request_id, RequestId},
- {curr_req_id, undefined},
- {profile, ProfileName},
- {canceled, Canceled}]),
+do_handle_cast({cancel, _},
+ #state{request = undefined} = State) ->
{noreply, State};
-
-handle_cast(stream_next, #state{session = Session} = State) ->
+do_handle_cast(stream_next, #state{session = Session} = State) ->
activate_once(Session),
%% Inactivate the #state.once here because we don't want
%% next_body_chunk/1 to activate the socket twice.
{noreply, State#state{once = inactive}}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_info(Info, State) -> {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, State} (terminate/2 is called)
-%% Description: Handling all non call/cast messages
-%%--------------------------------------------------------------------
-handle_info({Proto, _Socket, Data},
+do_handle_info({Proto, _Socket, Data},
#state{mfa = {Module, Function, Args},
- request = #request{method = Method,
- stream = Stream} = Request,
+ request = #request{method = Method} = Request,
session = Session,
status_line = StatusLine} = State)
when (Proto =:= tcp) orelse
(Proto =:= ssl) orelse
(Proto =:= httpc_handler) ->
- ?hcri("received data", [{proto, Proto},
- {module, Module},
- {function, Function},
- {method, Method},
- {stream, Stream},
- {session, Session},
- {status_line, StatusLine}]),
-
- FinalResult =
- try Module:Function([Data | Args]) of
- {ok, Result} ->
- ?hcrd("data processed - ok", []),
- handle_http_msg(Result, State);
- {_, whole_body, _} when Method =:= head ->
- ?hcrd("data processed - whole body", []),
- handle_response(State#state{body = <<>>});
- {Module, whole_body, [Body, Length]} ->
- ?hcrd("data processed - whole body", [{length, Length}]),
- {_, Code, _} = StatusLine,
- {Streamed, NewBody, NewRequest} = stream(Body, Request, Code),
- %% When we stream we will not keep the already
- %% streamed data, that would be a waste of memory.
- NewLength =
- case Streamed of
- false ->
- Length;
- true ->
- Length - size(Body)
- end,
-
- NewState = next_body_chunk(State, Code),
- NewMFA = {Module, whole_body, [NewBody, NewLength]},
- {noreply, NewState#state{mfa = NewMFA,
- request = NewRequest}};
- {Module, decode_size,
- [TotalChunk, HexList,
+ try Module:Function([Data | Args]) of
+ {ok, Result} ->
+ handle_http_msg(Result, State);
+ {_, whole_body, _} when Method =:= head ->
+ handle_response(State#state{body = <<>>});
+ {Module, whole_body, [Body, Length]} ->
+ {_, Code, _} = StatusLine,
+ {Streamed, NewBody, NewRequest} = stream(Body, Request, Code),
+ %% When we stream we will not keep the already
+ %% streamed data, that would be a waste of memory.
+ NewLength =
+ case Streamed of
+ false ->
+ Length;
+ true ->
+ Length - size(Body)
+ end,
+
+ NewState = next_body_chunk(State, Code),
+ NewMFA = {Module, whole_body, [NewBody, NewLength]},
+ {noreply, NewState#state{mfa = NewMFA,
+ request = NewRequest}};
+ {Module, decode_size,
+ [TotalChunk, HexList, AccHeaderSize,
{MaxBodySize, BodySoFar, AccLength, MaxHeaderSize}]}
- when BodySoFar =/= <<>> ->
- ?hcrd("data processed - decode_size", []),
- %% The response body is chunk-encoded. Steal decoded
- %% chunks as much as possible to stream.
- {_, Code, _} = StatusLine,
- {_, NewBody, NewRequest} = stream(BodySoFar, Request, Code),
- NewState = next_body_chunk(State, Code),
- NewMFA = {Module, decode_size,
- [TotalChunk, HexList,
+ when BodySoFar =/= <<>> ->
+ %% The response body is chunk-encoded. Steal decoded
+ %% chunks as much as possible to stream.
+ {_, Code, _} = StatusLine,
+ {_, NewBody, NewRequest} = stream(BodySoFar, Request, Code),
+ NewState = next_body_chunk(State, Code),
+ NewMFA = {Module, decode_size,
+ [TotalChunk, HexList, AccHeaderSize,
{MaxBodySize, NewBody, AccLength, MaxHeaderSize}]},
+ {noreply, NewState#state{mfa = NewMFA,
+ request = NewRequest}};
+ {Module, decode_data,
+ [ChunkSize, TotalChunk,
+ {MaxBodySize, BodySoFar, AccLength, MaxHeaderSize}]}
+ when TotalChunk =/= <<>> orelse BodySoFar =/= <<>> ->
+ %% The response body is chunk-encoded. Steal decoded
+ %% chunks as much as possible to stream.
+ ChunkSizeToSteal = min(ChunkSize, byte_size(TotalChunk)),
+ <<StolenChunk:ChunkSizeToSteal/binary, NewTotalChunk/binary>> = TotalChunk,
+ StolenBody = <<BodySoFar/binary, StolenChunk/binary>>,
+ NewChunkSize = ChunkSize - ChunkSizeToSteal,
+ {_, Code, _} = StatusLine,
+
+ {_, NewBody, NewRequest} = stream(StolenBody, Request, Code),
+ NewState = next_body_chunk(State, Code),
+ NewMFA = {Module, decode_data,
+ [NewChunkSize, NewTotalChunk,
+ {MaxBodySize, NewBody, AccLength, MaxHeaderSize}]},
{noreply, NewState#state{mfa = NewMFA,
request = NewRequest}};
- {Module, decode_data,
- [ChunkSize, TotalChunk,
- {MaxBodySize, BodySoFar, AccLength, MaxHeaderSize}]}
- when TotalChunk =/= <<>> orelse BodySoFar =/= <<>> ->
- ?hcrd("data processed - decode_data", []),
- %% The response body is chunk-encoded. Steal decoded
- %% chunks as much as possible to stream.
- ChunkSizeToSteal = min(ChunkSize, byte_size(TotalChunk)),
- <<StolenChunk:ChunkSizeToSteal/binary, NewTotalChunk/binary>> = TotalChunk,
- StolenBody = <<BodySoFar/binary, StolenChunk/binary>>,
- NewChunkSize = ChunkSize - ChunkSizeToSteal,
- {_, Code, _} = StatusLine,
-
- {_, NewBody, NewRequest} = stream(StolenBody, Request, Code),
- NewState = next_body_chunk(State, Code),
- NewMFA = {Module, decode_data,
- [NewChunkSize, NewTotalChunk,
- {MaxBodySize, NewBody, AccLength, MaxHeaderSize}]},
- {noreply, NewState#state{mfa = NewMFA,
- request = NewRequest}};
- NewMFA ->
- ?hcrd("data processed - new mfa", []),
- activate_once(Session),
- {noreply, State#state{mfa = NewMFA}}
- catch
- _:_Reason ->
- ?hcrd("data processing exit", [{exit, _Reason}]),
- ClientReason = {could_not_parse_as_http, Data},
- ClientErrMsg = httpc_response:error(Request, ClientReason),
- NewState = answer_request(Request, ClientErrMsg, State),
- {stop, normal, NewState}
- end,
- ?hcri("data processed", [{final_result, FinalResult}]),
- FinalResult;
-
+ NewMFA ->
+ activate_once(Session),
+ {noreply, State#state{mfa = NewMFA}}
+ catch
+ _:Reason ->
+ ClientReason = {could_not_parse_as_http, Data},
+ ClientErrMsg = httpc_response:error(Request, ClientReason),
+ NewState = answer_request(Request, ClientErrMsg, State),
+ {stop, {shutdown, Reason}, NewState}
+ end;
-handle_info({Proto, Socket, Data},
+do_handle_info({Proto, Socket, Data},
#state{mfa = MFA,
request = Request,
session = Session,
@@ -568,200 +620,107 @@ handle_info({Proto, Socket, Data},
{noreply, State};
-
%% The Server may close the connection to indicate that the
%% whole body is now sent instead of sending a length
%% indicator.
-handle_info({tcp_closed, _}, State = #state{mfa = {_, whole_body, Args}}) ->
+do_handle_info({tcp_closed, _}, State = #state{mfa = {_, whole_body, Args}}) ->
handle_response(State#state{body = hd(Args)});
-handle_info({ssl_closed, _}, State = #state{mfa = {_, whole_body, Args}}) ->
+do_handle_info({ssl_closed, _}, State = #state{mfa = {_, whole_body, Args}}) ->
handle_response(State#state{body = hd(Args)});
%%% Server closes idle pipeline
-handle_info({tcp_closed, _}, State = #state{request = undefined}) ->
+do_handle_info({tcp_closed, _}, State = #state{request = undefined}) ->
{stop, normal, State};
-handle_info({ssl_closed, _}, State = #state{request = undefined}) ->
+do_handle_info({ssl_closed, _}, State = #state{request = undefined}) ->
{stop, normal, State};
%%% Error cases
-handle_info({tcp_closed, _}, #state{session = Session0} = State) ->
+do_handle_info({tcp_closed, _}, #state{session = Session0} = State) ->
Socket = Session0#session.socket,
Session = Session0#session{socket = {remote_close, Socket}},
%% {stop, session_remotly_closed, State};
{stop, normal, State#state{session = Session}};
-handle_info({ssl_closed, _}, #state{session = Session0} = State) ->
+do_handle_info({ssl_closed, _}, #state{session = Session0} = State) ->
Socket = Session0#session.socket,
Session = Session0#session{socket = {remote_close, Socket}},
%% {stop, session_remotly_closed, State};
{stop, normal, State#state{session = Session}};
-handle_info({tcp_error, _, _} = Reason, State) ->
+do_handle_info({tcp_error, _, _} = Reason, State) ->
{stop, Reason, State};
-handle_info({ssl_error, _, _} = Reason, State) ->
+do_handle_info({ssl_error, _, _} = Reason, State) ->
{stop, Reason, State};
%% Timeouts
%% Internally, to a request handling process, a request timeout is
%% seen as a canceled request.
-handle_info({timeout, RequestId},
+do_handle_info({timeout, RequestId},
#state{request = #request{id = RequestId} = Request,
canceled = Canceled,
profile_name = ProfileName} = State) ->
- ?hcri("timeout of current request", [{id, RequestId}]),
httpc_response:send(Request#request.from,
httpc_response:error(Request, timeout)),
httpc_manager:request_done(RequestId, ProfileName),
- ?hcrv("response (timeout) sent - now terminate", []),
{stop, normal,
State#state{request = Request#request{from = answer_sent},
canceled = [RequestId | Canceled]}};
-handle_info({timeout, RequestId},
+do_handle_info({timeout, RequestId},
#state{canceled = Canceled,
profile_name = ProfileName} = State) ->
- ?hcri("timeout", [{id, RequestId}]),
Filter =
fun(#request{id = Id, from = From} = Request) when Id =:= RequestId ->
- ?hcrv("found request", [{id, Id}, {from, From}]),
%% Notify the owner
httpc_response:send(From,
httpc_response:error(Request, timeout)),
httpc_manager:request_done(RequestId, ProfileName),
- ?hcrv("response (timeout) sent", []),
[Request#request{from = answer_sent}];
(_) ->
true
end,
case State#state.status of
pipeline ->
- ?hcrd("pipeline", []),
Pipeline = queue:filter(Filter, State#state.pipeline),
{noreply, State#state{canceled = [RequestId | Canceled],
pipeline = Pipeline}};
keep_alive ->
- ?hcrd("keep_alive", []),
KeepAlive = queue:filter(Filter, State#state.keep_alive),
{noreply, State#state{canceled = [RequestId | Canceled],
keep_alive = KeepAlive}}
end;
-handle_info(timeout_queue, State = #state{request = undefined}) ->
+do_handle_info(timeout_queue, State = #state{request = undefined}) ->
{stop, normal, State};
%% Timing was such that the queue_timeout was not canceled!
-handle_info(timeout_queue, #state{timers = Timers} = State) ->
+do_handle_info(timeout_queue, #state{timers = Timers} = State) ->
{noreply, State#state{timers =
Timers#timers{queue_timer = undefined}}};
%% Setting up the connection to the server somehow failed.
-handle_info({init_error, Tag, ClientErrMsg},
+do_handle_info({init_error, Reason, ClientErrMsg},
State = #state{request = Request}) ->
- ?hcrv("init error", [{tag, Tag}, {client_error, ClientErrMsg}]),
NewState = answer_request(Request, ClientErrMsg, State),
- {stop, normal, NewState};
-
+ {stop, {shutdown, Reason}, NewState};
%%% httpc_manager process dies.
-handle_info({'EXIT', _, _}, State = #state{request = undefined}) ->
+do_handle_info({'EXIT', _, _}, State = #state{request = undefined}) ->
{stop, normal, State};
%% Try to finish the current request anyway,
%% there is a fairly high probability that it can be done successfully.
%% Then close the connection, hopefully a new manager is started that
%% can retry requests in the pipeline.
-handle_info({'EXIT', _, _}, State) ->
+do_handle_info({'EXIT', _, _}, State) ->
{noreply, State#state{status = close}}.
-%%--------------------------------------------------------------------
-%% Function: terminate(Reason, State) -> _ (ignored by gen_server)
-%% Description: Shutdown the httpc_handler
-%%--------------------------------------------------------------------
-
-%% Init error there is no socket to be closed.
-terminate(normal,
- #state{request = Request,
- session = {send_failed, AReason} = Reason} = State) ->
- ?hcrd("terminate", [{send_reason, AReason}, {request, Request}]),
- maybe_send_answer(Request,
- httpc_response:error(Request, Reason),
- State),
- ok;
-
-terminate(normal,
- #state{request = Request,
- session = {connect_failed, AReason} = Reason} = State) ->
- ?hcrd("terminate", [{connect_reason, AReason}, {request, Request}]),
- maybe_send_answer(Request,
- httpc_response:error(Request, Reason),
- State),
- ok;
-
-terminate(normal, #state{session = undefined}) ->
- ok;
-
-%% Init error sending, no session information has been setup but
-%% there is a socket that needs closing.
-terminate(normal,
- #state{session = #session{id = undefined} = Session}) ->
- close_socket(Session);
-
-%% Socket closed remotely
-terminate(normal,
- #state{session = #session{socket = {remote_close, Socket},
- socket_type = SocketType,
- id = Id},
- profile_name = ProfileName,
- request = Request,
- timers = Timers,
- pipeline = Pipeline,
- keep_alive = KeepAlive} = State) ->
- ?hcrt("terminate(normal) - remote close",
- [{id, Id}, {profile, ProfileName}]),
-
- %% Clobber session
- (catch httpc_manager:delete_session(Id, ProfileName)),
-
- maybe_retry_queue(Pipeline, State),
- maybe_retry_queue(KeepAlive, State),
-
- %% Cancel timers
- cancel_timers(Timers),
-
- %% Maybe deliver answers to requests
- deliver_answer(Request),
-
- %% And, just in case, close our side (**really** overkill)
- http_transport:close(SocketType, Socket);
-
-terminate(Reason, #state{session = #session{id = Id,
- socket = Socket,
- socket_type = SocketType},
- request = undefined,
- profile_name = ProfileName,
- timers = Timers,
- pipeline = Pipeline,
- keep_alive = KeepAlive} = State) ->
- ?hcrt("terminate",
- [{id, Id}, {profile, ProfileName}, {reason, Reason}]),
-
- %% Clobber session
- (catch httpc_manager:delete_session(Id, ProfileName)),
-
- maybe_retry_queue(Pipeline, State),
- maybe_retry_queue(KeepAlive, State),
-
- cancel_timer(Timers#timers.queue_timer, timeout_queue),
- http_transport:close(SocketType, Socket);
+call(Msg, Pid) ->
+ call(Msg, Pid, infinity).
-terminate(Reason, #state{request = undefined}) ->
- ?hcrt("terminate", [{reason, Reason}]),
- ok;
+call(Msg, Pid, Timeout) ->
+ gen_server:call(Pid, Msg, Timeout).
-terminate(Reason, #state{request = Request} = State) ->
- ?hcrd("terminate", [{reason, Reason}, {request, Request}]),
- NewState = maybe_send_answer(Request,
- httpc_response:error(Request, Reason),
- State),
- terminate(Reason, NewState#state{request = undefined}).
+cast(Msg, Pid) ->
+ gen_server:cast(Pid, Msg).
maybe_retry_queue(Q, State) ->
case queue:is_empty(Q) of
@@ -776,45 +735,13 @@ maybe_send_answer(#request{from = answer_sent}, _Reason, State) ->
maybe_send_answer(Request, Answer, State) ->
answer_request(Request, Answer, State).
-deliver_answer(#request{id = Id, from = From} = Request)
+deliver_answer(#request{from = From} = Request)
when is_pid(From) ->
Response = httpc_response:error(Request, socket_closed_remotely),
- ?hcrd("deliver answer", [{id, Id}, {from, From}, {response, Response}]),
httpc_response:send(From, Response);
-deliver_answer(Request) ->
- ?hcrd("skip deliver answer", [{request, Request}]),
+deliver_answer(_Request) ->
ok.
-
-%%--------------------------------------------------------------------
-%% Func: code_change(_OldVsn, State, Extra) -> {ok, NewState}
-%% Purpose: Convert process state when code is changed
-%%--------------------------------------------------------------------
-
-code_change(_, State, _) ->
- {ok, State}.
-
-
-%% new_http_options({http_options, TimeOut, AutoRedirect, SslOpts,
-%% Auth, Relaxed}) ->
-%% {http_options, "HTTP/1.1", TimeOut, AutoRedirect, SslOpts,
-%% Auth, Relaxed}.
-
-%% old_http_options({http_options, _, TimeOut, AutoRedirect,
-%% SslOpts, Auth, Relaxed}) ->
-%% {http_options, TimeOut, AutoRedirect, SslOpts, Auth, Relaxed}.
-
-%% new_queue(Queue, Fun) ->
-%% List = queue:to_list(Queue),
-%% NewList =
-%% lists:map(fun(Request) ->
-%% Settings =
-%% Fun(Request#request.settings),
-%% Request#request{settings = Settings}
-%% end, List),
-%% queue:from_list(NewList).
-
-
%%%--------------------------------------------------------------------
%%% Internal functions
%%%--------------------------------------------------------------------
@@ -872,26 +799,21 @@ connect(SocketType, ToAddress,
connect_and_send_first_request(Address, Request, #state{options = Options} = State) ->
SocketType = socket_type(Request),
ConnTimeout = (Request#request.settings)#http_options.connect_timeout,
- ?hcri("connect",
- [{address, Address}, {request, Request}, {options, Options}]),
case connect(SocketType, Address, Options, ConnTimeout) of
{ok, Socket} ->
ClientClose =
- httpc_request:is_client_closing(
- Request#request.headers),
+ httpc_request:is_client_closing(
+ Request#request.headers),
SessionType = httpc_manager:session_type(Options),
SocketType = socket_type(Request),
Session = #session{id = {Request#request.address, self()},
scheme = Request#request.scheme,
socket = Socket,
- socket_type = SocketType,
- client_close = ClientClose,
- type = SessionType},
- ?hcri("connected - now send first request", [{socket, Socket}]),
-
+ socket_type = SocketType,
+ client_close = ClientClose,
+ type = SessionType},
case httpc_request:send(Address, Session, Request) of
ok ->
- ?hcri("first request sent", []),
TmpState = State#state{request = Request,
session = Session,
mfa = init_mfa(Request, State),
@@ -949,12 +871,6 @@ handler_info(#state{request = Request,
options = _Options,
timers = _Timers} = _State) ->
- ?hcrt("handler info", [{request, Request},
- {session, Session},
- {pipeline, Pipeline},
- {keep_alive, KeepAlive},
- {status, Status}]),
-
%% Info about the current request
RequestInfo =
case Request of
@@ -965,8 +881,6 @@ handler_info(#state{request = Request,
[{id, Id}, {started, ReqStarted}]
end,
- ?hcrt("handler info", [{request_info, RequestInfo}]),
-
%% Info about the current session/socket
SessionType = Session#session.type,
QueueLen = case SessionType of
@@ -979,22 +893,12 @@ handler_info(#state{request = Request,
Socket = Session#session.socket,
SocketType = Session#session.socket_type,
- ?hcrt("handler info", [{session_type, SessionType},
- {queue_length, QueueLen},
- {scheme, Scheme},
- {socket, Socket}]),
-
SocketOpts = http_transport:getopts(SocketType, Socket),
SocketStats = http_transport:getstat(SocketType, Socket),
Remote = http_transport:peername(SocketType, Socket),
Local = http_transport:sockname(SocketType, Socket),
- ?hcrt("handler info", [{remote, Remote},
- {local, Local},
- {socket_opts, SocketOpts},
- {socket_stats, SocketStats}]),
-
SocketInfo = [{remote, Remote},
{local, Local},
{socket_opts, SocketOpts},
@@ -1014,7 +918,6 @@ handler_info(#state{request = Request,
handle_http_msg({Version, StatusCode, ReasonPharse, Headers, Body},
State = #state{request = Request}) ->
- ?hcrt("handle_http_msg", [{headers, Headers}]),
case Headers#http_response_h.'content-type' of
"multipart/byteranges" ++ _Param ->
exit({not_yet_implemented, multypart_byteranges});
@@ -1028,15 +931,12 @@ handle_http_msg({Version, StatusCode, ReasonPharse, Headers, Body},
end;
handle_http_msg({ChunkedHeaders, Body},
#state{status_line = {_, Code, _}, headers = Headers} = State) ->
- ?hcrt("handle_http_msg",
- [{chunked_headers, ChunkedHeaders}, {headers, Headers}]),
NewHeaders = http_chunk:handle_headers(Headers, ChunkedHeaders),
{_, NewBody, NewRequest} = stream(Body, State#state.request, Code),
handle_response(State#state{headers = NewHeaders,
body = NewBody,
request = NewRequest});
handle_http_msg(Body, #state{status_line = {_,Code, _}} = State) ->
- ?hcrt("handle_http_msg", [{code, Code}]),
{_, NewBody, NewRequest} = stream(Body, State#state.request, Code),
handle_response(State#state{body = NewBody, request = NewRequest}).
@@ -1051,41 +951,28 @@ handle_http_body(_, #state{status = {ssl_tunnel, Request},
{stop, normal, NewState};
handle_http_body(<<>>, #state{status_line = {_,304, _}} = State) ->
- ?hcrt("handle_http_body - 304", []),
handle_response(State#state{body = <<>>});
handle_http_body(<<>>, #state{status_line = {_,204, _}} = State) ->
- ?hcrt("handle_http_body - 204", []),
handle_response(State#state{body = <<>>});
handle_http_body(<<>>, #state{request = #request{method = head}} = State) ->
- ?hcrt("handle_http_body - head", []),
handle_response(State#state{body = <<>>});
handle_http_body(Body, #state{headers = Headers,
max_body_size = MaxBodySize,
status_line = {_,Code, _},
request = Request} = State) ->
- ?hcrt("handle_http_body",
- [{max_body_size, MaxBodySize}, {headers, Headers}, {code, Code}]),
TransferEnc = Headers#http_response_h.'transfer-encoding',
case case_insensitive_header(TransferEnc) of
"chunked" ->
- ?hcrt("handle_http_body - chunked", []),
try http_chunk:decode(Body, State#state.max_body_size,
State#state.max_header_size) of
{Module, Function, Args} ->
- ?hcrt("handle_http_body - new mfa",
- [{module, Module},
- {function, Function},
- {args, Args}]),
NewState = next_body_chunk(State, Code),
{noreply, NewState#state{mfa =
{Module, Function, Args}}};
{ok, {ChunkedHeaders, NewBody}} ->
- ?hcrt("handle_http_body - new body",
- [{chunked_headers, ChunkedHeaders},
- {new_body, NewBody}]),
NewHeaders = http_chunk:handle_headers(Headers,
ChunkedHeaders),
case Body of
@@ -1107,7 +994,6 @@ handle_http_body(Body, #state{headers = Headers,
{stop, normal, NewState}
end;
Enc when Enc =:= "identity"; Enc =:= undefined ->
- ?hcrt("handle_http_body - identity", []),
Length =
list_to_integer(Headers#http_response_h.'content-length'),
case ((Length =< MaxBodySize) orelse (MaxBodySize =:= nolimit)) of
@@ -1131,7 +1017,6 @@ handle_http_body(Body, #state{headers = Headers,
{stop, normal, NewState}
end;
Encoding when is_list(Encoding) ->
- ?hcrt("handle_http_body - other", [{encoding, Encoding}]),
NewState = answer_request(Request,
httpc_response:error(Request,
unknown_encoding),
@@ -1152,18 +1037,10 @@ handle_response(#state{request = Request,
options = Options,
profile_name = ProfileName} = State)
when Status =/= new ->
-
- ?hcrd("handle response", [{profile, ProfileName},
- {status, Status},
- {request, Request},
- {session, Session},
- {status_line, StatusLine}]),
-
handle_cookies(Headers, Request, Options, ProfileName),
case httpc_response:result({StatusLine, Headers, Body}, Request) of
%% 100-continue
continue ->
- ?hcrd("handle response - continue", []),
%% Send request body
{_, RequestBody} = Request#request.content,
send_raw(Session, RequestBody),
@@ -1180,7 +1057,6 @@ handle_response(#state{request = Request,
%% Ignore unexpected 100-continue response and receive the
%% actual response that the server will send right away.
{ignore, Data} ->
- ?hcrd("handle response - ignore", [{data, Data}]),
Relaxed = (Request#request.settings)#http_options.relaxed,
MFA = {httpc_response, parse,
[State#state.max_header_size, Relaxed]},
@@ -1194,23 +1070,17 @@ handle_response(#state{request = Request,
%% obsolete and the manager will create a new request
%% with the same id as the current.
{redirect, NewRequest, Data} ->
- ?hcrt("handle response - redirect",
- [{new_request, NewRequest}, {data, Data}]),
ok = httpc_manager:redirect_request(NewRequest, ProfileName),
handle_queue(State#state{request = undefined}, Data);
{retry, TimeNewRequest, Data} ->
- ?hcrt("handle response - retry",
- [{time_new_request, TimeNewRequest}, {data, Data}]),
ok = httpc_manager:retry_request(TimeNewRequest, ProfileName),
handle_queue(State#state{request = undefined}, Data);
{ok, Msg, Data} ->
- ?hcrd("handle response - ok", []),
stream_remaining_body(Body, Request, StatusLine),
end_stream(StatusLine, Request),
NewState = maybe_send_answer(Request, Msg, State),
handle_queue(NewState, Data);
{stop, Msg} ->
- ?hcrd("handle response - stop", [{msg, Msg}]),
end_stream(StatusLine, Request),
NewState = maybe_send_answer(Request, Msg, State),
{stop, normal, NewState}
@@ -1245,28 +1115,19 @@ handle_pipeline(#state{status = pipeline,
profile_name = ProfileName,
options = #options{pipeline_timeout = TimeOut}} = State,
Data) ->
-
- ?hcrd("handle pipeline", [{profile, ProfileName},
- {session, Session},
- {timeout, TimeOut}]),
-
case queue:out(State#state.pipeline) of
{empty, _} ->
- ?hcrd("pipeline queue empty", []),
handle_empty_queue(Session, ProfileName, TimeOut, State);
{{value, NextRequest}, Pipeline} ->
- ?hcrd("pipeline queue non-empty", []),
case lists:member(NextRequest#request.id,
State#state.canceled) of
true ->
- ?hcrv("next request had been cancelled", []),
%% See comment for handle_cast({cancel, RequestId})
{stop, normal,
State#state{request =
NextRequest#request{from = answer_sent},
pipeline = Pipeline}};
false ->
- ?hcrv("next request", [{request, NextRequest}]),
NewSession =
Session#session{queue_length =
%% Queue + current
@@ -1283,25 +1144,16 @@ handle_keep_alive_queue(#state{status = keep_alive,
options = #options{keep_alive_timeout = TimeOut,
proxy = Proxy}} = State,
Data) ->
-
- ?hcrd("handle keep_alive", [{profile, ProfileName},
- {session, Session},
- {timeout, TimeOut}]),
-
case queue:out(State#state.keep_alive) of
{empty, _} ->
- ?hcrd("keep_alive queue empty", []),
handle_empty_queue(Session, ProfileName, TimeOut, State);
{{value, NextRequest}, KeepAlive} ->
- ?hcrd("keep_alive queue non-empty", []),
case lists:member(NextRequest#request.id,
State#state.canceled) of
true ->
- ?hcrv("next request has already been canceled", []),
handle_keep_alive_queue(
State#state{keep_alive = KeepAlive}, Data);
false ->
- ?hcrv("next request", [{request, NextRequest}]),
#request{address = Addr} = NextRequest,
Address = handle_proxy(Addr, Proxy),
case httpc_request:send(Address, Session, NextRequest) of
@@ -1314,7 +1166,6 @@ handle_keep_alive_queue(#state{status = keep_alive,
end
end
end.
-
handle_empty_queue(Session, ProfileName, TimeOut, State) ->
%% The server may choose to terminate an idle pipeline | keep_alive session
%% in this case we want to receive the close message
@@ -1350,7 +1201,6 @@ init_wait_for_response_state(Request, State) ->
status_line = undefined,
headers = undefined,
body = undefined}.
-
gather_data(<<>>, Session, State) ->
activate_once(Session),
{noreply, State};
@@ -1381,10 +1231,6 @@ activate_request_timeout(
State;
_ ->
ReqId = Request#request.id,
- ?hcrt("activate request timer",
- [{request_id, ReqId},
- {time_consumed, t() - Request#request.started},
- {timeout, Timeout}]),
Msg = {timeout, ReqId},
Ref = erlang:send_after(Timeout, self(), Msg),
Request2 = Request#request{timer = Ref},
@@ -1427,10 +1273,6 @@ try_to_enable_pipeline_or_keep_alive(
status_line = {Version, _, _},
headers = Headers,
profile_name = ProfileName} = State) ->
- ?hcrd("try to enable pipeline or keep-alive",
- [{version, Version},
- {headers, Headers},
- {session, Session}]),
case is_keep_alive_enabled_server(Version, Headers) andalso
is_keep_alive_connection(Headers, Session) of
true ->
@@ -1455,7 +1297,6 @@ answer_request(#request{id = RequestId, from = From} = Request, Msg,
#state{session = Session,
timers = Timers,
profile_name = ProfileName} = State) ->
- ?hcrt("answer request", [{request, Request}, {msg, Msg}]),
httpc_response:send(From, Msg),
RequestTimers = Timers#timers.request_timers,
TimerRef =
@@ -1602,42 +1443,32 @@ socket_type(#request{scheme = http}) ->
ip_comm;
socket_type(#request{scheme = https, settings = Settings}) ->
Settings#http_options.ssl.
-%% socket_type(http) ->
-%% ip_comm;
-%% socket_type(https) ->
-%% {ssl1, []}. %% Dummy value ok for ex setopts that does not use this value
start_stream({_Version, _Code, _ReasonPhrase}, _Headers,
#request{stream = none} = Request) ->
- ?hcrt("start stream - none", []),
{ok, Request};
start_stream({_Version, Code, _ReasonPhrase}, Headers,
#request{stream = self} = Request)
when ?IS_STREAMED(Code) ->
- ?hcrt("start stream - self", [{code, Code}]),
Msg = httpc_response:stream_start(Headers, Request, ignore),
httpc_response:send(Request#request.from, Msg),
{ok, Request};
start_stream({_Version, Code, _ReasonPhrase}, Headers,
#request{stream = {self, once}} = Request)
when ?IS_STREAMED(Code) ->
- ?hcrt("start stream - self:once", [{code, Code}]),
Msg = httpc_response:stream_start(Headers, Request, self()),
httpc_response:send(Request#request.from, Msg),
{ok, Request};
start_stream({_Version, Code, _ReasonPhrase}, _Headers,
#request{stream = Filename} = Request)
when ?IS_STREAMED(Code) andalso is_list(Filename) ->
- ?hcrt("start stream", [{code, Code}, {filename, Filename}]),
case file:open(Filename, [write, raw, append, delayed_write]) of
{ok, Fd} ->
- ?hcri("start stream - file open ok", [{fd, Fd}]),
{ok, Request#request{stream = Fd}};
{error, Reason} ->
exit({stream_to_file_failed, Reason})
end;
start_stream(_StatusLine, _Headers, Request) ->
- ?hcrt("start stream - no op", []),
{ok, Request}.
stream_remaining_body(<<>>, _, _) ->
@@ -1648,16 +1479,12 @@ stream_remaining_body(Body, Request, {_, Code, _}) ->
%% Note the end stream message is handled by httpc_response and will
%% be sent by answer_request
end_stream(_, #request{stream = none}) ->
- ?hcrt("end stream - none", []),
ok;
end_stream(_, #request{stream = self}) ->
- ?hcrt("end stream - self", []),
ok;
end_stream(_, #request{stream = {self, once}}) ->
- ?hcrt("end stream - self:once", []),
ok;
end_stream({_,200,_}, #request{stream = Fd}) ->
- ?hcrt("end stream - 200", [{stream, Fd}]),
case file:close(Fd) of
ok ->
ok;
@@ -1665,15 +1492,13 @@ end_stream({_,200,_}, #request{stream = Fd}) ->
file:close(Fd)
end;
end_stream({_,206,_}, #request{stream = Fd}) ->
- ?hcrt("end stream - 206", [{stream, Fd}]),
case file:close(Fd) of
ok ->
ok;
{error, enospc} -> % Could be due to delayed_write
file:close(Fd)
end;
-end_stream(SL, R) ->
- ?hcrt("end stream", [{status_line, SL}, {request, R}]),
+end_stream(_, _) ->
ok.
@@ -1702,11 +1527,8 @@ handle_verbose(trace) ->
handle_verbose(_) ->
ok.
-
-
send_raw(#session{socket = Socket, socket_type = SocketType},
{ProcessBody, Acc}) when is_function(ProcessBody, 1) ->
- ?hcrt("send raw", [{acc, Acc}]),
send_raw(SocketType, Socket, ProcessBody, Acc);
send_raw(#session{socket = Socket, socket_type = SocketType}, Body) ->
http_transport:send(SocketType, Socket, Body).
@@ -1717,7 +1539,6 @@ send_raw(SocketType, Socket, ProcessBody, Acc) ->
ok;
{ok, Data, NewAcc} ->
DataBin = iolist_to_binary(Data),
- ?hcrd("send", [{data, DataBin}]),
case http_transport:send(SocketType, Socket, DataBin) of
ok ->
send_raw(SocketType, Socket, ProcessBody, NewAcc);
@@ -1883,16 +1704,3 @@ update_session(ProfileName, #session{id = SessionId} = Session, Pos, Value) ->
end.
-%% ---------------------------------------------------------------------
-
-call(Msg, Pid) ->
- call(Msg, Pid, infinity).
-
-call(Msg, Pid, Timeout) ->
- gen_server:call(Pid, Msg, Timeout).
-
-cast(Msg, Pid) ->
- gen_server:cast(Pid, Msg).
-
-t() ->
- http_util:timestamp().
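The thread running through this file is that all three gen_server callbacks now delegate to do_handle_call/3, do_handle_cast/2 and do_handle_info/2 inside a try/catch, converting any crash into {stop, {shutdown, Reason}, State}; because the exit reason is wrapped in shutdown, the handler terminates without a supervisor crash report. A minimal sketch of the same pattern in isolation (module and callbacks are illustrative, not part of the patch):

    -module(quiet_server).   %% hypothetical example
    -behaviour(gen_server).
    -export([init/1, handle_call/3, handle_cast/2, handle_info/2,
             terminate/2, code_change/3]).

    init(Arg) -> {ok, Arg}.

    %% Any exception in the real work becomes a {shutdown, Reason} stop,
    %% which gen_server and supervisors treat as a normal shutdown
    %% (no crash report is logged).
    handle_call(Request, From, State) ->
        try do_handle_call(Request, From, State)
        catch
            _:Reason -> {stop, {shutdown, Reason}, State}
        end.

    handle_cast(_Msg, State) -> {noreply, State}.
    handle_info(_Info, State) -> {noreply, State}.
    terminate(_Reason, _State) -> ok.
    code_change(_OldVsn, State, _Extra) -> {ok, State}.

    do_handle_call(ping, _From, State) -> {reply, pong, State}.

Note that the {stop, Reason, State} form of handle_call sends no reply, so the caller's gen_server:call exits once the server stops.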
diff --git a/lib/inets/src/http_client/httpc_response.erl b/lib/inets/src/http_client/httpc_response.erl
index d8bdac24e3..0fd5faa466 100644
--- a/lib/inets/src/http_client/httpc_response.erl
+++ b/lib/inets/src/http_client/httpc_response.erl
@@ -363,7 +363,7 @@ redirect(Response = {StatusLine, Headers, Body}, Request) ->
%% Automatic redirection
{ok, {Scheme, _, Host, Port, Path, Query}} ->
NewHeaders =
- (Request#request.headers)#http_request_h{host = Host},
+ (Request#request.headers)#http_request_h{host = Host++":"++integer_to_list(Port)},
NewRequest =
Request#request{redircount =
Request#request.redircount+1,
diff --git a/lib/inets/src/http_server/httpd_request_handler.erl b/lib/inets/src/http_server/httpd_request_handler.erl
index 7e20a9ba67..82273c8c74 100644
--- a/lib/inets/src/http_server/httpd_request_handler.erl
+++ b/lib/inets/src/http_server/httpd_request_handler.erl
@@ -241,9 +241,9 @@ handle_info({tcp_closed, _}, State) ->
handle_info({ssl_closed, _}, State) ->
{stop, normal, State};
handle_info({tcp_error, _, _} = Reason, State) ->
- {stop, Reason, State};
+ {stop, {shutdown, Reason}, State};
handle_info({ssl_error, _, _} = Reason, State) ->
- {stop, Reason, State};
+ {stop, {shutdown, Reason}, State};
%% Timeouts
handle_info(timeout, #state{mfa = {_, parse, _}} = State) ->
diff --git a/lib/inets/src/inets_app/inets.appup.src b/lib/inets/src/inets_app/inets.appup.src
index 3a31daeb20..d28d4cd766 100644
--- a/lib/inets/src/inets_app/inets.appup.src
+++ b/lib/inets/src/inets_app/inets.appup.src
@@ -18,10 +18,14 @@
%% %CopyrightEnd%
{"%VSN%",
[
+ {<<"6.2.4">>, [{load_module, httpd_request_handler,
+ soft_purge, soft_purge, []}]},
{<<"6\\..*">>,[{restart_application, inets}]},
{<<"5\\..*">>,[{restart_application, inets}]}
],
[
+ {<<"6.2.4">>, [{load_module, httpd_request_handler,
+ soft_purge, soft_purge, []}]},
{<<"6\\..*">>,[{restart_application, inets}]},
{<<"5\\..*">>,[{restart_application, inets}]}
]
diff --git a/lib/inets/test/httpc_SUITE.erl b/lib/inets/test/httpc_SUITE.erl
index a64ae2b87c..8aea38037d 100644
--- a/lib/inets/test/httpc_SUITE.erl
+++ b/lib/inets/test/httpc_SUITE.erl
@@ -88,7 +88,8 @@ real_requests()->
stream_through_mfa,
streaming_error,
inet_opts,
- invalid_headers
+ invalid_headers,
+ invalid_body
].
only_simulated() ->
@@ -125,7 +126,9 @@ only_simulated() ->
redirect_see_other,
redirect_temporary_redirect,
port_in_host_header,
- relaxed
+ redirect_port_in_host_header,
+ relaxed,
+ multipart_chunks
].
misc() ->
@@ -1000,10 +1003,25 @@ invalid_headers(Config) ->
Request = {url(group_name(Config), "/dummy.html", Config), [{"cookie", undefined}]},
{error, _} = httpc:request(get, Request, [], []).
+%%-------------------------------------------------------------------------
+
+invalid_body(Config) ->
+ URL = url(group_name(Config), "/dummy.html", Config),
+ try
+ httpc:request(post, {URL, [], <<"text/plain">>, "foobar"},
+ [], []),
+ ct:fail(accepted_invalid_input)
+ catch
+ error:function_clause ->
+ ok
+ end.
+
+%%-------------------------------------------------------------------------
remote_socket_close(Config) when is_list(Config) ->
URL = url(group_name(Config), "/just_close.html", Config),
{error, socket_closed_remotely} = httpc:request(URL).
+
%%-------------------------------------------------------------------------
remote_socket_close_async(Config) when is_list(Config) ->
@@ -1102,7 +1120,20 @@ port_in_host_header(Config) when is_list(Config) ->
Request = {url(group_name(Config), "/ensure_host_header_with_port.html", Config), []},
{ok, {{_, 200, _}, _, Body}} = httpc:request(get, Request, [], []),
inets_test_lib:check_body(Body).
+%%-------------------------------------------------------------------------
+redirect_port_in_host_header(Config) when is_list(Config) ->
+ Request = {url(group_name(Config), "/redirect_ensure_host_header_with_port.html", Config), []},
+ {ok, {{_, 200, _}, _, Body}} = httpc:request(get, Request, [], []),
+ inets_test_lib:check_body(Body).
+
+%%-------------------------------------------------------------------------
+multipart_chunks(Config) when is_list(Config) ->
+ Request = {url(group_name(Config), "/multipart_chunks.html", Config), []},
+ {ok, Ref} = httpc:request(get, Request, [], [{sync, false}, {stream, self}]),
+ ok = receive_stream_n(Ref, 10),
+ httpc:cancel_request(Ref).
+
%%-------------------------------------------------------------------------
timeout_memory_leak() ->
[{doc, "Check OTP-8739"}].
@@ -1398,7 +1429,7 @@ dummy_server(Caller, SocketType, Inet, Extra) ->
end.
dummy_server_init(Caller, ip_comm, Inet, _) ->
- BaseOpts = [binary, {packet, 0}, {reuseaddr,true}, {active, false}],
+ BaseOpts = [binary, {packet, 0}, {reuseaddr,true}, {keepalive, true}, {active, false}],
{ok, ListenSocket} = gen_tcp:listen(0, [Inet | BaseOpts]),
{ok, Port} = inet:port(ListenSocket),
Caller ! {port, Port},
@@ -1680,6 +1711,12 @@ handle_uri(_,"/ensure_host_header_with_port.html",_,Headers,_,_) ->
"HTTP/1.1 500 Internal Server Error\r\n" ++
"Content-Length:" ++ Len ++ "\r\n\r\n" ++ B
end;
+handle_uri(_,"/redirect_ensure_host_header_with_port.html",Port,_,Socket,_) ->
+ NewUri = url_start(Socket) ++
+ integer_to_list(Port) ++ "/ensure_host_header_with_port.html",
+ "HTTP/1.1 302 Found \r\n" ++
+ "Location:" ++ NewUri ++ "\r\n" ++
+ "Content-Length:0\r\n\r\n";
handle_uri(_,"/300.html",Port,_,Socket,_) ->
NewUri = url_start(Socket) ++
@@ -1968,6 +2005,16 @@ handle_uri(_,"/missing_CR.html",_,_,_,_) ->
"Content-Length:32\r\n\n" ++
"<HTML><BODY>foobar</BODY></HTML>";
+handle_uri(_,"/multipart_chunks.html",_,_,Socket,_) ->
+ Head = "HTTP/1.1 200 ok\r\n" ++
+ "Transfer-Encoding:chunked\r\n" ++
+ "Date: " ++ httpd_util:rfc1123_date() ++ "\r\n"
+ "Connection: Keep-Alive\r\n" ++
+ "Content-Type: multipart/x-mixed-replace; boundary=chunk_boundary\r\n" ++
+ "\r\n",
+ send(Socket, Head),
+ send_multipart_chunks(Socket),
+ http_chunk:encode_last();
handle_uri("HEAD",_,_,_,_,_) ->
"HTTP/1.1 200 ok\r\n" ++
"Content-Length:0\r\n\r\n";
@@ -2264,3 +2311,21 @@ otp_8739_dummy_server_main(_Parent, ListenSocket) ->
Error ->
exit(Error)
end.
+
+send_multipart_chunks(Socket) ->
+ send(Socket, http_chunk:encode("--chunk_boundary\r\n")),
+ send(Socket, http_chunk:encode("Content-Type: text/plain\r\nContent-Length: 4\r\n\r\n")),
+ send(Socket, http_chunk:encode("test\r\n")),
+ ct:sleep(500),
+ send_multipart_chunks(Socket).
+
+receive_stream_n(_, 0) ->
+ ok;
+receive_stream_n(Ref, N) ->
+ receive
+ {http, {Ref, stream_start, _}} ->
+ receive_stream_n(Ref, N);
+ {http, {Ref,stream, Data}} ->
+ ct:pal("Data: ~p", [Data]),
+ receive_stream_n(Ref, N-1)
+ end.
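multipart_chunks/1 and receive_stream_n/2 above exercise httpc's asynchronous streaming: the request is issued with {sync, false} and {stream, self}, and body parts then arrive as messages tagged with the request reference. A hedged sketch of a complete consumer of that message protocol (function names are made up):

    %% Collects a streamed response body into one binary.
    fetch_streamed(Url) ->
        {ok, Ref} = httpc:request(get, {Url, []}, [],
                                  [{sync, false}, {stream, self}]),
        collect(Ref, <<>>).

    collect(Ref, Acc) ->
        receive
            {http, {Ref, stream_start, _Headers}} ->
                collect(Ref, Acc);
            {http, {Ref, stream, BodyPart}} ->
                collect(Ref, <<Acc/binary, BodyPart/binary>>);
            {http, {Ref, stream_end, _Headers}} ->
                {ok, Acc};
            {http, {Ref, {error, Reason}}} ->
                {error, Reason}
        end.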
diff --git a/lib/kernel/doc/src/heart.xml b/lib/kernel/doc/src/heart.xml
index 59a046bf4d..5b5b71e521 100644
--- a/lib/kernel/doc/src/heart.xml
+++ b/lib/kernel/doc/src/heart.xml
@@ -37,10 +37,7 @@
the <c>heart</c> port program is to check that the Erlang runtime system
it is supervising is still running. If the port program has not
received any heartbeats within <c>HEART_BEAT_TIMEOUT</c> seconds
- (defaults to 60 seconds), the system can be rebooted. Also, if
- the system is equipped with a hardware watchdog timer and is
- running Solaris, the watchdog can be used to supervise the entire
- system.</p>
+ (defaults to 60 seconds), the system can be rebooted.</p>
<p>An Erlang runtime system to be monitored by a heart program
is to be started with command-line flag <c>-heart</c> (see
also <seealso marker="erts:erl"><c>erl(1)</c></seealso>).
@@ -51,17 +48,13 @@
or a terminated Erlang runtime system, environment variable
<c>HEART_COMMAND</c> must be set before the system is started.
If this variable is not set, a warning text is printed but
- the system does not reboot. However, if the hardware watchdog is
- used, it still triggers a reboot <c>HEART_BEAT_BOOT_DELAY</c>
- seconds later (defaults to 60 seconds).</p>
+ the system does not reboot.</p>
<p>To reboot on Windows, <c>HEART_COMMAND</c> can be
set to <c>heart -shutdown</c> (included in the Erlang delivery)
or to any other suitable program that can activate a reboot.</p>
- <p>The hardware watchdog is not started under Solaris if
- environment variable <c>HW_WD_DISABLE</c> is set.</p>
- <p>The environment variables <c>HEART_BEAT_TIMEOUT</c> and
- <c>HEART_BEAT_BOOT_DELAY</c> can be used to configure the heart
- time-outs; they can be set in the operating system shell before Erlang
+ <p>The environment variable <c>HEART_BEAT_TIMEOUT</c>
+ can be used to configure the heart
+      time-out; it can be set in the operating system shell before Erlang
is started or be specified at the command line:</p>
<pre>
% <input>erl -heart -env HEART_BEAT_TIMEOUT 30 ...</input></pre>
diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl
index 3523f680a3..3ee8e2c6e6 100644
--- a/lib/kernel/src/error_logger.erl
+++ b/lib/kernel/src/error_logger.erl
@@ -360,8 +360,12 @@ init(Max) when is_integer(Max) ->
%% go back.
init({go_back, _PostState}) ->
{ok, {?buffer_size, 0, []}};
-init(_) -> %% Start and just relay to other
- {ok, []}. %% node if node(GLeader) =/= node().
+init(_) ->
+ %% The error logger process may receive a huge amount of
+ %% messages. Make sure that they are stored off heap to
+    %% avoid excessive GCs.
+ process_flag(message_queue_data, off_heap),
+ {ok, []}.
-spec handle_event(term(), state()) -> {'ok', state()}.
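Both this change and the rpc change further down rely on the message_queue_data process flag: a process whose mailbox can grow very large keeps its messages off the process heap so they are not copied on every garbage collection. A minimal sketch of the two ways the patch requests this:

    %% Inside the process itself, as error_logger:init/1 now does:
    init(Arg) ->
        process_flag(message_queue_data, off_heap),
        {ok, Arg}.

    %% Or at spawn time via gen_server start options, as done for rex:
    start_link() ->
        gen_server:start_link({local, rex}, ?MODULE, [],
                              [{spawn_opt, [{message_queue_data, off_heap}]}]).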
diff --git a/lib/kernel/src/file.erl b/lib/kernel/src/file.erl
index 58b601e456..6d94f7770f 100644
--- a/lib/kernel/src/file.erl
+++ b/lib/kernel/src/file.erl
@@ -1413,7 +1413,7 @@ path_open_first([Path|Rest], Name, Mode, LastError) ->
case open(FileName, Mode) of
{ok, Fd} ->
{ok, Fd, FileName};
- {error, enoent} ->
+ {error, Reason} when Reason =:= enoent; Reason =:= enotdir ->
path_open_first(Rest, Name, Mode, LastError);
Error ->
Error
diff --git a/lib/kernel/src/heart.erl b/lib/kernel/src/heart.erl
index eea78aabdf..8fa48d56fb 100644
--- a/lib/kernel/src/heart.erl
+++ b/lib/kernel/src/heart.erl
@@ -198,16 +198,11 @@ start_portprogram() ->
end.
get_heart_timeouts() ->
- HeartOpts = case os:getenv("HEART_BEAT_TIMEOUT") of
- false -> "";
- H when is_list(H) ->
- "-ht " ++ H
- end,
- HeartOpts ++ case os:getenv("HEART_BEAT_BOOT_DELAY") of
- false -> "";
- W when is_list(W) ->
- " -wt " ++ W
- end.
+ case os:getenv("HEART_BEAT_TIMEOUT") of
+ false -> "";
+ H when is_list(H) ->
+ "-ht " ++ H
+ end.
check_start_heart() ->
case init:get_argument(heart) of
diff --git a/lib/kernel/src/rpc.erl b/lib/kernel/src/rpc.erl
index 21bff02214..bd6ea26678 100644
--- a/lib/kernel/src/rpc.erl
+++ b/lib/kernel/src/rpc.erl
@@ -67,17 +67,27 @@
%%------------------------------------------------------------------------
+
+%% The rex server may receive a huge amount of
+%% messages. Make sure that they are stored off heap to
+%% avoid excessive GCs.
+
+-define(SPAWN_OPTS, [{spawn_opt,[{message_queue_data,off_heap}]}]).
+
%% Remote execution and broadcasting facility
-spec start() -> {'ok', pid()} | 'ignore' | {'error', term()}.
start() ->
- gen_server:start({local,?NAME}, ?MODULE, [], []).
+ gen_server:start({local,?NAME}, ?MODULE, [], ?SPAWN_OPTS).
-spec start_link() -> {'ok', pid()} | 'ignore' | {'error', term()}.
start_link() ->
- gen_server:start_link({local,?NAME}, ?MODULE, [], []).
+ %% The rex server process may receive a huge amount of
+ %% messages. Make sure that they are stored off heap to
+    %% avoid excessive GCs.
+ gen_server:start_link({local,?NAME}, ?MODULE, [], ?SPAWN_OPTS).
-spec stop() -> term().
diff --git a/lib/kernel/test/error_logger_SUITE.erl b/lib/kernel/test/error_logger_SUITE.erl
index b6e7551741..bb01c2384d 100644
--- a/lib/kernel/test/error_logger_SUITE.erl
+++ b/lib/kernel/test/error_logger_SUITE.erl
@@ -30,6 +30,7 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
+ off_heap/1,
error_report/1, info_report/1, error/1, info/1,
emulator/1, tty/1, logfile/1, add/1, delete/1]).
@@ -45,7 +46,7 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [error_report, info_report, error, info, emulator, tty,
+ [off_heap, error_report, info_report, error, info, emulator, tty,
logfile, add, delete].
groups() ->
@@ -66,6 +67,16 @@ end_per_group(_GroupName, Config) ->
%%-----------------------------------------------------------------
+off_heap(_Config) ->
+ %% The error_logger process may receive a huge amount of
+ %% messages. Make sure that they are stored off heap to
+    %% avoid excessive GCs.
+ MQD = message_queue_data,
+ {MQD,off_heap} = process_info(whereis(error_logger), MQD),
+ ok.
+
+%%-----------------------------------------------------------------
+
error_report(Config) when is_list(Config) ->
error_logger:add_report_handler(?MODULE, self()),
Rep1 = [{tag1,"data1"},{tag2,data2},{tag3,3}],
diff --git a/lib/kernel/test/rpc_SUITE.erl b/lib/kernel/test/rpc_SUITE.erl
index 1c72ddc87f..d76c4097d8 100644
--- a/lib/kernel/test/rpc_SUITE.erl
+++ b/lib/kernel/test/rpc_SUITE.erl
@@ -21,7 +21,8 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2]).
--export([call/1, block_call/1, multicall/1, multicall_timeout/1,
+-export([off_heap/1,
+ call/1, block_call/1, multicall/1, multicall_timeout/1,
multicall_dies/1, multicall_node_dies/1,
called_dies/1, called_node_dies/1,
called_throws/1, call_benchmark/1, async_call/1]).
@@ -35,7 +36,7 @@ suite() ->
{timetrap,{minutes,2}}].
all() ->
- [call, block_call, multicall, multicall_timeout,
+ [off_heap, call, block_call, multicall, multicall_timeout,
multicall_dies, multicall_node_dies, called_dies,
called_node_dies, called_throws, call_benchmark,
async_call].
@@ -55,6 +56,13 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
+off_heap(_Config) ->
+ %% The rex server process may receive a huge amount of
+ %% messages. Make sure that they are stored off heap to
+    %% avoid excessive GCs.
+ MQD = message_queue_data,
+ {MQD,off_heap} = process_info(whereis(rex), MQD),
+ ok.
%% Test different rpc calls.
diff --git a/lib/observer/src/cdv_detail_wx.erl b/lib/observer/src/cdv_detail_wx.erl
index 44f121f359..5782339183 100644
--- a/lib/observer/src/cdv_detail_wx.erl
+++ b/lib/observer/src/cdv_detail_wx.erl
@@ -55,7 +55,7 @@ init([Id, Data, ParentFrame, Callback, Parent]) ->
end,
{stop,normal};
{info,Info} ->
- observer_lib:display_info_dialog(Info),
+ observer_lib:display_info_dialog(ParentFrame,Info),
{stop,normal}
end.
diff --git a/lib/observer/src/crashdump_viewer.erl b/lib/observer/src/crashdump_viewer.erl
index 2f9f81104a..13e73f027d 100644
--- a/lib/observer/src/crashdump_viewer.erl
+++ b/lib/observer/src/crashdump_viewer.erl
@@ -928,7 +928,10 @@ general_info(File) ->
WholeLine -> WholeLine
end,
- GI = get_general_info(Fd,#general_info{created=Created}),
+ {Slogan,SysVsn} = get_slogan_and_sysvsn(Fd,[]),
+ GI = get_general_info(Fd,#general_info{created=Created,
+ slogan=Slogan,
+ system_vsn=SysVsn}),
{MemTot,MemMax} =
case lookup_index(?memory) of
@@ -982,12 +985,20 @@ general_info(File) ->
mem_max=MemMax,
instr_info=InstrInfo}.
+get_slogan_and_sysvsn(Fd,Acc) ->
+ case val(Fd,eof) of
+ "Slogan: " ++ SloganPart when Acc==[] ->
+ get_slogan_and_sysvsn(Fd,[SloganPart]);
+ "System version: " ++ SystemVsn ->
+ {lists:append(lists:reverse(Acc)),SystemVsn};
+ eof ->
+ {lists:append(lists:reverse(Acc)),"-1"};
+ SloganPart ->
+ get_slogan_and_sysvsn(Fd,[[$\n|SloganPart]|Acc])
+ end.
+
get_general_info(Fd,GenInfo) ->
case line_head(Fd) of
- "Slogan" ->
- get_general_info(Fd,GenInfo#general_info{slogan=val(Fd)});
- "System version" ->
- get_general_info(Fd,GenInfo#general_info{system_vsn=val(Fd)});
"Compiled" ->
get_general_info(Fd,GenInfo#general_info{compile_time=val(Fd)});
"Taints" ->
diff --git a/lib/observer/src/etop.erl b/lib/observer/src/etop.erl
index fcb900960b..925f4456bb 100644
--- a/lib/observer/src/etop.erl
+++ b/lib/observer/src/etop.erl
@@ -23,7 +23,7 @@
-export([start/0, start/1, config/2, stop/0, dump/1, help/0]).
%% Internal
-export([update/1]).
--export([loadinfo/1, meminfo/2, getopt/2]).
+-export([loadinfo/2, meminfo/2, getopt/2]).
-include("etop.hrl").
-include("etop_defs.hrl").
@@ -319,18 +319,18 @@ output(graphical) -> exit({deprecated, "Use observer instead"});
output(text) -> etop_txt.
-loadinfo(SysI) ->
+loadinfo(SysI,Prev) ->
#etop_info{n_procs = Procs,
run_queue = RQ,
now = Now,
wall_clock = WC,
runtime = RT} = SysI,
- Cpu = calculate_cpu_utilization(WC,RT),
+ Cpu = calculate_cpu_utilization(WC,RT,Prev#etop_info.runtime),
Clock = io_lib:format("~2.2.0w:~2.2.0w:~2.2.0w",
tuple_to_list(element(2,calendar:now_to_datetime(Now)))),
{Cpu,Procs,RQ,Clock}.
-calculate_cpu_utilization({_,WC},{_,RT}) ->
+calculate_cpu_utilization({_,WC},{_,RT},_) ->
%% Old version of observer_backend, using statistics(wall_clock)
%% and statistics(runtime)
case {WC,RT} of
@@ -341,15 +341,23 @@ calculate_cpu_utilization({_,WC},{_,RT}) ->
_ ->
round(100*RT/WC)
end;
-calculate_cpu_utilization(_,undefined) ->
+calculate_cpu_utilization(_,undefined,_) ->
%% First time collecting - no cpu utilization has been measured
%% since scheduler_wall_time flag is not yet on
0;
-calculate_cpu_utilization(_,RTInfo) ->
+calculate_cpu_utilization(WC,RTInfo,undefined) ->
+ %% Second time collecting - RTInfo shows scheduler_wall_time since
+ %% flag was set to true. Faking previous values by setting
+ %% everything to zero.
+ ZeroRT = [{Id,0,0} || {Id,_,_} <- RTInfo],
+ calculate_cpu_utilization(WC,RTInfo,ZeroRT);
+calculate_cpu_utilization(_,RTInfo,PrevRTInfo) ->
%% New version of observer_backend, using statistics(scheduler_wall_time)
- Sum = lists:foldl(fun({_,A,T},{AAcc,TAcc}) -> {A+AAcc,T+TAcc} end,
+ Sum = lists:foldl(fun({{_, A0, T0}, {_, A1, T1}},{AAcc,TAcc}) ->
+ {(A1 - A0)+AAcc,(T1 - T0)+TAcc}
+ end,
{0,0},
- RTInfo),
+ lists:zip(PrevRTInfo,RTInfo)),
case Sum of
{0,0} ->
0;
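For reference, a standalone sketch of how total scheduler utilization is derived from two statistics(scheduler_wall_time) samples, mirroring the delta calculation added to etop above (module sched_util_demo is illustrative, not part of the patch):

    -module(sched_util_demo).
    -export([sample/1]).

    %% Take two scheduler_wall_time samples IntervalMs apart and return the
    %% total utilization in percent, as the patched etop code does per update.
    sample(IntervalMs) ->
        erlang:system_flag(scheduler_wall_time, true),
        S0 = lists:sort(erlang:statistics(scheduler_wall_time)),
        timer:sleep(IntervalMs),
        S1 = lists:sort(erlang:statistics(scheduler_wall_time)),
        {Active, Total} =
            lists:foldl(fun({{_, A0, T0}, {_, A1, T1}}, {AAcc, TAcc}) ->
                                {AAcc + (A1 - A0), TAcc + (T1 - T0)}
                        end, {0, 0}, lists:zip(S0, S1)),
        case Total of
            0 -> 0;
            _ -> round(100 * Active / Total)    % percent of time schedulers were busy
        end.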
diff --git a/lib/observer/src/etop_txt.erl b/lib/observer/src/etop_txt.erl
index 3b4c176478..6b8f9df24f 100644
--- a/lib/observer/src/etop_txt.erl
+++ b/lib/observer/src/etop_txt.erl
@@ -22,35 +22,35 @@
%%-compile(export_all).
-export([init/1,stop/1]).
--export([do_update/3]).
+-export([do_update/4]).
-include("etop.hrl").
-include("etop_defs.hrl").
--import(etop,[loadinfo/1,meminfo/2]).
+-import(etop,[loadinfo/2,meminfo/2]).
-define(PROCFORM,"~-15w~-20s~8w~8w~8w~8w ~-20s~n").
stop(Pid) -> Pid ! stop.
init(Config) ->
- loop(Config).
+ loop(#etop_info{},Config).
-loop(Config) ->
- Info = do_update(Config),
+loop(Prev,Config) ->
+ Info = do_update(Prev,Config),
receive
stop -> stopped;
- {dump,Fd} -> do_update(Fd,Info,Config), loop(Config);
- {config,_,Config1} -> loop(Config1)
- after Config#opts.intv -> loop(Config)
+ {dump,Fd} -> do_update(Fd,Info,Prev,Config), loop(Info,Config);
+ {config,_,Config1} -> loop(Info,Config1)
+ after Config#opts.intv -> loop(Info,Config)
end.
-do_update(Config) ->
+do_update(Prev,Config) ->
Info = etop:update(Config),
- do_update(standard_io,Info,Config).
+ do_update(standard_io,Info,Prev,Config).
-do_update(Fd,Info,Config) ->
- {Cpu,NProcs,RQ,Clock} = loadinfo(Info),
+do_update(Fd,Info,Prev,Config) ->
+ {Cpu,NProcs,RQ,Clock} = loadinfo(Info,Prev),
io:nl(Fd),
writedoubleline(Fd),
case Info#etop_info.memi of
diff --git a/lib/observer/src/observer_app_wx.erl b/lib/observer/src/observer_app_wx.erl
index 936b2783e2..80a41fdde9 100644
--- a/lib/observer/src/observer_app_wx.erl
+++ b/lib/observer/src/observer_app_wx.erl
@@ -191,8 +191,8 @@ handle_event(#wx{event=#wxMouse{type=Type, x=X0, y=Y0}},
end;
handle_event(#wx{event=#wxCommand{type=command_menu_selected}},
- State = #state{sel=undefined}) ->
- observer_lib:display_info_dialog("Select process first"),
+ State = #state{panel=Panel,sel=undefined}) ->
+ observer_lib:display_info_dialog(Panel,"Select process first"),
{noreply, State};
handle_event(#wx{id=?ID_PROC_INFO, event=#wxCommand{type=command_menu_selected}},
@@ -205,7 +205,7 @@ handle_event(#wx{id=?ID_PROC_MSG, event=#wxCommand{type=command_menu_selected}},
case observer_lib:user_term(Panel, "Enter message", "") of
cancel -> ok;
{ok, Term} -> Pid ! Term;
- {error, Error} -> observer_lib:display_info_dialog(Error)
+ {error, Error} -> observer_lib:display_info_dialog(Panel,Error)
end,
{noreply, State};
@@ -214,7 +214,7 @@ handle_event(#wx{id=?ID_PROC_KILL, event=#wxCommand{type=command_menu_selected}}
case observer_lib:user_term(Panel, "Enter Exit Reason", "kill") of
cancel -> ok;
{ok, Term} -> exit(Pid, Term);
- {error, Error} -> observer_lib:display_info_dialog(Error)
+ {error, Error} -> observer_lib:display_info_dialog(Panel,Error)
end,
{noreply, State};
diff --git a/lib/observer/src/observer_lib.erl b/lib/observer/src/observer_lib.erl
index 59a2f9f205..47844c1307 100644
--- a/lib/observer/src/observer_lib.erl
+++ b/lib/observer/src/observer_lib.erl
@@ -20,7 +20,7 @@
-module(observer_lib).
-export([get_wx_parent/1,
- display_info_dialog/1, display_yes_no_dialog/1,
+ display_info_dialog/2, display_yes_no_dialog/1,
display_progress_dialog/2, destroy_progress_dialog/0,
wait_for_progress/0, report_progress/1,
user_term/3, user_term_multiline/3,
@@ -105,10 +105,10 @@ setup_timer(Bool, {Timer, Old}) ->
timer:cancel(Timer),
setup_timer(Bool, {false, Old}).
-display_info_dialog(Str) ->
- display_info_dialog("",Str).
-display_info_dialog(Title,Str) ->
- Dlg = wxMessageDialog:new(wx:null(), Str, [{caption,Title}]),
+display_info_dialog(Parent,Str) ->
+ display_info_dialog(Parent,"",Str).
+display_info_dialog(Parent,Title,Str) ->
+ Dlg = wxMessageDialog:new(Parent, Str, [{caption,Title}]),
wxMessageDialog:showModal(Dlg),
wxMessageDialog:destroy(Dlg),
ok.
@@ -461,14 +461,16 @@ create_box(Parent, Data) ->
link_entry(Panel,Value);
_ ->
Value = to_str(Value0),
- case length(Value) > 100 of
- true ->
- Shown = lists:sublist(Value, 80),
+ case string:sub_word(lists:sublist(Value, 80),1,$\n) of
+ Value ->
+ %% Short string, no newlines - show all
+ wxStaticText:new(Panel, ?wxID_ANY, Value);
+ Shown ->
+ %% Long or with newlines,
+ %% use tooltip to show all
TCtrl = wxStaticText:new(Panel, ?wxID_ANY, [Shown,"..."]),
wxWindow:setToolTip(TCtrl,wxToolTip:new(Value)),
- TCtrl;
- false ->
- wxStaticText:new(Panel, ?wxID_ANY, Value)
+ TCtrl
end
end,
wxSizer:add(Line, 10, 0), % space of size 10 horisontally
@@ -722,7 +724,7 @@ progress_loop(Title,PD,Caller) ->
if is_list(Reason) -> Reason;
true -> file:format_error(Reason)
end,
- display_info_dialog("Crashdump Viewer Error",FailMsg),
+ display_info_dialog(PD,"Crashdump Viewer Error",FailMsg),
Caller ! error,
unregister(?progress_handler),
unlink(Caller);
diff --git a/lib/observer/src/observer_port_wx.erl b/lib/observer/src/observer_port_wx.erl
index 53ba3fa607..c21d2705c0 100644
--- a/lib/observer/src/observer_port_wx.erl
+++ b/lib/observer/src/observer_port_wx.erl
@@ -267,10 +267,19 @@ handle_cast(Event, _State) ->
error({unhandled_cast, Event}).
handle_info({portinfo_open, PortIdStr},
- State = #state{grid=Grid, ports=Ports, open_wins=Opened}) ->
- Port = lists:keyfind(PortIdStr,#port.id_str,Ports),
- NewOpened = display_port_info(Grid, Port, Opened),
- {noreply, State#state{open_wins = NewOpened}};
+ State = #state{node=Node, grid=Grid, opt=Opt, open_wins=Opened}) ->
+ Ports0 = get_ports(Node),
+ Ports = update_grid(Grid, Opt, Ports0),
+ Port = lists:keyfind(PortIdStr, #port.id_str, Ports),
+ NewOpened =
+ case Port of
+ false ->
+ self() ! {error,"No such port: " ++ PortIdStr},
+ Opened;
+ _ ->
+ display_port_info(Grid, Port, Opened)
+ end,
+ {noreply, State#state{ports=Ports, open_wins=NewOpened}};
handle_info(refresh_interval, State = #state{node=Node, grid=Grid, opt=Opt,
ports=OldPorts}) ->
@@ -296,8 +305,9 @@ handle_info(not_active, State = #state{timer = Timer0}) ->
Timer = observer_lib:stop_timer(Timer0),
{noreply, State#state{timer=Timer}};
-handle_info({error, Error}, State) ->
- handle_error(Error),
+handle_info({error, Error}, #state{panel=Panel} = State) ->
+ Str = io_lib:format("ERROR: ~s~n",[Error]),
+ observer_lib:display_info_dialog(Panel, Str),
{noreply, State};
handle_info(_Event, State) ->
@@ -501,11 +511,6 @@ filter_monitor_info() ->
[Pid || {process, Pid} <- Ms]
end.
-
-handle_error(Foo) ->
- Str = io_lib:format("ERROR: ~s~n",[Foo]),
- observer_lib:display_info_dialog(Str).
-
update_grid(Grid, Opt, Ports) ->
wx:batch(fun() -> update_grid2(Grid, Opt, Ports) end).
update_grid2(Grid, #opt{sort_key=Sort,sort_incr=Dir}, Ports) ->
diff --git a/lib/observer/src/observer_pro_wx.erl b/lib/observer/src/observer_pro_wx.erl
index ee6829b847..f07b9e295a 100644
--- a/lib/observer/src/observer_pro_wx.erl
+++ b/lib/observer/src/observer_pro_wx.erl
@@ -511,7 +511,13 @@ table_holder(#holder{info=Info, attrs=Attrs,
table_holder(S0);
{dump, Fd} ->
EtopInfo = (S0#holder.etop)#etop_info{procinfo=array:to_list(Info)},
- etop_txt:do_update(Fd, EtopInfo, #opts{node=Node}),
+ %% The empty #etop_info{} below is a dummy previous info
+ %% value. It is used by etop to calculate the scheduler
+ %% utilization since last update. When dumping to file,
+ %% there is no previous measurement to use, so we just add
+ %% a dummy here, and the value shown will be since the
+ %% tool was started.
+ etop_txt:do_update(Fd, EtopInfo, #etop_info{}, #opts{node=Node}),
file:close(Fd),
table_holder(S0);
stop ->
diff --git a/lib/observer/src/observer_procinfo.erl b/lib/observer/src/observer_procinfo.erl
index c13b164ff9..21eb9facc5 100644
--- a/lib/observer/src/observer_procinfo.erl
+++ b/lib/observer/src/observer_procinfo.erl
@@ -92,7 +92,7 @@ init([Pid, ParentFrame, Parent]) ->
observer_wx:return_to_localnode(ParentFrame, node(Pid)),
{stop, badrpc};
process_undefined ->
- observer_lib:display_info_dialog("No such alive process"),
+ observer_lib:display_info_dialog(ParentFrame,"No such alive process"),
{stop, normal}
end.
diff --git a/lib/observer/src/observer_tv_wx.erl b/lib/observer/src/observer_tv_wx.erl
index 968a7620aa..4356cb890c 100644
--- a/lib/observer/src/observer_tv_wx.erl
+++ b/lib/observer/src/observer_tv_wx.erl
@@ -238,8 +238,9 @@ handle_info(not_active, State = #state{timer = Timer0}) ->
Timer = observer_lib:stop_timer(Timer0),
{noreply, State#state{timer=Timer}};
-handle_info({error, Error}, #state{opt=Opt}=State) ->
- handle_error(Error),
+handle_info({error, Error}, #state{panel=Panel,opt=Opt}=State) ->
+ Str = io_lib:format("ERROR: ~s~n",[Error]),
+ observer_lib:display_info_dialog(Panel,Str),
case Opt#opt.type of
mnesia -> wxMenuBar:check(observer_wx:get_menubar(), ?ID_ETS, true);
_ -> ok
@@ -365,10 +366,6 @@ list_to_strings([A]) -> integer_to_list(A);
list_to_strings([A|B]) ->
integer_to_list(A) ++ " ," ++ list_to_strings(B).
-handle_error(Foo) ->
- Str = io_lib:format("ERROR: ~s~n",[Foo]),
- observer_lib:display_info_dialog(Str).
-
update_grid(Grid, Opt, Tables) ->
wx:batch(fun() -> update_grid2(Grid, Opt, Tables) end).
update_grid2(Grid, #opt{sort_key=Sort,sort_incr=Dir}, Tables) ->
diff --git a/lib/observer/src/observer_wx.erl b/lib/observer/src/observer_wx.erl
index 5732c12006..3031a1f90d 100644
--- a/lib/observer/src/observer_wx.erl
+++ b/lib/observer/src/observer_wx.erl
@@ -467,10 +467,10 @@ handle_info(_Info, State) ->
stop_servers(#state{node=Node, log=LogOn, sys_panel=Sys, pro_panel=Procs, tv_panel=TVs,
trace_panel=Trace, app_panel=Apps, perf_panel=Perfs,
- allc_panel=Alloc} = _State) ->
+ allc_panel=Alloc, port_panel=Ports} = _State) ->
LogOn andalso rpc:block_call(Node, rb, stop, []),
Me = self(),
- Tabs = [Sys, Procs, TVs, Trace, Apps, Perfs, Alloc],
+ Tabs = [Sys, Procs, Ports, TVs, Trace, Apps, Perfs, Alloc],
Stop = fun() ->
try
_ = [wx_object:stop(Panel) || Panel <- Tabs],
@@ -580,9 +580,10 @@ get_active_pid(#state{notebook=Notebook, pro_panel=Pro, sys_panel=Sys,
pid2panel(Pid, #state{pro_panel=Pro, sys_panel=Sys,
tv_panel=Tv, trace_panel=Trace, app_panel=App,
- perf_panel=Perf, allc_panel=Alloc}) ->
+ perf_panel=Perf, allc_panel=Alloc, port_panel=Port}) ->
case Pid of
Pro -> "Processes";
+ Port -> "Ports";
Sys -> "System";
Tv -> "Table Viewer" ;
Trace -> ?TRACE_STR;
diff --git a/lib/observer/test/observer_SUITE.erl b/lib/observer/test/observer_SUITE.erl
index 4c882ad951..b5fb027878 100644
--- a/lib/observer/test/observer_SUITE.erl
+++ b/lib/observer/test/observer_SUITE.erl
@@ -34,7 +34,8 @@
%% Test cases
-export([app_file/1, appup_file/1,
- basic/1, process_win/1, table_win/1
+ basic/1, process_win/1, table_win/1,
+ port_win_when_tab_not_initiated/1
]).
%% Default timetrap timeout (set in init_per_testcase)
@@ -49,7 +50,8 @@ groups() ->
[{gui, [],
[basic,
process_win,
- table_win
+ table_win,
+ port_win_when_tab_not_initiated
]
}].
@@ -299,6 +301,17 @@ table_win(Config) when is_list(Config) ->
observer:stop(),
ok.
+%% Test PR-1296/OTP-14151
+%% Clicking a link to a port before the port tab has been activated for
+%% the first time used to crash observer.
+port_win_when_tab_not_initiated(_Config) ->
+ {ok,Port} = gen_tcp:listen(0,[]),
+ ok = observer:start(),
+ _Notebook = setup_whitebox_testing(),
+ observer ! {open_link,erlang:port_to_list(Port)},
+ timer:sleep(1000),
+ observer:stop(),
+ ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
diff --git a/lib/os_mon/src/memsup.erl b/lib/os_mon/src/memsup.erl
index 4729d090f8..0a9a883390 100644
--- a/lib/os_mon/src/memsup.erl
+++ b/lib/os_mon/src/memsup.erl
@@ -701,6 +701,7 @@ get_os_wordsize_with_uname() ->
"sparc64" -> 64;
"amd64" -> 64;
"ppc64" -> 64;
+ "s390x" -> 64;
_ -> 32
end.
diff --git a/lib/public_key/doc/src/public_key.xml b/lib/public_key/doc/src/public_key.xml
index c503230d70..37aa05e0fd 100644
--- a/lib/public_key/doc/src/public_key.xml
+++ b/lib/public_key/doc/src/public_key.xml
@@ -757,6 +757,39 @@ fun(#'DistributionPoint'{}, #'CertificateList'{},
</func>
<func>
+ <name>pkix_verify_hostname(Cert, ReferenceIDs) -> boolean()</name>
+ <name>pkix_verify_hostname(Cert, ReferenceIDs, Opts) -> boolean()</name>
+ <fsummary>Verifies that a PKIX X.509 certificate <i>presented identifier</i> (e.g. a hostname) is
+ an expected one.</fsummary>
+ <type>
+ <v>Cert = der_encoded() | #'OTPCertificate'{} </v>
+ <v>ReferenceIDs = [ RefID ]</v>
+ <v>RefID = {IdType,string()}</v>
+ <v>IdType = dns_id | srv_id | uri_id</v>
+ <v>Opts = [ PvhOpt() ]</v>
+ <v>PvhOpt = MatchOpt | FailCallBackOpt | FqdnExtractOpt</v>
+ <v>MatchOpt = {match_fun, fun(RefId | FQDN::string(), PresentedID) -> boolean() | default}</v>
+ <v>PresentedID = {dNSName,string()} | {uniformResourceIdentifier,string()}</v>
+ <v>FailCallBackOpt = {fail_callback, fun(#'OTPCertificate'{}) -> boolean()}</v>
+ <v>FqdnExtractOpt = {fqdn_fun, fun(RefID) -> FQDN::string() | default | undefined}</v>
+ </type>
+ <desc>
+ <p>This function checks that the <i>Presented Identifier</i> (e.g. a hostname) in a peer certificate
+ conforms with the Expected Identifier that the client wants to connect to.
+ This function is intended to be added as an extra client check of the peer certificate when performing
+ <seealso marker="public_key:public_key#pkix_path_validation-3">public_key:pkix_path_validation/3</seealso>.
+ </p>
+ <p>See <url href="https://tools.ietf.org/html/rfc6125">RFC 6125</url>
+ for detailed information about hostname verification.
+ The <seealso marker="using_public_key#verify_hostname">User's Manual</seealso>
+ and
+ <seealso marker="using_public_key#verify_hostname_examples">code examples</seealso>
+ describe this function in more detail.
+ </p>
+ </desc>
+ </func>
+
+ <func>
<name>sign(Msg, DigestType, Key) -> binary()</name>
<fsummary>Creates a digital signature.</fsummary>
<type>
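A possible usage sketch of the new pkix_verify_hostname API (module verify_peer_demo and the PEM file argument are placeholders, not part of the patch):

    -module(verify_peer_demo).
    -export([check/1]).

    %% Decode a PEM-encoded peer certificate and verify that it is valid for
    %% the host the client intended to reach.
    check(PemFile) ->
        {ok, Pem} = file:read_file(PemFile),
        [{'Certificate', Der, not_encrypted} | _] = public_key:pem_decode(Pem),
        OtpCert = public_key:pkix_decode_cert(Der, otp),
        public_key:pkix_verify_hostname(OtpCert,
                                        [{dns_id, "www.example.net"},
                                         {uri_id, "https://www.example.net"}]).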
diff --git a/lib/public_key/doc/src/using_public_key.xml b/lib/public_key/doc/src/using_public_key.xml
index e3a1eed4be..417d479da3 100644
--- a/lib/public_key/doc/src/using_public_key.xml
+++ b/lib/public_key/doc/src/using_public_key.xml
@@ -417,6 +417,259 @@ true = public_key:verify(Digest, none, Signature, PublicKey),</code>
</section>
+ <section>
+ <marker id="verify_hostname"></marker>
+ <title>Verifying a certificate hostname</title>
+ <section>
+ <title>Background</title>
+ <p>When a client checks a server certificate there are a number of checks available, such as
+ checking that the certificate is not revoked, forged, or out-of-date.
+ </p>
+ <p>There are however attacks that are not detected by those checks. Suppose an attacker has
+ succeeded with DNS spoofing. Then the client could believe it is connecting to one host but
+ ends up at another, malicious one. Even so, that host could have a perfectly valid
+ certificate! The certificate has a valid signature, it is not revoked, the certificate chain
+ is not faked, it has a trusted root, and so on.
+ </p>
+ <p>To detect that the server is not the intended one, the client must additionally perform
+ a <i>hostname verification</i>. This procedure is described in
+ <url href="https://tools.ietf.org/html/rfc6125">RFC 6125</url>. The idea is that the certificate
+ lists the hostnames it is valid for. This is checked by the certificate issuer when
+ the certificate is signed. So if the certificate is issued by a trusted root, the client
+ can trust the hostnames signed in it.
+ </p>
+ <p>There is a default hostname matching procedure defined in
+ <url href="https://tools.ietf.org/html/rfc6125#section-6">RFC 6125, section 6</url>
+ as well as protocol dependent variations defined in
+ <url href="https://tools.ietf.org/html/rfc6125#appendix-B">RFC 6125 appendix B</url>.
+ The default procedure is implemented in
+ <seealso marker="public_key:public_key#pkix_verify_hostname-2">public_key:pkix_verify_hostname/2,3</seealso>.
+ It is possible for a client to hook in modified rules using the options list.
+ </p>
+ <p>Some terminology is needed: the certificate presents hostname(s) on which it is valid.
+ Those are called <i>Presented IDs</i>. The hostname(s) the client believes it connects to
+ are called <i>Reference IDs</i>. The matching rules aim to verify that at least one of the
+ Reference IDs matches one of the Presented IDs. If not, the verification fails.
+ </p>
+ <p>The IDs contain normal fully qualified domain names, e.g. <c>foo.example.com</c>,
+ but IP addresses are not recommended. The RFC describes why this is not recommended, as well
+ as security considerations about how to acquire the Reference IDs.
+ </p>
+ <p>Internationalized domain names are not supported.
+ </p>
+ </section>
+ <section>
+ <title>The verification process</title>
+ <p>Traditionally the Presented IDs were found in the <c>Subject</c> certificate field as <c>CN</c>
+ names. This is still quite common. When printing a certificate they show up as:
+ </p>
+ <code>
+ $ openssl x509 -text &lt; cert.pem
+ ...
+ Subject: C=SE, CN=example.com, CN=*.example.com, O=erlang.org
+ ...
+ </code>
+ <p>The example <c>Subject</c> field has one C, two CN and one O part. It is only the
+ CN (Common Name) that is used by hostname verification. The other two (C and O) are not used
+ here, even when they contain a domain name like the O part. The C and O parts are defined
+ elsewhere and meaningful only for other functions.
+ </p>
+ <p>In the example the Presented IDs are <c>example.com</c> as well as hostnames matching
+ <c>*.example.com</c>. For example, <c>foo.example.com</c> and <c>bar.example.com</c> both
+ match, but <c>foo.bar.example.com</c> does not. The name <c>erlang.org</c> does not match,
+ since it is not a CN.
+ </p>
+ <p>In the case where the Presented IDs are fetched from the <c>Subject</c> certificate field, the
+ names may contain wildcard characters. The function handles this as defined in
+ <url href="https://tools.ietf.org/html/rfc6125#section-6.4.3">chapter 6.4.3 in RFC 6125</url>.
+ </p>
+ <p>There may be at most one wildcard character, and it must be in the first label, for example:
+ <c>*.example.com</c>. This matches <c>foo.example.com</c> but neither <c>example.com</c> nor
+ <c>foo.bar.example.com</c>.
+ </p>
+ <p>There may be label characters before and/or after the wildcard. For example:
+ <c>a*d.example.com</c> matches <c>abcd.example.com</c> and <c>ad.example.com</c>,
+ but not <c>ab.cd.example.com</c>.
+ </p>
+ <p>In the previous example there is no indication of which protocols are expected. So a client
+ has no indication of whether it is a web server, an LDAP server or maybe a SIP server it is
+ connected to.
+ There are fields in the certificate that can indicate this. To be more exact, the RFC
+ introduces the usage of the <c>X509v3 Subject Alternative Name</c> in the <c>X509v3 extensions</c>
+ field:
+ </p>
+ <code>
+ $ openssl x509 -text &lt; cert.pem
+ ...
+ X509v3 extensions:
+ X509v3 Subject Alternative Name:
+ DNS:kb.example.org, URI:https://www.example.org
+ ...
+ </code>
+ <p>Here <c>kb.example.org</c> serves any protocol while <c>www.example.org</c> presents a secure
+ web server.
+ </p>
+
+ <p>The next example has both <c>Subject</c> and <c>Subject Alternative Name</c> present:</p>
+ <code>
+ $ openssl x509 -text &lt; cert.pem
+ ...
+ Subject: C=SE, CN=example.com, CN=*.example.com, O=erlang.org
+ ...
+ X509v3 extensions:
+ X509v3 Subject Alternative Name:
+ DNS:kb.example.org, URI:https://www.example.org
+ ...
+ </code>
+ <p>The RFC states that if a certificate defines Presented IDs in a <c>Subject Alternative Name</c>
+ field, the <c>Subject</c> field MUST NOT be used for hostname checking, even if it contains
+ valid CN names.
+ Therefore only <c>kb.example.org</c> and <c>https://www.example.org</c> match. The match fails
+ both for <c>example.com</c> and <c>foo.example.com</c> because they are in the <c>Subject</c>
+ field, which is not checked because the <c>Subject Alternative Name</c> field is present.
+ </p>
+ </section>
+
+ <section>
+ <marker id="verify_hostname_examples"></marker>
+ <title>Function call examples</title>
+ <note>
+ <p>Other applications like ssl/tls or https might have options that are passed
+ down to <c>public_key:pkix_verify_hostname</c>. You will probably not
+ have to call it directly.</p>
+ </note>
+ <p>Suppose our client expects to connect to the web server https://www.example.net. This
+ URI is therefore the Reference ID of the client.
+ The call will be:
+ </p>
+ <code>
+ public_key:pkix_verify_hostname(CertFromHost,
+ [{uri_id, "https://www.example.net"}
+ ]).
+ </code>
+ <p>The call will return <c>true</c> or <c>false</c> depending on the check. The caller
+ does not need to handle the matching rules in the RFC. The matching will proceed as follows:
+ </p>
+ <list>
+ <item>If there is a <c>Subject Alternative Name</c> field, the <c>{uri_id,string()}</c> in the
+ function call will be compared to any
+ <c>{uniformResourceIdentifier,string()}</c> in the Certificate field.
+ If the two <c>strings()</c> are equal (case insensitive), there is a match.
+ The same applies for any <c>{dns_id,string()}</c> in the call which is compared
+ with all <c>{dNSName,string()}</c> in the Certificate field.
+ </item>
+ <item>If there is NO <c>Subject Alternative Name</c> field, the <c>Subject</c> field will be
+ checked. All <c>CN</c> names will be compared to all hostnames <i>extracted</i> from
+ <c>{uri_id,string()}</c> and from <c>{dns_id,string()}</c>.
+ </item>
+ </list>
+ </section>
+ <section>
+ <title>Extending the search mechanism</title>
+ <p>The caller can use its own extraction and matching rules. This is done with the two options
+ <c>fqdn_fun</c> and <c>match_fun</c>.
+ </p>
+ <section>
+ <title>Hostname extraction</title>
+ <p>The <c>fqdn_fun</c> extracts hostnames (Fully Qualified Domain Names) from uri_id
+ or other ReferenceIDs that are not pre-defined in the public_key function.
+ Suppose you have some URI with a very special protocol-part:
+ <c>myspecial://example.com</c>. Since this is a non-standard URI, there will be no hostname
+ extracted for matching CN-names in the <c>Subject</c>.</p>
+ <p>To "teach" the function how to extract, you can give a fun which replaces the default
+ extraction function.
+ The <c>fqdn_fun</c> takes one argument and returns
+ either a <c>string()</c> to be matched to each CN-name or the atom <c>default</c> which will invoke
+ the default fqdn extraction function. The return value <c>undefined</c> removes the current
+ URI from the fqdn extraction.
+ </p>
+ <code>
+ ...
+ Extract = fun({uri_id, "myspecial://"++HostName}) -> HostName;
+ (_Else) -> default
+ end,
+ ...
+ public_key:pkix_verify_hostname(CertFromHost, RefIDs,
+ [{fqdn_fun, Extract}])
+ ...
+ </code>
+ </section>
+ <section>
+ <title>Re-defining the match operations</title>
+ <p>The default matching handles dns_id and uri_id. For a uri_id the value is tested for
+ equality with a value from the <c>Subject Alternative Name</c>. If some other kind of matching
+ is needed, use the <c>match_fun</c> option.
+ </p>
+ <p>The <c>match_fun</c> takes two arguments and returns either <c>true</c>,
+ <c>false</c> or <c>default</c>. The value <c>default</c> will invoke the default
+ match function.
+ </p>
+ <code>
+ ...
+ Match = fun({uri_id,"myspecial://"++A},
+ {uniformResourceIdentifier,"myspecial://"++B}) ->
+ my_match(A,B);
+ (_RefID, _PresentedID) ->
+ default
+ end,
+ ...
+ public_key:pkix_verify_hostname(CertFromHost, RefIDs,
+ [{match_fun, Match}]),
+ ...
+ </code>
+ <p>In case of a match operation between a ReferenceID and a CN value from the <c>Subject</c>
+ field, the first argument to the fun is the extracted hostname from the ReferenceID, and the
+ second argument is the tuple <c>{cn, string()}</c> taken from the <c>Subject</c> field. That
+ makes it possible to have separate matching rules for Presented IDs from the <c>Subject</c>
+ field and from the <c>Subject Alternative Name</c> field.
+ </p>
+ <p>The default matching transforms the ASCII values in strings to lowercase before comparing.
+ The <c>match_fun</c> is however called without any transformation applied to the strings. The
+ reason is to enable the user to do unforeseen handling of the strings where the original format
+ is needed.
+ </p>
+ </section>
+ </section>
+ <section>
+ <title>"Pinning" a Certificate</title>
+ <p>The <url href="https://tools.ietf.org/html/rfc6125">RFC 6125</url> defines <i>pinning</i>
+ as:</p>
+ <quote>
+ <p>"The act of establishing a cached name association between
+ the application service's certificate and one of the client's
+ reference identifiers, despite the fact that none of the presented
+ identifiers matches the given reference identifier. ..."
+ </p>
+ </quote>
+ <p>The purpose is to have a mechanism for a human to accept an otherwise faulty Certificate.
+ In a web browser, for example, you could get a question like:</p>
+ <quote>
+ <p>"Warning: you wanted to visit the site www.example.com,
+ but the certificate is for shop.example.com. Accept anyway (yes/no)?"
+ </p>
+ </quote>
+ <p>This could be accomplished with the option <c>fail_callback</c> which will
+ be called if the hostname verification fails:
+ </p>
+ <code>
+ -include_lib("public_key/include/public_key.hrl"). % Record def
+ ...
+ Fail = fun(#'OTPCertificate'{}=C) ->
+ case in_my_cache(C) orelse my_accept(C) of
+ true ->
+ enter_my_cache(C),
+ true;
+ false ->
+ false
+ end
+ end,
+ ...
+ public_key:pkix_verify_hostname(CertFromHost, RefIDs,
+ [{fail_callback, Fail}]),
+ ...
+ </code>
+ </section>
+ </section>
+
<section>
<title>SSH Files</title>
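To make the wildcard rules described above concrete, an illustration-only sketch of matching with one '*' in the leftmost label, possibly with surrounding characters (module wildcard_demo is hypothetical and this is not the public_key implementation; case normalization is omitted):

    -module(wildcard_demo).
    -export([match/2]).

    %% match("foo.example.com", "*.example.com")       -> true
    %% match("foo.bar.example.com", "*.example.com")   -> false
    %% match("abcd.example.com", "a*d.example.com")    -> true
    match(Name, Pattern) ->
        [N1 | Nrest] = string:tokens(Name, "."),
        [P1 | Prest] = string:tokens(Pattern, "."),
        Nrest =:= Prest andalso label_match(N1, P1).

    label_match(Label, Pattern) ->
        case string:chr(Pattern, $*) of
            0 ->
                Label =:= Pattern;                     % no wildcard: exact match
            I ->
                Prefix = string:substr(Pattern, 1, I - 1),
                Suffix = string:substr(Pattern, I + 1),
                lists:prefix(Prefix, Label) andalso
                    lists:suffix(Suffix, Label) andalso
                    length(Label) >= length(Prefix) + length(Suffix)
        end.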
diff --git a/lib/public_key/src/public_key.erl b/lib/public_key/src/public_key.erl
index 05c09f8996..402f514803 100644
--- a/lib/public_key/src/public_key.erl
+++ b/lib/public_key/src/public_key.erl
@@ -48,6 +48,7 @@
pkix_issuer_id/2,
pkix_normalize_name/1,
pkix_path_validation/3,
+ pkix_verify_hostname/2, pkix_verify_hostname/3,
ssh_decode/2, ssh_encode/2,
ssh_hostkey_fingerprint/1, ssh_hostkey_fingerprint/2,
ssh_curvename2oid/1, oid2ssh_curvename/1,
@@ -763,6 +764,76 @@ pkix_crls_validate(OtpCert, DPAndCRLs0, Options) ->
pkix_crls_validate(OtpCert, DPAndCRLs, DPAndCRLs,
Options, pubkey_crl:init_revokation_state()).
+%--------------------------------------------------------------------
+-spec pkix_verify_hostname(Cert :: #'OTPCertificate'{} | binary(),
+ ReferenceIDs :: [{uri_id | dns_id | oid(), string()}]) -> boolean().
+
+-spec pkix_verify_hostname(Cert :: #'OTPCertificate'{} | binary(),
+ ReferenceIDs :: [{uri_id | dns_id | oid(), string()}],
+ Options :: proplists:proplist()) -> boolean().
+
+%% Description: Validates a hostname according to RFC 6125
+%%--------------------------------------------------------------------
+pkix_verify_hostname(Cert, ReferenceIDs) ->
+ pkix_verify_hostname(Cert, ReferenceIDs, []).
+
+pkix_verify_hostname(BinCert, ReferenceIDs, Options) when is_binary(BinCert) ->
+ pkix_verify_hostname(pkix_decode_cert(BinCert,otp), ReferenceIDs, Options);
+
+pkix_verify_hostname(Cert = #'OTPCertificate'{tbsCertificate = TbsCert}, ReferenceIDs0, Opts) ->
+ MatchFun = proplists:get_value(match_fun, Opts, undefined),
+ FailCB = proplists:get_value(fail_callback, Opts, fun(_Cert) -> false end),
+ FqdnFun = proplists:get_value(fqdn_fun, Opts, fun verify_hostname_extract_fqdn_default/1),
+
+ ReferenceIDs = [{T,to_string(V)} || {T,V} <- ReferenceIDs0],
+ PresentedIDs =
+ try lists:keyfind(?'id-ce-subjectAltName',
+ #'Extension'.extnID,
+ TbsCert#'OTPTBSCertificate'.extensions)
+ of
+ #'Extension'{extnValue = ExtVals} ->
+ [{T,to_string(V)} || {T,V} <- ExtVals];
+ false ->
+ []
+ catch
+ _:_ -> []
+ end,
+ %% PresentedIDs example: [{dNSName,"ewstest.ericsson.com"}, {dNSName,"www.ericsson.com"}]
+ case PresentedIDs of
+ [] ->
+ %% Fallback to CN-ids [rfc6125, ch6]
+ case TbsCert#'OTPTBSCertificate'.subject of
+ {rdnSequence,RDNseq} ->
+ PresentedCNs =
+ [{cn, to_string(V)}
+ || ATVs <- RDNseq, % RDNseq is list-of-lists
+ #'AttributeTypeAndValue'{type = ?'id-at-commonName',
+ value = {_T,V}} <- ATVs
+ % _T = kind of string (teletexString etc)
+ ],
+ %% Example of PresentedCNs: [{cn,"www.ericsson.se"}]
+ %% match ReferenceIDs to PresentedCNs
+ verify_hostname_match_loop(verify_hostname_fqnds(ReferenceIDs, FqdnFun),
+ PresentedCNs,
+ MatchFun, FailCB, Cert);
+
+ _ ->
+ false
+ end;
+ _ ->
+ %% match ReferenceIDs to PresentedIDs
+ case verify_hostname_match_loop(ReferenceIDs, PresentedIDs,
+ MatchFun, FailCB, Cert) of
+ false ->
+ %% Try to extract DNS-IDs from URIs etc
+ DNS_ReferenceIDs =
+ [{dns_id,X} || X <- verify_hostname_fqnds(ReferenceIDs, FqdnFun)],
+ verify_hostname_match_loop(DNS_ReferenceIDs, PresentedIDs,
+ MatchFun, FailCB, Cert);
+ true ->
+ true
+ end
+ end.
%%--------------------------------------------------------------------
-spec ssh_decode(binary(), public_key | ssh_file()) -> [{public_key(), Attributes::list()}]
@@ -1200,3 +1271,96 @@ ascii_to_lower(String) ->
end)>>
||
<<C>> <= iolist_to_binary(String) >>.
+
+%%%----------------------------------------------------------------
+%%% pkix_verify_hostname help functions
+verify_hostname_extract_fqdn_default({dns_id,S}) ->
+ S;
+verify_hostname_extract_fqdn_default({uri_id,URI}) ->
+ {ok,{https,_,Host,_,_,_}} = http_uri:parse(URI),
+ Host.
+
+
+verify_hostname_fqnds(L, FqdnFun) ->
+ [E || E0 <- L,
+ E <- [try case FqdnFun(E0) of
+ default -> verify_hostname_extract_fqdn_default(E0);
+ undefined -> undefined; % will make the "is_list(E)" test fail
+ Other -> Other
+ end
+ catch _:_-> undefined % will make the "is_list(E)" test fail
+ end],
+ is_list(E),
+ E =/= "",
+ {error,einval} == inet:parse_address(E)
+ ].
+
+
+-define(srvName_OID, {1,3,6,1,4,1,434,2,2,1,37,0}).
+
+verify_hostname_match_default(Ref, Pres) ->
+ verify_hostname_match_default0(to_lower_ascii(Ref), to_lower_ascii(Pres)).
+
+verify_hostname_match_default0(FQDN=[_|_], {cn,FQDN}) ->
+ not lists:member($*, FQDN);
+verify_hostname_match_default0(FQDN=[_|_], {cn,Name=[_|_]}) ->
+ [F1|Fs] = string:tokens(FQDN, "."),
+ [N1|Ns] = string:tokens(Name, "."),
+ match_wild(F1,N1) andalso Fs==Ns;
+verify_hostname_match_default0({dns_id,R}, {dNSName,P}) ->
+ R==P;
+verify_hostname_match_default0({uri_id,R}, {uniformResourceIdentifier,P}) ->
+ R==P;
+verify_hostname_match_default0({srv_id,R}, {T,P}) when T == srvName ;
+ T == ?srvName_OID ->
+ R==P;
+verify_hostname_match_default0(_, _) ->
+ false.
+
+
+match_wild(A, [$*|B]) -> match_wild_suffixes(A, B);
+match_wild([C|A], [ C|B]) -> match_wild(A, B);
+match_wild([], []) -> true;
+match_wild(_, _) -> false.
+
+%% Match the parts after the only wildcard by comparing them from the end
+match_wild_suffixes(A, B) -> match_wild_sfx(lists:reverse(A), lists:reverse(B)).
+
+match_wild_sfx([$*|_], _) -> false; % Bad name (no wildcards allowed)
+match_wild_sfx(_, [$*|_]) -> false; % Bad pattern (no more wildcards allowed)
+match_wild_sfx([A|Ar], [A|Br]) -> match_wild_sfx(Ar, Br);
+match_wild_sfx(Ar, []) -> not lists:member($*, Ar); % Check for bad name (= wildcards)
+match_wild_sfx(_, _) -> false.
+
+
+verify_hostname_match_loop(Refs0, Pres0, undefined, FailCB, Cert) ->
+ Pres = lists:map(fun to_lower_ascii/1, Pres0),
+ Refs = lists:map(fun to_lower_ascii/1, Refs0),
+ lists:any(
+ fun(R) ->
+ lists:any(fun(P) ->
+ verify_hostname_match_default(R,P) orelse FailCB(Cert)
+ end, Pres)
+ end, Refs);
+verify_hostname_match_loop(Refs, Pres, MatchFun, FailCB, Cert) ->
+ lists:any(
+ fun(R) ->
+ lists:any(fun(P) ->
+ (case MatchFun(R,P) of
+ default -> verify_hostname_match_default(R,P);
+ Bool -> Bool
+ end) orelse FailCB(Cert)
+ end,
+ Pres)
+ end,
+ Refs).
+
+
+to_lower_ascii(S) when is_list(S) -> lists:map(fun to_lower_ascii/1, S);
+to_lower_ascii({T,S}) -> {T, to_lower_ascii(S)};
+to_lower_ascii(C) when $A =< C,C =< $Z -> C + ($a-$A);
+to_lower_ascii(C) -> C.
+
+to_string(S) when is_list(S) -> S;
+to_string(B) when is_binary(B) -> binary_to_list(B).
+
diff --git a/lib/public_key/test/public_key_SUITE.erl b/lib/public_key/test/public_key_SUITE.erl
index cd24819899..615ff32539 100644
--- a/lib/public_key/test/public_key_SUITE.erl
+++ b/lib/public_key/test/public_key_SUITE.erl
@@ -45,6 +45,9 @@ all() ->
{group, sign_verify},
pkix, pkix_countryname, pkix_emailaddress, pkix_path_validation,
pkix_iso_rsa_oid, pkix_iso_dsa_oid, pkix_crl, general_name,
+ pkix_verify_hostname_cn,
+ pkix_verify_hostname_subjAltName,
+ pkix_verify_hostname_options,
short_cert_issuer_hash, short_crl_issuer_hash,
ssh_hostkey_fingerprint_md5_implicit,
ssh_hostkey_fingerprint_md5,
@@ -814,6 +817,114 @@ pkix_path_validation(Config) when is_list(Config) ->
ok.
%%--------------------------------------------------------------------
+%% To generate the PEM file contents:
+%%
+%% openssl req -x509 -nodes -newkey rsa:1024 -keyout /dev/null -subj '/C=SE/CN=example.com/CN=*.foo.example.com/CN=a*b.bar.example.com/O=erlang.org' > public_key_SUITE_data/pkix_verify_hostname_cn.pem
+%%
+%% Note that the same pem-file is used in pkix_verify_hostname_options/1
+%%
+%% Subject: C=SE, CN=example.com, CN=*.foo.example.com, CN=a*b.bar.example.com, O=erlang.org
+%% extensions = no subjAltName
+
+pkix_verify_hostname_cn(Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ {ok,Bin} = file:read_file(filename:join(DataDir,"pkix_verify_hostname_cn.pem")),
+ Cert = public_key:pkix_decode_cert(element(2,hd(public_key:pem_decode(Bin))), otp),
+
+ %% Check that 1) only CNs are checked,
+ %% 2) an empty label does not match a wildcard and
+ %% 3) a wildcard does not match more than one label
+ false = public_key:pkix_verify_hostname(Cert, [{dns_id,"erlang.org"},
+ {dns_id,"foo.EXAMPLE.com"},
+ {dns_id,"b.a.foo.EXAMPLE.com"}]),
+
+ %% Check that a hostname is extracted from a https-uri and used for checking:
+ true = public_key:pkix_verify_hostname(Cert, [{uri_id,"HTTPS://EXAMPLE.com"}]),
+
+ %% Check wildcard matching one label:
+ true = public_key:pkix_verify_hostname(Cert, [{dns_id,"a.foo.EXAMPLE.com"}]),
+
+ %% Check wildcard with surrounding chars matches one label:
+ true = public_key:pkix_verify_hostname(Cert, [{dns_id,"accb.bar.EXAMPLE.com"}]),
+
+ %% Check that a wildcard with surrounding chars matches an empty string:
+ true = public_key:pkix_verify_hostname(Cert, [{uri_id,"https://ab.bar.EXAMPLE.com"}]).
+
+%%--------------------------------------------------------------------
+%% To generate the PEM file contents:
+%%
+%% openssl req -x509 -nodes -newkey rsa:1024 -keyout /dev/null -extensions SAN -config public_key_SUITE_data/verify_hostname.conf 2>/dev/null > public_key_SUITE_data/pkix_verify_hostname_subjAltName.pem
+%%
+%% Subject: C=SE, CN=example.com
+%% Subject Alternative Name: DNS:kb.example.org, URI:http://www.example.org, URI:https://wws.example.org
+
+pkix_verify_hostname_subjAltName(Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ {ok,Bin} = file:read_file(filename:join(DataDir,"pkix_verify_hostname_subjAltName.pem")),
+ Cert = public_key:pkix_decode_cert(element(2,hd(public_key:pem_decode(Bin))), otp),
+
+ %% Check that neither a uri nor dns hostname matches a CN if subjAltName is present:
+ false = public_key:pkix_verify_hostname(Cert, [{uri_id,"https://example.com"},
+ {dns_id,"example.com"}]),
+
+ %% Check that a uri_id matches a URI subjAltName:
+ true = public_key:pkix_verify_hostname(Cert, [{uri_id,"https://wws.example.org"}]),
+
+ %% Check that a dns_id does not match a URI subjAltName:
+ false = public_key:pkix_verify_hostname(Cert, [{dns_id,"www.example.org"},
+ {dns_id,"wws.example.org"}]),
+
+ %% Check that a dns_id matches a DNS subjAltName:
+ true = public_key:pkix_verify_hostname(Cert, [{dns_id,"kb.example.org"}]).
+
+%%--------------------------------------------------------------------
+%% Uses the pem-file for pkix_verify_hostname_cn
+%% Subject: C=SE, CN=example.com, CN=*.foo.example.com, CN=a*b.bar.example.com, O=erlang.org
+pkix_verify_hostname_options(Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ {ok,Bin} = file:read_file(filename:join(DataDir,"pkix_verify_hostname_cn.pem")),
+ Cert = public_key:pkix_decode_cert(element(2,hd(public_key:pem_decode(Bin))), otp),
+
+ %% Check that the fail_callback is called and is presented the correct certificate:
+ true = public_key:pkix_verify_hostname(Cert, [{dns_id,"erlang.org"}],
+ [{fail_callback,
+ fun(#'OTPCertificate'{}=C) when C==Cert ->
+ true; % To test the return value matters
+ (#'OTPCertificate'{}=C) ->
+ ct:log("~p:~p: Wrong cert:~n~p~nExpect~n~p",
+ [?MODULE, ?LINE, C, Cert]),
+ ct:fail("Wrong cert, see log");
+ (C) ->
+ ct:log("~p:~p: Bad cert: ~p",[?MODULE,?LINE,C]),
+ ct:fail("Bad cert, see log")
+ end}]),
+
+ %% Check the callback for user-provided match functions:
+ true = public_key:pkix_verify_hostname(Cert, [{dns_id,"very.wrong.domain"}],
+ [{match_fun,
+ fun("very.wrong.domain", {cn,"example.com"}) ->
+ true;
+ (_, _) ->
+ false
+ end}]),
+ false = public_key:pkix_verify_hostname(Cert, [{dns_id,"not.example.com"}],
+ [{match_fun, fun(_, _) -> default end}]),
+ true = public_key:pkix_verify_hostname(Cert, [{dns_id,"example.com"}],
+ [{match_fun, fun(_, _) -> default end}]),
+
+ %% Check the callback for user-provided fqdn extraction:
+ true = public_key:pkix_verify_hostname(Cert, [{uri_id,"some://very.wrong.domain"}],
+ [{fqdn_fun,
+ fun({uri_id, "some://very.wrong.domain"}) ->
+ "example.com";
+ (_) ->
+ ""
+ end}]),
+ true = public_key:pkix_verify_hostname(Cert, [{uri_id,"https://example.com"}],
+ [{fqdn_fun, fun(_) -> default end}]),
+ false = public_key:pkix_verify_hostname(Cert, [{uri_id,"some://very.wrong.domain"}]).
+
+%%--------------------------------------------------------------------
pkix_iso_rsa_oid() ->
[{doc, "Test workaround for supporting certs that use ISO oids"
" 1.3.14.3.2.29 instead of PKIX/PKCS oid"}].
diff --git a/lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_cn.pem b/lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_cn.pem
new file mode 100644
index 0000000000..9f7b428f9a
--- /dev/null
+++ b/lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_cn.pem
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICsjCCAhugAwIBAgIJAMCGx1ezaJFRMA0GCSqGSIb3DQEBCwUAMHIxCzAJBgNV
+BAYTAlNFMRQwEgYDVQQDDAtleGFtcGxlLmNvbTEaMBgGA1UEAwwRKi5mb28uZXhh
+bXBsZS5jb20xHDAaBgNVBAMME2EqYi5iYXIuZXhhbXBsZS5jb20xEzARBgNVBAoM
+CmVybGFuZy5vcmcwHhcNMTYxMjIwMTUwNDUyWhcNMTcwMTE5MTUwNDUyWjByMQsw
+CQYDVQQGEwJTRTEUMBIGA1UEAwwLZXhhbXBsZS5jb20xGjAYBgNVBAMMESouZm9v
+LmV4YW1wbGUuY29tMRwwGgYDVQQDDBNhKmIuYmFyLmV4YW1wbGUuY29tMRMwEQYD
+VQQKDAplcmxhbmcub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDVGJgZ
+defGucvMXf0RrEm6Hb18IfVUo9IV6swSP/kwAu/608ZIZdzlfp2pxC0e72a4E3WN
+4vrGxAr2wMMQOiyoy4qlAeLX27THJ6Q4Vl82fc6QuOJbScKIydSZ4KoB+luGlBu5
+b6xYh2pBbneKFpsecmK5rsWtTactjD4n1tKjUwIDAQABo1AwTjAdBgNVHQ4EFgQU
+OCtzidUeaDva7qp12T0CQrgfLW4wHwYDVR0jBBgwFoAUOCtzidUeaDva7qp12T0C
+QrgfLW4wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOBgQCAz+ComCMo9Qbu
+PHxG7pv3mQvoxrMFva/Asg4o9mW2mDyrk0DwI4zU8vMHbSRKSBYGm4TATXsQkDQT
+gJw/bxhISnhZZtPC7Yup8kJCkJ6S6EDLYrlzgsRqfeU6jWim3nbfaLyMi9dHFDMk
+HULnyNNW3qxTEKi8Wo2sCMej4l7KFg==
+-----END CERTIFICATE-----
diff --git a/lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_subjAltName.pem b/lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_subjAltName.pem
new file mode 100644
index 0000000000..83e1ad37b3
--- /dev/null
+++ b/lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_subjAltName.pem
@@ -0,0 +1,14 @@
+-----BEGIN CERTIFICATE-----
+MIICEjCCAXugAwIBAgIJANwliLph5EiAMA0GCSqGSIb3DQEBCwUAMCMxCzAJBgNV
+BAYTAlNFMRQwEgYDVQQDEwtleGFtcGxlLmNvbTAeFw0xNjEyMjAxNTEyMjRaFw0x
+NzAxMTkxNTEyMjRaMCMxCzAJBgNVBAYTAlNFMRQwEgYDVQQDEwtleGFtcGxlLmNv
+bTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAydstIN157w8QxkVaOl3wm81j
+fgZ8gqO3BXkECPF6bw5ewLlmePL6Qs4RypsaRe7cKJ9rHFlwhpdcYkxWSWEt2N7Z
+Ry3N4SjuU04ohWbYgy3ijTt7bJg7jOV1Dh56BnI4hwhQj0oNFizNZOeRRfEzdMnS
++uk03t/Qre2NS7KbwnUCAwEAAaNOMEwwSgYDVR0RBEMwQYIOa2IuZXhhbXBsZS5v
+cmeGFmh0dHA6Ly93d3cuZXhhbXBsZS5vcmeGF2h0dHBzOi8vd3dzLmV4YW1wbGUu
+b3JnMA0GCSqGSIb3DQEBCwUAA4GBAKqFqW5gCso422bXriCBJoygokOTTOw1Rzpq
+K8Mm0B8W9rrW9OTkoLEcjekllZcUCZFin2HovHC5HlHZz+mQvBI1M6sN2HVQbSzS
+EgL66U9gwJVnn9/U1hXhJ0LO28aGbyE29DxnewNR741dWN3oFxCdlNaO6eMWaEsO
+gduJ5sDl
+-----END CERTIFICATE-----
diff --git a/lib/public_key/test/public_key_SUITE_data/verify_hostname.conf b/lib/public_key/test/public_key_SUITE_data/verify_hostname.conf
new file mode 100644
index 0000000000..a28864dc78
--- /dev/null
+++ b/lib/public_key/test/public_key_SUITE_data/verify_hostname.conf
@@ -0,0 +1,16 @@
+[req]
+prompt = no
+distinguished_name = DN
+
+[DN]
+C=SE
+CN=example.com
+
+[SAN]
+subjectAltName = @alt_names
+
+[alt_names]
+DNS = kb.example.org
+URI.1 = http://www.example.org
+URI.2 = https://wws.example.org
+
diff --git a/lib/runtime_tools/doc/src/LTTng.xml b/lib/runtime_tools/doc/src/LTTng.xml
index 82a4c79379..7aae5e5c41 100644
--- a/lib/runtime_tools/doc/src/LTTng.xml
+++ b/lib/runtime_tools/doc/src/LTTng.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf8" ?>
+<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE chapter SYSTEM "chapter.dtd">
<chapter>
<header>
diff --git a/lib/runtime_tools/src/observer_backend.erl b/lib/runtime_tools/src/observer_backend.erl
index e943fb4a3e..b27bc63d15 100644
--- a/lib/runtime_tools/src/observer_backend.erl
+++ b/lib/runtime_tools/src/observer_backend.erl
@@ -314,13 +314,12 @@ etop_collect(Collector) ->
case SchedulerWallTime of
undefined ->
- spawn(fun() -> flag_holder_proc(Collector) end),
+ erlang:system_flag(scheduler_wall_time,true),
+ spawn(fun() -> flag_holder_proc(Collector) end),
ok;
_ ->
ok
- end,
-
- erlang:system_flag(scheduler_wall_time,true).
+ end.
flag_holder_proc(Collector) ->
Ref = erlang:monitor(process,Collector),
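A simplified sketch of the flag-holder pattern that the reordered code above relies on (module swt_flag_holder is illustrative and ignores the real module's extra bookkeeping):

    -module(swt_flag_holder).
    -export([start/1]).

    %% Keep scheduler_wall_time enabled while Collector is alive, and switch
    %% it off again when the collector terminates.
    start(Collector) ->
        erlang:system_flag(scheduler_wall_time, true),   % enable before spawning
        spawn(fun() ->
                      Ref = erlang:monitor(process, Collector),
                      receive
                          {'DOWN', Ref, process, Collector, _Reason} ->
                              erlang:system_flag(scheduler_wall_time, false)
                      end
              end).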
diff --git a/lib/sasl/test/release_handler_SUITE_data/start b/lib/sasl/test/release_handler_SUITE_data/start
index 87275045b1..eab2b77aed 100755
--- a/lib/sasl/test/release_handler_SUITE_data/start
+++ b/lib/sasl/test/release_handler_SUITE_data/start
@@ -21,8 +21,7 @@ then
fi
HEART_COMMAND=$ROOTDIR/bin/start
-HW_WD_DISABLE=true
-export HW_WD_DISABLE HEART_COMMAND
+export HEART_COMMAND
START_ERL_DATA=${1:-$RELDIR/start_erl.data}
diff --git a/lib/sasl/test/release_handler_SUITE_data/start_client b/lib/sasl/test/release_handler_SUITE_data/start_client
index 5ea94d6f7c..05d744f06e 100755
--- a/lib/sasl/test/release_handler_SUITE_data/start_client
+++ b/lib/sasl/test/release_handler_SUITE_data/start_client
@@ -24,8 +24,7 @@ RELDIR=$CLIENTDIR/releases
# Note that this scripts is modified an copied to $CLIENTDIR/bin/start
# in release_handler_SUITE:copy_client - therefore HEART_COMMAND is as follows:
HEART_COMMAND=$CLIENTDIR/bin/start
-HW_WD_DISABLE=true
-export HW_WD_DISABLE HEART_COMMAND
+export HEART_COMMAND
START_ERL_DATA=${1:-$RELDIR/start_erl.data}
diff --git a/lib/snmp/src/app/snmp.appup.src b/lib/snmp/src/app/snmp.appup.src
index ca61782639..db09ec3dc5 100644
--- a/lib/snmp/src/app/snmp.appup.src
+++ b/lib/snmp/src/app/snmp.appup.src
@@ -8,6 +8,10 @@
%% {update, snmpa_local_db, soft, soft_purge, soft_purge, []}
%% {add_module, snmpm_net_if_mt}
[
+ {<<"5\\.2\\.4">>,
+ [{load_module, snmp, soft_purge, soft_purge, []},
+ {load_module, snmpc_lib, soft_purge, soft_purge, []},
+ {load_module, snmpc_mib_gram, soft_purge, soft_purge, []}]},
{<<"5\\..*">>, [{restart_application, snmp}]},
{<<"4\\..*">>, [{restart_application, snmp}]}
],
@@ -17,6 +21,10 @@
%% {remove, {snmpm_net_if_mt, soft_purge, soft_purge}}
[
+ {<<"5\\.2\\.4">>,
+ [{load_module, snmp, soft_purge, soft_purge, []},
+ {load_module, snmpc_lib, soft_purge, soft_purge, []},
+ {load_module, snmpc_mib_gram, soft_purge, soft_purge, []}]},
{<<"5\\..*">>, [{restart_application, snmp}]},
{<<"4\\..*">>, [{restart_application, snmp}]}
]
diff --git a/lib/snmp/src/app/snmp.erl b/lib/snmp/src/app/snmp.erl
index df3933ea01..8a736f688b 100644
--- a/lib/snmp/src/app/snmp.erl
+++ b/lib/snmp/src/app/snmp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -573,9 +573,16 @@ print_mod_info(Prefix, {Module, Info}) ->
CompDate =
case key1search(compile_time, Info) of
{value, {Year, Month, Day, Hour, Min, Sec}} ->
- lists:flatten(
- io_lib:format("~w-~2..0w-~2..0w ~2..0w:~2..0w:~2..0w",
- [Year, Month, Day, Hour, Min, Sec]));
+ io_lib:format(
+ "~w-~2..0w-~2..0w ~2..0w:~2..0w:~2..0w",
+ [Year, Month, Day, Hour, Min, Sec]);
+ _ ->
+ "Not found"
+ end,
+ Digest =
+ case key1search(md5, Info) of
+ {value, MD5} when is_binary(MD5) ->
+ [io_lib:format("~2.16.0b", [Byte]) || <<Byte>> <= MD5];
_ ->
"Not found"
end,
@@ -583,12 +590,14 @@ print_mod_info(Prefix, {Module, Info}) ->
"~s Vsn: ~s~n"
"~s App vsn: ~s~n"
"~s Compiler ver: ~s~n"
- "~s Compile time: ~s~n",
+ "~s Compile time: ~s~n"
+ "~s MD5 digest: ~s~n",
[Prefix, Module,
Prefix, Vsn,
Prefix, AppVsn,
- Prefix, CompVer,
- Prefix, CompDate]),
+ Prefix, CompVer,
+ Prefix, CompDate,
+ Prefix, Digest]),
ok.
key1search(Key, Vals) ->
@@ -617,7 +626,7 @@ versions1() ->
Error ->
Error
end.
-
+
versions2() ->
case ms2() of
{ok, Mods} ->
@@ -625,25 +634,56 @@ versions2() ->
Error ->
Error
end.
-
+
version_info(Mods) ->
SysInfo = sys_info(),
OsInfo = os_info(),
ModInfo = [mod_version_info(Mod) || Mod <- Mods],
[{sys_info, SysInfo}, {os_info, OsInfo}, {mod_info, ModInfo}].
-
+
mod_version_info(Mod) ->
Info = Mod:module_info(),
- {value, {attributes, Attr}} = lists:keysearch(attributes, 1, Info),
- {value, {vsn, [Vsn]}} = lists:keysearch(vsn, 1, Attr),
- {value, {app_vsn, AppVsn}} = lists:keysearch(app_vsn, 1, Attr),
- {value, {compile, Comp}} = lists:keysearch(compile, 1, Info),
- {value, {version, Ver}} = lists:keysearch(version, 1, Comp),
- {value, {time, Time}} = lists:keysearch(time, 1, Comp),
- {Mod, [{vsn, Vsn},
- {app_vsn, AppVsn},
- {compiler_version, Ver},
- {compile_time, Time}]}.
+ {Mod,
+ case key1search(attributes, Info) of
+ {value, Attr} ->
+ case key1search(vsn, Attr) of
+ {value, [Vsn]} ->
+ [{vsn, Vsn}];
+ not_found ->
+ []
+ end ++
+ case key1search(app_vsn, Attr) of
+ {value, AppVsn} ->
+ [{app_vsn, AppVsn}];
+ not_found ->
+ []
+ end;
+ not_found ->
+ []
+ end ++
+ case key1search(compile, Info) of
+ {value, Comp} ->
+ case key1search(version, Comp) of
+ {value, Ver} ->
+ [{compiler_version, Ver}];
+ not_found ->
+ []
+ end ++
+ case key1search(time, Comp) of
+ {value, Ver} ->
+ [{compile_time, Ver}];
+ not_found ->
+ []
+ end;
+ not_found ->
+ []
+ end ++
+ case key1search(md5, Info) of
+ {value, Bin} ->
+ [{md5, Bin}];
+ not_found ->
+ []
+ end}.
sys_info() ->
SysArch = string:strip(erlang:system_info(system_architecture),right,$\n),
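A standalone sketch of the digest formatting used for the new "MD5 digest" line (module mod_md5_demo is illustrative, not part of the patch):

    -module(mod_md5_demo).
    -export([md5_hex/1]).

    %% Format the MD5 of a loaded module as a lowercase hex string, the same
    %% way print_mod_info/2 now renders the value of module_info(md5).
    md5_hex(Mod) when is_atom(Mod) ->
        MD5 = Mod:module_info(md5),            % 16-byte binary
        lists:flatten([io_lib:format("~2.16.0b", [Byte]) || <<Byte>> <= MD5]).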
diff --git a/lib/snmp/src/compile/snmpc_lib.erl b/lib/snmp/src/compile/snmpc_lib.erl
index 51690b6e7e..33ddd78308 100644
--- a/lib/snmp/src/compile/snmpc_lib.erl
+++ b/lib/snmp/src/compile/snmpc_lib.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -99,7 +99,7 @@ make_ASN1type({{type_with_size,Type,{range,Lo,Hi}},Line}) ->
print_error("Undefined type '~w'",[Type],Line),
guess_string_type()
end;
-make_ASN1type({{integer_with_enum,Type,Enums},Line}) ->
+make_ASN1type({{type_with_enum,Type,Enums},Line}) ->
case lookup_vartype(Type) of
{value,ASN1type} -> ASN1type#asn1_type{assocList = [{enums, Enums}]};
false ->
diff --git a/lib/snmp/src/compile/snmpc_mib_gram.yrl b/lib/snmp/src/compile/snmpc_mib_gram.yrl
index 743c3a6550..14a668127e 100644
--- a/lib/snmp/src/compile/snmpc_mib_gram.yrl
+++ b/lib/snmp/src/compile/snmpc_mib_gram.yrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -387,10 +387,12 @@ syntax -> type : {{type, cat('$1')},line_of('$1')}.
syntax -> type size : {{type_with_size, cat('$1'), '$2'},line_of('$1')}.
syntax -> usertype size : {{type_with_size,val('$1'), '$2'},line_of('$1')}.
syntax -> 'INTEGER' '{' namedbits '}' :
- {{integer_with_enum, 'INTEGER', '$3'}, line_of('$1')}.
+ {{type_with_enum, 'INTEGER', '$3'}, line_of('$1')}.
syntax -> 'BITS' '{' namedbits '}' :
ensure_ver(2,'$1'),
{{bits, '$3'}, line_of('$1')}.
+syntax -> usertype '{' namedbits '}' :
+ {{type_with_enum, 'INTEGER', '$3'}, line_of('$1')}.
syntax -> 'SEQUENCE' 'OF' usertype :
{{sequence_of,val('$3')},line_of('$1')}.
diff --git a/lib/snmp/test/snmp_compiler_test.erl b/lib/snmp/test/snmp_compiler_test.erl
index 2c8851c2a7..9b3c2bfd2c 100644
--- a/lib/snmp/test/snmp_compiler_test.erl
+++ b/lib/snmp/test/snmp_compiler_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -56,7 +56,8 @@
otp_8574/1,
otp_8595/1,
otp_10799/1,
- otp_10808/1
+ otp_10808/1,
+ otp_14145/1
]).
@@ -135,7 +136,8 @@ all() ->
].
groups() ->
- [{tickets, [], [otp_6150, otp_8574, otp_8595, otp_10799, otp_10808]}].
+ [{tickets, [],
+ [otp_6150, otp_8574, otp_8595, otp_10799, otp_10808, otp_14145]}].
init_per_group(_GroupName, Config) ->
Config.
@@ -431,6 +433,30 @@ otp_10808(Config) when is_list(Config) ->
%%======================================================================
+otp_14145(suite) ->
+ [];
+otp_14145(Config) when is_list(Config) ->
+ put(tname, otp14145),
+ p("starting with Config: ~p~n", [Config]),
+
+ Dir = ?config(case_top_dir, Config),
+ MibDir = ?config(mib_dir, Config),
+ MibName = "OTP14145-MIB",
+ MibFile = join(MibDir, MibName++".mib"),
+ ?line {ok, MibBin} =
+ snmpc:compile(MibFile, [{outdir, Dir},
+ {verbosity, trace},
+ {group_check, false},
+ module_compliance]),
+ p("Mib: ~n~p~n", [MibBin]),
+ MIB = read_mib(MibBin),
+ Oid = [1,3,6,1,2,1,67,4],
+ check_mib(MIB#mib.mes, Oid, undefined),
+ ok.
+
+
+%%======================================================================
+
augments_extra_info(suite) ->
[];
augments_extra_info(Config) when is_list(Config) ->
diff --git a/lib/snmp/test/snmp_test_data/OTP14145-MIB.mib b/lib/snmp/test/snmp_test_data/OTP14145-MIB.mib
new file mode 100644
index 0000000000..f29c65c4c2
--- /dev/null
+++ b/lib/snmp/test/snmp_test_data/OTP14145-MIB.mib
@@ -0,0 +1,44 @@
+OTP14145-MIB DEFINITIONS ::= BEGIN
+
+IMPORTS
+ MODULE-IDENTITY, OBJECT-TYPE,
+ mib-2 FROM SNMPv2-SMI
+ InetAddressType, InetAddress FROM INET-ADDRESS-MIB
+ MODULE-COMPLIANCE, OBJECT-GROUP FROM SNMPv2-CONF;
+
+testMibId MODULE-IDENTITY
+ LAST-UPDATED "200608210000Z" -- 21 August 2006
+ ORGANIZATION "a"
+ CONTACT-INFO "a"
+ DESCRIPTION "a"
+ REVISION "200608210000Z" -- 21 August 2006
+ DESCRIPTION "a"
+ ::= { mib-2 67 }
+
+testObj OBJECT-TYPE
+ SYNTAX InetAddressType
+ -- SYNTAX InetAddress
+ MAX-ACCESS read-only
+ STATUS current
+ DESCRIPTION "a"
+ ::= { testMibId 2 }
+
+testObjId OBJECT IDENTIFIER ::= { testMibId 3 }
+
+testMibCompliance MODULE-COMPLIANCE
+ STATUS current
+ DESCRIPTION "a"
+ MODULE
+ OBJECT testObj
+ SYNTAX InetAddressType { ipv4(1), ipv6(2) }
+ -- SYNTAX InetAddress ( SIZE(4|16) )
+ DESCRIPTION "a"
+ ::= { testMibId 4 }
+
+testObjGroup OBJECT-GROUP
+ OBJECTS { testObj }
+ STATUS current
+ DESCRIPTION "a"
+ ::= { testObjId 1 }
+
+END
diff --git a/lib/snmp/vsn.mk b/lib/snmp/vsn.mk
index 28eba0d0d6..30b8ee1124 100644
--- a/lib/snmp/vsn.mk
+++ b/lib/snmp/vsn.mk
@@ -2,7 +2,7 @@
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2017. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,6 +19,6 @@
# %CopyrightEnd%
APPLICATION = snmp
-SNMP_VSN = 5.2.4
+SNMP_VSN = 5.2.5
PRE_VSN =
APP_VSN = "$(APPLICATION)-$(SNMP_VSN)$(PRE_VSN)"
diff --git a/lib/ssh/src/ssh.app.src b/lib/ssh/src/ssh.app.src
index 76b7d8cd55..2bb7491b0c 100644
--- a/lib/ssh/src/ssh.app.src
+++ b/lib/ssh/src/ssh.app.src
@@ -48,4 +48,3 @@
"stdlib-3.1"
]}]}.
-
diff --git a/lib/ssh/src/ssh_auth.erl b/lib/ssh/src/ssh_auth.erl
index ac35b70209..9b54ecb2dd 100644
--- a/lib/ssh/src/ssh_auth.erl
+++ b/lib/ssh/src/ssh_auth.erl
@@ -406,7 +406,11 @@ handle_userauth_info_response(#ssh_msg_userauth_info_response{num_responses = 1,
kb_tries_left = KbTriesLeft,
user = User,
userauth_supported_methods = Methods} = Ssh) ->
- SendOneEmpty = proplists:get_value(tstflg, Opts) == one_empty,
+ SendOneEmpty =
+ (proplists:get_value(tstflg,Opts) == one_empty)
+ orelse
+ proplists:get_value(one_empty, proplists:get_value(tstflg,Opts,[]), false),
+
case check_password(User, unicode:characters_to_list(Password), Opts, Ssh) of
{true,Ssh1} when SendOneEmpty==true ->
Msg = #ssh_msg_userauth_info_request{name = "",
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 7451c9e6d0..4496c657c3 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -609,13 +609,15 @@ handle_event(_, #ssh_msg_kexdh_reply{} = Msg, {key_exchange,client,ReNeg}, D) ->
%%%---- diffie-hellman group exchange
handle_event(_, #ssh_msg_kex_dh_gex_request{} = Msg, {key_exchange,server,ReNeg}, D) ->
- {ok, GexGroup, Ssh} = ssh_transport:handle_kex_dh_gex_request(Msg, D#data.ssh_params),
+ {ok, GexGroup, Ssh1} = ssh_transport:handle_kex_dh_gex_request(Msg, D#data.ssh_params),
send_bytes(GexGroup, D),
+ Ssh = ssh_transport:parallell_gen_key(Ssh1),
{next_state, {key_exchange_dh_gex_init,server,ReNeg}, D#data{ssh_params=Ssh}};
handle_event(_, #ssh_msg_kex_dh_gex_request_old{} = Msg, {key_exchange,server,ReNeg}, D) ->
- {ok, GexGroup, Ssh} = ssh_transport:handle_kex_dh_gex_request(Msg, D#data.ssh_params),
+ {ok, GexGroup, Ssh1} = ssh_transport:handle_kex_dh_gex_request(Msg, D#data.ssh_params),
send_bytes(GexGroup, D),
+ Ssh = ssh_transport:parallell_gen_key(Ssh1),
{next_state, {key_exchange_dh_gex_init,server,ReNeg}, D#data{ssh_params=Ssh}};
handle_event(_, #ssh_msg_kex_dh_gex_group{} = Msg, {key_exchange,client,ReNeg}, D) ->
@@ -1206,7 +1208,7 @@ handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock,
catch
_C:_E ->
disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_PROTOCOL_ERROR,
- description = "Encountered unexpected input"},
+ description = "Bad packet"},
StateName, D)
end;
@@ -1221,13 +1223,12 @@ handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock,
{bad_mac, Ssh1} ->
disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_PROTOCOL_ERROR,
- description = "Bad mac"},
+ description = "Bad packet"},
StateName, D0#data{ssh_params=Ssh1});
- {error, {exceeds_max_size,PacketLen}} ->
+ {error, {exceeds_max_size,_PacketLen}} ->
disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_PROTOCOL_ERROR,
- description = "Bad packet length "
- ++ integer_to_list(PacketLen)},
+ description = "Bad packet"},
StateName, D0)
catch
_C:_E ->
diff --git a/lib/ssh/src/ssh_dbg.erl b/lib/ssh/src/ssh_dbg.erl
index dff2bae9f2..0345bbdea7 100644
--- a/lib/ssh/src/ssh_dbg.erl
+++ b/lib/ssh/src/ssh_dbg.erl
@@ -50,50 +50,61 @@ messages(Write, MangleArg) when is_function(Write,2),
is_function(MangleArg,1) ->
catch dbg:start(),
setup_tracer(Write, MangleArg),
- dbg:p(new,c),
+ dbg:p(new,[c,timestamp]),
dbg_ssh_messages().
dbg_ssh_messages() ->
dbg:tp(ssh_message,encode,1, x),
dbg:tp(ssh_message,decode,1, x),
- dbg:tpl(ssh_transport,select_algorithm,3, x).
-
+ dbg:tpl(ssh_transport,select_algorithm,3, x),
+ dbg:tp(ssh_transport,hello_version_msg,1, x),
+ dbg:tp(ssh_transport,handle_hello_version,1, x).
+
%%%----------------------------------------------------------------
stop() ->
dbg:stop().
%%%================================================================
-msg_formater({trace,Pid,call,{ssh_message,encode,[Msg]}}, D) ->
- fmt("~nSEND ~p ~s~n", [Pid,wr_record(shrink_bin(Msg))], D);
-msg_formater({trace,_Pid,return_from,{ssh_message,encode,1},_Res}, D) ->
+msg_formater({trace_ts,Pid,call,{ssh_message,encode,[Msg]},TS}, D) ->
+ fmt("~n~s SEND ~p ~s~n", [ts(TS),Pid,wr_record(shrink_bin(Msg))], D);
+msg_formater({trace_ts,_Pid,return_from,{ssh_message,encode,1},_Res,_TS}, D) ->
D;
-msg_formater({trace,_Pid,call,{ssh_message,decode,_}}, D) ->
+msg_formater({trace_ts,_Pid,call,{ssh_message,decode,_},_TS}, D) ->
D;
-msg_formater({trace,Pid,return_from,{ssh_message,decode,1},Msg}, D) ->
- fmt("~n~p RECV ~s~n", [Pid,wr_record(shrink_bin(Msg))], D);
+msg_formater({trace_ts,Pid,return_from,{ssh_message,decode,1},Msg,TS}, D) ->
+ fmt("~n~s ~p RECV ~s~n", [ts(TS),Pid,wr_record(shrink_bin(Msg))], D);
-msg_formater({trace,_Pid,call,{ssh_transport,select_algorithm,_}}, D) ->
+msg_formater({trace_ts,_Pid,call,{ssh_transport,select_algorithm,_},_TS}, D) ->
+ D;
+msg_formater({trace_ts,Pid,return_from,{ssh_transport,select_algorithm,3},{ok,Alg},TS}, D) ->
+ fmt("~n~s ~p ALGORITHMS~n~s~n", [ts(TS),Pid, wr_record(Alg)], D);
+
+msg_formater({trace_ts,_Pid,call,{ssh_transport,hello_version_msg,_},_TS}, D) ->
D;
-msg_formater({trace,Pid,return_from,{ssh_transport,select_algorithm,3},{ok,Alg}}, D) ->
- fmt("~n~p ALGORITHMS~n~s~n", [Pid, wr_record(Alg)], D);
+msg_formater({trace_ts,Pid,return_from,{ssh_transport,hello_version_msg,1},Hello,TS}, D) ->
+ fmt("~n~s ~p TCP SEND HELLO~n ~p~n", [ts(TS),Pid,lists:flatten(Hello)], D);
+msg_formater({trace_ts,Pid,call,{ssh_transport,handle_hello_version,[Hello]},TS}, D) ->
+ fmt("~n~s ~p RECV HELLO~n ~p~n", [ts(TS),Pid,lists:flatten(Hello)], D);
+msg_formater({trace_ts,_Pid,return_from,{ssh_transport,handle_hello_version,1},_,_TS}, D) ->
+ D;
-msg_formater({trace,Pid,send,{tcp,Sock,Bytes},Pid}, D) ->
- fmt("~n~p TCP SEND on ~p~n ~p~n", [Pid,Sock, shrink_bin(Bytes)], D);
+msg_formater({trace_ts,Pid,send,{tcp,Sock,Bytes},Pid,TS}, D) ->
+ fmt("~n~s ~p TCP SEND on ~p~n ~p~n", [ts(TS),Pid,Sock, shrink_bin(Bytes)], D);
-msg_formater({trace,Pid,send,{tcp,Sock,Bytes},Dest}, D) ->
- fmt("~n~p TCP SEND from ~p TO ~p~n ~p~n", [Pid,Sock,Dest, shrink_bin(Bytes)], D);
+msg_formater({trace_ts,Pid,send,{tcp,Sock,Bytes},Dest,TS}, D) ->
+ fmt("~n~s ~p TCP SEND from ~p TO ~p~n ~p~n", [ts(TS),Pid,Sock,Dest, shrink_bin(Bytes)], D);
-msg_formater({trace,Pid,send,ErlangMsg,Dest}, D) ->
- fmt("~n~p ERL MSG SEND TO ~p~n ~p~n", [Pid,Dest, shrink_bin(ErlangMsg)], D);
+msg_formater({trace_ts,Pid,send,ErlangMsg,Dest,TS}, D) ->
+ fmt("~n~s ~p ERL MSG SEND TO ~p~n ~p~n", [ts(TS),Pid,Dest, shrink_bin(ErlangMsg)], D);
-msg_formater({trace,Pid,'receive',{tcp,Sock,Bytes}}, D) ->
- fmt("~n~p TCP RECEIVE on ~p~n ~p~n", [Pid,Sock,shrink_bin(Bytes)], D);
+msg_formater({trace_ts,Pid,'receive',{tcp,Sock,Bytes},TS}, D) ->
+ fmt("~n~s ~p TCP RECEIVE on ~p~n ~p~n", [ts(TS),Pid,Sock,shrink_bin(Bytes)], D);
-msg_formater({trace,Pid,'receive',ErlangMsg}, D) ->
- fmt("~n~p ERL MSG RECEIVE~n ~p~n", [Pid,shrink_bin(ErlangMsg)], D);
+msg_formater({trace_ts,Pid,'receive',ErlangMsg,TS}, D) ->
+ fmt("~n~s ~p ERL MSG RECEIVE~n ~p~n", [ts(TS),Pid,shrink_bin(ErlangMsg)], D);
msg_formater(M, D) ->
@@ -106,6 +117,11 @@ msg_formater(M, D) ->
fmt(Fmt, Args, D=#data{writer=Write,acc=Acc}) ->
D#data{acc = Write(io_lib:format(Fmt, Args), Acc)}.
+ts({_,_,Usec}=Now) ->
+ {_Date,{HH,MM,SS}} = calendar:now_to_local_time(Now),
+ io_lib:format("~.2.0w:~.2.0w:~.2.0w.~.6.0w",[HH,MM,SS,Usec]);
+ts(_) ->
+ "-".
%%%----------------------------------------------------------------
setup_tracer(Write, MangleArg) ->
Handler = fun(Arg, D) ->
@@ -116,11 +132,11 @@ setup_tracer(Write, MangleArg) ->
ok.
%%%----------------------------------------------------------------
-shrink_bin(B) when is_binary(B), size(B)>100 -> {'*** SHRINKED BIN',
+shrink_bin(B) when is_binary(B), size(B)>256 -> {'*** SHRINKED BIN',
size(B),
- element(1,split_binary(B,20)),
+ element(1,split_binary(B,64)),
'...',
- element(2,split_binary(B,size(B)-20))
+ element(2,split_binary(B,size(B)-64))
};
shrink_bin(L) when is_list(L) -> lists:map(fun shrink_bin/1, L);
shrink_bin(T) when is_tuple(T) -> list_to_tuple(shrink_bin(tuple_to_list(T)));
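A minimal usage sketch of the patched tracer (illustration only, not part of the diff; the writer and mangle funs below are assumptions chosen to satisfy the arity checks in messages/2):

    %% Write every formatted trace chunk to stdout, keep no accumulator state,
    %% and pass each trace argument through unmangled.
    Write  = fun(Chars, Acc) -> io:format("~s", [Chars]), Acc end,
    Mangle = fun(Arg) -> Arg end,
    ssh_dbg:messages(Write, Mangle).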
diff --git a/lib/ssh/src/ssh_sftpd_file_api.erl b/lib/ssh/src/ssh_sftpd_file_api.erl
index 78f452df67..e444e52ac0 100644
--- a/lib/ssh/src/ssh_sftpd_file_api.erl
+++ b/lib/ssh/src/ssh_sftpd_file_api.erl
@@ -36,7 +36,7 @@
-callback list_dir(file:name(), State::term()) ->
{{ok, Filenames::term()}, State::term()} | {{error, Reason::term()}, State::term()}.
-callback make_dir(Dir::term(), State::term()) ->
- {{ok, State::term()},State::term()} | {{error, Reason::term()}, State::term()}.
+ {ok, State::term()} | {{error, Reason::term()}, State::term()}.
-callback make_symlink(Path2::term(), Path::term(), State::term()) ->
{ok, State::term()} | {{error, Reason::term()}, State::term()}.
-callback open(Path::term(), Flags::term(), State::term()) ->
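For reference, a hedged sketch of a callback clause that satisfies the corrected make_dir/2 contract above (the delegation to the file module is an illustrative assumption, not taken from the patch):

    %% Create Dir and thread the callback state through unchanged.
    make_dir(Dir, State) ->
        case file:make_dir(Dir) of
            ok              -> {ok, State};
            {error, Reason} -> {{error, Reason}, State}
        end.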
diff --git a/lib/ssh/src/ssh_transport.erl b/lib/ssh/src/ssh_transport.erl
index 21ba34506a..a7cc4cd52c 100644
--- a/lib/ssh/src/ssh_transport.erl
+++ b/lib/ssh/src/ssh_transport.erl
@@ -44,6 +44,7 @@
handle_kexdh_reply/2,
handle_kex_ecdh_init/2,
handle_kex_ecdh_reply/2,
+ parallell_gen_key/1,
extract_public_key/1,
ssh_packet/2, pack/2,
sha/1, sign/3, verify/4]).
@@ -296,9 +297,6 @@ handle_kexinit_msg(#ssh_msg_kexinit{} = CounterPart, #ssh_msg_kexinit{} = Own,
end.
-%% TODO: diffie-hellman-group14-sha1 should also be supported.
-%% Maybe check more things ...
-
verify_algorithm(#alg{kex = undefined}) -> false;
verify_algorithm(#alg{hkey = undefined}) -> false;
verify_algorithm(#alg{send_mac = undefined}) -> false;
@@ -316,17 +314,29 @@ verify_algorithm(#alg{kex = Kex}) -> lists:member(Kex, supported_algorithms(kex)
key_exchange_first_msg(Kex, Ssh0) when Kex == 'diffie-hellman-group1-sha1' ;
Kex == 'diffie-hellman-group14-sha1' ->
{G, P} = dh_group(Kex),
- {Public, Private} = generate_key(dh, [P,G]),
+ Sz = dh_bits(Ssh0#ssh.algorithms),
+ {Public, Private} = generate_key(dh, [P,G,2*Sz]),
{SshPacket, Ssh1} = ssh_packet(#ssh_msg_kexdh_init{e = Public}, Ssh0),
{ok, SshPacket,
Ssh1#ssh{keyex_key = {{Private, Public}, {G, P}}}};
key_exchange_first_msg(Kex, Ssh0=#ssh{opts=Opts}) when Kex == 'diffie-hellman-group-exchange-sha1' ;
Kex == 'diffie-hellman-group-exchange-sha256' ->
- {Min,NBits,Max} =
+ {Min,NBits0,Max} =
proplists:get_value(dh_gex_limits, Opts, {?DEFAULT_DH_GROUP_MIN,
?DEFAULT_DH_GROUP_NBITS,
?DEFAULT_DH_GROUP_MAX}),
+ DhBits = dh_bits(Ssh0#ssh.algorithms),
+ NBits1 =
+ %% NIST Special Publication 800-57 Part 1 Revision 4: Recommendation for Key Management
+ if
+ DhBits =< 112 -> 2048;
+ DhBits =< 128 -> 3072;
+ DhBits =< 192 -> 7680;
+ true -> 8192
+ end,
+ NBits = min(max(max(NBits0,NBits1),Min), Max),
+
{SshPacket, Ssh1} =
ssh_packet(#ssh_msg_kex_dh_gex_request{min = Min,
n = NBits,
@@ -350,12 +360,13 @@ key_exchange_first_msg(Kex, Ssh0) when Kex == 'ecdh-sha2-nistp256' ;
%%% diffie-hellman-group14-sha1
%%%
handle_kexdh_init(#ssh_msg_kexdh_init{e = E},
- Ssh0 = #ssh{algorithms = #alg{kex=Kex}}) ->
+ Ssh0 = #ssh{algorithms = #alg{kex=Kex} = Algs}) ->
%% server
{G, P} = dh_group(Kex),
if
1=<E, E=<(P-1) ->
- {Public, Private} = generate_key(dh, [P,G]),
+ Sz = dh_bits(Algs),
+ {Public, Private} = generate_key(dh, [P,G,2*Sz]),
K = compute_key(dh, E, Private, [P,G]),
MyPrivHostKey = get_host_key(Ssh0),
MyPubHostKey = extract_public_key(MyPrivHostKey),
@@ -426,13 +437,12 @@ handle_kex_dh_gex_request(#ssh_msg_kex_dh_gex_request{min = Min0,
{Min, Max} = adjust_gex_min_max(Min0, Max0, Opts),
case public_key:dh_gex_group(Min, NBits, Max,
proplists:get_value(dh_gex_groups,Opts)) of
- {ok, {_Sz, {G,P}}} ->
- {Public, Private} = generate_key(dh, [P,G]),
+ {ok, {_, {G,P}}} ->
{SshPacket, Ssh} =
ssh_packet(#ssh_msg_kex_dh_gex_group{p = P, g = G}, Ssh0),
{ok, SshPacket,
- Ssh#ssh{keyex_key = {{Private, Public}, {G, P}},
- keyex_info = {Min, Max, NBits}
+ Ssh#ssh{keyex_key = {x, {G, P}},
+ keyex_info = {Min0, Max0, NBits}
}};
{error,_} ->
ssh_connection_handler:disconnect(
@@ -461,12 +471,11 @@ handle_kex_dh_gex_request(#ssh_msg_kex_dh_gex_request_old{n = NBits},
{Min, Max} = adjust_gex_min_max(Min0, Max0, Opts),
case public_key:dh_gex_group(Min, NBits, Max,
proplists:get_value(dh_gex_groups,Opts)) of
- {ok, {_Sz, {G,P}}} ->
- {Public, Private} = generate_key(dh, [P,G]),
+ {ok, {_, {G,P}}} ->
{SshPacket, Ssh} =
ssh_packet(#ssh_msg_kex_dh_gex_group{p = P, g = G}, Ssh0),
{ok, SshPacket,
- Ssh#ssh{keyex_key = {{Private, Public}, {G, P}},
+ Ssh#ssh{keyex_key = {x, {G, P}},
keyex_info = {-1, -1, NBits} % flag for kex_h hash calc
}};
{error,_} ->
@@ -507,7 +516,8 @@ adjust_gex_min_max(Min0, Max0, Opts) ->
handle_kex_dh_gex_group(#ssh_msg_kex_dh_gex_group{p = P, g = G}, Ssh0) ->
%% client
- {Public, Private} = generate_key(dh, [P,G]),
+ Sz = dh_bits(Ssh0#ssh.algorithms),
+ {Public, Private} = generate_key(dh, [P,G,2*Sz]),
{SshPacket, Ssh1} =
ssh_packet(#ssh_msg_kex_dh_gex_init{e = Public}, Ssh0), % Pub = G^Priv mod P (def)
@@ -1117,6 +1127,51 @@ verify(PlainText, Hash, Sig, Key) ->
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Unit: bytes
+
+-record(cipher_data, {
+ key_bytes,
+ iv_bytes,
+ block_bytes
+ }).
+
+%%% Start of more parameterized crypto handling.
+cipher('AEAD_AES_128_GCM') ->
+ #cipher_data{key_bytes = 16,
+ iv_bytes = 12,
+ block_bytes = 16};
+
+cipher('AEAD_AES_256_GCM') ->
+ #cipher_data{key_bytes = 32,
+ iv_bytes = 12,
+ block_bytes = 16};
+
+cipher('3des-cbc') ->
+ #cipher_data{key_bytes = 24,
+ iv_bytes = 8,
+ block_bytes = 8};
+
+cipher('aes128-cbc') ->
+ #cipher_data{key_bytes = 16,
+ iv_bytes = 16,
+ block_bytes = 16};
+
+cipher('aes128-ctr') ->
+ #cipher_data{key_bytes = 16,
+ iv_bytes = 16,
+ block_bytes = 16};
+
+cipher('aes192-ctr') ->
+ #cipher_data{key_bytes = 24,
+ iv_bytes = 16,
+ block_bytes = 16};
+
+cipher('aes256-ctr') ->
+ #cipher_data{key_bytes = 32,
+ iv_bytes = 16,
+ block_bytes = 16}.
+
+
encrypt_init(#ssh{encrypt = none} = Ssh) ->
{ok, Ssh};
encrypt_init(#ssh{encrypt = 'AEAD_AES_128_GCM', role = client} = Ssh) ->
@@ -1497,11 +1552,11 @@ send_mac_init(SSH) ->
common ->
case SSH#ssh.role of
client ->
- KeySize = mac_key_size(SSH#ssh.send_mac),
+ KeySize = 8*mac_key_bytes(SSH#ssh.send_mac),
Key = hash(SSH, "E", KeySize),
{ok, SSH#ssh { send_mac_key = Key }};
server ->
- KeySize = mac_key_size(SSH#ssh.send_mac),
+ KeySize = 8*mac_key_bytes(SSH#ssh.send_mac),
Key = hash(SSH, "F", KeySize),
{ok, SSH#ssh { send_mac_key = Key }}
end;
@@ -1520,10 +1575,10 @@ recv_mac_init(SSH) ->
common ->
case SSH#ssh.role of
client ->
- Key = hash(SSH, "F", mac_key_size(SSH#ssh.recv_mac)),
+ Key = hash(SSH, "F", 8*mac_key_bytes(SSH#ssh.recv_mac)),
{ok, SSH#ssh { recv_mac_key = Key }};
server ->
- Key = hash(SSH, "E", mac_key_size(SSH#ssh.recv_mac)),
+ Key = hash(SSH, "E", 8*mac_key_bytes(SSH#ssh.recv_mac)),
{ok, SSH#ssh { recv_mac_key = Key }}
end;
aead ->
@@ -1640,13 +1695,15 @@ sha(?'secp384r1') -> sha(secp384r1);
sha(?'secp521r1') -> sha(secp521r1).
-mac_key_size('hmac-sha1') -> 20*8;
-mac_key_size('hmac-sha1-96') -> 20*8;
-mac_key_size('hmac-md5') -> 16*8;
-mac_key_size('hmac-md5-96') -> 16*8;
-mac_key_size('hmac-sha2-256')-> 32*8;
-mac_key_size('hmac-sha2-512')-> 512;
-mac_key_size(none) -> 0.
+mac_key_bytes('hmac-sha1') -> 20;
+mac_key_bytes('hmac-sha1-96') -> 20;
+mac_key_bytes('hmac-md5') -> 16;
+mac_key_bytes('hmac-md5-96') -> 16;
+mac_key_bytes('hmac-sha2-256')-> 32;
+mac_key_bytes('hmac-sha2-512')-> 64;
+mac_key_bytes('AEAD_AES_128_GCM') -> 0;
+mac_key_bytes('AEAD_AES_256_GCM') -> 0;
+mac_key_bytes(none) -> 0.
mac_digest_size('hmac-sha1') -> 20;
mac_digest_size('hmac-sha1-96') -> 12;
@@ -1671,6 +1728,13 @@ dh_group('diffie-hellman-group1-sha1') -> ?dh_group1;
dh_group('diffie-hellman-group14-sha1') -> ?dh_group14.
%%%----------------------------------------------------------------
+parallell_gen_key(Ssh = #ssh{keyex_key = {x, {G, P}},
+ algorithms = Algs}) ->
+ Sz = dh_bits(Algs),
+ {Public, Private} = generate_key(dh, [P,G,2*Sz]),
+ Ssh#ssh{keyex_key = {{Private, Public}, {G, P}}}.
+
+
generate_key(Algorithm, Args) ->
{Public,Private} = crypto:generate_key(Algorithm, Args),
{crypto:bytes_to_integer(Public), crypto:bytes_to_integer(Private)}.
@@ -1681,6 +1745,15 @@ compute_key(Algorithm, OthersPublic, MyPrivate, Args) ->
crypto:bytes_to_integer(Shared).
+dh_bits(#alg{encrypt = Encrypt,
+ send_mac = SendMac}) ->
+ C = cipher(Encrypt),
+ 8 * lists:max([C#cipher_data.key_bytes,
+ C#cipher_data.block_bytes,
+ C#cipher_data.iv_bytes,
+ mac_key_bytes(SendMac)
+ ]).
+
ecdh_curve('ecdh-sha2-nistp256') -> secp256r1;
ecdh_curve('ecdh-sha2-nistp384') -> secp384r1;
ecdh_curve('ecdh-sha2-nistp521') -> secp521r1.
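A worked example of the new exponent sizing (illustration only; the numbers come from the cipher/1 and mac_key_bytes/1 clauses added above): for aes256-ctr with hmac-sha2-256 the largest of the key, IV, block and MAC-key sizes is 32 bytes, so dh_bits/1 returns 8*32 = 256 and the DH private exponent is generated with 2*256 = 512 bits:

    %% Inside ssh_transport (dh_bits/1 and generate_key/2 are not exported);
    %% the unset #alg{} fields are irrelevant to dh_bits/1.
    Alg = #alg{encrypt = 'aes256-ctr', send_mac = 'hmac-sha2-256'},
    256 = dh_bits(Alg),
    {Public, Private} = generate_key(dh, [P, G, 2*256]).   % P, G from the negotiated group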
diff --git a/lib/ssh/test/property_test/ssh_eqc_encode_decode.erl b/lib/ssh/test/property_test/ssh_eqc_encode_decode.erl
index dc3b7dc7e6..8ca29b9399 100644
--- a/lib/ssh/test/property_test/ssh_eqc_encode_decode.erl
+++ b/lib/ssh/test/property_test/ssh_eqc_encode_decode.erl
@@ -54,15 +54,18 @@
-endif.
-endif.
+%% Public key records:
+-include_lib("public_key/include/public_key.hrl").
%%% Properties:
prop_ssh_decode() ->
- ?FORALL(Msg, ssh_msg(),
- try ssh_message:decode(Msg)
+ ?FORALL({Msg,KexFam}, ?LET(KF, kex_family(), {ssh_msg(KF),KF} ),
+ try ssh_message:decode(decode_state(Msg,KexFam))
of
_ -> true
catch
+
C:E -> io:format('~p:~p~n',[C,E]),
false
end
@@ -71,122 +74,101 @@ prop_ssh_decode() ->
%%% This fails because ssh_message is not symmetric in encode and decode regarding data types
prop_ssh_decode_encode() ->
- ?FORALL(Msg, ssh_msg(),
- Msg == ssh_message:encode(ssh_message:decode(Msg))
+ ?FORALL({Msg,KexFam}, ?LET(KF, kex_family(), {ssh_msg(KF),KF} ),
+ Msg == ssh_message:encode(
+ fix_asym(
+ ssh_message:decode(decode_state(Msg,KexFam))))
).
%%%================================================================
%%%
-%%% Scripts to generate message generators
-%%%
-
-%% awk '/^( |\t)+byte( |\t)+SSH/,/^( |\t)*$/{print}' rfc425?.txt | sed 's/^\( \|\\t\)*//' > msgs.txt
-
-%% awk '/^byte( |\t)+SSH/{print $2","}' < msgs.txt
-
-%% awk 'BEGIN{print "%%%---- BEGIN GENERATED";prev=0} END{print " >>.\n%%%---- END GENERATED"} /^byte( |\t)+SSH/{if (prev==1) print " >>.\n"; prev=1; printf "%c%s%c",39,$2,39; print "()->\n <<?"$2;next} /^string( |\t)+\"/{print " ,"$2;next} /^string( |\t)+.*address/{print " ,(ssh_string_address())/binary %%",$2,$3,$4,$5,$6;next}/^string( |\t)+.*US-ASCII/{print " ,(ssh_string_US_ASCII())/binary %%",$2,$3,$4,$5,$6;next} /^string( |\t)+.*UTF-8/{print " ,(ssh_string_UTF_8())/binary %% ",$2,$3,$4,$5,$6;next} /^[a-z0-9]+( |\t)/{print " ,(ssh_"$1"())/binary %%",$2,$3,$4,$5,$6;next} /^byte\[16\]( |\t)+/{print" ,(ssh_byte_16())/binary %%",$2,$3,$4,$5,$6;next} /^name-list( |\t)+/{print" ,(ssh_name_list())/binary %%",$2,$3,$4,$5,$6;next} /./{print "?? %%",$0}' < msgs.txt > gen.txt
-
-%%%================================================================
-%%%
%%% Generators
%%%
-ssh_msg() -> ?LET(M,oneof(
-[[msg_code('SSH_MSG_CHANNEL_CLOSE'),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_DATA'),gen_uint32(),gen_string( )],
- [msg_code('SSH_MSG_CHANNEL_EOF'),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_EXTENDED_DATA'),gen_uint32(),gen_uint32(),gen_string( )],
- [msg_code('SSH_MSG_CHANNEL_FAILURE'),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_OPEN'),gen_string("direct-tcpip"),gen_uint32(),gen_uint32(),gen_uint32(),gen_string( ),gen_uint32(),gen_string( ),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_OPEN'),gen_string("forwarded-tcpip"),gen_uint32(),gen_uint32(),gen_uint32(),gen_string( ),gen_uint32(),gen_string( ),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_OPEN'),gen_string("session"),gen_uint32(),gen_uint32(),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_OPEN'),gen_string("x11"),gen_uint32(),gen_uint32(),gen_uint32(),gen_string( ),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_OPEN'),gen_string( ),gen_uint32(),gen_uint32(),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_OPEN_CONFIRMATION'),gen_uint32(),gen_uint32(),gen_uint32(),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_OPEN_FAILURE'),gen_uint32(),gen_uint32(),gen_string( ),gen_string( )],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("env"),gen_boolean(),gen_string( ),gen_string( )],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("exec"),gen_boolean(),gen_string( )],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("exit-signal"),0,gen_string( ),gen_boolean(),gen_string( ),gen_string( )],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("exit-status"),0,gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("pty-req"),gen_boolean(),gen_string( ),gen_uint32(),gen_uint32(),gen_uint32(),gen_uint32(),gen_string( )],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("shell"),gen_boolean()],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("signal"),0,gen_string( )],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("subsystem"),gen_boolean(),gen_string( )],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("window-change"),0,gen_uint32(),gen_uint32(),gen_uint32(),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("x11-req"),gen_boolean(),gen_boolean(),gen_string( ),gen_string( ),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("xon-xoff"),0,gen_boolean()],
- [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string( ),gen_boolean()],
- [msg_code('SSH_MSG_CHANNEL_SUCCESS'),gen_uint32()],
- [msg_code('SSH_MSG_CHANNEL_WINDOW_ADJUST'),gen_uint32(),gen_uint32()],
-%%Assym [msg_code('SSH_MSG_DEBUG'),gen_boolean(),gen_string( ),gen_string( )],
- [msg_code('SSH_MSG_DISCONNECT'),gen_uint32(),gen_string( ),gen_string( )],
-%%Assym [msg_code('SSH_MSG_GLOBAL_REQUEST'),gen_string("cancel-tcpip-forward"),gen_boolean(),gen_string( ),gen_uint32()],
-%%Assym [msg_code('SSH_MSG_GLOBAL_REQUEST'),gen_string("tcpip-forward"),gen_boolean(),gen_string( ),gen_uint32()],
-%%Assym [msg_code('SSH_MSG_GLOBAL_REQUEST'),gen_string( ),gen_boolean()],
- [msg_code('SSH_MSG_IGNORE'),gen_string( )],
- %% [msg_code('SSH_MSG_KEXDH_INIT'),gen_mpint()],
- %% [msg_code('SSH_MSG_KEXDH_REPLY'),gen_string( ),gen_mpint(),gen_string( )],
- %% [msg_code('SSH_MSG_KEXINIT'),gen_byte(16),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_boolean(),gen_uint32()],
- [msg_code('SSH_MSG_KEX_DH_GEX_GROUP'),gen_mpint(),gen_mpint()],
- [msg_code('SSH_MSG_NEWKEYS')],
- [msg_code('SSH_MSG_REQUEST_FAILURE')],
- [msg_code('SSH_MSG_REQUEST_SUCCESS')],
- [msg_code('SSH_MSG_REQUEST_SUCCESS'),gen_uint32()],
- [msg_code('SSH_MSG_SERVICE_ACCEPT'),gen_string( )],
- [msg_code('SSH_MSG_SERVICE_REQUEST'),gen_string( )],
- [msg_code('SSH_MSG_UNIMPLEMENTED'),gen_uint32()],
- [msg_code('SSH_MSG_USERAUTH_BANNER'),gen_string( ),gen_string( )],
- [msg_code('SSH_MSG_USERAUTH_FAILURE'),gen_name_list(),gen_boolean()],
- [msg_code('SSH_MSG_USERAUTH_PASSWD_CHANGEREQ'),gen_string( ),gen_string( )],
- [msg_code('SSH_MSG_USERAUTH_PK_OK'),gen_string( ),gen_string( )],
- [msg_code('SSH_MSG_USERAUTH_SUCCESS')]
-]
-
-), list_to_binary(M)).
-
-
-%%%================================================================
-%%%
-%%% Generator
-%%%
-
-do() ->
- io_lib:format('[~s~n]',
- [write_gen(
- files(["rfc4254.txt",
- "rfc4253.txt",
- "rfc4419.txt",
- "rfc4252.txt",
- "rfc4256.txt"]))]).
-
-
-write_gen(L) when is_list(L) ->
- string:join(lists:map(fun write_gen/1, L), ",\n ");
-write_gen({MsgName,Args}) ->
- lists:flatten(["[",generate_args([MsgName|Args]),"]"]).
-
-generate_args(As) -> string:join([generate_arg(A) || A <- As], ",").
-
-generate_arg({<<"string">>, <<"\"",B/binary>>}) ->
- S = get_string($",B),
- ["gen_string(\"",S,"\")"];
-generate_arg({<<"string">>, _}) -> "gen_string( )";
-generate_arg({<<"byte[",B/binary>>, _}) ->
- io_lib:format("gen_byte(~p)",[list_to_integer(get_string($],B))]);
-generate_arg({<<"byte">> ,_}) -> "gen_byte()";
-generate_arg({<<"uint16">>,_}) -> "gen_uint16()";
-generate_arg({<<"uint32">>,_}) -> "gen_uint32()";
-generate_arg({<<"uint64">>,_}) -> "gen_uint64()";
-generate_arg({<<"mpint">>,_}) -> "gen_mpint()";
-generate_arg({<<"name-list">>,_}) -> "gen_name_list()";
-generate_arg({<<"boolean">>,<<"FALSE">>}) -> "0";
-generate_arg({<<"boolean">>,<<"TRUE">>}) -> "1";
-generate_arg({<<"boolean">>,_}) -> "gen_boolean()";
-generate_arg({<<"....">>,_}) -> ""; %% FIXME
-generate_arg(Name) when is_binary(Name) ->
- lists:flatten(["msg_code('",binary_to_list(Name),"')"]).
-
+ssh_msg(<<"dh">>) ->
+ ?LET(M,oneof(
+ [
+ [msg_code('SSH_MSG_KEXDH_INIT'),gen_mpint()], % 30
+ [msg_code('SSH_MSG_KEXDH_REPLY'),gen_pubkey_string(rsa),gen_mpint(),gen_signature_string(rsa)] % 31
+ | rest_ssh_msgs()
+ ]),
+ list_to_binary(M));
+
+ssh_msg(<<"dh_gex">>) ->
+ ?LET(M,oneof(
+ [
+ [msg_code('SSH_MSG_KEX_DH_GEX_REQUEST_OLD'),gen_uint32()], % 30
+ [msg_code('SSH_MSG_KEX_DH_GEX_GROUP'),gen_mpint(),gen_mpint()] % 31
+ | rest_ssh_msgs()
+ ]),
+ list_to_binary(M));
+
+ssh_msg(<<"ecdh">>) ->
+ ?LET(M,oneof(
+ [
+ [msg_code('SSH_MSG_KEX_ECDH_INIT'),gen_mpint()], % 30
+ [msg_code('SSH_MSG_KEX_ECDH_REPLY'),gen_pubkey_string(ecdsa),gen_mpint(),gen_signature_string(ecdsa)] % 31
+ | rest_ssh_msgs()
+ ]),
+ list_to_binary(M)).
+
+
+rest_ssh_msgs() ->
+ [%% SSH_MSG_USERAUTH_INFO_RESPONSE
+ %% hard args SSH_MSG_USERAUTH_INFO_REQUEST
+ %% rfc4252 p12 error SSH_MSG_USERAUTH_REQUEST
+ [msg_code('SSH_MSG_KEX_DH_GEX_REQUEST'),gen_uint32(),gen_uint32(),gen_uint32()],
+ [msg_code('SSH_MSG_KEX_DH_GEX_INIT'),gen_mpint()],
+ [msg_code('SSH_MSG_KEX_DH_GEX_REPLY'),gen_pubkey_string(rsa),gen_mpint(),gen_signature_string(rsa)],
+ [msg_code('SSH_MSG_CHANNEL_CLOSE'),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_DATA'),gen_uint32(),gen_string( )],
+ [msg_code('SSH_MSG_CHANNEL_EOF'),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_EXTENDED_DATA'),gen_uint32(),gen_uint32(),gen_string( )],
+ [msg_code('SSH_MSG_CHANNEL_FAILURE'),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_OPEN'),gen_string("direct-tcpip"),gen_uint32(),gen_uint32(),gen_uint32(),gen_string( ),gen_uint32(),gen_string( ),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_OPEN'),gen_string("forwarded-tcpip"),gen_uint32(),gen_uint32(),gen_uint32(),gen_string( ),gen_uint32(),gen_string( ),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_OPEN'),gen_string("session"),gen_uint32(),gen_uint32(),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_OPEN'),gen_string("x11"),gen_uint32(),gen_uint32(),gen_uint32(),gen_string( ),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_OPEN'),gen_string( ),gen_uint32(),gen_uint32(),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_OPEN_CONFIRMATION'),gen_uint32(),gen_uint32(),gen_uint32(),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_OPEN_FAILURE'),gen_uint32(),gen_uint32(),gen_string( ),gen_string( )],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("env"),gen_boolean(),gen_string( ),gen_string( )],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("exec"),gen_boolean(),gen_string( )],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("exit-signal"),0,gen_string( ),gen_boolean(),gen_string( ),gen_string( )],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("exit-status"),0,gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("pty-req"),gen_boolean(),gen_string( ),gen_uint32(),gen_uint32(),gen_uint32(),gen_uint32(),gen_string( )],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("shell"),gen_boolean()],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("signal"),0,gen_string( )],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("subsystem"),gen_boolean(),gen_string( )],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("window-change"),0,gen_uint32(),gen_uint32(),gen_uint32(),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("x11-req"),gen_boolean(),gen_boolean(),gen_string( ),gen_string( ),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string("xon-xoff"),0,gen_boolean()],
+ [msg_code('SSH_MSG_CHANNEL_REQUEST'),gen_uint32(),gen_string( ),gen_boolean()],
+ [msg_code('SSH_MSG_CHANNEL_SUCCESS'),gen_uint32()],
+ [msg_code('SSH_MSG_CHANNEL_WINDOW_ADJUST'),gen_uint32(),gen_uint32()],
+ [msg_code('SSH_MSG_DEBUG'),gen_boolean(),gen_string( ),gen_string( )],
+ [msg_code('SSH_MSG_DISCONNECT'),gen_uint32(),gen_string( ),gen_string( )],
+ [msg_code('SSH_MSG_GLOBAL_REQUEST'),gen_string("cancel-tcpip-forward"),gen_boolean(),gen_string( ),gen_uint32()],
+ [msg_code('SSH_MSG_GLOBAL_REQUEST'),gen_string("tcpip-forward"),gen_boolean(),gen_string( ),gen_uint32()],
+ [msg_code('SSH_MSG_GLOBAL_REQUEST'),gen_string( ),gen_boolean()],
+ [msg_code('SSH_MSG_IGNORE'),gen_string( )],
+ [msg_code('SSH_MSG_KEXINIT'),gen_byte(16),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_name_list(),gen_boolean(),gen_uint32()],
+ [msg_code('SSH_MSG_NEWKEYS')],
+ [msg_code('SSH_MSG_REQUEST_FAILURE')],
+ [msg_code('SSH_MSG_REQUEST_SUCCESS')],
+ [msg_code('SSH_MSG_REQUEST_SUCCESS'),gen_uint32()],
+ [msg_code('SSH_MSG_SERVICE_ACCEPT'),gen_string( )],
+ [msg_code('SSH_MSG_SERVICE_REQUEST'),gen_string( )],
+ [msg_code('SSH_MSG_UNIMPLEMENTED'),gen_uint32()],
+ [msg_code('SSH_MSG_USERAUTH_BANNER'),gen_string( ),gen_string( )],
+ [msg_code('SSH_MSG_USERAUTH_FAILURE'),gen_name_list(),gen_boolean()],
+ [msg_code('SSH_MSG_USERAUTH_PASSWD_CHANGEREQ'),gen_string( ),gen_string( )],
+ [msg_code('SSH_MSG_USERAUTH_PK_OK'),gen_string( ),gen_string( )],
+ [msg_code('SSH_MSG_USERAUTH_SUCCESS')]
+ ].
+
+kex_family() -> oneof([<<"dh">>, <<"dh_gex">>, <<"ecdh">>]).
gen_boolean() -> choose(0,1).
@@ -202,10 +184,7 @@ gen_byte(N) when N>0 -> [gen_byte() || _ <- lists:seq(1,N)].
gen_char() -> choose($a,$z).
-gen_mpint() -> ?LET(Size, choose(1,20),
- ?LET(Str, vector(Size, gen_byte()),
- gen_string( strip_0s(Str) )
- )).
+gen_mpint() -> ?LET(I, largeint(), ssh_bits:mpint(I)).
strip_0s([0|T]) -> strip_0s(T);
strip_0s(X) -> X.
@@ -230,13 +209,22 @@ gen_name() -> gen_string().
uint32_to_list(I) -> binary_to_list(<<I:32/unsigned-big-integer>>).
-%%%----
-get_string(Delim, B) ->
- binary_to_list( element(1, split_binary(B, count_string_chars(Delim,B,0))) ).
-
-count_string_chars(Delim, <<Delim,_/binary>>, Acc) -> Acc;
-count_string_chars(Delim, <<_,B/binary>>, Acc) -> count_string_chars(Delim, B, Acc+1).
+gen_pubkey_string(Type) ->
+ PubKey = case Type of
+ rsa -> #'RSAPublicKey'{modulus = 12345,publicExponent = 2};
+ ecdsa -> {#'ECPoint'{point=[1,2,3,4,5]},
+ {namedCurve,{1,2,840,10045,3,1,7}}} % 'secp256r1' nistp256
+ end,
+ gen_string(public_key:ssh_encode(PubKey, ssh2_pubkey)).
+
+gen_signature_string(Type) ->
+ Signature = <<"hejhopp">>,
+ Id = case Type of
+ rsa -> "ssh-rsa";
+ ecdsa -> "ecdsa-sha2-nistp256"
+ end,
+ gen_string(gen_string(Id) ++ gen_string(Signature)).
-define(MSG_CODE(Name,Num),
msg_code(Name) -> Num;
@@ -273,124 +261,34 @@ msg_code(Num) -> Name
?MSG_CODE('SSH_MSG_CHANNEL_FAILURE', 100);
?MSG_CODE('SSH_MSG_USERAUTH_INFO_REQUEST', 60);
?MSG_CODE('SSH_MSG_USERAUTH_INFO_RESPONSE', 61);
+?MSG_CODE('SSH_MSG_KEXDH_INIT', 30);
+?MSG_CODE('SSH_MSG_KEXDH_REPLY', 31);
?MSG_CODE('SSH_MSG_KEX_DH_GEX_REQUEST_OLD', 30);
?MSG_CODE('SSH_MSG_KEX_DH_GEX_REQUEST', 34);
?MSG_CODE('SSH_MSG_KEX_DH_GEX_GROUP', 31);
?MSG_CODE('SSH_MSG_KEX_DH_GEX_INIT', 32);
-?MSG_CODE('SSH_MSG_KEX_DH_GEX_REPLY', 33).
-
-%%%=============================================================================
-%%%=============================================================================
-%%%=============================================================================
-
-files(Fs) ->
- Defs = lists:usort(lists:flatten(lists:map(fun file/1, Fs))),
- DefinedIDs = lists:usort([binary_to_list(element(1,D)) || D <- Defs]),
- WantedIDs = lists:usort(wanted_messages()),
- Missing = WantedIDs -- DefinedIDs,
- case Missing of
- [] -> ok;
- _ -> io:format('%% Warning: missing ~p~n', [Missing])
- end,
- Defs.
-
-
-file(F) ->
- {ok,B} = file:read_file(F),
- hunt_msg_def(B).
-
-
-hunt_msg_def(<<"\n",B/binary>>) -> some_hope(skip_blanks(B));
-hunt_msg_def(<<_, B/binary>>) -> hunt_msg_def(B);
-hunt_msg_def(<<>>) -> [].
-
-some_hope(<<"byte ", B/binary>>) -> try_message(skip_blanks(B));
-some_hope(B) -> hunt_msg_def(B).
-
-try_message(B = <<"SSH_MSG_",_/binary>>) ->
- {ID,Rest} = get_id(B),
- case lists:member(binary_to_list(ID), wanted_messages()) of
- true ->
- {Lines,More} = get_def_lines(skip_blanks(Rest), []),
- [{ID,lists:reverse(Lines)} | hunt_msg_def(More)];
- false ->
- hunt_msg_def(Rest)
- end;
-try_message(B) -> hunt_msg_def(B).
-
-
-skip_blanks(<<32, B/binary>>) -> skip_blanks(B);
-skip_blanks(<< 9, B/binary>>) -> skip_blanks(B);
-skip_blanks(B) -> B.
-
-get_def_lines(B0 = <<"\n",B/binary>>, Acc) ->
- {ID,Rest} = get_id(skip_blanks(B)),
- case {size(ID), skip_blanks(Rest)} of
- {0,<<"....",More/binary>>} ->
- {Text,LineEnd} = get_to_eol(skip_blanks(More)),
- get_def_lines(LineEnd, [{<<"....">>,Text}|Acc]);
- {0,_} ->
- {Acc,B0};
- {_,Rest1} ->
- {Text,LineEnd} = get_to_eol(Rest1),
- get_def_lines(LineEnd, [{ID,Text}|Acc])
- end;
-get_def_lines(B, Acc) ->
- {Acc,B}.
-
-
-get_to_eol(B) -> split_binary(B, count_to_eol(B,0)).
-
-count_to_eol(<<"\n",_/binary>>, Acc) -> Acc;
-count_to_eol(<<>>, Acc) -> Acc;
-count_to_eol(<<_,B/binary>>, Acc) -> count_to_eol(B,Acc+1).
-
-
-get_id(B) -> split_binary(B, count_id_chars(B,0)).
-
-count_id_chars(<<C,B/binary>>, Acc) when $A=<C,C=<$Z -> count_id_chars(B,Acc+1);
-count_id_chars(<<C,B/binary>>, Acc) when $a=<C,C=<$z -> count_id_chars(B,Acc+1);
-count_id_chars(<<C,B/binary>>, Acc) when $0=<C,C=<$9 -> count_id_chars(B,Acc+1);
-count_id_chars(<<"_",B/binary>>, Acc) -> count_id_chars(B,Acc+1);
-count_id_chars(<<"-",B/binary>>, Acc) -> count_id_chars(B,Acc+1); %% e.g name-list
-count_id_chars(<<"[",B/binary>>, Acc) -> count_id_chars(B,Acc+1); %% e.g byte[16]
-count_id_chars(<<"]",B/binary>>, Acc) -> count_id_chars(B,Acc+1); %% e.g byte[16]
-count_id_chars(_, Acc) -> Acc.
-
-wanted_messages() ->
- ["SSH_MSG_CHANNEL_CLOSE",
- "SSH_MSG_CHANNEL_DATA",
- "SSH_MSG_CHANNEL_EOF",
- "SSH_MSG_CHANNEL_EXTENDED_DATA",
- "SSH_MSG_CHANNEL_FAILURE",
- "SSH_MSG_CHANNEL_OPEN",
- "SSH_MSG_CHANNEL_OPEN_CONFIRMATION",
- "SSH_MSG_CHANNEL_OPEN_FAILURE",
- "SSH_MSG_CHANNEL_REQUEST",
- "SSH_MSG_CHANNEL_SUCCESS",
- "SSH_MSG_CHANNEL_WINDOW_ADJUST",
- "SSH_MSG_DEBUG",
- "SSH_MSG_DISCONNECT",
- "SSH_MSG_GLOBAL_REQUEST",
- "SSH_MSG_IGNORE",
- "SSH_MSG_KEXDH_INIT",
- "SSH_MSG_KEXDH_REPLY",
- "SSH_MSG_KEXINIT",
- "SSH_MSG_KEX_DH_GEX_GROUP",
- "SSH_MSG_KEX_DH_GEX_REQUEST",
- "SSH_MSG_KEX_DH_GEX_REQUEST_OLD",
- "SSH_MSG_NEWKEYS",
- "SSH_MSG_REQUEST_FAILURE",
- "SSH_MSG_REQUEST_SUCCESS",
- "SSH_MSG_SERVICE_ACCEPT",
- "SSH_MSG_SERVICE_REQUEST",
- "SSH_MSG_UNIMPLEMENTED",
- "SSH_MSG_USERAUTH_BANNER",
- "SSH_MSG_USERAUTH_FAILURE",
-%% hard args "SSH_MSG_USERAUTH_INFO_REQUEST",
-%% "SSH_MSG_USERAUTH_INFO_RESPONSE",
- "SSH_MSG_USERAUTH_PASSWD_CHANGEREQ",
- "SSH_MSG_USERAUTH_PK_OK",
-%%rfc4252 p12 error "SSH_MSG_USERAUTH_REQUEST",
- "SSH_MSG_USERAUTH_SUCCESS"].
+?MSG_CODE('SSH_MSG_KEX_DH_GEX_REPLY', 33);
+?MSG_CODE('SSH_MSG_KEX_ECDH_INIT', 30);
+?MSG_CODE('SSH_MSG_KEX_ECDH_REPLY', 31).
+
+%%%====================================================
+%%%=== WARNING: Knowledge of the test object ahead! ===
+%%%====================================================
+
+%% SSH message records:
+-include_lib("ssh/src/ssh_connect.hrl").
+-include_lib("ssh/src/ssh_transport.hrl").
+
+%%% Encoding and decoding are asymmetric, so out=binary and in=string. Sometimes. :(
+fix_asym(#ssh_msg_global_request{name=N} = M) -> M#ssh_msg_global_request{name = binary_to_list(N)};
+fix_asym(#ssh_msg_debug{message=D,language=L} = M) -> M#ssh_msg_debug{message = binary_to_list(D),
+ language = binary_to_list(L)};
+fix_asym(#ssh_msg_kexinit{cookie=C} = M) -> M#ssh_msg_kexinit{cookie = <<C:128>>};
+fix_asym(M) -> M.
+
+%%% Message codes 30 and 31 are overloaded depending on kex family so arrange the decoder
+%%% input as the test object does
+decode_state(<<30,_/binary>>=Msg, KexFam) -> <<KexFam/binary, Msg/binary>>;
+decode_state(<<31,_/binary>>=Msg, KexFam) -> <<KexFam/binary, Msg/binary>>;
+decode_state(Msg, _) -> Msg.
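A small illustration of the clauses above (byte values chosen arbitrarily): a generated message whose code is 30 or 31 gets the kex family tag prepended before it reaches the decoder, while everything else passes through unchanged:

    <<"ecdh",30,1,2,3>> = decode_state(<<30,1,2,3>>, <<"ecdh">>),
    <<99,1,2,3>>        = decode_state(<<99,1,2,3>>, <<"ecdh">>).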
diff --git a/lib/ssh/test/ssh_algorithms_SUITE.erl b/lib/ssh/test/ssh_algorithms_SUITE.erl
index 8b2db0e1a8..4327068b7b 100644
--- a/lib/ssh/test/ssh_algorithms_SUITE.erl
+++ b/lib/ssh/test/ssh_algorithms_SUITE.erl
@@ -198,18 +198,14 @@ try_exec_simple_group(Group, Config) ->
%%--------------------------------------------------------------------
%% Testing all default groups
-simple_exec_groups() -> [{timetrap,{minutes,5}}].
-
simple_exec_groups(Config) ->
Sizes = interpolate( public_key:dh_gex_group_sizes() ),
lists:foreach(
fun(Sz) ->
ct:log("Try size ~p",[Sz]),
ct:comment(Sz),
- case simple_exec_group(Sz, Config) of
- expected -> ct:log("Size ~p ok",[Sz]);
- _ -> ct:log("Size ~p not ok",[Sz])
- end
+ simple_exec_group(Sz, Config),
+ ct:log("Size ~p ok",[Sz])
end, Sizes),
ct:comment("~p",[lists:map(fun({_,I,_}) -> I;
(I) -> I
diff --git a/lib/ssh/test/ssh_benchmark_SUITE.erl b/lib/ssh/test/ssh_benchmark_SUITE.erl
index c2bfc48449..c5a6447839 100644
--- a/lib/ssh/test/ssh_benchmark_SUITE.erl
+++ b/lib/ssh/test/ssh_benchmark_SUITE.erl
@@ -30,7 +30,7 @@
suite() -> [{ct_hooks,[{ts_install_cth,[{nodenames,2}]}]},
- {timetrap,{minutes,3}}
+ {timetrap,{minutes,6}}
].
%%suite() -> [{ct_hooks,[ts_install_cth]}].
diff --git a/lib/ssh/test/ssh_options_SUITE.erl b/lib/ssh/test/ssh_options_SUITE.erl
index 8f060bebd8..86f5cb1746 100644
--- a/lib/ssh/test/ssh_options_SUITE.erl
+++ b/lib/ssh/test/ssh_options_SUITE.erl
@@ -831,10 +831,13 @@ supported_hash(HashAlg) ->
really_do_hostkey_fingerprint_check(Config, HashAlg) ->
PrivDir = proplists:get_value(priv_dir, Config),
- UserDir = filename:join(PrivDir, nopubkey), % to make sure we don't use public-key-auth
- file:make_dir(UserDir),
+ UserDirServer = filename:join(PrivDir, nopubkey), % to make sure we don't use public-key-auth
+ file:make_dir(UserDirServer),
SysDir = proplists:get_value(data_dir, Config),
+ UserDirClient =
+        ssh_test_lib:create_random_dir(Config), % Ensure no 'known_hosts' file interferes
+
%% All host key fingerprints. Trust that public_key has checked the ssh_hostkey_fingerprint
%% function since that function is used by the ssh client...
FPs = [case HashAlg of
@@ -857,7 +860,7 @@ really_do_hostkey_fingerprint_check(Config, HashAlg) ->
%% Start daemon with the public keys that we got fingerprints from
{Pid, Host, Port} = ssh_test_lib:daemon([{system_dir, SysDir},
- {user_dir, UserDir},
+ {user_dir, UserDirServer},
{password, "morot"}]),
FP_check_fun = fun(PeerName, FP) ->
@@ -876,7 +879,7 @@ really_do_hostkey_fingerprint_check(Config, HashAlg) ->
end},
{user, "foo"},
{password, "morot"},
- {user_dir, UserDir},
+ {user_dir, UserDirClient},
{user_interaction, false}]),
ssh:stop_daemon(Pid).
diff --git a/lib/ssh/test/ssh_property_test_SUITE.erl b/lib/ssh/test/ssh_property_test_SUITE.erl
index 7ba2732a88..9b2a84d8e4 100644
--- a/lib/ssh/test/ssh_property_test_SUITE.erl
+++ b/lib/ssh/test/ssh_property_test_SUITE.erl
@@ -68,9 +68,6 @@ init_per_group(_, Config) ->
end_per_group(_, Config) ->
Config.
-%%% Always skip the testcase that is not quite in phase with the
-%%% ssh_message.erl code
-init_per_testcase(decode_encode, _) -> {skip, "Fails - testcase is not ok"};
init_per_testcase(_TestCase, Config) -> Config.
end_per_testcase(_TestCase, Config) -> Config.
diff --git a/lib/ssh/test/ssh_sftp_SUITE.erl b/lib/ssh/test/ssh_sftp_SUITE.erl
index 70662f5d93..acf76157a2 100644
--- a/lib/ssh/test/ssh_sftp_SUITE.erl
+++ b/lib/ssh/test/ssh_sftp_SUITE.erl
@@ -1038,7 +1038,7 @@ oldprep(Config) ->
prepare(Config0) ->
PrivDir = proplists:get_value(priv_dir, Config0),
- Dir = filename:join(PrivDir, random_chars(10)),
+ Dir = filename:join(PrivDir, ssh_test_lib:random_chars(10)),
file:make_dir(Dir),
Keys = [filename,
testfile,
@@ -1058,8 +1058,6 @@ prepare(Config0) ->
[{sftp_priv_dir,Dir} | Config2].
-random_chars(N) -> [crypto:rand_uniform($a,$z) || _<-lists:duplicate(N,x)].
-
foldl_keydelete(Keys, L) ->
lists:foldl(fun(K,E) -> lists:keydelete(K,1,E) end,
L,
diff --git a/lib/ssh/test/ssh_test_lib.erl b/lib/ssh/test/ssh_test_lib.erl
index f93237f3e7..286ac6e882 100644
--- a/lib/ssh/test/ssh_test_lib.erl
+++ b/lib/ssh/test/ssh_test_lib.erl
@@ -113,19 +113,27 @@ std_simple_exec(Host, Port, Config) ->
std_simple_exec(Host, Port, Config, []).
std_simple_exec(Host, Port, Config, Opts) ->
+ ct:log("~p:~p std_simple_exec",[?MODULE,?LINE]),
ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, Opts),
+ ct:log("~p:~p connected! ~p",[?MODULE,?LINE,ConnectionRef]),
{ok, ChannelId} = ssh_connection:session_channel(ConnectionRef, infinity),
- success = ssh_connection:exec(ConnectionRef, ChannelId, "23+21-2.", infinity),
- Data = {ssh_cm, ConnectionRef, {data, ChannelId, 0, <<"42\n">>}},
- case ssh_test_lib:receive_exec_result(Data) of
- expected ->
- ok;
- Other ->
- ct:fail(Other)
- end,
- ssh_test_lib:receive_exec_end(ConnectionRef, ChannelId),
- ssh:close(ConnectionRef).
-
+ ct:log("~p:~p session_channel ok ~p",[?MODULE,?LINE,ChannelId]),
+ ExecResult = ssh_connection:exec(ConnectionRef, ChannelId, "23+21-2.", infinity),
+ ct:log("~p:~p exec ~p",[?MODULE,?LINE,ExecResult]),
+ case ExecResult of
+ success ->
+ Expected = {ssh_cm, ConnectionRef, {data,ChannelId,0,<<"42\n">>}},
+ case receive_exec_result(Expected) of
+ expected ->
+ ok;
+ Other ->
+ ct:fail(Other)
+ end,
+ receive_exec_end(ConnectionRef, ChannelId),
+ ssh:close(ConnectionRef);
+ _ ->
+ ct:fail(ExecResult)
+ end.
start_shell(Port, IOServer) ->
start_shell(Port, IOServer, []).
@@ -834,3 +842,20 @@ get_kex_init(Conn, Ref, TRef) ->
end
end.
+%%%----------------------------------------------------------------
+%%% Return a string with N random characters
+%%%
+random_chars(N) -> [crypto:rand_uniform($a,$z) || _<-lists:duplicate(N,x)].
+
+
+create_random_dir(Config) ->
+ PrivDir = proplists:get_value(priv_dir, Config),
+ Name = filename:join(PrivDir, random_chars(15)),
+ case file:make_dir(Name) of
+ ok ->
+ Name;
+ {error,eexist} ->
+ %% The Name already denotes an existing file system object, try again.
+            %% The likelihood of always generating an existing file name is low
+ create_random_dir(Config)
+ end.
diff --git a/lib/ssh/test/ssh_upgrade_SUITE.erl b/lib/ssh/test/ssh_upgrade_SUITE.erl
index b5b27c369a..7b9b109fa1 100644
--- a/lib/ssh/test/ssh_upgrade_SUITE.erl
+++ b/lib/ssh/test/ssh_upgrade_SUITE.erl
@@ -199,6 +199,4 @@ close(#state{server = Server,
connection = undefined}.
-random_contents() -> list_to_binary( random_chars(3) ).
-
-random_chars(N) -> [crypto:rand_uniform($a,$z) || _<-lists:duplicate(N,x)].
+random_contents() -> list_to_binary( ssh_test_lib:random_chars(3) ).
diff --git a/lib/ssl/doc/src/ssl.xml b/lib/ssl/doc/src/ssl.xml
index edc7e0d8b2..916b41742e 100644
--- a/lib/ssl/doc/src/ssl.xml
+++ b/lib/ssl/doc/src/ssl.xml
@@ -424,6 +424,14 @@ marker="public_key:public_key#pkix_path_validation-3">public_key:pkix_path_valid
</taglist>
</item>
+
+ <tag><c>max_handshake_size</c></tag>
+ <item>
+ <p>Integer (24 bits unsigned). Used to limit the size of
+ valid TLS handshake packets to avoid DoS attacks.
+ Defaults to 256*1024.</p>
+ </item>
+
</taglist>
</item>
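A hedged client-side sketch of the new option (host, port and the 64 kB limit are placeholders, not taken from this change):

    %% Reject TLS handshake messages larger than 64 kB instead of the 256*1024 default.
    {ok, TLSSocket} = ssl:connect("server.example.com", 443,
                                  [{max_handshake_size, 64*1024}]).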
diff --git a/lib/ssl/src/Makefile b/lib/ssl/src/Makefile
index 3dda1a3316..2e7df9792e 100644
--- a/lib/ssl/src/Makefile
+++ b/lib/ssl/src/Makefile
@@ -48,9 +48,17 @@ MODULES= \
dtls \
ssl_alert \
ssl_app \
- ssl_dist_sup\
ssl_sup \
+ ssl_admin_sup\
+ tls_connection_sup \
+ ssl_connection_sup \
+ ssl_listen_tracker_sup\
+ dtls_connection_sup \
+ dtls_udp_listener\
dtls_udp_sup \
+ ssl_dist_sup\
+ ssl_dist_admin_sup\
+ ssl_dist_connection_sup\
inet_tls_dist \
inet6_tls_dist \
ssl_certificate\
@@ -61,21 +69,18 @@ MODULES= \
dtls_connection \
ssl_config \
ssl_connection \
- tls_connection_sup \
- dtls_connection_sup \
tls_handshake \
dtls_handshake\
ssl_handshake\
ssl_manager \
ssl_session \
ssl_session_cache \
+ ssl_pem_cache \
ssl_crl\
ssl_crl_cache \
ssl_crl_hash_dir \
tls_socket \
dtls_socket \
- dtls_udp_listener\
- ssl_listen_tracker_sup \
tls_record \
dtls_record \
ssl_record \
diff --git a/lib/ssl/src/ssl.app.src b/lib/ssl/src/ssl.app.src
index 9c5d795848..148989174d 100644
--- a/lib/ssl/src/ssl.app.src
+++ b/lib/ssl/src/ssl.app.src
@@ -10,12 +10,14 @@
tls_v1,
ssl_v3,
ssl_v2,
+ tls_connection_sup,
%% DTLS
dtls_connection,
dtls_handshake,
dtls_record,
dtls_socket,
dtls_v1,
+ dtls_connection_sup,
dtls_udp_listener,
dtls_udp_sup,
%% API
@@ -31,16 +33,19 @@
ssl_cipher,
ssl_srp_primes,
ssl_alert,
- ssl_listen_tracker_sup,
+ ssl_listen_tracker_sup, %% may be used by DTLS over SCTP
%% Erlang Distribution over SSL/TLS
inet_tls_dist,
inet6_tls_dist,
ssl_tls_dist_proxy,
ssl_dist_sup,
- %% SSL/TLS session handling
+ ssl_dist_connection_sup,
+ ssl_dist_admin_sup,
+ %% SSL/TLS session and cert handling
ssl_session,
ssl_session_cache,
ssl_manager,
+ ssl_pem_cache,
ssl_pkix_db,
ssl_certificate,
%% CRL handling
@@ -51,8 +56,8 @@
%% App structure
ssl_app,
ssl_sup,
- tls_connection_sup,
- dtls_connection_sup
+ ssl_admin_sup,
+ ssl_connection_sup
]},
{registered, [ssl_sup, ssl_manager]},
{applications, [crypto, public_key, kernel, stdlib]},
diff --git a/lib/ssl/src/ssl.erl b/lib/ssl/src/ssl.erl
index c72ee44a95..4a5a7e25ea 100644
--- a/lib/ssl/src/ssl.erl
+++ b/lib/ssl/src/ssl.erl
@@ -577,7 +577,7 @@ prf(#sslsocket{pid = {Listen,_}}, _,_,_,_) when is_port(Listen) ->
%% Description: Clear the PEM cache
%%--------------------------------------------------------------------
clear_pem_cache() ->
- ssl_manager:clear_pem_cache().
+ ssl_pem_cache:clear().
%%---------------------------------------------------------------
-spec format_error({error, term()}) -> list().
@@ -765,7 +765,8 @@ handle_options(Opts0, Role) ->
client, Role),
crl_check = handle_option(crl_check, Opts, false),
crl_cache = handle_option(crl_cache, Opts, {ssl_crl_cache, {internal, []}}),
- v2_hello_compatible = handle_option(v2_hello_compatible, Opts, false)
+ v2_hello_compatible = handle_option(v2_hello_compatible, Opts, false),
+ max_handshake_size = handle_option(max_handshake_size, Opts, ?DEFAULT_MAX_HANDSHAKE_SIZE)
},
CbInfo = proplists:get_value(cb_info, Opts, default_cb_info(Protocol)),
@@ -780,7 +781,8 @@ handle_options(Opts0, Role) ->
alpn_preferred_protocols, next_protocols_advertised,
client_preferred_next_protocols, log_alert,
server_name_indication, honor_cipher_order, padding_check, crl_check, crl_cache,
- fallback, signature_algs, eccs, honor_ecc_order, beast_mitigation, v2_hello_compatible],
+ fallback, signature_algs, eccs, honor_ecc_order, beast_mitigation, v2_hello_compatible,
+ max_handshake_size],
SockOpts = lists:foldl(fun(Key, PropList) ->
proplists:delete(Key, PropList)
@@ -1028,6 +1030,8 @@ validate_option(beast_mitigation, Value) when Value == one_n_minus_one orelse
Value;
validate_option(v2_hello_compatible, Value) when is_boolean(Value) ->
Value;
+validate_option(max_handshake_size, Value) when is_integer(Value) andalso Value =< ?MAX_UNIT24 ->
+ Value;
validate_option(Opt, Value) ->
throw({error, {options, {Opt, Value}}}).
diff --git a/lib/ssl/src/ssl_admin_sup.erl b/lib/ssl/src/ssl_admin_sup.erl
new file mode 100644
index 0000000000..9c96435753
--- /dev/null
+++ b/lib/ssl/src/ssl_admin_sup.erl
@@ -0,0 +1,95 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+
+-module(ssl_admin_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0, manager_opts/0]).
+
+%% Supervisor callback
+-export([init/1]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+
+init([]) ->
+ PEMCache = pem_cache_child_spec(),
+ SessionCertManager = session_and_cert_manager_child_spec(),
+ {ok, {{rest_for_one, 10, 3600}, [PEMCache, SessionCertManager]}}.
+
+manager_opts() ->
+ CbOpts = case application:get_env(ssl, session_cb) of
+ {ok, Cb} when is_atom(Cb) ->
+ InitArgs = session_cb_init_args(),
+ [{session_cb, Cb}, {session_cb_init_args, InitArgs}];
+ _ ->
+ []
+ end,
+ case application:get_env(ssl, session_lifetime) of
+ {ok, Time} when is_integer(Time) ->
+ [{session_lifetime, Time}| CbOpts];
+ _ ->
+ CbOpts
+ end.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+
+pem_cache_child_spec() ->
+ Name = ssl_pem_cache,
+ StartFunc = {ssl_pem_cache, start_link, [[]]},
+ Restart = permanent,
+ Shutdown = 4000,
+ Modules = [ssl_pem_cache],
+ Type = worker,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+session_and_cert_manager_child_spec() ->
+ Opts = manager_opts(),
+ Name = ssl_manager,
+ StartFunc = {ssl_manager, start_link, [Opts]},
+ Restart = permanent,
+ Shutdown = 4000,
+ Modules = [ssl_manager],
+ Type = worker,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+session_cb_init_args() ->
+ case application:get_env(ssl, session_cb_init_args) of
+ {ok, Args} when is_list(Args) ->
+ Args;
+ _ ->
+ []
+ end.
diff --git a/lib/ssl/src/ssl_certificate.erl b/lib/ssl/src/ssl_certificate.erl
index f359655d85..8aa2aa4081 100644
--- a/lib/ssl/src/ssl_certificate.erl
+++ b/lib/ssl/src/ssl_certificate.erl
@@ -125,21 +125,21 @@ file_to_crls(File, DbHandle) ->
%% Description: Validates ssl/tls specific extensions
%%--------------------------------------------------------------------
validate(_,{extension, #'Extension'{extnID = ?'id-ce-extKeyUsage',
- extnValue = KeyUse}}, {Role, _,_, _, _}) ->
+ extnValue = KeyUse}}, UserState = {Role, _,_, _, _}) ->
case is_valid_extkey_usage(KeyUse, Role) of
true ->
- {valid, Role};
+ {valid, UserState};
false ->
{fail, {bad_cert, invalid_ext_key_usage}}
end;
-validate(_, {extension, _}, Role) ->
- {unknown, Role};
+validate(_, {extension, _}, UserState) ->
+ {unknown, UserState};
validate(_, {bad_cert, _} = Reason, _) ->
{fail, Reason};
-validate(_, valid, Role) ->
- {valid, Role};
-validate(_, valid_peer, Role) ->
- {valid, Role}.
+validate(_, valid, UserState) ->
+ {valid, UserState};
+validate(_, valid_peer, UserState) ->
+ {valid, UserState}.
%%--------------------------------------------------------------------
-spec is_valid_key_usage(list(), term()) -> boolean().
diff --git a/lib/ssl/src/ssl_config.erl b/lib/ssl/src/ssl_config.erl
index 0652d029c3..09d4c3e678 100644
--- a/lib/ssl/src/ssl_config.erl
+++ b/lib/ssl/src/ssl_config.erl
@@ -32,18 +32,20 @@ init(SslOpts, Role) ->
init_manager_name(SslOpts#ssl_options.erl_dist),
- {ok, CertDbRef, CertDbHandle, FileRefHandle, PemCacheHandle, CacheHandle, CRLDbHandle, OwnCert}
+ {ok, #{pem_cache := PemCache} = Config}
= init_certificates(SslOpts, Role),
PrivateKey =
- init_private_key(PemCacheHandle, SslOpts#ssl_options.key, SslOpts#ssl_options.keyfile,
+ init_private_key(PemCache, SslOpts#ssl_options.key, SslOpts#ssl_options.keyfile,
SslOpts#ssl_options.password, Role),
- DHParams = init_diffie_hellman(PemCacheHandle, SslOpts#ssl_options.dh, SslOpts#ssl_options.dhfile, Role),
- {ok, CertDbRef, CertDbHandle, FileRefHandle, CacheHandle, CRLDbHandle, OwnCert, PrivateKey, DHParams}.
+ DHParams = init_diffie_hellman(PemCache, SslOpts#ssl_options.dh, SslOpts#ssl_options.dhfile, Role),
+ {ok, Config#{private_key => PrivateKey, dh_params => DHParams}}.
init_manager_name(false) ->
- put(ssl_manager, ssl_manager:manager_name(normal));
+ put(ssl_manager, ssl_manager:name(normal)),
+ put(ssl_pem_cache, ssl_pem_cache:name(normal));
init_manager_name(true) ->
- put(ssl_manager, ssl_manager:manager_name(dist)).
+ put(ssl_manager, ssl_manager:name(dist)),
+ put(ssl_pem_cache, ssl_pem_cache:name(dist)).
init_certificates(#ssl_options{cacerts = CaCerts,
cacertfile = CACertFile,
@@ -51,7 +53,7 @@ init_certificates(#ssl_options{cacerts = CaCerts,
cert = Cert,
crl_cache = CRLCache
}, Role) ->
- {ok, CertDbRef, CertDbHandle, FileRefHandle, PemCacheHandle, CacheHandle, CRLDbInfo} =
+ {ok, Config} =
try
Certs = case CaCerts of
undefined ->
@@ -59,41 +61,37 @@ init_certificates(#ssl_options{cacerts = CaCerts,
_ ->
{der, CaCerts}
end,
- {ok, _, _, _, _, _, _} = ssl_manager:connection_init(Certs, Role, CRLCache)
+ {ok,_} = ssl_manager:connection_init(Certs, Role, CRLCache)
catch
_:Reason ->
file_error(CACertFile, {cacertfile, Reason})
end,
- init_certificates(Cert, CertDbRef, CertDbHandle, FileRefHandle, PemCacheHandle,
- CacheHandle, CRLDbInfo, CertFile, Role).
+ init_certificates(Cert, Config, CertFile, Role).
-init_certificates(undefined, CertDbRef, CertDbHandle, FileRefHandle, PemCacheHandle, CacheHandle,
- CRLDbInfo, <<>>, _) ->
- {ok, CertDbRef, CertDbHandle, FileRefHandle, PemCacheHandle, CacheHandle, CRLDbInfo, undefined};
+init_certificates(undefined, Config, <<>>, _) ->
+ {ok, Config#{own_certificate => undefined}};
-init_certificates(undefined, CertDbRef, CertDbHandle, FileRefHandle, PemCacheHandle,
- CacheHandle, CRLDbInfo, CertFile, client) ->
+init_certificates(undefined, #{pem_cache := PemCache} = Config, CertFile, client) ->
try
%% Ignoring potential proxy-certificates see:
%% http://dev.globus.org/wiki/Security/ProxyFileFormat
- [OwnCert|_] = ssl_certificate:file_to_certificats(CertFile, PemCacheHandle),
- {ok, CertDbRef, CertDbHandle, FileRefHandle, PemCacheHandle, CacheHandle, CRLDbInfo, OwnCert}
+ [OwnCert|_] = ssl_certificate:file_to_certificats(CertFile, PemCache),
+ {ok, Config#{own_certificate => OwnCert}}
catch _Error:_Reason ->
- {ok, CertDbRef, CertDbHandle, FileRefHandle, PemCacheHandle, CacheHandle, CRLDbInfo, undefined}
- end;
+ {ok, Config#{own_certificate => undefined}}
+ end;
-init_certificates(undefined, CertDbRef, CertDbHandle, FileRefHandle,
- PemCacheHandle, CacheRef, CRLDbInfo, CertFile, server) ->
+init_certificates(undefined, #{pem_cache := PemCache} = Config, CertFile, server) ->
try
- [OwnCert|_] = ssl_certificate:file_to_certificats(CertFile, PemCacheHandle),
- {ok, CertDbRef, CertDbHandle, FileRefHandle, PemCacheHandle, CacheRef, CRLDbInfo, OwnCert}
+ [OwnCert|_] = ssl_certificate:file_to_certificats(CertFile, PemCache),
+ {ok, Config#{own_certificate => OwnCert}}
catch
_:Reason ->
file_error(CertFile, {certfile, Reason})
end;
-init_certificates(Cert, CertDbRef, CertDbHandle, FileRefHandle, PemCacheHandle, CacheRef, CRLDbInfo, _, _) ->
- {ok, CertDbRef, CertDbHandle, FileRefHandle, PemCacheHandle, CacheRef, CRLDbInfo, Cert}.
-
+init_certificates(Cert, Config, _, _) ->
+ {ok, Config#{own_certificate => Cert}}.
+
init_private_key(_, undefined, <<>>, _Password, _Client) ->
undefined;
init_private_key(DbHandle, undefined, KeyFile, Password, _) ->
@@ -135,6 +133,8 @@ file_error(File, Throw) ->
case Throw of
{Opt,{badmatch, {error, {badmatch, Error}}}} ->
throw({options, {Opt, binary_to_list(File), Error}});
+ {Opt, {badmatch, Error}} ->
+ throw({options, {Opt, binary_to_list(File), Error}});
_ ->
throw(Throw)
end.
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index 6e7c8c5ddd..4fbac4cad3 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -323,8 +323,14 @@ handle_session(#server_hello{cipher_suite = CipherSuite,
-spec ssl_config(#ssl_options{}, client | server, #state{}) -> #state{}.
%%--------------------------------------------------------------------
ssl_config(Opts, Role, State) ->
- {ok, Ref, CertDbHandle, FileRefHandle, CacheHandle, CRLDbInfo,
- OwnCert, Key, DHParams} =
+ {ok, #{cert_db_ref := Ref,
+ cert_db_handle := CertDbHandle,
+ fileref_db_handle := FileRefHandle,
+ session_cache := CacheHandle,
+ crl_db_info := CRLDbHandle,
+ private_key := Key,
+ dh_params := DHParams,
+ own_certificate := OwnCert}} =
ssl_config:init(Opts, Role),
Handshake = ssl_handshake:init_handshake_history(),
TimeStamp = erlang:monotonic_time(),
@@ -335,7 +341,7 @@ ssl_config(Opts, Role, State) ->
file_ref_db = FileRefHandle,
cert_db_ref = Ref,
cert_db = CertDbHandle,
- crl_db = CRLDbInfo,
+ crl_db = CRLDbHandle,
session_cache = CacheHandle,
private_key = Key,
diffie_hellman_params = DHParams,
@@ -864,11 +870,11 @@ handle_call({close, {Pid, Timeout}}, From, StateName, State0, Connection) when i
%% When downgrading an TLS connection to a transport connection
%% we must recive the close alert from the peer before releasing the
%% transport socket.
- {next_state, downgrade, State, [{timeout, Timeout, downgrade}]};
+ {next_state, downgrade, State#state{terminated = true}, [{timeout, Timeout, downgrade}]};
handle_call({close, _} = Close, From, StateName, State, Connection) ->
%% Run terminate before returning so that the reuseaddr
- %% inet-option
- Result = Connection:terminate(Close, StateName, State),
+ %% inet-option works properly
+ Result = Connection:terminate(Close, StateName, State#state{terminated = true}),
{stop_and_reply, {shutdown, normal},
{reply, From, Result}, State};
handle_call({shutdown, How0}, From, _,
@@ -1010,7 +1016,10 @@ handle_info(Msg, StateName, #state{socket = Socket, error_tag = Tag} = State) ->
terminate(_, _, #state{terminated = true}) ->
%% Happens when user closes the connection using ssl:close/1
%% we want to guarantee that Transport:close has been called
- %% when ssl:close/1 returns.
+    %% when ssl:close/1 returns, unless it is a downgrade where
+    %% we want to guarantee that the close alert is received before
+    %% returning. In both cases terminate has been run manually
+    %% before being run by gen_statem, which ends up here
ok;
terminate({shutdown, transport_closed} = Reason,
@@ -2425,16 +2434,23 @@ handle_sni_extension(#sni{hostname = Hostname}, State0) ->
undefined ->
State0;
_ ->
- {ok, Ref, CertDbHandle, FileRefHandle, CacheHandle, CRLDbHandle, OwnCert, Key, DHParams} =
- ssl_config:init(NewOptions, State0#state.role),
- State0#state{
- session = State0#state.session#session{own_certificate = OwnCert},
- file_ref_db = FileRefHandle,
- cert_db_ref = Ref,
- cert_db = CertDbHandle,
- crl_db = CRLDbHandle,
- session_cache = CacheHandle,
- private_key = Key,
+ {ok, #{cert_db_ref := Ref,
+ cert_db_handle := CertDbHandle,
+ fileref_db_handle := FileRefHandle,
+ session_cache := CacheHandle,
+ crl_db_info := CRLDbHandle,
+ private_key := Key,
+ dh_params := DHParams,
+ own_certificate := OwnCert}} =
+ ssl_config:init(NewOptions, State0#state.role),
+ State0#state{
+ session = State0#state.session#session{own_certificate = OwnCert},
+ file_ref_db = FileRefHandle,
+ cert_db_ref = Ref,
+ cert_db = CertDbHandle,
+ crl_db = CRLDbHandle,
+ session_cache = CacheHandle,
+ private_key = Key,
diffie_hellman_params = DHParams,
ssl_options = NewOptions,
sni_hostname = Hostname
diff --git a/lib/ssl/src/ssl_connection_sup.erl b/lib/ssl/src/ssl_connection_sup.erl
new file mode 100644
index 0000000000..1a1f43e683
--- /dev/null
+++ b/lib/ssl/src/ssl_connection_sup.erl
@@ -0,0 +1,101 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+
+-module(ssl_connection_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0]).
+
+%% Supervisor callback
+-export([init/1]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+
+init([]) ->
+
+ TLSConnetionManager = tls_connection_manager_child_spec(),
+ %% Handles emulated options so that they are inherited by the
+ %% accept socket, even when setopts is performed on the listen socket
+ ListenOptionsTracker = listen_options_tracker_child_spec(),
+
+ DTLSConnetionManager = dtls_connection_manager_child_spec(),
+ DTLSUdpListeners = dtls_udp_listeners_spec(),
+
+ {ok, {{one_for_one, 10, 3600}, [TLSConnetionManager,
+ ListenOptionsTracker,
+ DTLSConnetionManager,
+ DTLSUdpListeners
+ ]}}.
+
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+
+tls_connection_manager_child_spec() ->
+ Name = tls_connection,
+ StartFunc = {tls_connection_sup, start_link, []},
+ Restart = permanent,
+ Shutdown = 4000,
+ Modules = [tls_connection_sup],
+ Type = supervisor,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+dtls_connection_manager_child_spec() ->
+ Name = dtls_connection,
+ StartFunc = {dtls_connection_sup, start_link, []},
+ Restart = permanent,
+ Shutdown = 4000,
+ Modules = [dtls_connection_sup],
+ Type = supervisor,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+listen_options_tracker_child_spec() ->
+ Name = tls_socket,
+ StartFunc = {ssl_listen_tracker_sup, start_link, []},
+ Restart = permanent,
+ Shutdown = 4000,
+ Modules = [tls_socket],
+ Type = supervisor,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+dtls_udp_listeners_spec() ->
+ Name = dtls_udp_listener,
+ StartFunc = {dtls_udp_sup, start_link, []},
+ Restart = permanent,
+ Shutdown = 4000,
+ Modules = [],
+ Type = supervisor,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
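The child specifications above use the classic six-tuple form {Id, StartFunc, Restart, Shutdown, Type, Modules}. As a rough illustration only (not part of the patch), the TLS entry could equally be written with the map-based child-spec syntax that supervisor also accepts:

%% Illustration: map-based equivalent of tls_connection_manager_child_spec/0.
%% The values mirror the tuple spec above; this helper is hypothetical.
tls_connection_manager_child_spec_map() ->
    #{id       => tls_connection,
      start    => {tls_connection_sup, start_link, []},
      restart  => permanent,
      shutdown => 4000,
      type     => supervisor,
      modules  => [tls_connection_sup]}.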
diff --git a/lib/ssl/src/ssl_crl.erl b/lib/ssl/src/ssl_crl.erl
index fc60bdba67..33375b5e09 100644
--- a/lib/ssl/src/ssl_crl.erl
+++ b/lib/ssl/src/ssl_crl.erl
@@ -29,7 +29,7 @@
-export([trusted_cert_and_path/3]).
-trusted_cert_and_path(CRL, {SerialNumber, Issuer},{Db, DbRef} = DbHandle) ->
+trusted_cert_and_path(CRL, {SerialNumber, Issuer},{_, {Db, DbRef}} = DbHandle) ->
case ssl_pkix_db:lookup_trusted_cert(Db, DbRef, SerialNumber, Issuer) of
undefined ->
trusted_cert_and_path(CRL, issuer_not_found, DbHandle);
@@ -37,17 +37,34 @@ trusted_cert_and_path(CRL, {SerialNumber, Issuer},{Db, DbRef} = DbHandle) ->
{ok, Root, Chain} = ssl_certificate:certificate_chain(OtpCert, Db, DbRef),
{ok, Root, lists:reverse(Chain)}
end;
-
-trusted_cert_and_path(CRL, issuer_not_found, {Db, DbRef} = DbHandle) ->
- case find_issuer(CRL, DbHandle) of
+trusted_cert_and_path(CRL, issuer_not_found, {CertPath, {Db, DbRef}}) ->
+ case find_issuer(CRL, {certpath,
+ [{Der, public_key:pkix_decode_cert(Der,otp)} || Der <- CertPath]}) of
{ok, OtpCert} ->
{ok, Root, Chain} = ssl_certificate:certificate_chain(OtpCert, Db, DbRef),
{ok, Root, lists:reverse(Chain)};
{error, issuer_not_found} ->
- {ok, unknown_crl_ca, []}
- end.
+ trusted_cert_and_path(CRL, issuer_not_found, {Db, DbRef})
+ end;
+trusted_cert_and_path(CRL, issuer_not_found, {Db, DbRef} = DbInfo) ->
+ case find_issuer(CRL, DbInfo) of
+ {ok, OtpCert} ->
+ {ok, Root, Chain} = ssl_certificate:certificate_chain(OtpCert, Db, DbRef),
+ {ok, Root, lists:reverse(Chain)};
+ {error, issuer_not_found} ->
+ {error, unknown_ca}
+ end.
-find_issuer(CRL, {Db,DbRef}) ->
+find_issuer(CRL, {certpath = Db, DbRef}) ->
+ Issuer = public_key:pkix_normalize_name(public_key:pkix_crl_issuer(CRL)),
+ IsIssuerFun =
+ fun({_Der,ErlCertCandidate}, Acc) ->
+ verify_crl_issuer(CRL, ErlCertCandidate, Issuer, Acc);
+ (_, Acc) ->
+ Acc
+ end,
+ find_issuer(IsIssuerFun, Db, DbRef);
+find_issuer(CRL, {Db, DbRef}) ->
Issuer = public_key:pkix_normalize_name(public_key:pkix_crl_issuer(CRL)),
IsIssuerFun =
fun({_Key, {_Der,ErlCertCandidate}}, Acc) ->
@@ -55,26 +72,33 @@ find_issuer(CRL, {Db,DbRef}) ->
(_, Acc) ->
Acc
end,
- if is_reference(DbRef) -> % actual DB exists
- try ssl_pkix_db:foldl(IsIssuerFun, issuer_not_found, Db) of
- issuer_not_found ->
- {error, issuer_not_found}
- catch
- {ok, _} = Result ->
- Result
- end;
- is_tuple(DbRef), element(1,DbRef) =:= extracted -> % cache bypass byproduct
- {extracted, CertsData} = DbRef,
- Certs = [Entry || {decoded, Entry} <- CertsData],
- try lists:foldl(IsIssuerFun, issuer_not_found, Certs) of
- issuer_not_found ->
- {error, issuer_not_found}
- catch
- {ok, _} = Result ->
- Result
- end
- end.
+ find_issuer(IsIssuerFun, Db, DbRef).
+find_issuer(IsIssuerFun, certpath, Certs) ->
+ try lists:foldl(IsIssuerFun, issuer_not_found, Certs) of
+ issuer_not_found ->
+ {error, issuer_not_found}
+ catch
+ {ok, _} = Result ->
+ Result
+ end;
+find_issuer(IsIssuerFun, extracted, CertsData) ->
+ Certs = [Entry || {decoded, Entry} <- CertsData],
+ try lists:foldl(IsIssuerFun, issuer_not_found, Certs) of
+ issuer_not_found ->
+ {error, issuer_not_found}
+ catch
+ {ok, _} = Result ->
+ Result
+ end;
+find_issuer(IsIssuerFun, Db, _) ->
+ try ssl_pkix_db:foldl(IsIssuerFun, issuer_not_found, Db) of
+ issuer_not_found ->
+ {error, issuer_not_found}
+ catch
+ {ok, _} = Result ->
+ Result
+ end.
verify_crl_issuer(CRL, ErlCertCandidate, Issuer, NotIssuer) ->
TBSCert = ErlCertCandidate#'OTPCertificate'.tbsCertificate,
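The reworked CRL issuer lookup first searches the certificates sent in the peer's own chain and only afterwards falls back to the certificate database, returning {error, unknown_ca} if nothing matches. A minimal sketch of how the certpath entries are built, assuming DerCerts is bound to the list of DER-encoded certificates in the path:

%% Sketch: pair every DER certificate with its decoded OTP record, the
%% {Der, ErlCert} shape the certpath clause of find_issuer/3 folds over.
certpath_entries(DerCerts) ->
    [{Der, public_key:pkix_decode_cert(Der, otp)} || Der <- DerCerts].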
diff --git a/lib/ssl/src/ssl_dist_admin_sup.erl b/lib/ssl/src/ssl_dist_admin_sup.erl
new file mode 100644
index 0000000000..f60806c4cb
--- /dev/null
+++ b/lib/ssl/src/ssl_dist_admin_sup.erl
@@ -0,0 +1,74 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2016-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+
+-module(ssl_dist_admin_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0]).
+
+%% Supervisor callback
+-export([init/1]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+
+init([]) ->
+ PEMCache = pem_cache_child_spec(),
+ SessionCertManager = session_and_cert_manager_child_spec(),
+ {ok, {{rest_for_one, 10, 3600}, [PEMCache, SessionCertManager]}}.
+
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+
+pem_cache_child_spec() ->
+ Name = ssl_pem_cache_dist,
+ StartFunc = {ssl_pem_cache, start_link_dist, [[]]},
+ Restart = permanent,
+ Shutdown = 4000,
+ Modules = [ssl_pem_cache],
+ Type = worker,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+session_and_cert_manager_child_spec() ->
+ Opts = ssl_admin_sup:manager_opts(),
+ Name = ssl_dist_manager,
+ StartFunc = {ssl_manager, start_link_dist, [Opts]},
+ Restart = permanent,
+ Shutdown = 4000,
+ Modules = [ssl_manager],
+ Type = worker,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
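The rest_for_one strategy means that if the PEM cache worker dies, the session and certificate manager started after it is restarted as well, but not the other way around. For illustration only, the restart intensity tuple above corresponds to these supervisor flags in map form:

%% Illustration: map form of {rest_for_one, 10, 3600} (hypothetical helper).
sup_flags() ->
    #{strategy => rest_for_one, intensity => 10, period => 3600}.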
diff --git a/lib/ssl/src/ssl_dist_connection_sup.erl b/lib/ssl/src/ssl_dist_connection_sup.erl
new file mode 100644
index 0000000000..e5842c866e
--- /dev/null
+++ b/lib/ssl/src/ssl_dist_connection_sup.erl
@@ -0,0 +1,79 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+
+-module(ssl_dist_connection_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0]).
+
+%% Supervisor callback
+-export([init/1]).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+-spec start_link() -> {ok, pid()} | ignore | {error, term()}.
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+%%%=========================================================================
+%%% Supervisor callback
+%%%=========================================================================
+
+init([]) ->
+
+ TLSConnetionManager = tls_connection_manager_child_spec(),
+ %% Handles emulated options so that they are inherited by the
+ %% accept socket, even when setopts is performed on the listen socket
+ ListenOptionsTracker = listen_options_tracker_child_spec(),
+
+ {ok, {{one_for_one, 10, 3600}, [TLSConnetionManager,
+ ListenOptionsTracker
+ ]}}.
+
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+
+tls_connection_manager_child_spec() ->
+ Name = dist_tls_connection,
+ StartFunc = {tls_connection_sup, start_link_dist, []},
+ Restart = permanent,
+ Shutdown = 4000,
+ Modules = [tls_connection_sup],
+ Type = supervisor,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
+listen_options_tracker_child_spec() ->
+ Name = dist_tls_socket,
+ StartFunc = {ssl_listen_tracker_sup, start_link_dist, []},
+ Restart = permanent,
+ Shutdown = 4000,
+ Modules = [tls_socket],
+ Type = supervisor,
+ {Name, StartFunc, Restart, Shutdown, Type, Modules}.
+
diff --git a/lib/ssl/src/ssl_dist_sup.erl b/lib/ssl/src/ssl_dist_sup.erl
index d47cd76bf5..690b896919 100644
--- a/lib/ssl/src/ssl_dist_sup.erl
+++ b/lib/ssl/src/ssl_dist_sup.erl
@@ -44,34 +44,29 @@ start_link() ->
%%%=========================================================================
init([]) ->
- SessionCertManager = session_and_cert_manager_child_spec(),
- ConnetionManager = connection_manager_child_spec(),
- ListenOptionsTracker = listen_options_tracker_child_spec(),
+ AdminSup = ssl_admin_child_spec(),
+ ConnectionSup = ssl_connection_sup(),
ProxyServer = proxy_server_child_spec(),
-
- {ok, {{one_for_all, 10, 3600}, [SessionCertManager, ConnetionManager,
- ListenOptionsTracker,
- ProxyServer]}}.
+ {ok, {{one_for_all, 10, 3600}, [AdminSup, ProxyServer, ConnectionSup]}}.
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
-session_and_cert_manager_child_spec() ->
- Opts = ssl_sup:manager_opts(),
- Name = ssl_manager_dist,
- StartFunc = {ssl_manager, start_link_dist, [Opts]},
+ssl_admin_child_spec() ->
+ Name = ssl_dist_admin_sup,
+ StartFunc = {ssl_dist_admin_sup, start_link, []},
Restart = permanent,
Shutdown = 4000,
- Modules = [ssl_manager],
- Type = worker,
+ Modules = [ssl_dist_admin_sup],
+ Type = supervisor,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
-connection_manager_child_spec() ->
- Name = ssl_connection_dist,
- StartFunc = {tls_connection_sup, start_link_dist, []},
- Restart = permanent,
- Shutdown = infinity,
- Modules = [tls_connection_sup],
+ssl_connection_sup() ->
+ Name = ssl_dist_connection_sup,
+ StartFunc = {ssl_dist_connection_sup, start_link, []},
+ Restart = permanent,
+ Shutdown = 4000,
+ Modules = [ssl_dist_connection_sup],
Type = supervisor,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
@@ -83,12 +78,3 @@ proxy_server_child_spec() ->
Modules = [ssl_tls_dist_proxy],
Type = worker,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
-
-listen_options_tracker_child_spec() ->
- Name = tls_socket_dist,
- StartFunc = {ssl_listen_tracker_sup, start_link_dist, []},
- Restart = permanent,
- Shutdown = 4000,
- Modules = [tls_socket],
- Type = supervisor,
- {Name, StartFunc, Restart, Shutdown, Type, Modules}.
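Taken together with the two new modules above, the distribution-side supervision tree ends up looking roughly as follows (a sketch based on this diff, not authoritative):

%% ssl_dist_sup (one_for_all)
%%  |- ssl_dist_admin_sup (rest_for_one)
%%  |   |- ssl_pem_cache (dist instance)
%%  |   `- ssl_manager (dist instance)
%%  |- ssl_tls_dist_proxy
%%  `- ssl_dist_connection_sup (one_for_one)
%%      |- tls_connection_sup (dist instance)
%%      `- ssl_listen_tracker_sup (dist instance)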
diff --git a/lib/ssl/src/ssl_handshake.erl b/lib/ssl/src/ssl_handshake.erl
index 4acc745c5f..cb61c82334 100644
--- a/lib/ssl/src/ssl_handshake.erl
+++ b/lib/ssl/src/ssl_handshake.erl
@@ -397,14 +397,13 @@ verify_signature(_, Hash, {HashAlgo, _SignAlg}, Signature,
%%--------------------------------------------------------------------
certify(#certificate{asn1_certificates = ASN1Certs}, CertDbHandle, CertDbRef,
MaxPathLen, _Verify, ValidationFunAndState0, PartialChain, CRLCheck, CRLDbHandle, Role) ->
- [PeerCert | _] = ASN1Certs,
-
- ValidationFunAndState = validation_fun_and_state(ValidationFunAndState0, Role,
- CertDbHandle, CertDbRef, CRLCheck, CRLDbHandle),
-
+ [PeerCert | _] = ASN1Certs,
try
{TrustedCert, CertPath} =
ssl_certificate:trusted_cert_and_path(ASN1Certs, CertDbHandle, CertDbRef, PartialChain),
+ ValidationFunAndState = validation_fun_and_state(ValidationFunAndState0, Role,
+ CertDbHandle, CertDbRef,
+ CRLCheck, CRLDbHandle, CertPath),
case public_key:pkix_path_validation(TrustedCert,
CertPath,
[{max_path_length, MaxPathLen},
@@ -1541,7 +1540,8 @@ sni1(Hostname) ->
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
-validation_fun_and_state({Fun, UserState0}, Role, CertDbHandle, CertDbRef, CRLCheck, CRLDbHandle) ->
+validation_fun_and_state({Fun, UserState0}, Role, CertDbHandle, CertDbRef,
+ CRLCheck, CRLDbHandle, CertPath) ->
{fun(OtpCert, {extension, _} = Extension, {SslState, UserState}) ->
case ssl_certificate:validate(OtpCert,
Extension,
@@ -1550,22 +1550,25 @@ validation_fun_and_state({Fun, UserState0}, Role, CertDbHandle, CertDbRef, CRLC
{valid, {NewSslState, UserState}};
{fail, Reason} ->
apply_user_fun(Fun, OtpCert, Reason, UserState,
- SslState);
+ SslState, CertPath);
{unknown, _} ->
apply_user_fun(Fun, OtpCert,
- Extension, UserState, SslState)
+ Extension, UserState, SslState, CertPath)
end;
(OtpCert, VerifyResult, {SslState, UserState}) ->
apply_user_fun(Fun, OtpCert, VerifyResult, UserState,
- SslState)
+ SslState, CertPath)
end, {{Role, CertDbHandle, CertDbRef, CRLCheck, CRLDbHandle}, UserState0}};
-validation_fun_and_state(undefined, Role, CertDbHandle, CertDbRef, CRLCheck, CRLDbHandle) ->
+validation_fun_and_state(undefined, Role, CertDbHandle, CertDbRef,
+ CRLCheck, CRLDbHandle, CertPath) ->
{fun(OtpCert, {extension, _} = Extension, SslState) ->
ssl_certificate:validate(OtpCert,
Extension,
SslState);
- (OtpCert, VerifyResult, SslState) when (VerifyResult == valid) or (VerifyResult == valid_peer) ->
- case crl_check(OtpCert, CRLCheck, CertDbHandle, CertDbRef, CRLDbHandle, VerifyResult) of
+ (OtpCert, VerifyResult, SslState) when (VerifyResult == valid) or
+ (VerifyResult == valid_peer) ->
+ case crl_check(OtpCert, CRLCheck, CertDbHandle, CertDbRef,
+ CRLDbHandle, VerifyResult, CertPath) of
valid ->
{VerifyResult, SslState};
Reason ->
@@ -1578,20 +1581,21 @@ validation_fun_and_state(undefined, Role, CertDbHandle, CertDbRef, CRLCheck, CRL
end, {Role, CertDbHandle, CertDbRef, CRLCheck, CRLDbHandle}}.
apply_user_fun(Fun, OtpCert, VerifyResult, UserState0,
- {_, CertDbHandle, CertDbRef, CRLCheck, CRLDbHandle} = SslState) when
+ {_, CertDbHandle, CertDbRef, CRLCheck, CRLDbHandle} = SslState, CertPath) when
(VerifyResult == valid) or (VerifyResult == valid_peer) ->
case Fun(OtpCert, VerifyResult, UserState0) of
{Valid, UserState} when (Valid == valid) or (Valid == valid_peer) ->
- case crl_check(OtpCert, CRLCheck, CertDbHandle, CertDbRef, CRLDbHandle, VerifyResult) of
+ case crl_check(OtpCert, CRLCheck, CertDbHandle, CertDbRef,
+ CRLDbHandle, VerifyResult, CertPath) of
valid ->
{Valid, {SslState, UserState}};
Result ->
- apply_user_fun(Fun, OtpCert, Result, UserState, SslState)
+ apply_user_fun(Fun, OtpCert, Result, UserState, SslState, CertPath)
end;
{fail, _} = Fail ->
Fail
end;
-apply_user_fun(Fun, OtpCert, ExtensionOrError, UserState0, SslState) ->
+apply_user_fun(Fun, OtpCert, ExtensionOrError, UserState0, SslState, _CertPath) ->
case Fun(OtpCert, ExtensionOrError, UserState0) of
{Valid, UserState} when (Valid == valid) or (Valid == valid_peer)->
{Valid, {SslState, UserState}};
@@ -2187,13 +2191,14 @@ handle_psk_identity(_PSKIdentity, LookupFun)
handle_psk_identity(PSKIdentity, {Fun, UserState}) ->
Fun(psk, PSKIdentity, UserState).
-crl_check(_, false, _,_,_, _) ->
+crl_check(_, false, _,_,_, _, _) ->
valid;
-crl_check(_, peer, _, _,_, valid) -> %% Do not check CAs with this option.
+crl_check(_, peer, _, _,_, valid, _) -> %% Do not check CAs with this option.
valid;
-crl_check(OtpCert, Check, CertDbHandle, CertDbRef, {Callback, CRLDbHandle}, _) ->
+crl_check(OtpCert, Check, CertDbHandle, CertDbRef, {Callback, CRLDbHandle}, _, CertPath) ->
Options = [{issuer_fun, {fun(_DP, CRL, Issuer, DBInfo) ->
- ssl_crl:trusted_cert_and_path(CRL, Issuer, DBInfo)
+ ssl_crl:trusted_cert_and_path(CRL, Issuer, {CertPath,
+ DBInfo})
end, {CertDbHandle, CertDbRef}}},
{update_crl, fun(DP, CRL) -> Callback:fresh_crl(DP, CRL) end}
],
@@ -2229,7 +2234,8 @@ dps_and_crls(OtpCert, Callback, CRLDbHandle, ext) ->
no_dps;
DistPoints ->
Issuer = OtpCert#'OTPCertificate'.tbsCertificate#'OTPTBSCertificate'.issuer,
- distpoints_lookup(DistPoints, Issuer, Callback, CRLDbHandle)
+ CRLs = distpoints_lookup(DistPoints, Issuer, Callback, CRLDbHandle),
+ dps_and_crls(DistPoints, CRLs, [])
end;
dps_and_crls(OtpCert, Callback, CRLDbHandle, same_issuer) ->
@@ -2242,7 +2248,13 @@ dps_and_crls(OtpCert, Callback, CRLDbHandle, same_issuer) ->
end, GenNames),
[{DP, {CRL, public_key:der_decode('CertificateList', CRL)}} || CRL <- CRLs].
-distpoints_lookup([], _, _, _) ->
+dps_and_crls([], _, Acc) ->
+ Acc;
+dps_and_crls([DP | Rest], CRLs, Acc) ->
+ DpCRL = [{DP, {CRL, public_key:der_decode('CertificateList', CRL)}} || CRL <- CRLs],
+ dps_and_crls(Rest, CRLs, DpCRL ++ Acc).
+
+distpoints_lookup([],_, _, _) ->
[];
distpoints_lookup([DistPoint | Rest], Issuer, Callback, CRLDbHandle) ->
Result =
@@ -2257,7 +2269,7 @@ distpoints_lookup([DistPoint | Rest], Issuer, Callback, CRLDbHandle) ->
not_available ->
distpoints_lookup(Rest, Issuer, Callback, CRLDbHandle);
CRLs ->
- [{DistPoint, {CRL, public_key:der_decode('CertificateList', CRL)}} || CRL <- CRLs]
+ CRLs
end.
sign_algo(?rsaEncryption) ->
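With CertPath threaded all the way down, the issuer_fun closure built by the new crl_check/7 carries both the peer's certificate path and the certificate database handles. A condensed sketch of that closure, assuming the same variable bindings as in the clause above:

%% Sketch: the issuer callback now hands ssl_crl the peer chain as well,
%% so a CRL issuer can be found even when it is not in the trust store.
issuer_fun(CertPath, CertDbHandle, CertDbRef) ->
    {fun(_DP, CRL, Issuer, DBInfo) ->
             ssl_crl:trusted_cert_and_path(CRL, Issuer, {CertPath, DBInfo})
     end, {CertDbHandle, CertDbRef}}.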
diff --git a/lib/ssl/src/ssl_handshake.hrl b/lib/ssl/src/ssl_handshake.hrl
index fde92035a2..324b7dbde3 100644
--- a/lib/ssl/src/ssl_handshake.hrl
+++ b/lib/ssl/src/ssl_handshake.hrl
@@ -80,6 +80,9 @@
-define(CLIENT_KEY_EXCHANGE, 16).
-define(FINISHED, 20).
+-define(MAX_UNIT24, 8388607).
+-define(DEFAULT_MAX_HANDSHAKE_SIZE, (256*1024)).
+
-record(random, {
gmt_unix_time, % uint32
random_bytes % opaque random_bytes[28]
diff --git a/lib/ssl/src/ssl_internal.hrl b/lib/ssl/src/ssl_internal.hrl
index 98b89bb811..c34af9f82c 100644
--- a/lib/ssl/src/ssl_internal.hrl
+++ b/lib/ssl/src/ssl_internal.hrl
@@ -142,7 +142,8 @@
signature_algs,
eccs,
honor_ecc_order :: boolean(),
- v2_hello_compatible :: boolean()
+ v2_hello_compatible :: boolean(),
+ max_handshake_size :: integer()
}).
-record(socket_options,
diff --git a/lib/ssl/src/ssl_manager.erl b/lib/ssl/src/ssl_manager.erl
index 5bd9521de7..2b82f18bb5 100644
--- a/lib/ssl/src/ssl_manager.erl
+++ b/lib/ssl/src/ssl_manager.erl
@@ -32,10 +32,9 @@
new_session_id/1, clean_cert_db/2,
register_session/2, register_session/3, invalidate_session/2,
insert_crls/2, insert_crls/3, delete_crls/1, delete_crls/2,
- invalidate_session/3, invalidate_pem/1, clear_pem_cache/0, manager_name/1]).
+ invalidate_session/3, name/1]).
-% Spawn export
--export([init_session_validator/1, init_pem_cache_validator/1]).
+-export([init_session_validator/1]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
@@ -52,9 +51,7 @@
session_lifetime :: integer(),
certificate_db :: db_handle(),
session_validation_timer :: reference(),
- last_delay_timer = {undefined, undefined},%% Keep for testing purposes
- last_pem_check :: erlang:timestamp(),
- clear_pem_cache :: integer(),
+ last_delay_timer = {undefined, undefined},%% Keep for testing purposes
session_cache_client_max :: integer(),
session_cache_server_max :: integer(),
session_server_invalidator :: undefined | pid(),
@@ -63,7 +60,6 @@
-define(GEN_UNIQUE_ID_MAX_TRIES, 10).
-define(SESSION_VALIDATION_INTERVAL, 60000).
--define(CLEAR_PEM_CACHE, 120000).
-define(CLEAN_SESSION_DB, 60000).
-define(CLEAN_CERT_DB, 500).
-define(DEFAULT_MAX_SESSION_CACHE, 1000).
@@ -74,14 +70,14 @@
%%====================================================================
%%--------------------------------------------------------------------
--spec manager_name(normal | dist) -> atom().
+-spec name(normal | dist) -> atom().
%%
%% Description: Returns the registered name of the ssl manager process
%% in the operation modes 'normal' and 'dist'.
%%--------------------------------------------------------------------
-manager_name(normal) ->
+name(normal) ->
?MODULE;
-manager_name(dist) ->
+name(dist) ->
list_to_atom(atom_to_list(?MODULE) ++ "dist").
%%--------------------------------------------------------------------
@@ -91,9 +87,10 @@ manager_name(dist) ->
%% and certificate caching.
%%--------------------------------------------------------------------
start_link(Opts) ->
- DistMangerName = manager_name(normal),
- gen_server:start_link({local, DistMangerName},
- ?MODULE, [DistMangerName, Opts], []).
+ MangerName = name(normal),
+ CacheName = ssl_pem_cache:name(normal),
+ gen_server:start_link({local, MangerName},
+ ?MODULE, [MangerName, CacheName, Opts], []).
%%--------------------------------------------------------------------
-spec start_link_dist(list()) -> {ok, pid()} | ignore | {error, term()}.
@@ -102,38 +99,23 @@ start_link(Opts) ->
%% be used by the erlang distribution. Note disables soft upgrade!
%%--------------------------------------------------------------------
start_link_dist(Opts) ->
- DistMangerName = manager_name(dist),
+ DistMangerName = name(dist),
+ DistCacheName = ssl_pem_cache:name(dist),
gen_server:start_link({local, DistMangerName},
- ?MODULE, [DistMangerName, Opts], []).
+ ?MODULE, [DistMangerName, DistCacheName, Opts], []).
%%--------------------------------------------------------------------
-spec connection_init(binary()| {der, list()}, client | server,
{Cb :: atom(), Handle:: term()}) ->
- {ok, certdb_ref(), db_handle(), db_handle(),
- db_handle(), db_handle(), CRLInfo::term()}.
+ {ok, map()}.
%%
%% Description: Do necessary initializations for a new connection.
%%--------------------------------------------------------------------
connection_init({der, _} = Trustedcerts, Role, CRLCache) ->
- case bypass_pem_cache() of
- true ->
- {ok, Extracted} = ssl_pkix_db:extract_trusted_certs(Trustedcerts),
- call({connection_init, Extracted, Role, CRLCache});
- false ->
- call({connection_init, Trustedcerts, Role, CRLCache})
- end;
-
-connection_init(<<>> = Trustedcerts, Role, CRLCache) ->
- call({connection_init, Trustedcerts, Role, CRLCache});
-
+ {ok, Extracted} = ssl_pkix_db:extract_trusted_certs(Trustedcerts),
+ call({connection_init, Extracted, Role, CRLCache});
connection_init(Trustedcerts, Role, CRLCache) ->
- case bypass_pem_cache() of
- true ->
- {ok, Extracted} = ssl_pkix_db:extract_trusted_certs(Trustedcerts),
- call({connection_init, Extracted, Role, CRLCache});
- false ->
- call({connection_init, Trustedcerts, Role, CRLCache})
- end.
+ call({connection_init, Trustedcerts, Role, CRLCache}).
%%--------------------------------------------------------------------
-spec cache_pem_file(binary(), term()) -> {ok, term()} | {error, reason()}.
@@ -141,31 +123,14 @@ connection_init(Trustedcerts, Role, CRLCache) ->
%% Description: Cache a pem file and return its content.
%%--------------------------------------------------------------------
cache_pem_file(File, DbHandle) ->
- case bypass_pem_cache() of
- true ->
- ssl_pkix_db:decode_pem_file(File);
- false ->
- case ssl_pkix_db:lookup_cached_pem(DbHandle, File) of
- [{Content,_}] ->
- {ok, Content};
- [Content] ->
- {ok, Content};
- undefined ->
- call({cache_pem, File})
- end
+ case ssl_pkix_db:lookup(File, DbHandle) of
+ [Content] ->
+ {ok, Content};
+ undefined ->
+ ssl_pem_cache:insert(File)
end.
%%--------------------------------------------------------------------
--spec clear_pem_cache() -> ok.
-%%
-%% Description: Clear the PEM cache
-%%--------------------------------------------------------------------
-clear_pem_cache() ->
- %% Not supported for distribution at the moement, should it be?
- put(ssl_manager, manager_name(normal)),
- call(unconditionally_clear_pem_cache).
-
-%%--------------------------------------------------------------------
-spec lookup_trusted_cert(term(), reference(), serialnumber(), issuer()) ->
undefined |
{ok, {der_cert(), #'OTPCertificate'{}}}.
@@ -222,26 +187,22 @@ invalidate_session(Port, Session) ->
load_mitigation(),
cast({invalidate_session, Port, Session}).
--spec invalidate_pem(File::binary()) -> ok.
-invalidate_pem(File) ->
- cast({invalidate_pem, File}).
-
insert_crls(Path, CRLs)->
insert_crls(Path, CRLs, normal).
insert_crls(?NO_DIST_POINT_PATH = Path, CRLs, ManagerType)->
- put(ssl_manager, manager_name(ManagerType)),
+ put(ssl_manager, name(ManagerType)),
cast({insert_crls, Path, CRLs});
insert_crls(Path, CRLs, ManagerType)->
- put(ssl_manager, manager_name(ManagerType)),
+ put(ssl_manager, name(ManagerType)),
call({insert_crls, Path, CRLs}).
delete_crls(Path)->
delete_crls(Path, normal).
delete_crls(?NO_DIST_POINT_PATH = Path, ManagerType)->
- put(ssl_manager, manager_name(ManagerType)),
+ put(ssl_manager, name(ManagerType)),
cast({delete_crls, Path});
delete_crls(Path, ManagerType)->
- put(ssl_manager, manager_name(ManagerType)),
+ put(ssl_manager, name(ManagerType)),
call({delete_crls, Path}).
%%====================================================================
@@ -255,13 +216,14 @@ delete_crls(Path, ManagerType)->
%%
%% Description: Initiates the server
%%--------------------------------------------------------------------
-init([Name, Opts]) ->
- put(ssl_manager, Name),
+init([ManagerName, PemCacheName, Opts]) ->
+ put(ssl_manager, ManagerName),
+ put(ssl_pem_cache, PemCacheName),
process_flag(trap_exit, true),
CacheCb = proplists:get_value(session_cb, Opts, ssl_session_cache),
SessionLifeTime =
proplists:get_value(session_lifetime, Opts, ?'24H_in_sec'),
- CertDb = ssl_pkix_db:create(),
+ CertDb = ssl_pkix_db:create(PemCacheName),
ClientSessionCache =
CacheCb:init([{role, client} |
proplists:get_value(session_cb_init_args, Opts, [])]),
@@ -270,16 +232,12 @@ init([Name, Opts]) ->
proplists:get_value(session_cb_init_args, Opts, [])]),
Timer = erlang:send_after(SessionLifeTime * 1000 + 5000,
self(), validate_sessions),
- Interval = pem_check_interval(),
- erlang:send_after(Interval, self(), clear_pem_cache),
{ok, #state{certificate_db = CertDb,
session_cache_client = ClientSessionCache,
session_cache_server = ServerSessionCache,
session_cache_cb = CacheCb,
session_lifetime = SessionLifeTime,
session_validation_timer = Timer,
- last_pem_check = os:timestamp(),
- clear_pem_cache = Interval,
session_cache_client_max =
max_session_cache_size(session_cache_client_max),
session_cache_server_max =
@@ -302,18 +260,25 @@ init([Name, Opts]) ->
handle_call({{connection_init, <<>>, Role, {CRLCb, UserCRLDb}}, _Pid}, _From,
#state{certificate_db = [CertDb, FileRefDb, PemChace | _] = Db} = State) ->
Ref = make_ref(),
- Result = {ok, Ref, CertDb, FileRefDb, PemChace,
- session_cache(Role, State), {CRLCb, crl_db_info(Db, UserCRLDb)}},
- {reply, Result, State#state{certificate_db = Db}};
+ {reply, {ok, #{cert_db_ref => Ref,
+ cert_db_handle => CertDb,
+ fileref_db_handle => FileRefDb,
+ pem_cache => PemChace,
+ session_cache => session_cache(Role, State),
+ crl_db_info => {CRLCb, crl_db_info(Db, UserCRLDb)}}}, State};
handle_call({{connection_init, Trustedcerts, Role, {CRLCb, UserCRLDb}}, Pid}, _From,
#state{certificate_db = [CertDb, FileRefDb, PemChace | _] = Db} = State) ->
case add_trusted_certs(Pid, Trustedcerts, Db) of
{ok, Ref} ->
- {reply, {ok, Ref, CertDb, FileRefDb, PemChace, session_cache(Role, State),
- {CRLCb, crl_db_info(Db, UserCRLDb)}}, State};
- {error, _} = Error ->
- {reply, Error, State}
+ {reply, {ok, #{cert_db_ref => Ref,
+ cert_db_handle => CertDb,
+ fileref_db_handle => FileRefDb,
+ pem_cache => PemChace,
+ session_cache => session_cache(Role, State),
+ crl_db_info => {CRLCb, crl_db_info(Db, UserCRLDb)}}}, State};
+ {error, _} = Error ->
+ {reply, Error, State}
end;
handle_call({{insert_crls, Path, CRLs}, _}, _From,
@@ -330,21 +295,7 @@ handle_call({{new_session_id, Port}, _},
_, #state{session_cache_cb = CacheCb,
session_cache_server = Cache} = State) ->
Id = new_id(Port, ?GEN_UNIQUE_ID_MAX_TRIES, Cache, CacheCb),
- {reply, Id, State};
-
-handle_call({{cache_pem,File}, _Pid}, _,
- #state{certificate_db = Db} = State) ->
- try ssl_pkix_db:cache_pem_file(File, Db) of
- Result ->
- {reply, Result, State}
- catch
- _:Reason ->
- {reply, {error, Reason}, State}
- end;
-handle_call({unconditionally_clear_pem_cache, _},_,
- #state{certificate_db = [_,_,PemChace | _]} = State) ->
- ssl_pkix_db:clear(PemChace),
- {reply, ok, State}.
+ {reply, Id, State}.
%%--------------------------------------------------------------------
-spec handle_cast(msg(), #state{}) -> {noreply, #state{}}.
@@ -382,11 +333,6 @@ handle_cast({insert_crls, Path, CRLs},
handle_cast({delete_crls, CRLsOrPath},
#state{certificate_db = Db} = State) ->
ssl_pkix_db:remove_crls(Db, CRLsOrPath),
- {noreply, State};
-
-handle_cast({invalidate_pem, File},
- #state{certificate_db = [_, _, PemCache | _]} = State) ->
- ssl_pkix_db:remove(File, PemCache),
{noreply, State}.
%%--------------------------------------------------------------------
@@ -418,22 +364,14 @@ handle_info({delayed_clean_session, Key, Cache}, #state{session_cache_cb = Cache
CacheCb:delete(Cache, Key),
{noreply, State};
-handle_info(clear_pem_cache, #state{certificate_db = [_,_,PemChace | _],
- clear_pem_cache = Interval,
- last_pem_check = CheckPoint} = State) ->
- NewCheckPoint = os:timestamp(),
- start_pem_cache_validator(PemChace, CheckPoint),
- erlang:send_after(Interval, self(), clear_pem_cache),
- {noreply, State#state{last_pem_check = NewCheckPoint}};
-
handle_info({clean_cert_db, Ref, File},
- #state{certificate_db = [CertDb,RefDb, PemCache | _]} = State) ->
+ #state{certificate_db = [CertDb, {RefDb, FileMapDb} | _]} = State) ->
case ssl_pkix_db:lookup(Ref, RefDb) of
undefined -> %% Already cleaned
ok;
_ ->
- clean_cert_db(Ref, CertDb, RefDb, PemCache, File)
+ clean_cert_db(Ref, CertDb, RefDb, FileMapDb, File)
end,
{noreply, State};
@@ -523,14 +461,6 @@ delay_time() ->
?CLEAN_SESSION_DB
end.
-bypass_pem_cache() ->
- case application:get_env(ssl, bypass_pem_cache) of
- {ok, Bool} when is_boolean(Bool) ->
- Bool;
- _ ->
- false
- end.
-
max_session_cache_size(CacheType) ->
case application:get_env(ssl, CacheType) of
{ok, Size} when is_integer(Size) ->
@@ -594,16 +524,11 @@ new_id(Port, Tries, Cache, CacheCb) ->
new_id(Port, Tries - 1, Cache, CacheCb)
end.
-clean_cert_db(Ref, CertDb, RefDb, PemCache, File) ->
+clean_cert_db(Ref, CertDb, RefDb, FileMapDb, File) ->
case ssl_pkix_db:ref_count(Ref, RefDb, 0) of
0 ->
- case ssl_pkix_db:lookup_cached_pem(PemCache, File) of
- [{Content, Ref}] ->
- ssl_pkix_db:insert(File, Content, PemCache);
- _ ->
- ok
- end,
ssl_pkix_db:remove(Ref, RefDb),
+ ssl_pkix_db:remove(File, FileMapDb),
ssl_pkix_db:remove_trusted_certs(Ref, CertDb);
_ ->
ok
@@ -687,42 +612,6 @@ exists_equivalent(#session{
exists_equivalent(Session, [ _ | Rest]) ->
exists_equivalent(Session, Rest).
-start_pem_cache_validator(PemCache, CheckPoint) ->
- spawn_link(?MODULE, init_pem_cache_validator,
- [[get(ssl_manager), PemCache, CheckPoint]]).
-
-init_pem_cache_validator([SslManagerName, PemCache, CheckPoint]) ->
- put(ssl_manager, SslManagerName),
- ssl_pkix_db:foldl(fun pem_cache_validate/2,
- CheckPoint, PemCache).
-
-pem_cache_validate({File, _}, CheckPoint) ->
- case file:read_file_info(File, []) of
- {ok, #file_info{mtime = Time}} ->
- case is_before_checkpoint(Time, CheckPoint) of
- true ->
- ok;
- false ->
- invalidate_pem(File)
- end;
- _ ->
- invalidate_pem(File)
- end,
- CheckPoint.
-
-pem_check_interval() ->
- case application:get_env(ssl, ssl_pem_cache_clean) of
- {ok, Interval} when is_integer(Interval) ->
- Interval;
- _ ->
- ?CLEAR_PEM_CACHE
- end.
-
-is_before_checkpoint(Time, CheckPoint) ->
- calendar:datetime_to_gregorian_seconds(
- calendar:now_to_datetime(CheckPoint)) -
- calendar:datetime_to_gregorian_seconds(Time) > 0.
-
add_trusted_certs(Pid, Trustedcerts, Db) ->
try
ssl_pkix_db:add_trusted_certs(Pid, Trustedcerts, Db)
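Since connection_init/3 now replies with a map instead of a fixed-size tuple, callers match on just the keys they need, which keeps the interface extensible. A sketch of such a caller, using only keys introduced in this patch (the wrapper name is hypothetical):

%% Sketch: consuming the map-based reply from ssl_manager:connection_init/3.
init_cert_state(Trustedcerts, Role, CRLCache) ->
    {ok, #{cert_db_ref := Ref,
           cert_db_handle := CertDbHandle,
           fileref_db_handle := FileRefHandle,
           session_cache := CacheHandle,
           crl_db_info := CRLDbInfo}} =
        ssl_manager:connection_init(Trustedcerts, Role, CRLCache),
    {Ref, CertDbHandle, FileRefHandle, CacheHandle, CRLDbInfo}.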
diff --git a/lib/ssl/src/ssl_pem_cache.erl b/lib/ssl/src/ssl_pem_cache.erl
new file mode 100644
index 0000000000..f63a301f69
--- /dev/null
+++ b/lib/ssl/src/ssl_pem_cache.erl
@@ -0,0 +1,266 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2016-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%----------------------------------------------------------------------
+%% Purpose: Manages the PEM file cache used by the ssl application
+%%----------------------------------------------------------------------
+
+-module(ssl_pem_cache).
+-behaviour(gen_server).
+
+%% Internal application API
+-export([start_link/1,
+ start_link_dist/1,
+ name/1,
+ insert/1,
+ clear/0]).
+
+% Spawn export
+-export([init_pem_cache_validator/1]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-include("ssl_handshake.hrl").
+-include("ssl_internal.hrl").
+-include_lib("kernel/include/file.hrl").
+
+-record(state, {
+ pem_cache,
+ last_pem_check :: erlang:timestamp(),
+ clear :: integer()
+ }).
+
+-define(CLEAR_PEM_CACHE, 120000).
+-define(DEFAULT_MAX_SESSION_CACHE, 1000).
+
+%%====================================================================
+%% API
+%%====================================================================
+
+%%--------------------------------------------------------------------
+-spec name(normal | dist) -> atom().
+%%
+%% Description: Returns the registered name of the ssl cache process
+%% in the operation modes 'normal' and 'dist'.
+%%--------------------------------------------------------------------
+name(normal) ->
+ ?MODULE;
+name(dist) ->
+ list_to_atom(atom_to_list(?MODULE) ++ "dist").
+
+%%--------------------------------------------------------------------
+-spec start_link(list()) -> {ok, pid()} | ignore | {error, term()}.
+%%
+%% Description: Starts the ssl pem cache handler
+%%--------------------------------------------------------------------
+start_link(_) ->
+ CacheName = name(normal),
+ gen_server:start_link({local, CacheName},
+ ?MODULE, [CacheName], []).
+
+%%--------------------------------------------------------------------
+-spec start_link_dist(list()) -> {ok, pid()} | ignore | {error, term()}.
+%%
+%% Description: Starts a special instance of the ssl pem cache to
+%% be used by the erlang distribution. Note: this disables soft upgrade!
+%%--------------------------------------------------------------------
+start_link_dist(_) ->
+ DistCacheName = name(dist),
+ gen_server:start_link({local, DistCacheName},
+ ?MODULE, [DistCacheName], []).
+
+
+%%--------------------------------------------------------------------
+-spec insert(binary()) -> {ok, term()} | {error, reason()}.
+%%
+%% Description: Cache a pem file and return its content.
+%%--------------------------------------------------------------------
+insert(File) ->
+ {ok, PemBin} = file:read_file(File),
+ Content = public_key:pem_decode(PemBin),
+ case bypass_cache() of
+ true ->
+ {ok, Content};
+ false ->
+ cast({cache_pem, File, Content}),
+ {ok, Content}
+ end.
+
+%%--------------------------------------------------------------------
+-spec clear() -> ok.
+%%
+%% Description: Clear the PEM cache
+%%--------------------------------------------------------------------
+clear() ->
+ %% Not supported for distribution at the moment, should it be?
+ put(ssl_pem_cache, name(normal)),
+ call(unconditionally_clear_pem_cache).
+
+-spec invalidate_pem(File::binary()) -> ok.
+invalidate_pem(File) ->
+ cast({invalidate_pem, File}).
+
+%%====================================================================
+%% gen_server callbacks
+%%====================================================================
+
+%%--------------------------------------------------------------------
+-spec init(list()) -> {ok, #state{}}.
+%% Possible return values not used now.
+%% | {ok, #state{}, timeout()} | ignore | {stop, term()}.
+%%
+%% Description: Initiates the server
+%%--------------------------------------------------------------------
+init([Name]) ->
+ put(ssl_pem_cache, Name),
+ process_flag(trap_exit, true),
+ PemCache = ssl_pkix_db:create_pem_cache(Name),
+ Interval = pem_check_interval(),
+ erlang:send_after(Interval, self(), clear_pem_cache),
+ {ok, #state{pem_cache = PemCache,
+ last_pem_check = os:timestamp(),
+ clear = Interval
+ }}.
+
+%%--------------------------------------------------------------------
+-spec handle_call(msg(), from(), #state{}) -> {reply, reply(), #state{}}.
+%% Possible return values not used now.
+%% {reply, reply(), #state{}, timeout()} |
+%% {noreply, #state{}} |
+%% {noreply, #state{}, timeout()} |
+%% {stop, reason(), reply(), #state{}} |
+%% {stop, reason(), #state{}}.
+%%
+%% Description: Handling call messages
+%%--------------------------------------------------------------------
+handle_call({unconditionally_clear_pem_cache, _},_,
+ #state{pem_cache = PemCache} = State) ->
+ ssl_pkix_db:clear(PemCache),
+ {reply, ok, State}.
+
+%%--------------------------------------------------------------------
+-spec handle_cast(msg(), #state{}) -> {noreply, #state{}}.
+%% Possible return values not used now.
+%% | {noreply, #state{}, timeout()} |
+%% {stop, reason(), #state{}}.
+%%
+%% Description: Handling cast messages
+%%--------------------------------------------------------------------
+handle_cast({cache_pem, File, Content}, #state{pem_cache = Db} = State) ->
+ ssl_pkix_db:insert(File, Content, Db),
+ {noreply, State};
+
+handle_cast({invalidate_pem, File}, #state{pem_cache = Db} = State) ->
+ ssl_pkix_db:remove(File, Db),
+ {noreply, State}.
+
+
+%%--------------------------------------------------------------------
+-spec handle_info(msg(), #state{}) -> {noreply, #state{}}.
+%% Possible return values not used now.
+%% |{noreply, #state{}, timeout()} |
+%% {stop, reason(), #state{}}.
+%%
+%% Description: Handling all non call/cast messages
+%%-------------------------------------------------------------------
+handle_info(clear_pem_cache, #state{pem_cache = PemCache,
+ clear = Interval,
+ last_pem_check = CheckPoint} = State) ->
+ NewCheckPoint = os:timestamp(),
+ start_pem_cache_validator(PemCache, CheckPoint),
+ erlang:send_after(Interval, self(), clear_pem_cache),
+ {noreply, State#state{last_pem_check = NewCheckPoint}};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+%%--------------------------------------------------------------------
+-spec terminate(reason(), #state{}) -> ok.
+%%
+%% Description: This function is called by a gen_server when it is about to
+%% terminate. It should be the opposite of Module:init/1 and do any necessary
+%% cleaning up. When it returns, the gen_server terminates with Reason.
+%% The return value is ignored.
+%%--------------------------------------------------------------------
+terminate(_Reason, #state{}) ->
+ ok.
+
+%%--------------------------------------------------------------------
+-spec code_change(term(), #state{}, list()) -> {ok, #state{}}.
+%%
+%% Description: Convert process state when code is changed
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+call(Msg) ->
+ gen_server:call(get(ssl_pem_cache), {Msg, self()}, infinity).
+
+cast(Msg) ->
+ gen_server:cast(get(ssl_pem_cache), Msg).
+
+start_pem_cache_validator(PemCache, CheckPoint) ->
+ spawn_link(?MODULE, init_pem_cache_validator,
+ [[get(ssl_pem_cache), PemCache, CheckPoint]]).
+
+init_pem_cache_validator([CacheName, PemCache, CheckPoint]) ->
+ put(ssl_pem_cache, CacheName),
+ ssl_pkix_db:foldl(fun pem_cache_validate/2,
+ CheckPoint, PemCache).
+
+pem_cache_validate({File, _}, CheckPoint) ->
+ case file:read_file_info(File, []) of
+ {ok, #file_info{mtime = Time}} ->
+ case is_before_checkpoint(Time, CheckPoint) of
+ true ->
+ ok;
+ false ->
+ invalidate_pem(File)
+ end;
+ _ ->
+ invalidate_pem(File)
+ end,
+ CheckPoint.
+
+is_before_checkpoint(Time, CheckPoint) ->
+ calendar:datetime_to_gregorian_seconds(
+ calendar:now_to_datetime(CheckPoint)) -
+ calendar:datetime_to_gregorian_seconds(Time) > 0.
+
+pem_check_interval() ->
+ case application:get_env(ssl, ssl_pem_cache_clean) of
+ {ok, Interval} when is_integer(Interval) ->
+ Interval;
+ _ ->
+ ?CLEAR_PEM_CACHE
+ end.
+
+bypass_cache() ->
+ case application:get_env(ssl, bypass_pem_cache) of
+ {ok, Bool} when is_boolean(Bool) ->
+ Bool;
+ _ ->
+ false
+ end.
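insert/1 always reads and decodes the PEM file; whether the decoded entries are also sent to the cache process is controlled by the existing bypass_pem_cache application environment flag. A small usage sketch (CertFile is a hypothetical path; in the application the call is made from processes set up by the ssl supervisors):

%% Sketch: decode a PEM file through the cache API; with
%% bypass_pem_cache set to true the cast to the cache process is skipped.
pem_cache_example(CertFile) ->
    ok = application:set_env(ssl, bypass_pem_cache, false),
    {ok, Entries} = ssl_pem_cache:insert(CertFile),
    Entries.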
diff --git a/lib/ssl/src/ssl_pkix_db.erl b/lib/ssl/src/ssl_pkix_db.erl
index b4299969e4..cde05bb16f 100644
--- a/lib/ssl/src/ssl_pkix_db.erl
+++ b/lib/ssl/src/ssl_pkix_db.erl
@@ -28,11 +28,11 @@
-include_lib("public_key/include/public_key.hrl").
-include_lib("kernel/include/file.hrl").
--export([create/0, add_crls/3, remove_crls/2, remove/1, add_trusted_certs/3,
+-export([create/1, create_pem_cache/1,
+ add_crls/3, remove_crls/2, remove/1, add_trusted_certs/3,
extract_trusted_certs/1,
remove_trusted_certs/2, insert/3, remove/2, clear/1, db_size/1,
ref_count/3, lookup_trusted_cert/4, foldl/3, select_cert_by_issuer/2,
- lookup_cached_pem/2, cache_pem_file/2, cache_pem_file/3,
decode_pem_file/1, lookup/2]).
%%====================================================================
@@ -40,25 +40,31 @@
%%====================================================================
%%--------------------------------------------------------------------
--spec create() -> [db_handle(),...].
+-spec create(atom()) -> [db_handle(),...].
%%
%% Description: Creates a new certificate db.
%% Note: lookup_trusted_cert/4 may be called from any process but only
%% the process that called create may call the other functions.
%%--------------------------------------------------------------------
-create() ->
+create(PEMCacheName) ->
[%% Let connection process delete trusted certs
%% that can only belong to one connection. (Supplied directly
%% on DER format to ssl:connect/listen.)
ets:new(ssl_otp_cacertificate_db, [set, public]),
%% Let connection processes call ref_count/3 directly
- ets:new(ssl_otp_ca_file_ref, [set, public]),
- ets:new(ssl_otp_pem_cache, [set, protected]),
+ {ets:new(ssl_otp_ca_file_ref, [set, public]),
+ ets:new(ssl_otp_ca_ref_file_mapping, [set, protected])
+ },
+ %% Lookups are done in the named table owned by the ssl_pem_cache process
+ PEMCacheName,
%% Default cache
{ets:new(ssl_otp_crl_cache, [set, protected]),
ets:new(ssl_otp_crl_issuer_mapping, [bag, protected])}
].
+create_pem_cache(Name) ->
+ ets:new(Name, [named_table, set, protected]).
+
%%--------------------------------------------------------------------
-spec remove([db_handle()]) -> ok.
%%
@@ -70,6 +76,10 @@ remove(Dbs) ->
true = ets:delete(Db1);
(undefined) ->
ok;
+ (ssl_pem_cache) ->
+ ok;
+ (ssl_pem_cache_dist) ->
+ ok;
(Db) ->
true = ets:delete(Db)
end, Dbs).
@@ -101,11 +111,6 @@ lookup_trusted_cert(_DbHandle, {extracted,Certs}, SerialNumber, Issuer) ->
{ok, Cert}
end.
-lookup_cached_pem([_, _, PemChache | _], File) ->
- lookup_cached_pem(PemChache, File);
-lookup_cached_pem(PemChache, File) ->
- lookup(File, PemChache).
-
%%--------------------------------------------------------------------
-spec add_trusted_certs(pid(), {erlang:timestamp(), string()} |
{der, list()}, [db_handle()]) -> {ok, [db_handle()]}.
@@ -122,17 +127,11 @@ add_trusted_certs(_Pid, {der, DerList}, [CertDb, _,_ | _]) ->
add_certs_from_der(DerList, NewRef, CertDb),
{ok, NewRef};
-add_trusted_certs(_Pid, File, [CertsDb, RefDb, PemChache | _] = Db) ->
- case lookup_cached_pem(Db, File) of
- [{_Content, Ref}] ->
+add_trusted_certs(_Pid, File, [ _, {RefDb, FileMapDb} | _] = Db) ->
+ case lookup(File, FileMapDb) of
+ [Ref] ->
ref_count(Ref, RefDb, 1),
{ok, Ref};
- [Content] ->
- Ref = make_ref(),
- update_counter(Ref, 1, RefDb),
- insert(File, {Content, Ref}, PemChache),
- add_certs_from_pem(Content, Ref, CertsDb),
- {ok, Ref};
undefined ->
new_trusted_cert_entry(File, Db)
end.
@@ -151,25 +150,6 @@ extract_trusted_certs(File) ->
{error, {badmatch, Error}}
end.
-%%--------------------------------------------------------------------
-%%
-%% Description: Cache file as binary in DB
-%%--------------------------------------------------------------------
--spec cache_pem_file(binary(), [db_handle()]) -> {ok, term()}.
-cache_pem_file(File, [_CertsDb, _RefDb, PemChache | _]) ->
- {ok, PemBin} = file:read_file(File),
- Content = public_key:pem_decode(PemBin),
- insert(File, Content, PemChache),
- {ok, Content}.
-
-
--spec cache_pem_file(reference(), binary(), [db_handle()]) -> {ok, term()}.
-cache_pem_file(Ref, File, [_CertsDb, _RefDb, PemChache| _]) ->
- {ok, PemBin} = file:read_file(File),
- Content = public_key:pem_decode(PemBin),
- insert(File, {Content, Ref}, PemChache),
- {ok, Content}.
-
-spec decode_pem_file(binary()) -> {ok, term()}.
decode_pem_file(File) ->
case file:read_file(File) of
@@ -246,6 +226,8 @@ select_cert_by_issuer(Cache, Issuer) ->
%%--------------------------------------------------------------------
ref_count({extracted, _}, _Db, _N) ->
not_cached;
+ref_count(Key, {Db, _}, N) ->
+ ref_count(Key, Db, N);
ref_count(Key, Db, N) ->
ets:update_counter(Db,Key,N).
@@ -278,9 +260,9 @@ insert(Key, Data, Db) ->
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
-update_counter(Key, Count, Db) ->
- true = ets:insert(Db, {Key, Count}),
- ok.
+init_ref_db(Ref, File, {RefDb, FileMapDb}) ->
+ true = ets:insert(RefDb, {Ref, 1}),
+ true = ets:insert(FileMapDb, {File, Ref}).
remove_certs(Ref, CertsDb) ->
true = ets:match_delete(CertsDb, {{Ref, '_', '_'}, '_'}),
@@ -326,10 +308,10 @@ decode_certs(Ref, Cert) ->
undefined
end.
-new_trusted_cert_entry(File, [CertsDb, RefDb, _ | _] = Db) ->
+new_trusted_cert_entry(File, [CertsDb, RefsDb, _ | _]) ->
Ref = make_ref(),
- update_counter(Ref, 1, RefDb),
- {ok, Content} = cache_pem_file(Ref, File, Db),
+ init_ref_db(Ref, File, RefsDb),
+ {ok, Content} = ssl_pem_cache:insert(File),
add_certs_from_pem(Content, Ref, CertsDb),
{ok, Ref}.
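After this change the handle list created by create/1 no longer contains a PEM-cache table of its own; it only carries the name of the table owned by the ssl_pem_cache process, and the file reference table is split into a reference counter and a file-to-reference map. A rough sketch of the resulting shape (the types are an approximation, not a formal spec):

%% [CertDb :: ets:tab(),                          % trusted certificates
%%  {RefDb :: ets:tab(), FileMapDb :: ets:tab()}, % ref counts + file -> ref map
%%  PemCacheName :: atom(),                       % named table owned by ssl_pem_cache
%%  {CrlCache :: ets:tab(), CrlIssuerMap :: ets:tab()}]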
diff --git a/lib/ssl/src/ssl_sup.erl b/lib/ssl/src/ssl_sup.erl
index 8245801139..05a7aaaa82 100644
--- a/lib/ssl/src/ssl_sup.erl
+++ b/lib/ssl/src/ssl_sup.erl
@@ -25,7 +25,7 @@
-behaviour(supervisor).
%% API
--export([start_link/0, manager_opts/0]).
+-export([start_link/0]).
%% Supervisor callback
-export([init/1]).
@@ -44,90 +44,28 @@ start_link() ->
%%%=========================================================================
init([]) ->
- SessionCertManager = session_and_cert_manager_child_spec(),
- TLSConnetionManager = tls_connection_manager_child_spec(),
- %% Handles emulated options so that they inherited by the accept
- %% socket, even when setopts is performed on the listen socket
- ListenOptionsTracker = listen_options_tracker_child_spec(),
-
- DTLSConnetionManager = dtls_connection_manager_child_spec(),
- DTLSUdpListeners = dtls_udp_listeners_spec(),
+ {ok, {{rest_for_one, 10, 3600}, [ssl_admin_child_spec(),
+ ssl_connection_sup()
+ ]}}.
- {ok, {{one_for_all, 10, 3600}, [SessionCertManager, TLSConnetionManager,
- ListenOptionsTracker,
- DTLSConnetionManager, DTLSUdpListeners
- ]}}.
-
-
-manager_opts() ->
- CbOpts = case application:get_env(ssl, session_cb) of
- {ok, Cb} when is_atom(Cb) ->
- InitArgs = session_cb_init_args(),
- [{session_cb, Cb}, {session_cb_init_args, InitArgs}];
- _ ->
- []
- end,
- case application:get_env(ssl, session_lifetime) of
- {ok, Time} when is_integer(Time) ->
- [{session_lifetime, Time}| CbOpts];
- _ ->
- CbOpts
- end.
-
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
-
-session_and_cert_manager_child_spec() ->
- Opts = manager_opts(),
- Name = ssl_manager,
- StartFunc = {ssl_manager, start_link, [Opts]},
+ssl_admin_child_spec() ->
+ Name = ssl_admin_sup,
+ StartFunc = {ssl_admin_sup, start_link, []},
Restart = permanent,
Shutdown = 4000,
- Modules = [ssl_manager],
- Type = worker,
- {Name, StartFunc, Restart, Shutdown, Type, Modules}.
-
-tls_connection_manager_child_spec() ->
- Name = tls_connection,
- StartFunc = {tls_connection_sup, start_link, []},
- Restart = permanent,
- Shutdown = 4000,
- Modules = [tls_connection_sup],
+ Modules = [ssl_admin_sup],
Type = supervisor,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
-dtls_connection_manager_child_spec() ->
- Name = dtls_connection,
- StartFunc = {dtls_connection_sup, start_link, []},
+ssl_connection_sup() ->
+ Name = ssl_connection_sup,
+ StartFunc = {ssl_connection_sup, start_link, []},
Restart = permanent,
Shutdown = 4000,
- Modules = [dtls_connection_sup],
- Type = supervisor,
- {Name, StartFunc, Restart, Shutdown, Type, Modules}.
-
-listen_options_tracker_child_spec() ->
- Name = tls_socket,
- StartFunc = {ssl_listen_tracker_sup, start_link, []},
- Restart = permanent,
- Shutdown = 4000,
- Modules = [tls_socket],
- Type = supervisor,
- {Name, StartFunc, Restart, Shutdown, Type, Modules}.
-
-dtls_udp_listeners_spec() ->
- Name = dtls_udp_listener,
- StartFunc = {dtls_udp_sup, start_link, []},
- Restart = permanent,
- Shutdown = 4000,
- Modules = [],
+ Modules = [ssl_connection_sup],
Type = supervisor,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
-session_cb_init_args() ->
- case application:get_env(ssl, session_cb_init_args) of
- {ok, Args} when is_list(Args) ->
- Args;
- _ ->
- []
- end.
diff --git a/lib/ssl/src/tls_connection.erl b/lib/ssl/src/tls_connection.erl
index 32991d3079..77606911be 100644
--- a/lib/ssl/src/tls_connection.erl
+++ b/lib/ssl/src/tls_connection.erl
@@ -424,18 +424,26 @@ handle_common_event(internal, #ssl_tls{type = ?HANDSHAKE, fragment = Data},
ssl_options = Options} = State0) ->
try
{Packets, Buf} = tls_handshake:get_tls_handshake(Version,Data,Buf0, Options),
- State =
+ State1 =
State0#state{protocol_buffers =
Buffers#protocol_buffers{tls_handshake_buffer = Buf}},
- Events = tls_handshake_events(Packets),
- case StateName of
- connection ->
- ssl_connection:hibernate_after(StateName, State, Events);
- _ ->
- {next_state, StateName, State#state{unprocessed_handshake_events = unprocessed_events(Events)}, Events}
- end
+ case Packets of
+ [] ->
+ assert_buffer_sanity(Buf, Options),
+ {Record, State} = next_record(State1),
+ next_event(StateName, Record, State);
+ _ ->
+ Events = tls_handshake_events(Packets),
+ case StateName of
+ connection ->
+ ssl_connection:hibernate_after(StateName, State1, Events);
+ _ ->
+ {next_state, StateName,
+ State1#state{unprocessed_handshake_events = unprocessed_events(Events)}, Events}
+ end
+ end
catch throw:#alert{} = Alert ->
- ssl_connection:handle_own_alert(Alert, Version, StateName, State0)
+ ssl_connection:handle_own_alert(Alert, Version, StateName, State0)
end;
%%% TLS record protocol level application data messages
handle_common_event(internal, #ssl_tls{type = ?APPLICATION_DATA, fragment = Data}, StateName, State) ->
@@ -615,8 +623,6 @@ next_event(StateName, Record, State, Actions) ->
{next_state, StateName, State, [{next_event, internal, Alert} | Actions]}
end.
-tls_handshake_events([]) ->
- throw(?ALERT_REC(?FATAL, ?HANDSHAKE_FAILURE, malformed_handshake));
tls_handshake_events(Packets) ->
lists:map(fun(Packet) ->
{next_event, internal, {handshake, Packet}}
@@ -735,3 +741,25 @@ unprocessed_events(Events) ->
%% handshake events left to process before we should
%% process more TLS-records received on the socket.
erlang:length(Events)-1.
+
+
+assert_buffer_sanity(<<?BYTE(_Type), ?UINT24(Length), Rest/binary>>, #ssl_options{max_handshake_size = Max}) when
+ Length =< Max ->
+ case size(Rest) of
+ N when N < Length ->
+ true;
+ N when N > Length ->
+ throw(?ALERT_REC(?FATAL, ?HANDSHAKE_FAILURE,
+ too_big_handshake_data));
+ _ ->
+ throw(?ALERT_REC(?FATAL, ?HANDSHAKE_FAILURE,
+ malformed_handshake_data))
+ end;
+assert_buffer_sanity(Bin, _) ->
+ case size(Bin) of
+ N when N < 3 ->
+ true;
+ _ ->
+ throw(?ALERT_REC(?FATAL, ?HANDSHAKE_FAILURE,
+ malformed_handshake_data))
+ end.
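assert_buffer_sanity/2 peeks at the four-byte handshake header (one byte message type, three bytes length) of a partially received message and throws an alert when the buffered data cannot be the prefix of a handshake message that respects the configured max_handshake_size. A plain bit-syntax sketch of the same header inspection, without the ?BYTE/?UINT24 macros:

%% Sketch: extract the announced handshake length from a (possibly
%% incomplete) buffer; the real check also compares it to max_handshake_size.
announced_handshake_length(<<_Type:8, Length:24, _Rest/binary>>) ->
    {ok, Length};
announced_handshake_length(Buffer) when byte_size(Buffer) < 4 ->
    incomplete.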
diff --git a/lib/ssl/test/make_certs.erl b/lib/ssl/test/make_certs.erl
index d85be6c69e..e14f7f60c4 100644
--- a/lib/ssl/test/make_certs.erl
+++ b/lib/ssl/test/make_certs.erl
@@ -172,8 +172,8 @@ revoke(Root, CA, User, C) ->
gencrl(Root, CA, C).
gencrl(Root, CA, C) ->
- %% By default, the CRL is valid for 24 hours from now.
- gencrl(Root, CA, C, 24).
+ %% By default, the CRL is valid for a week from now.
+ gencrl(Root, CA, C, 24*7).
gencrl(Root, CA, C, CrlHours) ->
CACnfFile = filename:join([Root, CA, "ca.cnf"]),
diff --git a/lib/ssl/test/ssl_basic_SUITE.erl b/lib/ssl/test/ssl_basic_SUITE.erl
index 52c1af5b4c..f0a3c42e8d 100644
--- a/lib/ssl/test/ssl_basic_SUITE.erl
+++ b/lib/ssl/test/ssl_basic_SUITE.erl
@@ -136,7 +136,8 @@ options_tests() ->
honor_server_cipher_order,
honor_client_cipher_order,
unordered_protocol_versions_server,
- unordered_protocol_versions_client
+ unordered_protocol_versions_client,
+ max_handshake_size
].
options_tests_tls() ->
@@ -960,9 +961,9 @@ clear_pem_cache(Config) when is_list(Config) ->
{status, _, _, StatusInfo} = sys:get_status(whereis(ssl_manager)),
[_, _,_, _, Prop] = StatusInfo,
State = ssl_test_lib:state(Prop),
- [_,FilRefDb |_] = element(6, State),
+ [_,{FilRefDb, _} |_] = element(6, State),
{Server, Client} = basic_verify_test_no_close(Config),
- CountReferencedFiles = fun({_,-1}, Acc) ->
+ CountReferencedFiles = fun({_, -1}, Acc) ->
Acc;
({_, N}, Acc) ->
N + Acc
@@ -3860,6 +3861,29 @@ unordered_protocol_versions_client(Config) when is_list(Config) ->
ssl_test_lib:check_result(Server, ServerMsg, Client, ClientMsg).
%%--------------------------------------------------------------------
+max_handshake_size() ->
+ [{doc,"Test that we can set max_handshake_size to max value."}].
+
+max_handshake_size(Config) when is_list(Config) ->
+ ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
+ ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
+
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {ssl_test_lib, send_recv_result_active, []}},
+ {options, [{max_handshake_size, 8388607} |ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {ssl_test_lib, send_recv_result_active, []}},
+ {options, [{max_handshake_size, 8388607} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok).
+
+%%--------------------------------------------------------------------
server_name_indication_option() ->
[{doc,"Test API server_name_indication option to connect."}].
diff --git a/lib/ssl/test/ssl_pem_cache_SUITE.erl b/lib/ssl/test/ssl_pem_cache_SUITE.erl
index f10d27fbc6..96b15d9b51 100644
--- a/lib/ssl/test/ssl_pem_cache_SUITE.erl
+++ b/lib/ssl/test/ssl_pem_cache_SUITE.erl
@@ -82,8 +82,8 @@ pem_cleanup() ->
[{doc, "Test pem cache invalidate mechanism"}].
pem_cleanup(Config)when is_list(Config) ->
process_flag(trap_exit, true),
- ClientOpts = proplists:get_value(client_opts, Config),
- ServerOpts = proplists:get_value(server_opts, Config),
+ ClientOpts = proplists:get_value(client_verification_opts, Config),
+ ServerOpts = proplists:get_value(server_verification_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
Server =
diff --git a/lib/stdlib/src/erl_eval.erl b/lib/stdlib/src/erl_eval.erl
index 40a34aa30f..eafee346eb 100644
--- a/lib/stdlib/src/erl_eval.erl
+++ b/lib/stdlib/src/erl_eval.erl
@@ -1306,6 +1306,7 @@ partial_eval(Expr) ->
ev_expr({op,_,Op,L,R}) -> erlang:Op(ev_expr(L), ev_expr(R));
ev_expr({op,_,Op,A}) -> erlang:Op(ev_expr(A));
ev_expr({integer,_,X}) -> X;
+ev_expr({char,_,X}) -> X;
ev_expr({float,_,X}) -> X;
ev_expr({atom,_,X}) -> X;
ev_expr({tuple,_,Es}) ->
diff --git a/lib/stdlib/src/erl_parse.yrl b/lib/stdlib/src/erl_parse.yrl
index 4f38256e6b..d2dd2848b5 100644
--- a/lib/stdlib/src/erl_parse.yrl
+++ b/lib/stdlib/src/erl_parse.yrl
@@ -2,7 +2,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -156,6 +156,7 @@ type -> '#' atom '{' field_types '}' : {type, ?anno('$1'),
record, ['$2'|'$4']}.
type -> binary_type : '$1'.
type -> integer : '$1'.
+type -> char : '$1'.
type -> 'fun' '(' ')' : {type, ?anno('$1'), 'fun', []}.
type -> 'fun' '(' fun_type_100 ')' : '$3'.
@@ -1557,13 +1558,17 @@ new_anno(Term) ->
Abstr :: erl_parse_tree().
anno_to_term(Abstract) ->
- map_anno(fun erl_anno:to_term/1, Abstract).
+ F = fun(Anno, Acc) -> {erl_anno:to_term(Anno), Acc} end,
+ {NewAbstract, []} = modify_anno1(Abstract, [], F),
+ NewAbstract.
-spec anno_from_term(Term) -> erl_parse_tree() when
Term :: term().
anno_from_term(Term) ->
- map_anno(fun erl_anno:from_term/1, Term).
+ F = fun(T, Acc) -> {erl_anno:from_term(T), Acc} end,
+ {NewTerm, []} = modify_anno1(Term, [], F),
+ NewTerm.
%% Forms.
%% Recognize what sys_pre_expand does:
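The new char production above, together with the erl_eval clause earlier in the patch, lets character literals such as $a appear in type expressions alongside plain integers. A small illustrative sketch, with invented type and function names:

    -type lowercase() :: $a..$z.

    %% A character literal is just an integer, so it can also be used
    %% in ranges within specs.
    -spec to_upper(lowercase()) -> $A..$Z.
    to_upper(C) -> C - $a + $A.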
diff --git a/lib/stdlib/src/escript.erl b/lib/stdlib/src/escript.erl
index 7f5ef4df42..c42ae981e7 100644
--- a/lib/stdlib/src/escript.erl
+++ b/lib/stdlib/src/escript.erl
@@ -481,46 +481,49 @@ find_first_body_line(Fd, HeaderSz0, LineNo, KeepFirst, Sections) ->
%% Look for special comment on second line
Line2 = get_line(Fd),
{ok, HeaderSz2} = file:position(Fd, cur),
- case classify_line(Line2) of
- emu_args ->
- %% Skip special comment on second line
- Line3 = get_line(Fd),
- {HeaderSz2, LineNo + 2, Fd,
- Sections#sections{type = guess_type(Line3),
- comment = undefined,
- emu_args = Line2}};
- Line2Type ->
- %% Look for special comment on third line
- Line3 = get_line(Fd),
- {ok, HeaderSz3} = file:position(Fd, cur),
- Line3Type = classify_line(Line3),
- if
- Line3Type =:= emu_args ->
- %% Skip special comment on third line
- Line4 = get_line(Fd),
- {HeaderSz3, LineNo + 3, Fd,
- Sections#sections{type = guess_type(Line4),
- comment = Line2,
- emu_args = Line3}};
- Sections#sections.shebang =:= undefined,
- KeepFirst =:= true ->
- %% No shebang. Use the entire file
- {HeaderSz0, LineNo, Fd,
- Sections#sections{type = guess_type(Line2)}};
- Sections#sections.shebang =:= undefined ->
- %% No shebang. Skip the first line
- {HeaderSz1, LineNo, Fd,
- Sections#sections{type = guess_type(Line2)}};
- Line2Type =:= comment ->
- %% Skip shebang on first line and comment on second
- {HeaderSz2, LineNo + 2, Fd,
- Sections#sections{type = guess_type(Line3),
- comment = Line2}};
- true ->
- %% Just skip shebang on first line
- {HeaderSz1, LineNo + 1, Fd,
- Sections#sections{type = guess_type(Line2)}}
- end
+ if
+ Sections#sections.shebang =:= undefined,
+ KeepFirst =:= true ->
+ %% No shebang. Use the entire file
+ {HeaderSz0, LineNo, Fd,
+ Sections#sections{type = guess_type(Line2)}};
+ Sections#sections.shebang =:= undefined ->
+ %% No shebang. Skip the first line
+ {HeaderSz1, LineNo, Fd,
+ Sections#sections{type = guess_type(Line2)}};
+ true ->
+ case classify_line(Line2) of
+ emu_args ->
+ %% Skip special comment on second line
+ Line3 = get_line(Fd),
+ {HeaderSz2, LineNo + 2, Fd,
+ Sections#sections{type = guess_type(Line3),
+ comment = undefined,
+ emu_args = Line2}};
+ comment ->
+ %% Look for special comment on third line
+ Line3 = get_line(Fd),
+ {ok, HeaderSz3} = file:position(Fd, cur),
+ Line3Type = classify_line(Line3),
+ if
+ Line3Type =:= emu_args ->
+ %% Skip special comment on third line
+ Line4 = get_line(Fd),
+ {HeaderSz3, LineNo + 3, Fd,
+ Sections#sections{type = guess_type(Line4),
+ comment = Line2,
+ emu_args = Line3}};
+ true ->
+ %% Skip shebang on first line and comment on second
+ {HeaderSz2, LineNo + 2, Fd,
+ Sections#sections{type = guess_type(Line3),
+ comment = Line2}}
+ end;
+ _ ->
+ %% Just skip shebang on first line
+ {HeaderSz1, LineNo + 1, Fd,
+ Sections#sections{type = guess_type(Line2)}}
+ end
end.
classify_line(Line) ->
diff --git a/lib/stdlib/test/escript_SUITE.erl b/lib/stdlib/test/escript_SUITE.erl
index 28d69232a0..0b9106a99c 100644
--- a/lib/stdlib/test/escript_SUITE.erl
+++ b/lib/stdlib/test/escript_SUITE.erl
@@ -28,6 +28,7 @@
strange_name/1,
emulator_flags/1,
emulator_flags_no_shebang/1,
+ two_lines/1,
module_script/1,
beam_script/1,
archive_script/1,
@@ -49,7 +50,7 @@ suite() ->
all() ->
[basic, errors, strange_name, emulator_flags,
- emulator_flags_no_shebang,
+ emulator_flags_no_shebang, two_lines,
module_script, beam_script, archive_script, epp,
create_and_extract, foldl, overflow,
archive_script_file_access, unicode].
@@ -153,6 +154,18 @@ emulator_flags(Config) when is_list(Config) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+two_lines(Config) when is_list(Config) ->
+ Data = proplists:get_value(data_dir, Config),
+ Dir = filename:absname(Data), %Get rid of trailing slash.
+ run(Dir, "two_lines -arg1 arg2 arg3",
+ [<<"main:[\"-arg1\",\"arg2\",\"arg3\"]\n"
+ "ERL_FLAGS=false\n"
+ "unknown:[]\n"
+ "ExitCode:0">>]),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
emulator_flags_no_shebang(Config) when is_list(Config) ->
Data = proplists:get_value(data_dir, Config),
Dir = filename:absname(Data), %Get rid of trailing slash.
diff --git a/lib/stdlib/test/escript_SUITE_data/two_lines b/lib/stdlib/test/escript_SUITE_data/two_lines
new file mode 100755
index 0000000000..cf4e99639c
--- /dev/null
+++ b/lib/stdlib/test/escript_SUITE_data/two_lines
@@ -0,0 +1,2 @@
+#! /usr/bin/env escript
+main(MainArgs) -> io:format("main:~p\n", [MainArgs]), ErlArgs = init:get_arguments(), io:format("ERL_FLAGS=~p\n", [os:getenv("ERL_FLAGS")]), io:format("unknown:~p\n",[[E || E <- ErlArgs, element(1, E) =:= unknown]]).
diff --git a/lib/stdlib/test/rand_SUITE.erl b/lib/stdlib/test/rand_SUITE.erl
index 02b7cb10c2..47e7c4f03d 100644
--- a/lib/stdlib/test/rand_SUITE.erl
+++ b/lib/stdlib/test/rand_SUITE.erl
@@ -275,13 +275,13 @@ gen(_, _, Acc) -> lists:reverse(Acc).
%% Check that the algorithms generate sound values.
basic_stats_uniform_1(Config) when is_list(Config) ->
- ct:timetrap({minutes,6}), %% valgrind needs a lot of time
+ ct:timetrap({minutes,15}), %% valgrind needs a lot of time
[basic_uniform_1(?LOOP, rand:seed_s(Alg), 0.0, array:new([{default, 0}]))
|| Alg <- algs()],
ok.
basic_stats_uniform_2(Config) when is_list(Config) ->
- ct:timetrap({minutes,6}), %% valgrind needs a lot of time
+ ct:timetrap({minutes,15}), %% valgrind needs a lot of time
[basic_uniform_2(?LOOP, rand:seed_s(Alg), 0, array:new([{default, 0}]))
|| Alg <- algs()],
ok.
@@ -388,7 +388,7 @@ crypto_uniform_n(N, State0) ->
%% Not a test but measures the time characteristics of the different algorithms
measure(Suite) when is_atom(Suite) -> [];
measure(_Config) ->
- ct:timetrap({minutes,6}), %% valgrind needs a lot of time
+ ct:timetrap({minutes,15}), %% valgrind needs a lot of time
Algos = [crypto64|algs()],
io:format("RNG uniform integer performance~n",[]),
_ = measure_1(random, fun(State) -> {int, random:uniform_s(10000, State)} end),
diff --git a/lib/stdlib/test/shell_SUITE.erl b/lib/stdlib/test/shell_SUITE.erl
index c409a6949b..80585ca359 100644
--- a/lib/stdlib/test/shell_SUITE.erl
+++ b/lib/stdlib/test/shell_SUITE.erl
@@ -2325,7 +2325,7 @@ otp_6554(Config) when is_list(Config) ->
"[unproper | list]).">>),
%% Cheating:
"exception error: no function clause matching "
- "erl_eval:do_apply(4)" ++ _ =
+ "shell:apply_fun(4)" ++ _ =
comm_err(<<"erlang:error(function_clause, [4]).">>),
"exception error: no function clause matching "
"lists:reverse(" ++ _ =
diff --git a/lib/typer/src/typer.erl b/lib/typer/src/typer.erl
index 5c82750a21..cd67af41ed 100644
--- a/lib/typer/src/typer.erl
+++ b/lib/typer/src/typer.erl
@@ -143,8 +143,9 @@ extract(#analysis{macros = Macros,
MergedRecords = dialyzer_utils:merge_records(NewRecords, OldRecords),
CodeServer2 = dialyzer_codeserver:set_temp_records(MergedRecords, CodeServer1),
CodeServer3 = dialyzer_codeserver:finalize_exported_types(NewExpTypes, CodeServer2),
- CodeServer4 = dialyzer_utils:process_record_remote_types(CodeServer3),
- dialyzer_contracts:process_contract_remote_types(CodeServer4)
+ {CodeServer4, RecordDict} =
+ dialyzer_utils:process_record_remote_types(CodeServer3),
+ dialyzer_contracts:process_contract_remote_types(CodeServer4, RecordDict)
catch
throw:{error, ErrorMsg} ->
compile_error(ErrorMsg)
@@ -156,7 +157,7 @@ extract(#analysis{macros = Macros,
fun(Module, TmpPlt) ->
{ok, ModuleContracts} = dict:find(Module, Contracts),
SpecList = [{MFA, Contract}
- || {MFA, {_FileLine, Contract}} <- dict:to_list(ModuleContracts)],
+ || {MFA, {_FileLine, Contract}} <- maps:to_list(ModuleContracts)],
dialyzer_plt:insert_contract_list(TmpPlt, SpecList)
end,
NewTrustPLT = lists:foldl(FoldFun, TrustPLT, Modules),
@@ -172,8 +173,10 @@ get_type_info(#analysis{callgraph = CallGraph,
StrippedCallGraph = remove_external(CallGraph, TrustPLT),
%% io:format("--- Analyzing callgraph... "),
try
- NewPlt = dialyzer_succ_typings:analyze_callgraph(StrippedCallGraph,
- TrustPLT, CodeServer),
+ NewMiniPlt = dialyzer_succ_typings:analyze_callgraph(StrippedCallGraph,
+ TrustPLT,
+ CodeServer),
+ NewPlt = dialyzer_plt:restore_full_plt(NewMiniPlt),
Analysis#analysis{callgraph = StrippedCallGraph, trust_plt = NewPlt}
catch
error:What ->
@@ -224,7 +227,7 @@ get_external(Exts, Plt) ->
-type fa() :: {atom(), arity()}.
-type func_info() :: {line(), atom(), arity()}.
--record(info, {records = map__new() :: map_dict(),
+-record(info, {records = maps:new() :: erl_types:type_table(),
functions = [] :: [func_info()],
types = map__new() :: map_dict(),
edoc = false :: boolean()}).
@@ -267,7 +270,7 @@ write_inc_files(Inc) ->
Functions = [Key || {Key, _} <- Val],
Val1 = [{{F,A},Type} || {{_Line,F,A},Type} <- Val],
Info = #info{types = map__from_list(Val1),
- records = map__new(),
+ records = maps:new(),
%% Note we need to sort functions here!
functions = lists:keysort(1, Functions)},
%% io:format("Types ~p\n", [Info#info.types]),
@@ -849,8 +852,9 @@ collect_info(Analysis) ->
TmpCServer1 = dialyzer_codeserver:set_temp_records(MergedRecords, TmpCServer),
TmpCServer2 =
dialyzer_codeserver:finalize_exported_types(MergedExpTypes, TmpCServer1),
- TmpCServer3 = dialyzer_utils:process_record_remote_types(TmpCServer2),
- dialyzer_contracts:process_contract_remote_types(TmpCServer3)
+ {TmpCServer3, RecordDict} =
+ dialyzer_utils:process_record_remote_types(TmpCServer2),
+ dialyzer_contracts:process_contract_remote_types(TmpCServer3, RecordDict)
catch
throw:{error, ErrorMsg} ->
fatal_error(ErrorMsg)
diff --git a/lib/xmerl/src/xmerl_scan.erl b/lib/xmerl/src/xmerl_scan.erl
index 5e0459ec21..9f6b27113e 100644
--- a/lib/xmerl/src/xmerl_scan.erl
+++ b/lib/xmerl/src/xmerl_scan.erl
@@ -2225,16 +2225,18 @@ processed_whole_element(S=#xmerl_scanner{hook_fun = _Hook,
AllAttrs =
case S#xmerl_scanner.default_attrs of
true ->
- [ #xmlAttribute{name = AttName,
- parents = [{Name, Pos} | Parents],
- language = Lang,
- nsinfo = NSI,
- namespace = Namespace,
- value = AttValue,
- normalized = true} ||
- {AttName, AttValue} <- get_default_attrs(S, Name),
- AttValue =/= no_value,
- not lists:keymember(AttName, #xmlAttribute.name, Attrs) ];
+ DefaultAttrs =
+ [ #xmlAttribute{name = AttName,
+ parents = [{Name, Pos} | Parents],
+ language = Lang,
+ nsinfo = NSI,
+ namespace = Namespace,
+ value = AttValue,
+ normalized = true} ||
+ {AttName, AttValue} <- get_default_attrs(S, Name),
+ AttValue =/= no_value,
+ not lists:keymember(AttName, #xmlAttribute.name, Attrs) ],
+ lists:append(Attrs, DefaultAttrs);
false ->
Attrs
end,
diff --git a/lib/xmerl/test/xmerl_SUITE.erl b/lib/xmerl/test/xmerl_SUITE.erl
index e97b8c6a4b..cf7c0b7548 100644
--- a/lib/xmerl/test/xmerl_SUITE.erl
+++ b/lib/xmerl/test/xmerl_SUITE.erl
@@ -54,7 +54,8 @@ groups() ->
cpd_expl_provided_DTD]},
{misc, [],
[latin1_alias, syntax_bug1, syntax_bug2, syntax_bug3,
- pe_ref1, copyright, testXSEIF, export_simple1, export]},
+ pe_ref1, copyright, testXSEIF, export_simple1, export,
+ default_attrs_bug]},
{eventp_tests, [], [sax_parse_and_export]},
{ticket_tests, [],
[ticket_5998, ticket_7211, ticket_7214, ticket_7430,
@@ -223,6 +224,21 @@ syntax_bug3(Config) ->
Err -> Err
end.
+default_attrs_bug(Config) ->
+ file:set_cwd(datadir(Config)),
+ Doc = "<!DOCTYPE doc [<!ATTLIST doc b CDATA \"default\">]>\n"
+ "<doc a=\"explicit\"/>",
+ {#xmlElement{attributes = [#xmlAttribute{name = a, value = "explicit"},
+ #xmlAttribute{name = b, value = "default"}]},
+ []
+ } = xmerl_scan:string(Doc, [{default_attrs, true}]),
+ Doc2 = "<!DOCTYPE doc [<!ATTLIST doc b CDATA \"default\">]>\n"
+ "<doc b=\"also explicit\" a=\"explicit\"/>",
+ {#xmlElement{attributes = [#xmlAttribute{name = b, value = "also explicit"},
+ #xmlAttribute{name = a, value = "explicit"}]},
+ []
+ } = xmerl_scan:string(Doc2, [{default_attrs, true}]).
+
pe_ref1(Config) ->
file:set_cwd(datadir(Config)),
{#xmlElement{},[]} = xmerl_scan:file(datadir_join(Config,[misc,"PE_ref1.xml"]),[{validation,true}]).
diff --git a/otp_build b/otp_build
index 28a229b101..d0f75aff71 100755
--- a/otp_build
+++ b/otp_build
@@ -19,9 +19,6 @@
# %CopyrightEnd%
#
-# Expected autoconf version
-EXPECTED_AUTOCONF_VERSION=2.59
-
# Global configuration variables
#
# NOTE: lazy_configure depends on '.' always being last directory
@@ -288,30 +285,6 @@ do_autoconf ()
create_lib_configure_in
distribute_config_helpers
- if target_contains win32; then
- # Select the correct autoconf on cygwin
- save_want_autoconf_ver=$WANT_AUTOCONF_VER
- WANT_AUTOCONF_VER=$EXPECTED_AUTOCONF_VERSION
- export WANT_AUTOCONF_VER
- fi
- exp_ac_vsn=$EXPECTED_AUTOCONF_VERSION
- ac_vsn_blob=`autoconf --version`
- ac_vsn=`echo x$ac_vsn_blob | sed "s|[^0-9]*\([0-9][^ \t\n]*\).*|\1|"`
- case "$ac_vsn" in
- $exp_ac_vsn)
- ;;
- *)
- echo "***************************************************" 1>&2
- echo "***************************************************" 1>&2
- echo "*** WARNING: System might fail to configure or" 1>&2
- echo "*** might be erroneously configured" 1>&2
- echo "*** since autoconf version $ac_vsn is used" 1>&2
- echo "*** instead of version $exp_ac_vsn!" 1>&2
- echo "***************************************************" 1>&2
- echo "***************************************************" 1>&2
- ;;
- esac
-
if [ ! -z "$OVERRIDE_CONFIGURE" ]; then
echo "Autoconf disabled on target $TARGET, but is performed on host" >&2
# We still use erts configure for erl_interface and VxWorks
@@ -346,11 +319,6 @@ do_autoconf ()
done
restore_vars OVERRIDE_TARGET TARGET
-
- if target_contains win32; then
- WANT_AUTOCONF_VER=$save_want_autoconf_ver
- export WANT_AUTOCONF_VER
- fi
}
run_configure ()
@@ -583,7 +551,6 @@ do_lazy_configure ()
CONFIGURE_DIR=$dir \
EXTRA_CONFIGURE_DEPENDENCIES=$xc_dep \
EXTRA_CONFIG_STATUS_DEPENDENCIES=$xcs_dep \
- EXPECTED_AUTOCONF_VERSION=$EXPECTED_AUTOCONF_VERSION \
lazy_configure
echo "=== Done configuring $dir"
echo ""
@@ -613,7 +580,6 @@ do_lazy_configure_clean ()
MAKE="$MAKE" TARGET=$TARGET \
ERL_TOP=$ERL_TOP \
CONFIGURE_DIR=$dir \
- EXPECTED_AUTOCONF_VERSION=$EXPECTED_AUTOCONF_VERSION \
lazy_configure_clean
echo "=== Done cleaning configure in $dir"
echo ""
@@ -644,7 +610,6 @@ do_lazy_configure_target_clean ()
MAKE="$MAKE" TARGET=$TARGET \
ERL_TOP=$ERL_TOP \
CONFIGURE_DIR=$dir \
- EXPECTED_AUTOCONF_VERSION=$EXPECTED_AUTOCONF_VERSION \
lazy_configure_target_clean
echo "=== Done target cleaning configure in $dir"
echo ""
diff --git a/otp_versions.table b/otp_versions.table
index 2f31878968..6ca9636c37 100644
--- a/otp_versions.table
+++ b/otp_versions.table
@@ -16,6 +16,7 @@ OTP-19.0.3 : inets-6.3.2 kernel-5.0.1 ssl-8.0.1 # asn1-4.0.3 common_test-1.12.2
OTP-19.0.2 : compiler-7.0.1 erts-8.0.2 stdlib-3.0.1 # asn1-4.0.3 common_test-1.12.2 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7 debugger-4.2 dialyzer-3.0.1 diameter-1.12 edoc-0.7.19 eldap-1.2.2 erl_docgen-0.5 erl_interface-3.9 et-1.6 eunit-2.3 gs-1.6.1 hipe-3.15.1 ic-4.4.1 inets-6.3.1 jinterface-1.7 kernel-5.0 megaco-3.18.1 mnesia-4.14 observer-2.2.1 odbc-2.11.2 orber-3.8.2 os_mon-2.4.1 otp_mibs-1.1.1 parsetools-2.1.2 percept-0.9 public_key-1.2 reltool-0.7.1 runtime_tools-1.10 sasl-3.0 snmp-5.2.3 ssh-4.3.1 ssl-8.0 syntax_tools-2.0 tools-2.8.5 typer-0.9.11 wx-1.7 xmerl-1.3.11 :
OTP-19.0.1 : dialyzer-3.0.1 erts-8.0.1 inets-6.3.1 observer-2.2.1 ssh-4.3.1 tools-2.8.5 # asn1-4.0.3 common_test-1.12.2 compiler-7.0 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7 debugger-4.2 diameter-1.12 edoc-0.7.19 eldap-1.2.2 erl_docgen-0.5 erl_interface-3.9 et-1.6 eunit-2.3 gs-1.6.1 hipe-3.15.1 ic-4.4.1 jinterface-1.7 kernel-5.0 megaco-3.18.1 mnesia-4.14 odbc-2.11.2 orber-3.8.2 os_mon-2.4.1 otp_mibs-1.1.1 parsetools-2.1.2 percept-0.9 public_key-1.2 reltool-0.7.1 runtime_tools-1.10 sasl-3.0 snmp-5.2.3 ssl-8.0 stdlib-3.0 syntax_tools-2.0 typer-0.9.11 wx-1.7 xmerl-1.3.11 :
OTP-19.0 : asn1-4.0.3 common_test-1.12.2 compiler-7.0 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7 debugger-4.2 dialyzer-3.0 diameter-1.12 edoc-0.7.19 eldap-1.2.2 erl_docgen-0.5 erl_interface-3.9 erts-8.0 et-1.6 eunit-2.3 gs-1.6.1 hipe-3.15.1 ic-4.4.1 inets-6.3 jinterface-1.7 kernel-5.0 megaco-3.18.1 mnesia-4.14 observer-2.2 odbc-2.11.2 orber-3.8.2 os_mon-2.4.1 otp_mibs-1.1.1 parsetools-2.1.2 percept-0.9 public_key-1.2 reltool-0.7.1 runtime_tools-1.10 sasl-3.0 snmp-5.2.3 ssh-4.3 ssl-8.0 stdlib-3.0 syntax_tools-2.0 tools-2.8.4 typer-0.9.11 wx-1.7 xmerl-1.3.11 # :
+OTP-18.3.4.5 : crypto-3.6.3.1 erts-7.3.1.3 inets-6.2.4.1 ssh-4.2.2.3 # asn1-4.0.2 common_test-1.12.1.1 compiler-6.0.3 cosEvent-2.2 cosEventDomain-1.2 cosFileTransfer-1.2 cosNotification-1.2.1 cosProperty-1.2 cosTime-1.2.1 cosTransactions-1.3.1 debugger-4.1.2 dialyzer-2.9 diameter-1.11.2 edoc-0.7.18 eldap-1.2.1 erl_docgen-0.4.2 erl_interface-3.8.2 et-1.5.1 eunit-2.2.13 gs-1.6 hipe-3.15 ic-4.4 jinterface-1.6.1 kernel-4.2 megaco-3.18 mnesia-4.13.4 observer-2.1.2 odbc-2.11.1 orber-3.8.1 os_mon-2.4 ose-1.1 otp_mibs-1.1 parsetools-2.1.1 percept-0.8.11 public_key-1.1.1 reltool-0.7 runtime_tools-1.9.3 sasl-2.7 snmp-5.2.2 ssl-7.3.3.1 stdlib-2.8 syntax_tools-1.7 test_server-3.10 tools-2.8.3 typer-0.9.10 webtool-0.9.1 wx-1.6.1 xmerl-1.3.10 :
OTP-18.3.4.4 : erts-7.3.1.2 # asn1-4.0.2 common_test-1.12.1.1 compiler-6.0.3 cosEvent-2.2 cosEventDomain-1.2 cosFileTransfer-1.2 cosNotification-1.2.1 cosProperty-1.2 cosTime-1.2.1 cosTransactions-1.3.1 crypto-3.6.3 debugger-4.1.2 dialyzer-2.9 diameter-1.11.2 edoc-0.7.18 eldap-1.2.1 erl_docgen-0.4.2 erl_interface-3.8.2 et-1.5.1 eunit-2.2.13 gs-1.6 hipe-3.15 ic-4.4 inets-6.2.4 jinterface-1.6.1 kernel-4.2 megaco-3.18 mnesia-4.13.4 observer-2.1.2 odbc-2.11.1 orber-3.8.1 os_mon-2.4 ose-1.1 otp_mibs-1.1 parsetools-2.1.1 percept-0.8.11 public_key-1.1.1 reltool-0.7 runtime_tools-1.9.3 sasl-2.7 snmp-5.2.2 ssh-4.2.2.2 ssl-7.3.3.1 stdlib-2.8 syntax_tools-1.7 test_server-3.10 tools-2.8.3 typer-0.9.10 webtool-0.9.1 wx-1.6.1 xmerl-1.3.10 :
OTP-18.3.4.3 : ssh-4.2.2.2 # asn1-4.0.2 common_test-1.12.1.1 compiler-6.0.3 cosEvent-2.2 cosEventDomain-1.2 cosFileTransfer-1.2 cosNotification-1.2.1 cosProperty-1.2 cosTime-1.2.1 cosTransactions-1.3.1 crypto-3.6.3 debugger-4.1.2 dialyzer-2.9 diameter-1.11.2 edoc-0.7.18 eldap-1.2.1 erl_docgen-0.4.2 erl_interface-3.8.2 erts-7.3.1.1 et-1.5.1 eunit-2.2.13 gs-1.6 hipe-3.15 ic-4.4 inets-6.2.4 jinterface-1.6.1 kernel-4.2 megaco-3.18 mnesia-4.13.4 observer-2.1.2 odbc-2.11.1 orber-3.8.1 os_mon-2.4 ose-1.1 otp_mibs-1.1 parsetools-2.1.1 percept-0.8.11 public_key-1.1.1 reltool-0.7 runtime_tools-1.9.3 sasl-2.7 snmp-5.2.2 ssl-7.3.3.1 stdlib-2.8 syntax_tools-1.7 test_server-3.10 tools-2.8.3 typer-0.9.10 webtool-0.9.1 wx-1.6.1 xmerl-1.3.10 :
OTP-18.3.4.2 : common_test-1.12.1.1 erts-7.3.1.1 ssl-7.3.3.1 # asn1-4.0.2 compiler-6.0.3 cosEvent-2.2 cosEventDomain-1.2 cosFileTransfer-1.2 cosNotification-1.2.1 cosProperty-1.2 cosTime-1.2.1 cosTransactions-1.3.1 crypto-3.6.3 debugger-4.1.2 dialyzer-2.9 diameter-1.11.2 edoc-0.7.18 eldap-1.2.1 erl_docgen-0.4.2 erl_interface-3.8.2 et-1.5.1 eunit-2.2.13 gs-1.6 hipe-3.15 ic-4.4 inets-6.2.4 jinterface-1.6.1 kernel-4.2 megaco-3.18 mnesia-4.13.4 observer-2.1.2 odbc-2.11.1 orber-3.8.1 os_mon-2.4 ose-1.1 otp_mibs-1.1 parsetools-2.1.1 percept-0.8.11 public_key-1.1.1 reltool-0.7 runtime_tools-1.9.3 sasl-2.7 snmp-5.2.2 ssh-4.2.2.1 stdlib-2.8 syntax_tools-1.7 test_server-3.10 tools-2.8.3 typer-0.9.10 webtool-0.9.1 wx-1.6.1 xmerl-1.3.10 :
diff --git a/scripts/run-smoke-tests b/scripts/run-smoke-tests
new file mode 100755
index 0000000000..c2333e7825
--- /dev/null
+++ b/scripts/run-smoke-tests
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -ev
+
+cd $ERL_TOP/release/tests/test_server
+$ERL_TOP/bin/erl -s ts install -s ts smoke_test batch -s init stop
+
+if grep -q '=failed *[1-9]' ct_run.test_server@*/*/run.*/suite.log; then
+ echo "One or more tests failed."
+ exit 1
+fi
diff --git a/system/doc/design_principles/statem.xml b/system/doc/design_principles/statem.xml
index f627145f9f..d08ddd0036 100644
--- a/system/doc/design_principles/statem.xml
+++ b/system/doc/design_principles/statem.xml
@@ -130,7 +130,7 @@ handle_event(EventType, EventContent, State, Data) ->
{next_state, NewState, NewData}
</pre>
<p>
- Se section
+ See section
<seealso marker="#One Event Handler">One Event Handler</seealso>
for an example.
</p>
@@ -887,7 +887,7 @@ stop() ->
</p>
<p>
This type of time-out is useful to for example act on inactivity.
- Let us start restart the code sequence
+ Let us restart the code sequence
if no button is pressed for say 30 seconds:
</p>
<code type="erl"><![CDATA[
diff --git a/system/doc/efficiency_guide/advanced.xml b/system/doc/efficiency_guide/advanced.xml
index eee2648f34..e1760d0ded 100644
--- a/system/doc/efficiency_guide/advanced.xml
+++ b/system/doc/efficiency_guide/advanced.xml
@@ -87,15 +87,15 @@
</row>
<row>
<cell>Small Map</cell>
- <cell>4 words + 2 words per entry (key and value) + the size of each key and value pair.</cell>
+ <cell>5 words + the size of all keys and values.</cell>
</row>
<row>
- <cell>Large Map</cell>
+ <cell>Large Map (> 32 keys)</cell>
<cell>
- At least, 2 words + 2 x <c>N</c> words + 2 x log16(<c>N</c>) words +
- the size of each key and value pair, where <c>N</c> is the number of pairs in the Map.
- A large Map is represented as a tree internally where each node in the tree is a
- "sparse tuple" of arity 16.
+ <c>N</c> x <c>F</c> words + the size of all keys and values.<br></br>
+ <c>N</c> is the number of keys in the Map.<br></br>
+ <c>F</c> is a sparsity factor that can vary between 1.6 and 1.8
+ due to the probabilistic nature of the internal HAMT data structure.
</cell>
</row>
<row>
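Reading the revised table, a large map with 1000 keys costs on the order of 1000 x 1.7, roughly 1700 words for the structure itself, plus whatever the keys and values occupy. The figures can be checked on a running emulator with erts_debug:flat_size/1, which reports a term's size in words; an illustrative sketch (exact numbers vary with word size and the sparsity factor):

    Small = maps:from_list([{K, K} || K <- lists:seq(1, 10)]),    %% small map, =< 32 keys
    Large = maps:from_list([{K, K} || K <- lists:seq(1, 1000)]),  %% HAMT-based large map
    io:format("small: ~p words, large: ~p words~n",
              [erts_debug:flat_size(Small), erts_debug:flat_size(Large)]).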
diff --git a/system/doc/reference_manual/typespec.xml b/system/doc/reference_manual/typespec.xml
index ced584ed35..a0ea41cb3b 100644
--- a/system/doc/reference_manual/typespec.xml
+++ b/system/doc/reference_manual/typespec.xml
@@ -11,7 +11,7 @@
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-
+
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
@@ -63,7 +63,7 @@
Types consist of, and are built from, a set of predefined types,
for example, <c>integer()</c>, <c>atom()</c>, and <c>pid()</c>.
Predefined types represent a typically infinite set of Erlang terms that
- belong to this type. For example, the type <c>atom()</c> stands for the
+ belong to this type. For example, the type <c>atom()</c> denotes the
set of all Erlang atoms.
</p>
<p>
@@ -131,19 +131,19 @@
| nonempty_improper_list(Type1, Type2) %% Type1 and Type2 as above
| nonempty_list(Type) %% Proper non-empty list
- Map :: map() %% stands for a map of any size
- | #{} %% stands for the empty map
+ Map :: map() %% denotes a map of any size
+ | #{} %% denotes the empty map
| #{PairList}
- Tuple :: tuple() %% stands for a tuple of any size
+ Tuple :: tuple() %% denotes a tuple of any size
| {}
| {TList}
PairList :: Pair
| Pair, PairList
- Pair :: Type := Type %% denotes a pair that must be present
- | Type => Type
+ Pair :: Type := Type %% denotes a mandatory pair
+ | Type => Type %% denotes an optional pair
TList :: Type
| Type, TList
@@ -161,7 +161,7 @@
that <c>M</c> or <c>N</c>, or both, are zero.
</p>
<p>
- Because lists are commonly used, they have shorthand type notations.
+ Because lists are commonly used, they have shorthand type notations.
The types <c>list(T)</c> and <c>nonempty_list(T)</c> have the shorthands
<c>[T]</c> and <c>[T,...]</c>, respectively.
The only difference between the two shorthands is that <c>[T]</c> can be an
@@ -169,14 +169,18 @@
</p>
<p>
Notice that the shorthand for <c>list()</c>, that is, the list of
- elements of unknown type, is <c>[_]</c> (or <c>[any()]</c>), not <c>[]</c>.
+ elements of unknown type, is <c>[_]</c> (or <c>[any()]</c>), not <c>[]</c>.
The notation <c>[]</c> specifies the singleton type for the empty list.
</p>
<p>
The general form of maps is <c>#{PairList}</c>. The key types in
<c>PairList</c> are allowed to overlap, and if they do, the
leftmost pair takes precedence. A map pair has a key in
- <c>PairList</c> if it belongs to this type.
+ <c>PairList</c> if it belongs to this type. A <c>PairList</c> may contain
+ both 'mandatory' and 'optional' pairs, where a 'mandatory' pair denotes
+ that a key of the key type, with an associated value of the value type,
+ must be present. An 'optional' pair places no such requirement on the
+ key type.
</p>
<p>
Notice that the syntactic representation of <c>map()</c> is
@@ -184,8 +188,8 @@
The notation <c>#{}</c> specifies the singleton type for the empty map.
</p>
<p>
- For convenience, the following types are also built-in.
- They can be thought as predefined aliases for the type unions also shown in
+ For convenience, the following types are also built-in.
+ They can be thought of as predefined aliases for the type unions also shown in
the table.
</p>
<table>
@@ -201,37 +205,37 @@
<row>
<cell><c>bitstring()</c></cell><cell><c>&lt;&lt;_:_*1&gt;&gt;</c></cell>
</row>
- <row>
+ <row>
<cell><c>boolean()</c></cell><cell><c>'false' | 'true'</c></cell>
</row>
- <row>
+ <row>
<cell><c>byte()</c></cell><cell><c>0..255</c></cell>
</row>
<row>
<cell><c>char()</c></cell><cell><c>0..16#10ffff</c></cell>
</row>
- <row>
+ <row>
<cell><c>nil()</c></cell><cell><c>[]</c></cell>
</row>
<row>
<cell><c>number()</c></cell><cell><c>integer() | float()</c></cell>
</row>
- <row>
+ <row>
<cell><c>list()</c></cell><cell><c>[any()]</c></cell>
</row>
- <row>
+ <row>
<cell><c>maybe_improper_list()</c></cell><cell><c>maybe_improper_list(any(), any())</c></cell>
</row>
- <row>
+ <row>
<cell><c>nonempty_list()</c></cell><cell><c>nonempty_list(any())</c></cell>
</row>
<row>
<cell><c>string()</c></cell><cell><c>[char()]</c></cell>
</row>
- <row>
+ <row>
<cell><c>nonempty_string()</c></cell><cell><c>[char(),...]</c></cell>
</row>
- <row>
+ <row>
<cell><c>iodata()</c></cell><cell><c>iolist() | binary()</c></cell>
</row>
<row>
@@ -243,7 +247,7 @@
<row>
<cell><c>module()</c></cell><cell><c>atom()</c></cell>
</row>
- <row>
+ <row>
<cell><c>mfa()</c></cell><cell><c>{module(),atom(),arity()}</c></cell>
</row>
<row>
@@ -259,7 +263,7 @@
<cell><c>timeout()</c></cell><cell><c>'infinity' | non_neg_integer()</c></cell>
</row>
<row>
- <cell><c>no_return()</c></cell><cell><c>none()</c></cell>
+ <cell><c>no_return()</c></cell><cell><c>none()</c></cell>
</row>
<tcaption>Built-in types, predefined aliases</tcaption>
</table>
@@ -284,11 +288,11 @@
</row>
<tcaption>Additional built-in types</tcaption>
</table>
-
+
<p>
Users are not allowed to define types with the same names as the
predefined or built-in ones. This is checked by the compiler and
- its violation results in a compilation error.
+ its violation results in a compilation error.
</p>
<note>
<p>
@@ -394,13 +398,13 @@
<pre>
-record(rec, {field1 :: Type1, field2, field3 :: Type3}).</pre>
<p>
- For fields without type annotations, their type defaults to any().
+ For fields without type annotations, their type defaults to any().
That is, the previous example is a shorthand for the following:
</p>
<pre>
-record(rec, {field1 :: Type1, field2 :: any(), field3 :: Type3}).</pre>
<p>
- In the presence of initial values for fields,
+ In the presence of initial values for fields,
the type must be declared after the initialization, as follows:
</p>
<pre>
@@ -409,12 +413,12 @@
The initial values for fields are to be compatible
with (that is, a member of) the corresponding types.
This is checked by the compiler and results in a compilation error
- if a violation is detected.
+ if a violation is detected.
</p>
<note>
<p>Before Erlang/OTP 19, for fields without initial values,
the singleton type <c>'undefined'</c> was added to all declared types.
- In other words, the following two record declarations had identical
+ In other words, the following two record declarations had identical
effects:</p>
<pre>
-record(rec, {f1 = 42 :: integer(),
@@ -430,22 +434,22 @@
</p>
</note>
<p>
- Any record, containing type information or not, once defined,
+ Any record, containing type information or not, once defined,
can be used as a type using the following syntax:
</p>
<pre> #rec{}</pre>
<p>
- In addition, the record fields can be further specified when using
+ In addition, the record fields can be further specified when using
a record type by adding type information about the field
as follows:
</p>
<pre> #rec{some_field :: Type}</pre>
<p>
- Any unspecified fields are assumed to have the type in the original
+ Any unspecified fields are assumed to have the type in the original
record declaration.
</p>
</section>
-
+
<section>
<title>Specifications for Functions</title>
<p>
@@ -459,9 +463,9 @@
else a compilation error occurs.
</p>
<p>
- This form can also be used in header files (.hrl) to declare type
- information for exported functions.
- Then these header files can be included in files that (implicitly or
+ This form can also be used in header files (.hrl) to declare type
+ information for exported functions.
+ Then these header files can be included in files that (implicitly or
explicitly) import these functions.
</p>
<p>
@@ -475,14 +479,14 @@
<pre>
-spec Function(ArgName1 :: Type1, ..., ArgNameN :: TypeN) -> RT.</pre>
<p>
- A function specification can be overloaded.
+ A function specification can be overloaded.
That is, it can have several types, separated by a semicolon (<c>;</c>):
</p>
<pre>
-spec foo(T1, T2) -> T3
; (T4, T5) -> T6.</pre>
<p>
- A current restriction, which currently results in a warning
+ A current restriction, which currently results in a warning
(not an error) by the compiler, is that the domains of
the argument types cannot overlap.
For example, the following specification results in a warning:
@@ -491,9 +495,9 @@
-spec foo(pos_integer()) -> pos_integer()
; (integer()) -> integer().</pre>
<p>
- Type variables can be used in specifications to specify relations for
- the input and output arguments of a function.
- For example, the following specification defines the type of a
+ Type variables can be used in specifications to specify relations for
+ the input and output arguments of a function.
+ For example, the following specification defines the type of a
polymorphic identity function:
</p>
<pre>
@@ -542,8 +546,8 @@
-spec foo({X, integer()}) -> X when X :: atom()
; ([Y]) -> Y when Y :: number().</pre>
<p>
- Some functions in Erlang are not meant to return;
- either because they define servers or because they are used to
+ Some functions in Erlang are not meant to return;
+ either because they define servers or because they are used to
throw exceptions, as in the following function:
</p>
<pre> my_error(Err) -> erlang:throw({error, Err}).</pre>
@@ -555,4 +559,3 @@
<pre> -spec my_error(term()) -> no_return().</pre>
</section>
</chapter>
-
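The mandatory/optional distinction documented above reads naturally in specs; a brief sketch with invented type and function names, where id must be present in every entry() while comment may be left out:

    -type entry() :: #{id := integer(), comment => string()}.

    -spec describe(entry()) -> string().
    describe(#{id := Id} = E) ->
        %% maps:get/3 falls back to a default when the optional key is absent.
        lists:flatten(io_lib:format("~p: ~s", [Id, maps:get(comment, E, "")])).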
diff --git a/system/doc/tutorial/c_portdriver.xmlsrc b/system/doc/tutorial/c_portdriver.xmlsrc
index 933e2395a3..da680642b6 100644
--- a/system/doc/tutorial/c_portdriver.xmlsrc
+++ b/system/doc/tutorial/c_portdriver.xmlsrc
@@ -161,8 +161,8 @@ decode([Int]) -> Int.</pre>
<title>Running the Example</title>
<p><em>Step 1.</em> Compile the C code:</p>
<pre>
-unix> <input>gcc -o exampledrv -fpic -shared complex.c port_driver.c</input>
-windows> <input>cl -LD -MD -Fe exampledrv.dll complex.c port_driver.c</input></pre>
+unix> <input>gcc -o example_drv.so -fpic -shared complex.c port_driver.c</input>
+windows> <input>cl -LD -MD -Fe example_drv.dll complex.c port_driver.c</input></pre>
<p><em>Step 2.</em> Start Erlang and compile the Erlang code:</p>
<pre>
> <input>erl</input>