Diffstat (limited to 'lib')
-rw-r--r--lib/asn1/src/Makefile4
-rw-r--r--lib/asn1/src/asn1ct_check.erl151
-rw-r--r--lib/asn1/src/asn1ct_constructed_ber_bin_v2.erl3
-rw-r--r--lib/asn1/src/asn1ct_constructed_per.erl71
-rw-r--r--lib/asn1/src/asn1ct_gen.erl44
-rw-r--r--lib/asn1/src/asn1ct_imm.erl44
-rw-r--r--lib/asn1/src/asn1ct_value.erl17
-rw-r--r--lib/asn1/src/asn1rtt_check.erl91
-rw-r--r--lib/asn1/src/asn1rtt_per_common.erl41
-rw-r--r--lib/asn1/src/prepare_templates.erl72
-rw-r--r--lib/asn1/test/asn1_SUITE.erl36
-rw-r--r--lib/asn1/test/asn1_SUITE_data/Constraints.py45
-rw-r--r--lib/asn1/test/asn1_SUITE_data/Default.asn3
-rw-r--r--lib/asn1/test/asn1_SUITE_data/PrimStrings.asn18
-rw-r--r--lib/asn1/test/testConstraints.erl82
-rw-r--r--lib/asn1/test/testParamBasic.erl7
-rw-r--r--lib/asn1/test/testPrimStrings.erl41
-rw-r--r--lib/asn1/test/testSeqSetDefaultVal.erl191
-rw-r--r--lib/common_test/doc/src/ct_hooks.xml5
-rw-r--r--lib/common_test/src/ct_framework.erl274
-rw-r--r--lib/common_test/src/ct_logs.erl6
-rw-r--r--lib/common_test/src/ct_netconfc.erl8
-rw-r--r--lib/common_test/src/cth_log_redirect.erl12
-rw-r--r--lib/common_test/src/cth_surefire.erl5
-rw-r--r--lib/common_test/test/common_test.cover1
-rw-r--r--lib/common_test/test/ct_config_SUITE.erl34
-rw-r--r--lib/common_test/test/ct_config_info_SUITE.erl6
-rw-r--r--lib/common_test/test/ct_error_SUITE.erl52
-rw-r--r--lib/common_test/test/ct_group_info_SUITE.erl156
-rw-r--r--lib/common_test/test/ct_groups_test_2_SUITE.erl12
-rw-r--r--lib/common_test/test/ct_hooks_SUITE.erl26
-rw-r--r--lib/common_test/test/ct_netconfc_SUITE_data/netconfc1_SUITE.erl25
-rw-r--r--lib/common_test/test/ct_netconfc_SUITE_data/ns.erl9
-rw-r--r--lib/common_test/test/ct_pre_post_test_io_SUITE.erl43
-rw-r--r--lib/common_test/test/ct_pre_post_test_io_SUITE_data/cth_ctrl.erl3
-rw-r--r--lib/common_test/test/ct_repeat_1_SUITE.erl26
-rw-r--r--lib/common_test/test/ct_repeat_testrun_SUITE.erl21
-rw-r--r--lib/common_test/test/ct_sequence_1_SUITE.erl4
-rw-r--r--lib/common_test/test/ct_skip_SUITE.erl433
-rw-r--r--lib/common_test/test/ct_skip_SUITE_data/skip/test/auto_skip_12_SUITE.erl121
-rw-r--r--lib/common_test/test/ct_skip_SUITE_data/skip/test/auto_skip_3_SUITE.erl2
-rw-r--r--lib/common_test/test/ct_skip_SUITE_data/skip/test/user_skip_6_SUITE.erl118
-rw-r--r--lib/common_test/test/ct_skip_SUITE_data/skip/test/user_skip_7_SUITE.erl113
-rw-r--r--lib/common_test/test/ct_surefire_SUITE.erl7
-rw-r--r--lib/common_test/test/ct_test_server_if_1_SUITE.erl182
-rw-r--r--lib/common_test/test/ct_test_server_if_1_SUITE_data/test_server_if/test/ts_if_7_SUITE.erl16
-rw-r--r--lib/common_test/test/ct_test_support.erl30
-rw-r--r--lib/common_test/test/ct_testspec_1_SUITE.erl230
-rw-r--r--lib/common_test/test/ct_testspec_3_SUITE.erl10
-rw-r--r--lib/compiler/src/beam_clean.erl2
-rw-r--r--lib/compiler/src/beam_type.erl2
-rw-r--r--lib/compiler/src/beam_validator.erl2
-rw-r--r--lib/compiler/src/compile.erl4
-rw-r--r--lib/compiler/src/core_scan.erl4
-rw-r--r--lib/compiler/src/v3_core.erl2
-rw-r--r--lib/compiler/src/v3_kernel.erl8
-rw-r--r--lib/compiler/test/compilation_SUITE.erl6
-rw-r--r--lib/crypto/test/crypto_SUITE.erl14
-rw-r--r--lib/debugger/test/dbg_ui_SUITE_data/manual_data/src/lists1.erl4
-rw-r--r--lib/debugger/test/int_SUITE_data/lists1.erl4
-rw-r--r--lib/debugger/test/int_SUITE_data/my_lists.erl16
-rw-r--r--lib/dialyzer/test/options1_SUITE_data/src/compiler/beam_clean.erl2
-rw-r--r--lib/dialyzer/test/options1_SUITE_data/src/compiler/beam_type.erl2
-rw-r--r--lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl4
-rw-r--r--lib/dialyzer/test/options1_SUITE_data/src/compiler/core_scan.erl4
-rw-r--r--lib/dialyzer/test/options1_SUITE_data/src/compiler/sys_pre_expand.erl4
-rw-r--r--lib/dialyzer/test/options1_SUITE_data/src/compiler/v3_core.erl2
-rw-r--r--lib/dialyzer/test/options1_SUITE_data/src/compiler/v3_kernel.erl6
-rw-r--r--lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_cgi.erl2
-rw-r--r--lib/dialyzer/test/small_SUITE_data/results/trec12
-rw-r--r--lib/dialyzer/test/small_SUITE_data/src/appmon_place.erl2
-rw-r--r--lib/dialyzer/test/small_SUITE_data/src/contract3.erl11
-rw-r--r--lib/dialyzer/test/small_SUITE_data/src/empty_list_infimum.erl6
-rw-r--r--lib/dialyzer/test/small_SUITE_data/src/gencall.erl2
-rw-r--r--lib/dialyzer/test/small_SUITE_data/src/maybe_improper.erl17
-rw-r--r--lib/dialyzer/test/small_SUITE_data/src/overloaded1.erl10
-rw-r--r--lib/dialyzer/test/small_SUITE_data/src/trec.erl16
-rwxr-xr-xlib/diameter/bin/diameterc30
-rw-r--r--lib/diameter/doc/src/diameter.xml78
-rw-r--r--lib/diameter/doc/src/diameter_app.xml4
-rw-r--r--lib/diameter/doc/src/diameter_codec.xml8
-rw-r--r--lib/diameter/doc/src/diameter_dict.xml2
-rw-r--r--lib/diameter/doc/src/diameter_intro.xml4
-rw-r--r--lib/diameter/doc/src/diameter_make.xml94
-rw-r--r--lib/diameter/doc/src/diameter_sctp.xml2
-rw-r--r--lib/diameter/doc/src/diameter_soc_rfc6733.xml2
-rw-r--r--lib/diameter/doc/src/diameterc.xml (renamed from lib/diameter/doc/src/diameter_compile.xml)6
-rw-r--r--lib/diameter/doc/src/files.mk2
-rw-r--r--lib/diameter/doc/src/notes.xml30
-rw-r--r--lib/diameter/doc/src/ref_man.xml4
-rw-r--r--lib/diameter/doc/src/seealso.ent4
-rw-r--r--lib/diameter/doc/standard/rfc7068.txt1627
-rw-r--r--lib/diameter/doc/standard/rfc7075.txt563
-rw-r--r--lib/diameter/examples/dict/.gitignore2
-rw-r--r--lib/diameter/examples/dict/GNUmakefile60
-rw-r--r--lib/diameter/examples/dict/depend.sed43
-rw-r--r--lib/diameter/examples/dict/rfc4004_mip.dia4
-rw-r--r--lib/diameter/examples/dict/rfc4005_nas.dia4
-rw-r--r--lib/diameter/examples/dict/rfc4006_cc.dia4
-rw-r--r--lib/diameter/examples/dict/rfc4072_eap.dia4
-rw-r--r--lib/diameter/examples/dict/rfc4590_digest.dia2
-rw-r--r--lib/diameter/examples/dict/rfc4740_sip.dia4
-rw-r--r--lib/diameter/src/base/diameter.erl2
-rw-r--r--lib/diameter/src/base/diameter_capx.erl19
-rw-r--r--lib/diameter/src/base/diameter_codec.erl20
-rw-r--r--lib/diameter/src/base/diameter_config.erl4
-rw-r--r--lib/diameter/src/base/diameter_service.erl17
-rw-r--r--lib/diameter/src/base/diameter_types.erl21
-rw-r--r--lib/diameter/src/base/diameter_watchdog.erl28
-rw-r--r--lib/diameter/src/compiler/diameter_codegen.erl548
-rw-r--r--lib/diameter/src/compiler/diameter_dict_util.erl7
-rw-r--r--lib/diameter/src/compiler/diameter_make.erl245
-rw-r--r--lib/diameter/src/diameter.appup.src33
-rw-r--r--lib/diameter/test/diameter_codec_test.erl6
-rw-r--r--lib/diameter/test/diameter_compiler_SUITE.erl162
-rw-r--r--lib/diameter/test/diameter_examples_SUITE.erl2
-rw-r--r--lib/diameter/vsn.mk2
-rw-r--r--lib/eldap/doc/src/eldap.xml33
-rw-r--r--lib/eldap/src/eldap.erl88
-rw-r--r--lib/eldap/test/README36
-rw-r--r--lib/eldap/test/eldap.cfg1
-rw-r--r--lib/eldap/test/eldap_basic_SUITE.erl183
-rw-r--r--lib/eldap/test/eldap_basic_SUITE_data/certs/README1
-rw-r--r--lib/eldap/test/ldap_server/slapd.conf30
-rw-r--r--lib/eldap/test/make_certs.erl313
-rw-r--r--lib/erl_interface/src/legacy/erl_eterm.c2
-rw-r--r--lib/erl_interface/test/all_SUITE_data/ei_runner.c4
-rw-r--r--lib/erl_interface/test/all_SUITE_data/runner.c4
-rw-r--r--lib/erl_interface/test/ei_accept_SUITE.erl2
-rw-r--r--lib/erl_interface/test/erl_connect_SUITE.erl2
-rw-r--r--lib/erl_interface/test/erl_eterm_SUITE.erl10
-rw-r--r--lib/erl_interface/test/erl_eterm_SUITE_data/eterm_test.c8
-rw-r--r--lib/eunit/src/eunit_surefire.erl2
-rw-r--r--lib/hipe/cerl/erl_types.erl13
-rw-r--r--lib/inets/doc/src/ftp.xml14
-rw-r--r--lib/inets/doc/src/httpc.xml7
-rw-r--r--lib/inets/src/ftp/ftp.erl449
-rw-r--r--lib/inets/src/ftp/ftp_response.erl2
-rw-r--r--lib/inets/src/http_client/httpc.erl23
-rw-r--r--lib/inets/src/http_client/httpc_handler.erl69
-rw-r--r--lib/inets/src/http_client/httpc_internal.hrl6
-rw-r--r--lib/inets/src/http_client/httpc_manager.erl128
-rw-r--r--lib/inets/src/http_server/httpd_conf.erl2
-rw-r--r--lib/inets/src/http_server/httpd_log.erl15
-rw-r--r--lib/inets/src/http_server/httpd_request_handler.erl11
-rw-r--r--lib/inets/src/http_server/httpd_response.erl33
-rw-r--r--lib/inets/src/http_server/mod_cgi.erl10
-rw-r--r--lib/inets/src/http_server/mod_head.erl4
-rw-r--r--lib/inets/src/inets_app/inets_internal.hrl2
-rw-r--r--lib/inets/test/Makefile15
-rw-r--r--lib/inets/test/ftp_SUITE.erl843
-rw-r--r--lib/inets/test/ftp_SUITE_data/ftpd_hosts.skel18
-rw-r--r--lib/inets/test/ftp_SUITE_data/vsftpd.conf26
-rw-r--r--lib/inets/test/ftp_freebsd_x86_test.erl160
-rw-r--r--lib/inets/test/ftp_linux_ppc_test.erl158
-rw-r--r--lib/inets/test/ftp_linux_x86_test.erl160
-rw-r--r--lib/inets/test/ftp_macosx_ppc_test.erl159
-rw-r--r--lib/inets/test/ftp_macosx_x86_test.erl159
-rw-r--r--lib/inets/test/ftp_netbsd_x86_test.erl159
-rw-r--r--lib/inets/test/ftp_openbsd_x86_test.erl158
-rw-r--r--lib/inets/test/ftp_solaris10_sparc_test.erl161
-rw-r--r--lib/inets/test/ftp_solaris10_x86_test.erl162
-rw-r--r--lib/inets/test/ftp_solaris8_sparc_test.erl159
-rw-r--r--lib/inets/test/ftp_solaris9_sparc_test.erl158
-rw-r--r--lib/inets/test/ftp_suite_lib.erl1675
-rw-r--r--lib/inets/test/ftp_ticket_test.erl61
-rw-r--r--lib/inets/test/ftp_windows_2003_server_test.erl167
-rw-r--r--lib/inets/test/ftp_windows_xp_test.erl157
-rw-r--r--lib/inets/test/httpc_SUITE.erl40
-rw-r--r--lib/inets/test/httpd_basic_SUITE.erl89
-rw-r--r--lib/inets/test/httpd_basic_SUITE_data/Makefile.src14
-rw-r--r--lib/inets/test/httpd_basic_SUITE_data/cgi_sleep.c26
-rw-r--r--lib/kernel/doc/src/gen_sctp.xml2
-rw-r--r--lib/kernel/doc/src/inet.xml92
-rw-r--r--lib/kernel/doc/src/os.xml11
-rw-r--r--lib/kernel/doc/src/rpc.xml2
-rw-r--r--lib/kernel/src/gen_sctp.erl4
-rw-r--r--lib/kernel/src/inet.erl54
-rw-r--r--lib/kernel/src/inet_int.hrl2
-rw-r--r--lib/kernel/src/os.erl8
-rw-r--r--lib/kernel/test/erl_prim_loader_SUITE.erl27
-rw-r--r--lib/kernel/test/gen_sctp_SUITE.erl333
-rw-r--r--lib/kernel/test/zlib_SUITE.erl8
-rw-r--r--lib/mnesia/src/mnesia.appup.src14
-rw-r--r--lib/mnesia/src/mnesia_locker.erl32
-rw-r--r--lib/mnesia/src/mnesia_monitor.erl6
-rw-r--r--lib/mnesia/src/mnesia_subscr.erl4
-rw-r--r--lib/mnesia/src/mnesia_tm.erl6
-rw-r--r--lib/mnesia/test/mnesia_dirty_access_test.erl7
-rw-r--r--lib/mnesia/test/mnesia_evil_coverage_test.erl25
-rw-r--r--lib/observer/doc/src/ttb.xml7
-rw-r--r--lib/observer/doc/src/ttb_ug.xml2
-rw-r--r--lib/observer/test/observer_SUITE.erl4
-rw-r--r--lib/odbc/configure.in5
-rw-r--r--lib/orber/test/multi_ORB_SUITE.erl47
-rw-r--r--lib/orber/test/orber_nat_SUITE.erl107
-rw-r--r--lib/os_mon/src/memsup.erl19
-rw-r--r--lib/public_key/asn1/OTP-PKIX.asn112
-rw-r--r--lib/public_key/doc/src/using_public_key.xml6
-rw-r--r--lib/public_key/src/pubkey_cert_records.erl11
-rw-r--r--lib/public_key/test/public_key_SUITE.erl38
-rw-r--r--lib/runtime_tools/src/observer_backend.erl4
-rw-r--r--lib/sasl/doc/src/error_logging.xml5
-rw-r--r--lib/sasl/doc/src/sasl_app.xml13
-rw-r--r--lib/sasl/doc/src/systools.xml6
-rw-r--r--lib/sasl/src/sasl.erl4
-rw-r--r--lib/sasl/src/systools_make.erl18
-rw-r--r--lib/sasl/test/release_handler_SUITE.erl33
-rw-r--r--lib/sasl/test/systools_SUITE.erl32
-rw-r--r--lib/snmp/doc/src/notes.xml121
-rw-r--r--lib/snmp/doc/src/snmp.xml80
-rw-r--r--lib/snmp/doc/src/snmp_app.xml7
-rw-r--r--lib/snmp/doc/src/snmp_config.xml9
-rw-r--r--lib/snmp/doc/src/snmpa.xml48
-rw-r--r--lib/snmp/doc/src/snmpm.xml49
-rw-r--r--lib/snmp/doc/src/snmpm_user.xml187
-rw-r--r--lib/snmp/examples/ex2/snmp_ex2_manager.erl9
-rw-r--r--lib/snmp/src/agent/snmpa.erl198
-rw-r--r--lib/snmp/src/agent/snmpa_local_db.erl8
-rw-r--r--lib/snmp/src/agent/snmpa_mpd.erl4
-rw-r--r--lib/snmp/src/agent/snmpa_supervisor.erl2
-rw-r--r--lib/snmp/src/agent/snmpa_symbolic_store.erl8
-rw-r--r--lib/snmp/src/agent/snmpa_usm.erl10
-rw-r--r--lib/snmp/src/app/Makefile4
-rw-r--r--lib/snmp/src/app/snmp.appup.src43
-rw-r--r--lib/snmp/src/app/snmp.erl119
-rw-r--r--lib/snmp/src/app/snmp_internal.hrl4
-rw-r--r--lib/snmp/src/manager/snmpm.erl218
-rw-r--r--lib/snmp/src/manager/snmpm_config.erl31
-rw-r--r--lib/snmp/src/manager/snmpm_server.erl184
-rw-r--r--lib/snmp/src/manager/snmpm_user.erl173
-rw-r--r--lib/snmp/src/manager/snmpm_usm.erl14
-rw-r--r--lib/snmp/src/misc/snmp_config.erl187
-rw-r--r--lib/snmp/src/misc/snmp_log.erl188
-rw-r--r--lib/snmp/src/misc/snmp_usm.erl23
-rw-r--r--lib/snmp/src/misc/snmp_verbosity.erl2
-rw-r--r--lib/snmp/test/snmp_agent_test.erl72
-rw-r--r--lib/snmp/test/snmp_log_test.erl77
-rw-r--r--lib/snmp/test/snmp_manager_config_test.erl43
-rw-r--r--lib/snmp/test/snmp_manager_test.erl66
-rw-r--r--lib/snmp/test/snmp_manager_user.erl17
-rw-r--r--lib/snmp/test/snmp_test_manager.erl20
-rw-r--r--lib/snmp/vsn.mk2
-rw-r--r--lib/ssh/doc/src/ssh.xml44
-rw-r--r--lib/ssh/doc/src/ssh_client_key_api.xml6
-rw-r--r--lib/ssh/doc/src/ssh_server_key_api.xml14
-rw-r--r--lib/ssh/src/Makefile3
-rw-r--r--lib/ssh/src/ssh.app.src3
-rw-r--r--lib/ssh/src/ssh.erl153
-rw-r--r--lib/ssh/src/ssh.hrl4
-rw-r--r--lib/ssh/src/ssh_acceptor.erl19
-rw-r--r--lib/ssh/src/ssh_acceptor_sup.erl6
-rw-r--r--lib/ssh/src/ssh_auth.erl99
-rw-r--r--lib/ssh/src/ssh_bits.erl334
-rw-r--r--lib/ssh/src/ssh_channel.erl2
-rw-r--r--lib/ssh/src/ssh_channel_sup.erl4
-rw-r--r--lib/ssh/src/ssh_cli.erl110
-rw-r--r--lib/ssh/src/ssh_connect.hrl5
-rw-r--r--lib/ssh/src/ssh_connection.erl653
-rw-r--r--lib/ssh/src/ssh_connection_controler.erl137
-rw-r--r--lib/ssh/src/ssh_connection_handler.erl1289
-rw-r--r--lib/ssh/src/ssh_connection_manager.erl916
-rw-r--r--lib/ssh/src/ssh_connection_sup.erl87
-rw-r--r--lib/ssh/src/ssh_io.erl5
-rw-r--r--lib/ssh/src/ssh_message.erl529
-rw-r--r--lib/ssh/src/ssh_no_io.erl43
-rw-r--r--lib/ssh/src/ssh_sftpd.erl2
-rw-r--r--lib/ssh/src/ssh_subsystem_sup.erl16
-rw-r--r--lib/ssh/src/ssh_sup.erl15
-rw-r--r--lib/ssh/src/ssh_system_sup.erl20
-rw-r--r--lib/ssh/src/ssh_transport.erl203
-rw-r--r--lib/ssh/src/ssh_userreg.erl141
-rw-r--r--lib/ssh/src/ssh_xfer.erl4
-rw-r--r--lib/ssh/src/sshc_sup.erl6
-rw-r--r--lib/ssh/src/sshd_sup.erl11
-rw-r--r--lib/ssh/test/Makefile6
-rw-r--r--lib/ssh/test/ssh_basic_SUITE.erl142
-rw-r--r--lib/ssh/test/ssh_connection_SUITE.erl33
-rw-r--r--lib/ssh/test/ssh_peername_sockname_server.erl54
-rw-r--r--lib/ssh/test/ssh_test_cli.erl81
-rw-r--r--lib/ssl/doc/src/ssl.xml21
-rw-r--r--lib/ssl/src/dtls_handshake.erl2
-rw-r--r--lib/ssl/src/inet_tls_dist.erl5
-rw-r--r--lib/ssl/src/ssl_cipher.erl2
-rw-r--r--lib/ssl/src/ssl_handshake.erl115
-rw-r--r--lib/ssl/src/ssl_handshake.hrl22
-rw-r--r--lib/ssl/src/ssl_internal.hrl3
-rw-r--r--lib/ssl/src/tls.erl9
-rw-r--r--lib/ssl/src/tls_connection.erl31
-rw-r--r--lib/ssl/src/tls_handshake.erl11
-rw-r--r--lib/ssl/test/ssl_handshake_SUITE.erl12
-rw-r--r--lib/stdlib/doc/src/re.xml6
-rw-r--r--lib/stdlib/src/dict.erl2
-rw-r--r--lib/stdlib/src/erl_eval.erl4
-rw-r--r--lib/stdlib/src/erl_tar.erl4
-rw-r--r--lib/stdlib/src/filelib.erl4
-rw-r--r--lib/stdlib/src/gen_server.erl2
-rw-r--r--lib/stdlib/src/io_lib.erl2
-rw-r--r--lib/stdlib/src/lists.erl4
-rw-r--r--lib/stdlib/src/math.erl6
-rw-r--r--lib/stdlib/src/string.erl4
-rw-r--r--lib/stdlib/test/slave_SUITE.erl2
-rw-r--r--lib/syntax_tools/src/erl_syntax.erl19
-rw-r--r--lib/syntax_tools/src/erl_syntax_lib.erl2
-rw-r--r--lib/syntax_tools/src/igor.erl33
-rw-r--r--lib/test_server/src/test_server.erl12
-rw-r--r--lib/test_server/src/test_server_ctrl.erl422
-rw-r--r--lib/test_server/src/ts.erl2
-rw-r--r--lib/test_server/test/test_server_SUITE.erl14
-rw-r--r--lib/test_server/test/test_server_test_lib.erl5
-rw-r--r--lib/tools/emacs/erlang-eunit.el4
-rw-r--r--lib/tools/emacs/erlang-start.el2
-rw-r--r--lib/tools/emacs/erlang.el40
-rw-r--r--lib/tools/src/cover.erl44
-rw-r--r--lib/tools/src/tags.erl4
-rw-r--r--lib/tools/test/cover_SUITE.erl37
-rw-r--r--lib/tools/test/cover_SUITE_data/otp_11439/t.erl11
-rw-r--r--lib/tools/test/eprof_SUITE_data/eed.erl8
-rw-r--r--lib/wx/doc/overview.edoc42
-rw-r--r--lib/xmerl/src/xmerl.erl25
-rw-r--r--lib/xmerl/src/xmerl_regexp.erl12
-rw-r--r--lib/xmerl/src/xmerl_xpath.erl51
-rw-r--r--lib/xmerl/test/xmerl_SUITE.erl7
-rw-r--r--lib/xmerl/test/xmerl_SUITE_data/xpath/purchaseOrder.xml5
-rw-r--r--lib/xmerl/test/xmerl_SUITE_data/xpath/xpath_abbrev.erl31
325 files changed, 12816 insertions, 9930 deletions
diff --git a/lib/asn1/src/Makefile b/lib/asn1/src/Makefile
index 3f24e15c04..500f4a1358 100644
--- a/lib/asn1/src/Makefile
+++ b/lib/asn1/src/Makefile
@@ -135,7 +135,7 @@ $(EBIN)/asn1ct_func.$(EMULATOR): asn1ct_func.erl
asn1ct_eval_%.erl: asn1ct_eval_%.funcs
$(gen_verbose)erl -pa $(EBIN) -noshell -noinput \
- -run prepare_templates gen_asn1ct_eval $< >$@
+ -run prepare_templates gen_asn1ct_eval $<
$(APP_TARGET): $(APP_SRC) ../vsn.mk
$(vsn_verbose)sed -e 's;%VSN%;$(VSN);' $< > $@
@@ -180,7 +180,7 @@ RT_TEMPLATES_TARGET = $(RT_TEMPLATES:%=%.$(EMULATOR))
asn1ct_rtt.erl: prepare_templates.$(EMULATOR) $(RT_TEMPLATES_TARGET)
$(gen_verbose)erl -noshell -noinput -run prepare_templates gen_asn1ct_rtt \
- $(RT_TEMPLATES_TARGET) >asn1ct_rtt.erl
+ $(RT_TEMPLATES_TARGET)
prepare_templates.$(EMULATOR): prepare_templates.erl
$(V_ERLC) prepare_templates.erl
diff --git a/lib/asn1/src/asn1ct_check.erl b/lib/asn1/src/asn1ct_check.erl
index eddcda0018..0a13801e08 100644
--- a/lib/asn1/src/asn1ct_check.erl
+++ b/lib/asn1/src/asn1ct_check.erl
@@ -2534,89 +2534,54 @@ normalize_integer(S,Int=#'Externalvaluereference'{value=Name},Type) ->
normalize_integer(_,Int,_) ->
exit({'Unknown INTEGER value',Int}).
-normalize_bitstring(S,Value,Type)->
- %% There are four different Erlang formats of BIT STRING:
- %% 1 - a list of ones and zeros.
- %% 2 - a list of atoms.
- %% 3 - as an integer, for instance in hexadecimal form.
- %% 4 - as a tuple {Unused, Binary} where Unused is an integer
- %% and tells how many bits of Binary are unused.
- %%
- %% normalize_bitstring/3 transforms Value according to:
- %% A to 3,
- %% B to 1,
- %% C to 1 or 3
- %% D to 2,
- %% Value can be on format:
- %% A - {hstring, String}, where String is a hexadecimal string.
- %% B - {bstring, String}, where String is a string on bit format
- %% C - #'Externalvaluereference'{value=V}, where V is a defined value
- %% D - list of #'Externalvaluereference', where each value component
- %% is an identifier corresponing to NamedBits in Type.
- %% E - list of ones and zeros, if Value already is normalized.
+%% normalize_bitstring(S, Value, Type) -> bitstring()
+%% Convert a literal value for a BIT STRING to an Erlang bit string.
+%%
+normalize_bitstring(S, Value, Type)->
case Value of
{hstring,String} when is_list(String) ->
- hstring_to_int(String);
+ hstring_to_bitstring(String);
{bstring,String} when is_list(String) ->
- bstring_to_bitlist(String);
- Rec when is_record(Rec,'Externalvaluereference') ->
- get_normalized_value(S,Value,Type,
- fun normalize_bitstring/3,[]);
+ bstring_to_bitstring(String);
+ #'Externalvaluereference'{} ->
+ get_normalized_value(S, Value, Type,
+ fun normalize_bitstring/3, []);
RecList when is_list(RecList) ->
- case Type of
- NBL when is_list(NBL) ->
- F = fun(#'Externalvaluereference'{value=Name}) ->
- case lists:keysearch(Name,1,NBL) of
- {value,{Name,_}} ->
- Name;
- Other ->
- throw({error,Other})
- end;
- (I) when I =:= 1; I =:= 0 ->
- I;
- (Other) ->
- throw({error,Other})
- end,
- case catch lists:map(F,RecList) of
- {error,Reason} ->
- asn1ct:warning("default value not "
- "compatible with type definition ~p~n",
- [Reason],S,
- "default value not "
- "compatible with type definition"),
- Value;
- NewList ->
- NewList
- end;
- _ ->
+ F = fun(#'Externalvaluereference'{value=Name}) ->
+ case lists:keymember(Name, 1, Type) of
+ true -> Name;
+ false -> throw({error,false})
+ end;
+ (Name) when is_atom(Name) ->
+ %% Already normalized.
+ Name;
+ (Other) ->
+ throw({error,Other})
+ end,
+ try
+ lists:map(F, RecList)
+ catch
+ throw:{error,Reason} ->
asn1ct:warning("default value not "
"compatible with type definition ~p~n",
- [RecList],S,
+ [Reason],S,
"default value not "
"compatible with type definition"),
Value
end;
- {Name,String} when is_atom(Name) ->
- normalize_bitstring(S,String,Type);
- Other ->
- asn1ct:warning("illegal default value ~p~n",[Other],S,
- "illegal default value"),
- Value
+ Bs when is_bitstring(Bs) ->
+ %% Already normalized.
+ Bs
end.
-hstring_to_int(L) when is_list(L) ->
- hstring_to_int(L,0).
-hstring_to_int([H|T],Acc) when H >= $A, H =< $F ->
- hstring_to_int(T,(Acc bsl 4) + (H - $A + 10) ) ;
-hstring_to_int([H|T],Acc) when H >= $0, H =< $9 ->
- hstring_to_int(T,(Acc bsl 4) + (H - $0));
-hstring_to_int([],Acc) ->
- Acc.
+hstring_to_bitstring(L) ->
+ << <<(hex_to_int(D)):4>> || D <- L >>.
-bstring_to_bitlist([H|T]) when H == $0; H == $1 ->
- [H - $0 | bstring_to_bitlist(T)];
-bstring_to_bitlist([]) ->
- [].
+bstring_to_bitstring(L) ->
+ << <<(D-$0):1>> || D <- L >>.
+
+hex_to_int(D) when $0 =< D, D =< $9 -> D - $0;
+hex_to_int(D) when $A =< D, D =< $F -> D - ($A - 10).
%% normalize_octetstring/1 changes representation of input Value to a
%% list of octets.
@@ -4229,9 +4194,10 @@ constraint_union(S,C) when is_list(C) ->
constraint_union(_S,C) ->
[C].
-constraint_union1(S,[A={'ValueRange',_},union,B={'ValueRange',_}|Rest],Acc) ->
- AunionB = constraint_union_vr([A,B]),
- constraint_union1(S, AunionB++Rest, Acc);
+constraint_union1(S, [{'ValueRange',{Lb1,Ub1}},union,
+ {'ValueRange',{Lb2,Ub2}}|Rest], Acc) ->
+ AunionB = {'ValueRange',{c_min(Lb1, Lb2),max(Ub1, Ub2)}},
+ constraint_union1(S, [AunionB|Rest], Acc);
constraint_union1(S,[A={'SingleValue',_},union,B={'SingleValue',_}|Rest],Acc) ->
AunionB = constraint_union_sv(S,[A,B]),
constraint_union1(S,Rest,Acc ++ AunionB);
@@ -4255,42 +4221,9 @@ constraint_union_sv(_S,SV) ->
[N] -> [{'SingleValue',N}];
L -> [{'SingleValue',L}]
end.
-
-%% REMOVE????
-%%constraint_union(S,VR,'ValueRange') ->
-%% constraint_union_vr(VR).
-
-%% constraint_union_vr(VR)
-%% VR = [{'ValueRange',{Lb,Ub}},...]
-%% Lb = 'MIN' | integer()
-%% Ub = 'MAX' | integer()
-%% Returns if possible only one ValueRange tuple with a range that
-%% is a union of all ranges in VR.
-constraint_union_vr(VR) ->
- %% Sort VR by Lb in first hand and by Ub in second hand
- Fun=fun({_,{'MIN',_B1}},{_,{A2,_B2}}) when is_integer(A2)->true;
- ({_,{A1,_B1}},{_,{'MAX',_B2}}) when is_integer(A1) -> true;
- ({_,{A1,_B1}},{_,{A2,_B2}}) when is_integer(A1),is_integer(A2),A1<A2 -> true;
- ({_,{A,B1}},{_,{A,B2}}) when B1=<B2->true;
- (_,_)->false end,
- SortedVR = lists:usort(Fun,VR),
- constraint_union_vr(SortedVR, []).
-
-constraint_union_vr([],Acc) ->
- lists:reverse(Acc);
-constraint_union_vr([C|Rest],[]) ->
- constraint_union_vr(Rest,[C]);
-constraint_union_vr([{_,{Lb,Ub2}}|Rest],[{_,{Lb,_Ub1}}|Acc]) -> %Ub2 > Ub1
- constraint_union_vr(Rest,[{'ValueRange',{Lb,Ub2}}|Acc]);
-constraint_union_vr([{_,{_,Ub}}|Rest],A=[{_,{_,Ub}}|_Acc]) ->
- constraint_union_vr(Rest,A);
-constraint_union_vr([{_,{Lb2,Ub2}}|Rest], [{_,{Lb1,Ub1}}|Acc])
- when Ub1 =< Lb2, Ub1 < Ub2 ->
- constraint_union_vr(Rest,[{'ValueRange',{Lb1,Ub2}}|Acc]);
-constraint_union_vr([{_,{_,Ub2}}|Rest],A=[{_,{_,Ub1}}|_Acc]) when Ub2=<Ub1->
- constraint_union_vr(Rest,A);
-constraint_union_vr([VR|Rest],Acc) ->
- constraint_union_vr(Rest,[VR|Acc]).
+c_min('MIN', _) -> 'MIN';
+c_min(_, 'MIN') -> 'MIN';
+c_min(A, B) -> min(A, B).
union_sv_vr(_S,{'SingleValue',SV},VR)
when is_integer(SV) ->
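
The default value of a BIT STRING is now normalized to an Erlang bit string. A minimal sketch of the conversion, evaluated in a shell with the same bit-syntax comprehensions as the new bstring_to_bitstring/1 and hstring_to_bitstring/1 above, applied to the literals '1010110'B and 'A8A'H from Default.asn further down (HexToInt is just an inline stand-in for the new hex_to_int/1 helper):

    1> << <<(D - $0):1>> || D <- "1010110" >>.     % '1010110'B
    <<86:7>>                                       % same as <<2#1010110:7>>
    2> HexToInt = fun(D) when $0 =< D, D =< $9 -> D - $0;
                     (D) when $A =< D, D =< $F -> D - ($A - 10)
                  end,
       << <<(HexToInt(D)):4>> || D <- "A8A" >>.    % 'A8A'H
    <<168,10:4>>                                   % same as <<16#A8A:12>>
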
diff --git a/lib/asn1/src/asn1ct_constructed_ber_bin_v2.erl b/lib/asn1/src/asn1ct_constructed_ber_bin_v2.erl
index 8359b81b33..a38da8bcc2 100644
--- a/lib/asn1/src/asn1ct_constructed_ber_bin_v2.erl
+++ b/lib/asn1/src/asn1ct_constructed_ber_bin_v2.erl
@@ -1155,7 +1155,8 @@ gen_dec_line(Erules,TopType,Cname,CTags,Type,OptOrMand,DecObjInf) ->
emit([indent(4),"_ ->",nl]),
case OptOrMand of
- {'DEFAULT', Def} ->
+ {'DEFAULT', Def0} ->
+ Def = asn1ct_gen:conform_value(Type, Def0),
emit([indent(8),"{",{asis,Def},",",{prev,tlv},"}",nl]);
'OPTIONAL' ->
emit([indent(8),"{ asn1_NOVALUE, ",{prev,tlv},"}",nl])
diff --git a/lib/asn1/src/asn1ct_constructed_per.erl b/lib/asn1/src/asn1ct_constructed_per.erl
index 8d4afc0a0b..4672f7edd3 100644
--- a/lib/asn1/src/asn1ct_constructed_per.erl
+++ b/lib/asn1/src/asn1ct_constructed_per.erl
@@ -770,8 +770,10 @@ optionals(L) -> optionals(L,[],2).
optionals([#'ComponentType'{prop='OPTIONAL'}|Rest], Acc, Pos) ->
optionals(Rest, [Pos|Acc], Pos+1);
-optionals([#'ComponentType'{prop={'DEFAULT',Val}}|Rest], Acc, Pos) ->
- optionals(Rest, [{Pos,Val}|Acc], Pos+1);
+optionals([#'ComponentType'{typespec=T,prop={'DEFAULT',Val}}|Rest],
+ Acc, Pos) ->
+ Vals = def_values(T, Val),
+ optionals(Rest, [{Pos,Vals}|Acc], Pos+1);
optionals([#'ComponentType'{}|Rest], Acc, Pos) ->
optionals(Rest, Acc, Pos+1);
optionals([], Acc, _) ->
@@ -888,7 +890,8 @@ gen_enc_components_call1(Erule,TopType,
optional ->
asn1ct_imm:enc_absent(Element, [asn1_NOVALUE], Imm1);
{default,Def} ->
- asn1ct_imm:enc_absent(Element, [asn1_DEFAULT,Def], Imm1)
+ DefValues = def_values(Type, Def),
+ asn1ct_imm:enc_absent(Element, DefValues, Imm1)
end,
Imm = case Imm2 of
[] -> [];
@@ -899,6 +902,38 @@ gen_enc_components_call1(_Erule,_TopType,[],Pos,_,_, Acc) ->
ImmList = lists:reverse(Acc),
{ImmList,Pos}.
+def_values(#type{def=#'Externaltypereference'{module=Mod,type=Type}}, Def) ->
+ #typedef{typespec=T} = asn1_db:dbget(Mod, Type),
+ def_values(T, Def);
+def_values(#type{def={'BIT STRING',[]}}, Bs) when is_bitstring(Bs) ->
+ ListBs = [B || <<B:1>> <= Bs],
+ IntBs = lists:foldl(fun(B, A) ->
+ (A bsl 1) bor B
+ end, 0, lists:reverse(ListBs)),
+ Sz = bit_size(Bs),
+ Compact = case 8 - Sz rem 8 of
+ 8 ->
+ {0,Bs};
+ Unused ->
+ {Unused,<<Bs:Sz/bits,0:Unused>>}
+ end,
+ [asn1_DEFAULT,Bs,Compact,ListBs,IntBs];
+def_values(#type{def={'BIT STRING',[_|_]=Ns}}, List) when is_list(List) ->
+ Bs = asn1ct_gen:named_bitstring_value(List, Ns),
+ ListBs = [B || <<B:1>> <= Bs],
+ IntBs = lists:foldl(fun(B, A) ->
+ (A bsl 1) bor B
+ end, 0, lists:reverse(ListBs)),
+ Args = [List,Bs,ListBs,IntBs],
+ {call,per_common,is_default_bitstring,Args};
+def_values(#type{def={'INTEGER',Ns}}, Def) ->
+ [asn1_DEFAULT,Def|case lists:keyfind(Def, 2, Ns) of
+ false -> [];
+ {Val,Def} -> [Val]
+ end];
+def_values(_, Def) ->
+ [asn1_DEFAULT,Def].
+
gen_enc_line_imm(Erule, TopType, Cname, Type, Element, DynamicEnc, Ext) ->
Imm0 = gen_enc_line_imm_1(Erule, TopType, Cname, Type,
Element, DynamicEnc),
@@ -1207,7 +1242,8 @@ gen_dec_comp_call(Comp, Erule, TopType, Tpos, OptTable, DecInfObj,
comp_call_pre_post(noext, mandatory, _, _, _, _, _, _) ->
{[],[]};
-comp_call_pre_post(noext, Prop, _, _, TextPos, OptTable, NumOptionals, Ext) ->
+comp_call_pre_post(noext, Prop, _, Type, TextPos,
+ OptTable, NumOptionals, Ext) ->
%% OPTIONAL or DEFAULT
OptPos = get_optionality_pos(TextPos, OptTable),
Element = case NumOptionals - OptPos of
@@ -1225,7 +1261,7 @@ comp_call_pre_post(noext, Prop, _, _, TextPos, OptTable, NumOptionals, Ext) ->
emit([";",nl,
"0 ->",nl,
"{"]),
- gen_dec_component_no_val(Ext, Prop),
+ gen_dec_component_no_val(Ext, Type, Prop),
emit([",",{curr,bytes},"}",nl,
"end"]),
St
@@ -1247,10 +1283,10 @@ comp_call_pre_post({ext,_,_}, Prop, Pos, Type, _, _, _, Ext) ->
components=ExtGroupCompList2}}
when is_integer(Number2)->
emit("{extAddGroup,"),
- gen_dec_extaddGroup_no_val(Ext, ExtGroupCompList2),
+ gen_dec_extaddGroup_no_val(Ext, Type, ExtGroupCompList2),
emit("}");
_ ->
- gen_dec_component_no_val(Ext, Prop)
+ gen_dec_component_no_val(Ext, Type, Prop)
end,
emit([",",{curr,bytes},"}",nl,
"end"]),
@@ -1265,21 +1301,22 @@ is_mandatory_predef_tab_c(_, _, {"got objfun through args","ObjFun"}) ->
is_mandatory_predef_tab_c(_,_,_) ->
true.
-gen_dec_extaddGroup_no_val(Ext,[#'ComponentType'{prop=Prop}])->
- gen_dec_component_no_val(Ext,Prop),
+gen_dec_extaddGroup_no_val(Ext, Type, [#'ComponentType'{prop=Prop}])->
+ gen_dec_component_no_val(Ext, Type, Prop),
ok;
-gen_dec_extaddGroup_no_val(Ext,[#'ComponentType'{prop=Prop}|Rest])->
- gen_dec_component_no_val(Ext,Prop),
- emit({","}),
- gen_dec_extaddGroup_no_val(Ext,Rest);
-gen_dec_extaddGroup_no_val(_, []) ->
+gen_dec_extaddGroup_no_val(Ext, Type, [#'ComponentType'{prop=Prop}|Rest])->
+ gen_dec_component_no_val(Ext, Type, Prop),
+ emit(","),
+ gen_dec_extaddGroup_no_val(Ext, Type, Rest);
+gen_dec_extaddGroup_no_val(_, _, []) ->
ok.
-gen_dec_component_no_val(_,{'DEFAULT',DefVal}) ->
+gen_dec_component_no_val(_, Type, {'DEFAULT',DefVal0}) ->
+ DefVal = asn1ct_gen:conform_value(Type, DefVal0),
emit([{asis,DefVal}]);
-gen_dec_component_no_val(_,'OPTIONAL') ->
+gen_dec_component_no_val(_, _, 'OPTIONAL') ->
emit({"asn1_NOVALUE"});
-gen_dec_component_no_val({ext,_,_},mandatory) ->
+gen_dec_component_no_val({ext,_,_}, _, mandatory) ->
emit({"asn1_NOVALUE"}).
diff --git a/lib/asn1/src/asn1ct_gen.erl b/lib/asn1/src/asn1ct_gen.erl
index 3452d29085..30d337635b 100644
--- a/lib/asn1/src/asn1ct_gen.erl
+++ b/lib/asn1/src/asn1ct_gen.erl
@@ -33,7 +33,9 @@
insert_once/2,
ct_gen_module/1,
index2suffix/1,
- get_record_name_prefix/0]).
+ get_record_name_prefix/0,
+ conform_value/2,
+ named_bitstring_value/2]).
-export([pgen/5,
mk_var/1,
un_hyphen_var/1]).
@@ -1485,8 +1487,14 @@ gen_prim_check_call(PrimType, Default, Element, Type) ->
end,
check_call(check_int, [Default,Element,{asis,NNL}]);
'BIT STRING' ->
- {_,NBL} = Type#type.def,
- check_call(check_bitstring, [Default,Element,{asis,NBL}]);
+ case Type#type.def of
+ {_,[]} ->
+ check_call(check_bitstring,
+ [Default,Element]);
+ {_,[_|_]=NBL} ->
+ check_call(check_named_bitstring,
+ [Default,Element,{asis,NBL}])
+ end;
'OCTET STRING' ->
check_call(check_octetstring, [Default,Element]);
'NULL' ->
@@ -1640,9 +1648,33 @@ unify_if_string(PrimType) ->
Other -> Other
end.
-
-
-
+conform_value(#type{def={'BIT STRING',[]}}, Bs) ->
+ case asn1ct:get_bit_string_format() of
+ compact when is_binary(Bs) ->
+ {0,Bs};
+ compact when is_bitstring(Bs) ->
+ Sz = bit_size(Bs),
+ Unused = 8 - bit_size(Bs),
+ {Unused,<<Bs:Sz/bits,0:Unused>>};
+ legacy ->
+ [B || <<B:1>> <= Bs];
+ bitstring when is_bitstring(Bs) ->
+ Bs
+ end;
+conform_value(_, Value) -> Value.
+
+named_bitstring_value(List, Names) ->
+ Int = lists:foldl(fun(N, A) ->
+ {N,Pos} = lists:keyfind(N, 1, Names),
+ A bor (1 bsl Pos)
+ end, 0, List),
+ named_bitstring_value_1(<<>>, Int).
+
+named_bitstring_value_1(Bs, 0) ->
+ Bs;
+named_bitstring_value_1(Bs, Int) ->
+ B = Int band 1,
+ named_bitstring_value_1(<<Bs/bitstring,B:1>>, Int bsr 1).
get_inner(A) when is_atom(A) -> A;
get_inner(Ext) when is_record(Ext,'Externaltypereference') -> Ext;
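
A hedged sketch of the two new helpers, for the defaults '101111'B and {second} used by the test suites later in this patch (the named-bit list is assumed to be [{first,0},{second,1},{third,2}], as for the c component of SeqBS):

    %% conform_value/2 rewrites a default into the representation the
    %% module is compiled for:
    %%   bitstring format -> <<2#101111:6>>
    %%   compact format   -> {2,<<2#101111:6,0:2>>}
    %%   legacy format    -> [1,0,1,1,1,1]

    %% named_bitstring_value/2 builds the shortest bit string with the
    %% named positions set:
    named_bitstring_value([second], [{first,0},{second,1},{third,2}])
    %% => <<0:1,1:1>>, i.e. <<1:2>>
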
diff --git a/lib/asn1/src/asn1ct_imm.erl b/lib/asn1/src/asn1ct_imm.erl
index 892178f61b..20785cda8c 100644
--- a/lib/asn1/src/asn1ct_imm.erl
+++ b/lib/asn1/src/asn1ct_imm.erl
@@ -319,14 +319,22 @@ per_enc_extensions(Val0, Pos0, NumBits, Aligned) when NumBits > 0 ->
{'cond',[[{eq,Bitmap,0}],
['_'|Length ++ PutBits]],{var,"Extensions"}}].
-per_enc_optional(Val0, {Pos,Def}, _Aligned) when is_integer(Pos) ->
+per_enc_optional(Val0, {Pos,DefVals}, _Aligned) when is_integer(Pos),
+ is_list(DefVals) ->
Val1 = lists:concat(["element(",Pos,", ",Val0,")"]),
{B,[Val]} = mk_vars(Val1, []),
Zero = {put_bits,0,1,[1]},
One = {put_bits,1,1,[1]},
- B++[{'cond',[[{eq,Val,asn1_DEFAULT},Zero],
- [{eq,Val,Def},Zero],
- ['_',One]]}];
+ B++[{'cond',
+ [[{eq,Val,DefVal},Zero] || DefVal <- DefVals] ++ [['_',One]]}];
+per_enc_optional(Val0, {Pos,{call,M,F,A}}, _Aligned) when is_integer(Pos) ->
+ Val1 = lists:concat(["element(",Pos,", ",Val0,")"]),
+ {B,[Val,Tmp]} = mk_vars(Val1, [tmp]),
+ Zero = {put_bits,0,1,[1]},
+ One = {put_bits,1,1,[1]},
+ B++[{call,M,F,[Val|A],Tmp},
+ {'cond',
+ [[{eq,Tmp,true},Zero],['_',One]]}];
per_enc_optional(Val0, Pos, _Aligned) when is_integer(Pos) ->
Val1 = lists:concat(["element(",Pos,", ",Val0,")"]),
{B,[Val]} = mk_vars(Val1, []),
@@ -352,7 +360,12 @@ per_enc_sof(Val0, Constraint, ElementVar, ElementImm, Aligned) ->
PreBlock ++ EncLen ++ Lc
end.
-enc_absent(Val0, AbsVals, Body) ->
+enc_absent(Val0, {call,M,F,A}, Body) ->
+ {B,[Var,Tmp]} = mk_vars(Val0, [tmp]),
+ B++[{call,M,F,[Var|A],Tmp},
+ {'cond',
+ [[{eq,Tmp,true}],['_'|Body]]}];
+enc_absent(Val0, AbsVals, Body) when is_list(AbsVals) ->
{B,[Var]} = mk_vars(Val0, []),
Cs = [[{eq,Var,Aval}] || Aval <- AbsVals] ++ [['_'|Body]],
B++build_cond(Cs).
@@ -994,6 +1007,25 @@ mk_var(Base, V) ->
per_enc_integer_1(Val, [], Aligned) ->
[{'cond',[['_'|per_enc_unconstrained(Val, Aligned)]]}];
+per_enc_integer_1(Val, [{{'SingleValue',[_|_]=Svs}=Constr,[]}], Aligned) ->
+ %% An extensible constraint such as (1|17, ...).
+ %%
+ %% A subtle detail is that the extension root as described in the
+ %% ASN.1 spec should be used to determine whether a particular value
+ %% belongs to the extension root (as opposed to the effective
+ %% constraint, which will be used for the actual encoding).
+ %%
+ %% So for the example above, only the integers 1 and 17 should be
+ %% encoded as root values (extension bit = 0).
+
+ [{'ValueRange',{Lb,Ub}}] = effective_constraint(integer, [Constr]),
+ Root = [begin
+ {[],_,Put} = per_enc_constrained(Sv, Lb, Ub, Aligned),
+ [{eq,Val,Sv},{put_bits,0,1,[1]}|Put]
+ end || Sv <- Svs],
+ Cs = Root ++ [['_',{put_bits,1,1,[1]}|
+ per_enc_unconstrained(Val, Aligned)]],
+ build_cond(Cs);
per_enc_integer_1(Val0, [{{_,_}=Constr,[]}], Aligned) ->
{Prefix,Check,Action} = per_enc_integer_2(Val0, Constr, Aligned),
Prefix++build_cond([[Check,{put_bits,0,1,[1]}|Action],
@@ -1004,7 +1036,7 @@ per_enc_integer_1(Val0, [Constr], Aligned) ->
Prefix++build_cond([[Check|Action],
['_',{error,Val0}]]).
-per_enc_integer_2(Val, {'SingleValue',Sv}, Aligned) ->
+per_enc_integer_2(Val, {'SingleValue',Sv}, Aligned) when is_integer(Sv) ->
per_enc_constrained(Val, Sv, Sv, Aligned);
per_enc_integer_2(Val0, {'ValueRange',{Lb,'MAX'}}, Aligned)
when is_integer(Lb) ->
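
A hedged usage sketch of the new handling of extensible single-value constraints, assuming the Constraints module below has been compiled with the uper backend; the byte values are the Sv2 test vectors added to testConstraints.erl in this same patch:

    {ok,Root} = 'Constraints':encode('Sv2', 17),
    <<16#78>> = iolist_to_binary(Root),            %root value: extension bit = 0
    {ok,Ext} = 'Constraints':encode('Sv2', 1),
    <<16#80,16#80,16#80>> = iolist_to_binary(Ext). %non-root value: extension bit = 1
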
diff --git a/lib/asn1/src/asn1ct_value.erl b/lib/asn1/src/asn1ct_value.erl
index 992210232f..862b3c4ea5 100644
--- a/lib/asn1/src/asn1ct_value.erl
+++ b/lib/asn1/src/asn1ct_value.erl
@@ -167,17 +167,16 @@ from_type_prim(M, D) ->
case D#type.def of
'INTEGER' ->
i_random(C);
- {'INTEGER',NamedNumberList} ->
- NN = [X||{X,_} <- NamedNumberList],
- case NN of
+ {'INTEGER',[_|_]=NNL} ->
+ case C of
[] ->
- i_random(C);
+ {N,_} = lists:nth(random(length(NNL)), NNL),
+ N;
_ ->
- case C of
- [] ->
- lists:nth(random(length(NN)),NN);
- _ ->
- lists:nth((fun(0)->1;(X)->X end(i_random(C))),NN)
+ V = i_random(C),
+ case lists:keyfind(V, 2, NNL) of
+ false -> V;
+ {N,V} -> N
end
end;
Enum when is_tuple(Enum),element(1,Enum)=='ENUMERATED' ->
diff --git a/lib/asn1/src/asn1rtt_check.erl b/lib/asn1/src/asn1rtt_check.erl
index e78b65a8fb..be4f9c8bff 100644
--- a/lib/asn1/src/asn1rtt_check.erl
+++ b/lib/asn1/src/asn1rtt_check.erl
@@ -20,7 +20,7 @@
-export([check_bool/2,
check_int/3,
- check_bitstring/3,
+ check_bitstring/2,check_named_bitstring/3,
check_octetstring/2,
check_null/2,
check_objectidentifier/2,
@@ -50,31 +50,54 @@ check_int(DefValue, Value, NNL) when is_atom(Value) ->
check_int(DefaultValue, _Value, _) ->
throw({error,DefaultValue}).
-%% Two equal lists or integers
-check_bitstring(_, asn1_DEFAULT, _) ->
+%% check_bitstring(Default, UserBitstring) -> true|false
+%% Default = bitstring()
+%% UserBitstring = integer() | list(0|1) | {Unused,binary()} | bitstring()
+check_bitstring(_, asn1_DEFAULT) ->
true;
-check_bitstring(V, V, _) ->
- true;
-%% Default value as a list of 1 and 0 and user value as an integer
-check_bitstring(L=[H|T], Int, _) when is_integer(Int), is_integer(H) ->
- case bit_list_to_int(L, length(T)) of
- Int -> true;
- _ -> throw({error,L,Int})
+check_bitstring(DefVal, {Unused,Binary}) ->
+ %% User value in compact format.
+ Sz = bit_size(Binary) - Unused,
+ <<Val:Sz/bitstring,_:Unused>> = Binary,
+ check_bitstring(DefVal, Val);
+check_bitstring(DefVal, Val) when is_bitstring(Val) ->
+ case Val =:= DefVal of
+ false -> throw(error);
+ true -> true
end;
-%% Default value as an integer, val as list
-check_bitstring(Int, Val, NBL) when is_integer(Int), is_list(Val) ->
- BL = int_to_bit_list(Int, [], length(Val)),
- check_bitstring(BL, Val, NBL);
+check_bitstring(Def, Val) when is_list(Val) ->
+ check_bitstring_list(Def, Val);
+check_bitstring(Def, Val) when is_integer(Val) ->
+ check_bitstring_integer(Def, Val).
+
+check_bitstring_list(<<H:1,T1/bitstring>>, [H|T2]) ->
+ check_bitstring_list(T1, T2);
+check_bitstring_list(<<>>, []) ->
+ true;
+check_bitstring_list(_, _) ->
+ throw(error).
+
+check_bitstring_integer(<<H:1,T1/bitstring>>, Int) when H =:= Int band 1 ->
+ check_bitstring_integer(T1, Int bsr 1);
+check_bitstring_integer(<<>>, 0) ->
+ true;
+check_bitstring_integer(_, _) ->
+ throw(error).
+
+check_named_bitstring(_, asn1_DEFAULT, _) ->
+ true;
+check_named_bitstring(V, V, _) ->
+ true;
%% Default value and user value as lists of ones and zeros
-check_bitstring(L1=[H1|_T1], L2=[H2|_T2], NBL=[_H|_T]) when is_integer(H1), is_integer(H2) ->
+check_named_bitstring(L1=[H1|_T1], L2=[H2|_T2], NBL=[_H|_T]) when is_integer(H1), is_integer(H2) ->
L2new = remove_trailing_zeros(L2),
- check_bitstring(L1, L2new, NBL);
+ check_named_bitstring(L1, L2new, NBL);
%% Default value as a list of 1 and 0 and user value as a list of atoms
-check_bitstring(L1=[H1|_T1], L2=[H2|_T2], NBL) when is_integer(H1), is_atom(H2) ->
+check_named_bitstring(L1=[H1|_T1], L2=[H2|_T2], NBL) when is_integer(H1), is_atom(H2) ->
L3 = bit_list_to_nbl(L1, NBL, 0, []),
- check_bitstring(L3, L2, NBL);
+ check_named_bitstring(L3, L2, NBL);
%% Both default value and user value as a list of atoms
-check_bitstring(L1=[H1|T1], L2=[H2|_T2], _)
+check_named_bitstring(L1=[H1|T1], L2=[H2|_T2], _)
when is_atom(H1), is_atom(H2), length(L1) =:= length(L2) ->
case lists:member(H1, L2) of
true ->
@@ -82,27 +105,29 @@ check_bitstring(L1=[H1|T1], L2=[H2|_T2], _)
false -> throw({error,L2})
end;
%% Default value as a list of atoms and user value as a list of 1 and 0
-check_bitstring(L1=[H1|_T1], L2=[H2|_T2], NBL) when is_atom(H1), is_integer(H2) ->
+check_named_bitstring(L1=[H1|_T1], L2=[H2|_T2], NBL) when is_atom(H1), is_integer(H2) ->
L3 = bit_list_to_nbl(L2, NBL, 0, []),
- check_bitstring(L1, L3, NBL);
+ check_named_bitstring(L1, L3, NBL);
%% User value in compact format
-check_bitstring(DefVal,CBS={_,_}, NBL) ->
+check_named_bitstring(DefVal,CBS={_,_}, NBL) ->
NewVal = cbs_to_bit_list(CBS),
- check_bitstring(DefVal, NewVal, NBL);
-check_bitstring(DV, V, _) ->
+ check_named_bitstring(DefVal, NewVal, NBL);
+%% User value as a binary
+check_named_bitstring(DefVal, CBS, NBL) when is_binary(CBS) ->
+ NewVal = cbs_to_bit_list({0,CBS}),
+ check_named_bitstring(DefVal, NewVal, NBL);
+%% User value as a bitstring
+check_named_bitstring(DefVal, CBS, NBL) when is_bitstring(CBS) ->
+ BitSize = bit_size(CBS),
+ Unused = 8 - (BitSize band 7),
+ NewVal = cbs_to_bit_list({Unused,<<CBS:BitSize/bits,0:Unused>>}),
+ check_named_bitstring(DefVal, NewVal, NBL);
+check_named_bitstring(DV, V, _) ->
throw({error,DV,V}).
-
-bit_list_to_int([0|Bs], ShL)->
- bit_list_to_int(Bs, ShL-1) + 0;
-bit_list_to_int([1|Bs], ShL) ->
- bit_list_to_int(Bs, ShL-1) + (1 bsl ShL);
-bit_list_to_int([], _) ->
- 0.
-
int_to_bit_list(0, Acc, 0) ->
Acc;
-int_to_bit_list(Int, Acc, Len) ->
+int_to_bit_list(Int, Acc, Len) when Len > 0 ->
int_to_bit_list(Int bsr 1, [Int band 1|Acc], Len - 1).
bit_list_to_nbl([0|T], NBL, Pos, Acc) ->
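
A hedged sketch of the new check_bitstring/2 runtime check (used when verifying that a value equal to the DEFAULT is not encoded), again for a default of '101111'B; every legacy and current representation is accepted:

    true = check_bitstring(<<2#101111:6>>, asn1_DEFAULT),
    true = check_bitstring(<<2#101111:6>>, <<2#101111:6>>),         %bitstring
    true = check_bitstring(<<2#101111:6>>, {2,<<2#101111:6,0:2>>}), %compact
    true = check_bitstring(<<2#101111:6>>, [1,0,1,1,1,1]),          %legacy list
    true = check_bitstring(<<2#101111:6>>, 2#111101).               %legacy integer
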
diff --git a/lib/asn1/src/asn1rtt_per_common.erl b/lib/asn1/src/asn1rtt_per_common.erl
index 9e9fd87ec3..3309e6a4ca 100644
--- a/lib/asn1/src/asn1rtt_per_common.erl
+++ b/lib/asn1/src/asn1rtt_per_common.erl
@@ -37,6 +37,7 @@
bitstring_from_positions/1,bitstring_from_positions/2,
to_bitstring/1,to_bitstring/2,
to_named_bitstring/1,to_named_bitstring/2,
+ is_default_bitstring/5,
extension_bitmap/3]).
-define('16K',16384).
@@ -271,6 +272,36 @@ to_named_bitstring(Val, Lb) ->
%% for correctness, not speed.
adjust_trailing_zeroes(to_bitstring(Val), Lb).
+is_default_bitstring(asn1_DEFAULT, _, _, _, _) ->
+ true;
+is_default_bitstring({Unused,Bin}, V0, V1, V2, V3) when is_integer(Unused) ->
+ %% Convert compact bitstring to a bitstring.
+ Sz = bit_size(Bin) - Unused,
+ <<Bs:Sz/bitstring,_:Unused>> = Bin,
+ is_default_bitstring(Bs, V0, V1, V2, V3);
+is_default_bitstring(Named, Named, _, _, _) ->
+ true;
+is_default_bitstring(Bs, _, Bs, _, _) ->
+ true;
+is_default_bitstring(List, _, _, List, _) ->
+ true;
+is_default_bitstring(Int, _, _, _, Int) ->
+ true;
+is_default_bitstring(Val, _, Def, _, _) when is_bitstring(Val) ->
+ Sz = bit_size(Def),
+ case Val of
+ <<Def:Sz/bitstring,T/bitstring>> ->
+ NumZeroes = bit_size(T),
+ case T of
+ <<0:NumZeroes>> -> true;
+ _ -> false
+ end;
+ _ ->
+ false
+ end;
+is_default_bitstring(Val, _, _, List, _) when is_list(Val) ->
+ is_default_bitstring_list(List, Val);
+is_default_bitstring(_, _, _, _, _) -> false.
extension_bitmap(Val, Pos, Limit) ->
extension_bitmap(Val, Pos, Limit, 0).
@@ -447,6 +478,16 @@ ntz(Byte) ->
4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0},
element(Byte+1, T).
+is_default_bitstring_list([H|Def], [H|Val]) ->
+ is_default_bitstring_list(Def, Val);
+is_default_bitstring_list([], []) ->
+ true;
+is_default_bitstring_list([], [_|_]=Val) ->
+ lists:all(fun(0) -> true;
+ (_) -> false
+ end, Val);
+is_default_bitstring_list(_, _) -> false.
+
extension_bitmap(_Val, Pos, Limit, Acc) when Pos >= Limit ->
Acc;
extension_bitmap(Val, Pos, Limit, Acc) ->
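
A hedged sketch of is_default_bitstring/5 for the named default {second} of SeqBS, with the trailing arguments as built by def_values/2 (List=[second], Bs=<<1:2>>, ListBs=[0,1], IntBs=2); note that trailing zero bits are ignored:

    true  = is_default_bitstring([second],     [second], <<1:2>>, [0,1], 2),
    true  = is_default_bitstring(<<1:2,0:17>>, [second], <<1:2>>, [0,1], 2),
    true  = is_default_bitstring([0,1,0,0,0],  [second], <<1:2>>, [0,1], 2),
    false = is_default_bitstring([1,1,0],      [second], <<1:2>>, [0,1], 2).
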
diff --git a/lib/asn1/src/prepare_templates.erl b/lib/asn1/src/prepare_templates.erl
index 83155b2e52..ccd15548d8 100644
--- a/lib/asn1/src/prepare_templates.erl
+++ b/lib/asn1/src/prepare_templates.erl
@@ -21,69 +21,77 @@
-export([gen_asn1ct_rtt/1,gen_asn1ct_eval/1]).
gen_asn1ct_rtt(Ms) ->
- io:format("%% Generated by ~s. DO NOT EDIT THIS FILE.\n"
+ {ok,Fd} = file:open("asn1ct_rtt.erl", [write]),
+ io:format(Fd,
+ "%% Generated by ~s. DO NOT EDIT THIS FILE.\n"
"%%\n"
"%% Input files:\n", [?MODULE]),
- [io:put_chars(["%% ",M,$\n]) || M <- Ms],
- io:nl(),
- io:put_chars("-module(asn1ct_rtt).\n"
+ [io:put_chars(Fd, ["%% ",M,$\n]) || M <- Ms],
+ io:nl(Fd),
+ io:put_chars(Fd,
+ "-module(asn1ct_rtt).\n"
"-export([assert_defined/1,dependencies/1,code/0]).\n"
"\n"),
Forms = lists:sort(lists:append([abstract(M) || M <- Ms])),
Exp = lists:sort(exports(Forms)),
- defined(Exp),
- io:nl(),
+ defined(Fd, Exp),
+ io:nl(Fd),
Calls = calls(Forms),
R = sofs:relation(Calls),
Fam0 = sofs:relation_to_family(R),
Fam = sofs:to_external(Fam0),
- dependencies(Fam),
- io:nl(),
+ dependencies(Fd, Fam),
+ io:nl(Fd),
Funcs = [begin
Bin = list_to_binary([$\n|erl_pp:function(Func)]),
{{M,F,A},Bin}
end || {M,{function,_,F,A,_}=Func} <- Forms],
- io:format("code() ->\n~p.\n\n", [Funcs]),
+ io:format(Fd, "code() ->\n~p.\n\n", [Funcs]),
+ ok = file:close(Fd),
halt(0).
gen_asn1ct_eval([File]) ->
+ Output = filename:rootname(File, ".funcs") ++ ".erl",
+ {ok,Fd} = file:open(Output, [write]),
{ok,Funcs} = file:consult(File),
asn1ct_func:start_link(),
[asn1ct_func:need(MFA) || MFA <- Funcs],
- io:format("%% Generated by ~s. DO NOT EDIT THIS FILE.\n"
+ io:format(Fd,
+ "%% Generated by ~s. DO NOT EDIT THIS FILE.\n"
"%%\n"
"%% Input file: ~s\n\n", [?MODULE,File]),
- io:format("-module(~s).\n", [filename:rootname(File)]),
- gen_asn1ct_eval_exp(Funcs),
- asn1ct_func:generate(group_leader()),
+ io:format(Fd, "-module(~s).\n", [filename:rootname(File)]),
+ gen_asn1ct_eval_exp(Fd, Funcs),
+ asn1ct_func:generate(Fd),
+ ok = file:close(Fd),
halt(0).
-gen_asn1ct_eval_exp(Funcs) ->
- io:put_chars("-export(["),
- gen_asn1ct_eval_exp_1(Funcs, ""),
- io:put_chars("]).\n").
+gen_asn1ct_eval_exp(Fd, Funcs) ->
+ io:put_chars(Fd, "-export(["),
+ gen_asn1ct_eval_exp_1(Fd, Funcs, ""),
+ io:put_chars(Fd, "]).\n").
-gen_asn1ct_eval_exp_1([{_,F,A}|T], Sep) ->
- io:put_chars(Sep),
- io:format("~p/~p", [F,A]),
- gen_asn1ct_eval_exp_1(T, ",\n");
-gen_asn1ct_eval_exp_1([], _) -> ok.
+gen_asn1ct_eval_exp_1(Fd, [{_,F,A}|T], Sep) ->
+ io:put_chars(Fd, Sep),
+ io:format(Fd, "~p/~p", [F,A]),
+ gen_asn1ct_eval_exp_1(Fd, T, ",\n");
+gen_asn1ct_eval_exp_1(_, [], _) -> ok.
-defined([H|T]) ->
- io:format("assert_defined(~p) -> ok", [H]),
+defined(Fd, [H|T]) ->
+ io:format(Fd, "assert_defined(~p) -> ok", [H]),
case T of
[] ->
- io:put_chars(".\n");
+ io:put_chars(Fd, ".\n");
[_|_] ->
- io:put_chars(";\n"),
- defined(T)
+ io:put_chars(Fd, ";\n"),
+ defined(Fd, T)
end.
-dependencies([{K,V}|T]) ->
- io:format("dependencies(~p) ->\n~p;\n", [K,V]),
- dependencies(T);
-dependencies([]) ->
- io:put_chars("dependencies(_) -> [].\n").
+dependencies(Fd, [{K,V}|T]) ->
+ io:format(Fd, "dependencies(~p) ->\n~p;\n", [K,V]),
+ dependencies(Fd, T);
+dependencies(Fd, []) ->
+ io:put_chars(Fd, "dependencies(_) -> [].\n").
abstract(File) ->
{ok,{M0,[{abstract_code,Abstract}]}} =
diff --git a/lib/asn1/test/asn1_SUITE.erl b/lib/asn1/test/asn1_SUITE.erl
index 61b360ddf2..83bd66a631 100644
--- a/lib/asn1/test/asn1_SUITE.erl
+++ b/lib/asn1/test/asn1_SUITE.erl
@@ -96,7 +96,6 @@ groups() ->
testChoTypeRefSeq,
testChoTypeRefSet,
testMultipleLevels,
- testDef,
testOpt,
testSeqDefault,
% Uses 'External'
@@ -141,9 +140,9 @@ groups() ->
testDeepTConstr,
testExport,
testImport,
- % Uses 'ParamBasic'
- {group, [], [testParamBasic,
- testDER]},
+ testParamBasic,
+ testDER,
+ testDEFAULT,
testMvrasn6,
testContextSwitchingTypes,
testOpenTypeImplicitTag,
@@ -326,20 +325,21 @@ testCompactBitString(Config, Rule, Opts) ->
[Rule, compact_bit_string|Opts]),
testCompactBitString:otp_4869(Rule).
-testPrimStrings(Config) -> test(Config, fun testPrimStrings/3).
+testPrimStrings(Config) ->
+ test(Config, fun testPrimStrings/3, [ber,{ber,[der]},per,uper]).
testPrimStrings(Config, Rule, Opts) ->
asn1_test_lib:compile_all(["PrimStrings", "BitStr"], Config, [Rule|Opts]),
- testPrimStrings_cases(Rule),
+ testPrimStrings_cases(Rule, Opts),
asn1_test_lib:compile_all(["PrimStrings", "BitStr"], Config,
[legacy_bit_string,Rule|Opts]),
- testPrimStrings:bit_string(Rule),
+ testPrimStrings:bit_string(Rule, Opts),
asn1_test_lib:compile_all(["PrimStrings", "BitStr"], Config,
[compact_bit_string,Rule|Opts]),
- testPrimStrings:bit_string(Rule),
+ testPrimStrings:bit_string(Rule, Opts),
testPrimStrings:more_strings(Rule).
-testPrimStrings_cases(Rule) ->
- testPrimStrings:bit_string(Rule),
+testPrimStrings_cases(Rule, Opts) ->
+ testPrimStrings:bit_string(Rule, Opts),
testPrimStrings:octet_string(Rule),
testPrimStrings:numeric_string(Rule),
testPrimStrings:other_strings(Rule),
@@ -429,6 +429,13 @@ testDef(Config, Rule, Opts) ->
asn1_test_lib:compile("Def", Config, [Rule|Opts]),
testDef:main(Rule).
+testDEFAULT(Config) ->
+ test(Config, fun testDEFAULT/3, [ber,{ber,[der]},per,uper]).
+testDEFAULT(Config, Rule, Opts) ->
+ asn1_test_lib:compile_all(["Def","Default"], Config, [Rule|Opts]),
+ testDef:main(Rule),
+ testSeqSetDefaultVal:main(Rule, Opts).
+
testOpt(Config) -> test(Config, fun testOpt/3).
testOpt(Config, Rule, Opts) ->
asn1_test_lib:compile("Opt", Config, [Rule|Opts]),
@@ -516,7 +523,8 @@ testSetDefault(Config, Rule, Opts) ->
asn1_test_lib:compile("SetDefault", Config, [Rule|Opts]),
testSetDefault:main(Rule).
-testParamBasic(Config) -> test(Config, fun testParamBasic/3).
+testParamBasic(Config) ->
+ test(Config, fun testParamBasic/3, [ber,{ber,[der]},per,uper]).
testParamBasic(Config, Rule, Opts) ->
asn1_test_lib:compile("ParamBasic", Config, [Rule|Opts]),
testParamBasic:main(Rule).
@@ -873,11 +881,7 @@ testDER(Config) ->
test(Config, fun testDER/3, [ber]).
testDER(Config, Rule, Opts) ->
asn1_test_lib:compile("DERSpec", Config, [Rule, der|Opts]),
- testDER:test(),
- asn1_test_lib:compile("ParamBasic", Config, [Rule, der|Opts]),
- testParamBasic:main(der),
- asn1_test_lib:compile("Default", Config, [Rule, der|Opts]),
- testSeqSetDefaultVal:main(Rule).
+ testDER:test().
specialized_decodes(Config) ->
test(Config, fun specialized_decodes/3, [ber]).
diff --git a/lib/asn1/test/asn1_SUITE_data/Constraints.py b/lib/asn1/test/asn1_SUITE_data/Constraints.py
index e4bc987e4c..864a471b88 100644
--- a/lib/asn1/test/asn1_SUITE_data/Constraints.py
+++ b/lib/asn1/test/asn1_SUITE_data/Constraints.py
@@ -17,6 +17,11 @@ NegSemiConstrained ::= INTEGER (-128..MAX)
SemiConstrainedExt ::= INTEGER (42..MAX, ...)
NegSemiConstrainedExt ::= INTEGER (-128..MAX, ...)
+-- Union of single values
+Sv1 ::= INTEGER (2|3|17)
+Sv2 ::= INTEGER (2|3|17, ...)
+Sv3 ::= INTEGER {a(2),b(3),z(17)} (2|3|17, ...)
+
-- Other constraints
FixedSize ::= OCTET STRING (SIZE(10))
FixedSize2 ::= OCTET STRING (SIZE(10|20))
@@ -94,4 +99,44 @@ pdf OBJECT IDENTIFIER ::= {1,2,3,4,5}
ShorterExt ::= IA5String (SIZE (5, ...))
+SeqOverlapping ::= SEQUENCE {
+ v Overlapping
+}
+
+SeqNonOverlapping ::= SEQUENCE {
+ v NonOverlapping
+}
+
+Overlapping ::= INTEGER (7280..7560 |
+7580..7680 |
+7910..8210 |
+8600..8940 |
+9250..9600 |
+14759..15109 |
+15250..15590 |
+18050..18800 |
+19300..19950 |
+21100..21700 |
+26200..26900 |
+18500..19900 |
+20100..20250 |
+21100..21700 |
+23000..24000 |
+24960..26900)
+
+-- The same intervals, but merged and sorted --
+NonOverlapping ::= INTEGER (7280..7560 |
+7580..7680 |
+7910..8210 |
+8600..8940 |
+9250..9600 |
+14759..15109 |
+15250..15590 |
+18050..19950 |
+20100..20250 |
+21100..21700 |
+23000..24000 |
+24960..26900)
+
+
END
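
A hedged sketch of what the new Overlapping/NonOverlapping tests verify: with the per or uper backend the compiler now reduces both constraints to the same effective constraint, so the same value encodes to identical bytes through either type (BER is only checked with plain round-trips):

    {ok,E1} = 'Constraints':encode('SeqOverlapping', {'SeqOverlapping',7580}),
    {ok,E2} = 'Constraints':encode('SeqNonOverlapping', {'SeqNonOverlapping',7580}),
    true = iolist_to_binary(E1) =:= iolist_to_binary(E2).
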
diff --git a/lib/asn1/test/asn1_SUITE_data/Default.asn b/lib/asn1/test/asn1_SUITE_data/Default.asn
index 6604953c1f..168ce50bb2 100644
--- a/lib/asn1/test/asn1_SUITE_data/Default.asn
+++ b/lib/asn1/test/asn1_SUITE_data/Default.asn
@@ -21,7 +21,8 @@ SeqBS ::= SEQUENCE {
a BIT STRING DEFAULT '1010110'B,
b BIT STRING DEFAULT 'A8A'H,
c BIT STRING {first(0),second(1),third(2)} DEFAULT {second},
- d BIT STRING DEFAULT onelist
+ d BIT STRING DEFAULT onelist,
+ e BIT STRING DEFAULT '01011010'B
}
SetBS ::= SET {
diff --git a/lib/asn1/test/asn1_SUITE_data/PrimStrings.asn1 b/lib/asn1/test/asn1_SUITE_data/PrimStrings.asn1
index 08e7f94ab6..a5b4c8a53d 100644
--- a/lib/asn1/test/asn1_SUITE_data/PrimStrings.asn1
+++ b/lib/asn1/test/asn1_SUITE_data/PrimStrings.asn1
@@ -46,7 +46,13 @@ BS256 ::= BIT STRING (SIZE (256))
BS1024 ::= BIT STRING (SIZE (1024))
-
+ BsDef1 ::= SEQUENCE {
+ s BIT STRING DEFAULT '101111'B
+ }
+
+ BsDef2 ::= SEQUENCE {
+ s BIT STRING DEFAULT 'DEADBEEF'H
+ }
Os ::= OCTET STRING
OsCon ::= [60] OCTET STRING
diff --git a/lib/asn1/test/testConstraints.erl b/lib/asn1/test/testConstraints.erl
index 9a1d62993d..23322f5e88 100644
--- a/lib/asn1/test/testConstraints.erl
+++ b/lib/asn1/test/testConstraints.erl
@@ -122,6 +122,42 @@ int_constraints(Rules) ->
range_error(Rules, 'X1', 21),
%%==========================================================
+ %% Union of single values
+ %% Sv1 ::= INTEGER (2|3|17)
+ %% Sv2 ::= INTEGER (2|3|17, ...)
+ %% Sv3 ::= INTEGER {a(2),b(3),z(17)} (2|3|17, ...)
+ %%==========================================================
+
+ range_error(Rules, 'Sv1', 1),
+ range_error(Rules, 'Sv1', 18),
+ roundtrip('Sv1', 2),
+ roundtrip('Sv1', 3),
+ roundtrip('Sv1', 7),
+
+ %% Encoded as root
+ v_roundtrip(Rules, 'Sv2', 2),
+ v_roundtrip(Rules, 'Sv2', 3),
+ v_roundtrip(Rules, 'Sv2', 17),
+
+ %% Encoded as extension
+ v_roundtrip(Rules, 'Sv2', 1),
+ v_roundtrip(Rules, 'Sv2', 4),
+ v_roundtrip(Rules, 'Sv2', 18),
+
+ %% Encoded as root
+ v_roundtrip(Rules, 'Sv3', a),
+ v_roundtrip(Rules, 'Sv3', b),
+ v_roundtrip(Rules, 'Sv3', z),
+ v_roundtrip(Rules, 'Sv3', 2, a),
+ v_roundtrip(Rules, 'Sv3', 3, b),
+ v_roundtrip(Rules, 'Sv3', 17, z),
+
+ %% Encoded as extension
+ v_roundtrip(Rules, 'Sv3', 1),
+ v_roundtrip(Rules, 'Sv3', 4),
+ v_roundtrip(Rules, 'Sv3', 18),
+
+ %%==========================================================
%% SemiConstrained
%%==========================================================
@@ -182,6 +218,15 @@ int_constraints(Rules) ->
roundtrip('ShorterExt', "abcde"),
roundtrip('ShorterExt', "abcdef"),
+ %%==========================================================
+ %% Unions of INTEGER constraints
+ %%==========================================================
+ seq_roundtrip(Rules, 'SeqOverlapping', 'SeqNonOverlapping', 7580),
+ seq_roundtrip(Rules, 'SeqOverlapping', 'SeqNonOverlapping', 9600),
+ seq_roundtrip(Rules, 'SeqOverlapping', 'SeqNonOverlapping', 18050),
+ seq_roundtrip(Rules, 'SeqOverlapping', 'SeqNonOverlapping', 19000),
+ seq_roundtrip(Rules, 'SeqOverlapping', 'SeqNonOverlapping', 26900),
+
ok.
%% PER: Ensure that if the lower bound is Lb, Lb+16#80 is encoded
@@ -197,7 +242,29 @@ v(per, 'SemiConstrainedExt', 42+128) -> "000180";
v(uper, 'SemiConstrainedExt', 42+128) -> "00C000";
v(ber, 'NegSemiConstrainedExt', 0) -> "020100";
v(per, 'NegSemiConstrainedExt', 0) -> "000180";
-v(uper, 'NegSemiConstrainedExt', 0) -> "00C000".
+v(uper, 'NegSemiConstrainedExt', 0) -> "00C000";
+v(ber, 'Sv2', 1) -> "020101";
+v(per, 'Sv2', 1) -> "800101";
+v(uper, 'Sv2', 1) -> "808080";
+v(ber, 'Sv2', 2) -> "020102";
+v(per, 'Sv2', 2) -> "00";
+v(uper, 'Sv2', 2) -> "00";
+v(ber, 'Sv2', 3) -> "020103";
+v(per, 'Sv2', 3) -> "08";
+v(uper, 'Sv2', 3) -> "08";
+v(ber, 'Sv2', 4) -> "020104";
+v(per, 'Sv2', 4) -> "800104";
+v(uper, 'Sv2', 4) -> "808200";
+v(ber, 'Sv2', 17) -> "020111";
+v(per, 'Sv2', 17) -> "78";
+v(uper, 'Sv2', 17) -> "78";
+v(ber, 'Sv2', 18) -> "020112";
+v(per, 'Sv2', 18) -> "800112";
+v(uper, 'Sv2', 18) -> "808900";
+v(Rule, 'Sv3', a) -> v(Rule, 'Sv2', 2);
+v(Rule, 'Sv3', b) -> v(Rule, 'Sv2', 3);
+v(Rule, 'Sv3', z) -> v(Rule, 'Sv2', 17);
+v(Rule, 'Sv3', Val) when is_integer(Val) -> v(Rule, 'Sv2', Val).
shorter_ext(per, "a") -> <<16#80,16#01,16#61>>;
shorter_ext(uper, "a") -> <<16#80,16#E1>>;
@@ -211,6 +278,10 @@ v_roundtrip(Erule, Type, Value) ->
Encoded = asn1_test_lib:hex_to_bin(v(Erule, Type, Value)),
Encoded = roundtrip('Constraints', Type, Value).
+v_roundtrip(Erule, Type, Value, Expected) ->
+ Encoded = asn1_test_lib:hex_to_bin(v(Erule, Type, Value)),
+ Encoded = asn1_test_lib:roundtrip_enc('Constraints', Type, Value, Expected).
+
roundtrip(Type, Value) ->
roundtrip('Constraints', Type, Value).
@@ -235,3 +306,12 @@ range_error(Per, Type, Value) when Per =:= per; Per =:= uper ->
%% on encode.
{error,_} = 'Constraints':encode(Type, Value),
ok.
+
+seq_roundtrip(Rules, Seq1, Seq2, Val) ->
+ Enc = roundtrip(Seq1, {Seq1,Val}),
+ case Rules of
+ ber ->
+ roundtrip(Seq2, {Seq2,Val});
+ _ ->
+ roundtrip_enc(Seq2, {Seq2,Val}, Enc)
+ end.
diff --git a/lib/asn1/test/testParamBasic.erl b/lib/asn1/test/testParamBasic.erl
index 3a55408e94..3db89ca174 100644
--- a/lib/asn1/test/testParamBasic.erl
+++ b/lib/asn1/test/testParamBasic.erl
@@ -38,7 +38,9 @@ main(Rules) ->
<<48,3,128,1,11>> =
roundtrip_enc('T11', #'T11'{number=11,string="hej"}),
<<48,3,128,1,11>> =
- roundtrip_enc('T12', #'T12'{number=11,string=[1,0,1,0]});
+ roundtrip_enc('T12',
+ #'T12'{number=11,string=[1,0,1,0]},
+ #'T12'{number=11,string = <<10:4>>});
_ -> ok
end,
ok.
@@ -48,3 +50,6 @@ roundtrip(Type, Value) ->
roundtrip_enc(Type, Value) ->
asn1_test_lib:roundtrip_enc('ParamBasic', Type, Value).
+
+roundtrip_enc(Type, Value, Expected) ->
+ asn1_test_lib:roundtrip_enc('ParamBasic', Type, Value, Expected).
diff --git a/lib/asn1/test/testPrimStrings.erl b/lib/asn1/test/testPrimStrings.erl
index be5409aa92..2fe0780701 100644
--- a/lib/asn1/test/testPrimStrings.erl
+++ b/lib/asn1/test/testPrimStrings.erl
@@ -19,7 +19,7 @@
%%
-module(testPrimStrings).
--export([bit_string/1]).
+-export([bit_string/2]).
-export([octet_string/1]).
-export([numeric_string/1]).
-export([other_strings/1]).
@@ -68,7 +68,7 @@ fragmented_lengths() ->
K64-1,K64,K64+1,K64+(1 bsl 7)-1,K64+(1 bsl 7),K64+(1 bsl 7)+1,
K64+K16-1,K64+K16,K64+K16+1].
-bit_string(Rules) ->
+bit_string(Rules, Opts) ->
%%==========================================================
%% Bs1 ::= BIT STRING
@@ -90,9 +90,10 @@ bit_string(Rules) ->
bs_roundtrip('Bs1', [0,1,0,0,1,0]),
bs_roundtrip('Bs1', [1,0,0,0,0,0,0,0,0]),
bs_roundtrip('Bs1', [0,1,0,0,1,0,1,1,1,1,1,0,0,0,1,0,0,1,1]),
-
- case Rules of
- ber ->
+
+
+ case {Rules,Opts} of
+ {ber,[]} ->
bs_decode('Bs1', <<35,8,3,2,0,73,3,2,4,32>>,
[0,1,0,0,1,0,0,1,0,0,1,0]),
bs_decode('Bs1', <<35,9,3,2,0,234,3,3,7,156,0>>,
@@ -100,7 +101,17 @@ bit_string(Rules) ->
bs_decode('Bs1', <<35,128,3,2,0,234,3,3,7,156,0,0,0>>,
[1,1,1,0,1,0,1,0,1,0,0,1,1,1,0,0,0]);
_ ->
- ok
+ %% DER, PER, UPER
+ consistent_def_enc('BsDef1',
+ [2#111101,
+ [1,0,1,1,1,1],
+ {2,<<2#101111:6,0:2>>},
+ <<2#101111:6>>]),
+ consistent_def_enc('BsDef2',
+ [[1,1,0,1, 1,1,1,0, 1,0,1,0, 1,1,0,1,
+ 1,0,1,1, 1,1,1,0, 1,1,1,0, 1,1,1,1],
+ {0,<<16#DEADBEEF:4/unit:8>>},
+ <<16#DEADBEEF:4/unit:8>>])
end,
@@ -217,6 +228,24 @@ bit_string(Rules) ->
_ -> per_bs_strings()
end.
+consistent_def_enc(Type, Vs) ->
+ M = 'PrimStrings',
+ {ok,Enc} = M:encode(Type, {Type,asn1_DEFAULT}),
+ {ok,Val} = M:decode(Type, Enc),
+
+ %% Ensure that the value has the correct format.
+ case {M:bit_string_format(),Val} of
+ {bitstring,{_,Bs}} when is_bitstring(Bs) -> ok;
+ {compact,{_,{Unused,Bin}}} when is_integer(Unused),
+ is_binary(Bin) -> ok;
+ {legacy,{_,Bs}} when is_list(Bs) -> ok
+ end,
+
+ %% All values should be recognized and encoded as the
+ %% default value (i.e. not encoded at all).
+ _ = [{ok,Enc} = M:encode(Type, {Type,V}) || V <- Vs],
+ ok.
+
%% The PER encoding rules requires that a BIT STRING with
%% named positions should never have any trailing zeroes
%% (except to reach the minimum number of bits as given by
diff --git a/lib/asn1/test/testSeqSetDefaultVal.erl b/lib/asn1/test/testSeqSetDefaultVal.erl
index fb61bf1647..044099199f 100644
--- a/lib/asn1/test/testSeqSetDefaultVal.erl
+++ b/lib/asn1/test/testSeqSetDefaultVal.erl
@@ -18,7 +18,7 @@
%%
%%
-module(testSeqSetDefaultVal).
--export([main/1]).
+-export([main/2]).
-include("External.hrl").
-include_lib("test_server/include/test_server.hrl").
@@ -34,7 +34,8 @@
-record('SeqBS',{a = asn1_DEFAULT,
b = asn1_DEFAULT,
c = asn1_DEFAULT,
- d = asn1_DEFAULT}).
+ d = asn1_DEFAULT,
+ e = asn1_DEFAULT}).
-record('SetBS',{a = asn1_DEFAULT,
b = asn1_DEFAULT,
c = asn1_DEFAULT,
@@ -93,7 +94,119 @@
-record('S4_b',{ba = asn1_DEFAULT,
bb = asn1_DEFAULT}).
-main(_Rules) ->
+main(ber, []) ->
+ %% Nothing to test because plain BER will only use
+ %% default values when explicitly told to do so by
+ %% asn1_DEFAULT.
+ ok;
+main(Rule, Opts) ->
+ %% DER, PER, UPER. These encodings should not encode
+ %% values that are equal to the default value.
+
+ case {Rule,Opts} of
+ {ber,[der]} ->
+ der();
+ {_,_} ->
+ ok
+ end,
+
+ Ts = [{#'SeqInts'{},
+ [{#'SeqInts'.c,
+ [asn1_DEFAULT,
+ three,
+ 3]}]},
+
+ {#'SeqBS'{},
+ [{#'SeqBS'.a,
+ [asn1_DEFAULT,
+ 2#0110101,
+ [1,0,1,0,1,1,0],
+ {1,<<16#AC>>},
+ <<1:1,0:1,1:1,0:1,1:1,1:1,0:1>>]},
+ {#'SeqBS'.b,
+ [asn1_DEFAULT,
+ 2#10100010101,
+ [1,0,1,0,1,0,0,0,1,0,1,0],
+ {4,<<16#A8,16#A0>>},
+ <<16#A8:8,16#A:4>>]},
+ {#'SeqBS'.c,
+ [asn1_DEFAULT,
+ [second],
+ [0,1],
+ {6,<<0:1,1:1,0:6>>},
+ <<1:2>>]},
+ {#'SeqBS'.c, %Zeroes on the right
+ [asn1_DEFAULT,
+ [second],
+ [0,1,0,0,0],
+ {4,<<0:1,1:1,0:6>>},
+ <<1:2,0:17>>]},
+ {#'SeqBS'.d,
+ [asn1_DEFAULT,
+ 2#1001,
+ [1,0,0,1],
+ {4,<<2#1001:4,0:4>>},
+ <<2#1001:4>>]},
+ {#'SeqBS'.e,
+ [asn1_DEFAULT,
+ [0,1,0,1,1,0,1,0],
+ {0,<<2#01011010:8>>},
+ <<2#01011010:8>>]},
+ %% Not EQUAL to DEFAULT.
+ {#'SeqBS'.b,
+ [[1,1,0], %Not equal to DEFAULT
+ {5,<<6:3,0:5>>},
+ <<6:3>>]}
+ ]},
+
+ {#'SeqOS'{},
+ [{#'SeqOS'.a,
+ [asn1_DEFAULT,
+ [172]]}]},
+
+ {#'SeqOI'{},
+ [{#'SeqOI'.a,
+ [asn1_DEFAULT,
+ {1,2,14,15}]},
+ {#'SeqOI'.b,
+ [asn1_DEFAULT,
+%% {iso,'member-body',250,3,4},
+ {1,2,250,3,4}]},
+ {#'SeqOI'.c,
+ [asn1_DEFAULT,
+%% {iso,standard,8571,2,250,4},
+ {1,0,8571,2,250,4}]}]}
+ ],
+ io:format("~p\n", [Ts]),
+ R0 = [[consistency(Rec, Pos, Vs) || {Pos,Vs} <- Fs] || {Rec,Fs} <- Ts],
+ case lists:flatten(R0) of
+ [] ->
+ ok;
+ [_|_]=R ->
+ io:format("~p\n", [R]),
+ ?t:fail()
+ end.
+
+consistency(Rec0, Pos, [V|Vs]) ->
+ T = element(1, Rec0),
+ Rec = setelement(Pos, Rec0, V),
+ {ok,Enc} = 'Default':encode(T, Rec),
+ {ok,_SmokeTest} = 'Default':decode(T, Enc),
+ consistency_1(Vs, Rec0, Pos, Enc).
+
+consistency_1([V|Vs], Rec0, Pos, Enc) ->
+ Rec = setelement(Pos, Rec0, V),
+ case 'Default':encode(element(1, Rec), Rec) of
+ {ok,Enc} ->
+ consistency_1(Vs, Rec0, Pos, Enc);
+ {ok,WrongEnc} ->
+ [{Rec,{wrong,WrongEnc},{should_be,Enc}}|
+ consistency_1(Vs, Rec0, Pos, Enc)]
+ end;
+consistency_1([], _, _, _) -> [].
+
+der() ->
+ io:put_chars("Peforming DER-specific tests..."),
roundtrip(<<48,0>>,
'SeqInts',
#'SeqInts'{a=asn1_DEFAULT,b=asn1_DEFAULT,
@@ -117,50 +230,88 @@ main(_Rules) ->
roundtrip(<<48,0>>,
'SeqBS',
- #'SeqBS'{a=2#1010110,b=16#A8A,c=[second],d=[1,0,0,1]},
- #'SeqBS'{a=[1,0,1,0,1,1,0],b=16#A8A,c=[second],d=[1,0,0,1]}),
+ #'SeqBS'{a=2#0110101,
+ b=2#010100010101,
+ c=[second],
+ d=[1,0,0,1]},
+ #'SeqBS'{a = <<2#1010110:7>>, b = <<16#A8A:12>>,
+ c=[second], d = <<2#1001:4>>,
+ e = <<2#01011010:8>>}),
roundtrip(<<48,0>>,
'SeqBS',
#'SeqBS'{a=[1,0,1,0,1,1,0],
b=[1,0,1,0,1,0,0,0,1,0,1,0],
c={5,<<64>>},
d=2#1001},
- #'SeqBS'{a=[1,0,1,0,1,1,0],b=16#A8A,c=[second],d=[1,0,0,1]}),
+ #'SeqBS'{a = <<2#1010110:7>>, b = <<16#A8A:12>>,
+ c=[second], d = <<2#1001:4>>,
+ e = <<2#01011010:8>>}),
roundtrip(<<48,3,131,1,0>>,
'SeqBS',
#'SeqBS'{a=[1,0,1,0,1,1,0],
b=[1,0,1,0,1,0,0,0,1,0,1,0],
c={5,<<64>>},
d=0},
- #'SeqBS'{a=[1,0,1,0,1,1,0],
- b=16#A8A,
- c=[second],
- d = <<>>}),
+ #'SeqBS'{a = <<2#1010110:7>>, b = <<16#A8A:12>>,
+ c=[second], d = <<>>,
+ e = <<2#01011010:8>>}),
+ roundtrip(<<48,3,131,1,0>>,
+ 'SeqBS',
+ #'SeqBS'{a = <<1:1,0:1,1:1,0:1,1:1,1:1,0:1>>,
+ b = <<1:1,0:1,1:1,0:1,1:1,0:1,0:1,0:1,1:1,0:1,1:1,0:1>>,
+ c = <<2:3>>,
+ d=0,
+ e = <<16#5A:8>>},
+ #'SeqBS'{a = <<2#1010110:7>>, b = <<16#A8A:12>>,
+ c=[second], d = <<>>,
+ e = <<2#01011010:8>>}),
+
+ %% None of the default values are used.
+ roundtrip(<<48,19,128,2,7,128,129,2,5,64,130,2,5,32,131,1,0,132,2,5,224>>,
+ 'SeqBS',
+ #'SeqBS'{a = <<1:1>>,
+ b = {5,<<64>>},
+ c = [third],
+ d = 0,
+ e = <<7:3>>},
+ #'SeqBS'{a = <<1:1>>,
+ b = <<2:3>>,
+ c = [third],
+ d = <<>>,
+ e = <<7:3>>}),
roundtrip(<<49,0>>,
'SetBS',
- #'SetBS'{a=2#1010110,b=16#A8A,c=[second],d=[1,0,0,1]},
- #'SetBS'{a=[1,0,1,0,1,1,0],b=16#A8A,c=[second],d=[1,0,0,1]}),
+ #'SetBS'{a=2#0110101,
+ b=2#010100010101,
+ c=[second],
+ d=[1,0,0,1]},
+ #'SetBS'{a = <<2#1010110:7>>, b = <<16#A8A:12>>,
+ c=[second], d = <<2#1001:4>>}),
roundtrip(<<49,0>>,
'SetBS',
#'SetBS'{a=[1,0,1,0,1,1,0],
b=[1,0,1,0,1,0,0,0,1,0,1,0],
c={5,<<64>>},
d=9},
- #'SetBS'{a=[1,0,1,0,1,1,0],
- b=16#A8A,
- c=[second],
- d=[1,0,0,1]}),
+ #'SetBS'{a = <<2#1010110:7>>, b = <<16#A8A:12>>,
+ c=[second], d = <<2#1001:4>>}),
roundtrip(<<49,3,131,1,0>>,
'SetBS',
#'SetBS'{a=[1,0,1,0,1,1,0],
b=[1,0,1,0,1,0,0,0,1,0,1,0],
c={5,<<64>>},
d=0},
- #'SetBS'{a=[1,0,1,0,1,1,0],
- b=16#A8A,
- c=[second],
- d = <<>>}),
+ #'SetBS'{a = <<2#1010110:7>>, b = <<16#A8A:12>>,
+ c=[second], d = <<>>}),
+ roundtrip(<<49,3,131,1,0>>,
+ 'SetBS',
+ #'SetBS'{a = <<1:1,0:1,1:1,0:1,1:1,1:1,0:1>>,
+ b = <<1:1,0:1,1:1,0:1,1:1,0:1,0:1,0:1,1:1,0:1,1:1,0:1>>,
+ c = <<2:3>>,
+ d=0},
+ #'SetBS'{a = <<2#1010110:7>>, b = <<16#A8A:12>>,
+ c=[second], d = <<>>}),
roundtrip(<<48,0>>, 'SeqOS',
#'SeqOS'{a=[172],b=[16#A8,16#A0],c='NULL'}),
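
For illustration (not part of the patch): the consistency checks above rely on DER, PER and UPER omitting any component whose value equals its DEFAULT. A minimal sketch of that property, reusing the 'Default' module and the 'SeqBS' record from this suite, assuming the module was compiled for one of those rules and that the default for e is <<2#01011010:8>> as in the table above:

    default_suppression_sketch() ->
        %% All fields left as asn1_DEFAULT...
        {ok, Enc} = 'Default':encode('SeqBS', #'SeqBS'{}),
        %% ...must give the same bytes as explicitly passing a value
        %% that is equal to the DEFAULT of the e component.
        {ok, Enc} = 'Default':encode('SeqBS', #'SeqBS'{e = <<2#01011010:8>>}),
        ok.
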
diff --git a/lib/common_test/doc/src/ct_hooks.xml b/lib/common_test/doc/src/ct_hooks.xml
index b3e713c77f..859ff9df14 100644
--- a/lib/common_test/doc/src/ct_hooks.xml
+++ b/lib/common_test/doc/src/ct_hooks.xml
@@ -485,8 +485,9 @@
NewCTHState</name>
<fsummary>Called after the CTH scope ends</fsummary>
<type>
- <v>TestcaseName = end_per_suite | init_per_group |
- end_per_group | atom()</v>
+ <v>TestcaseName = end_per_suite | {init_per_group,GroupName} |
+ {end_per_group,GroupName} | atom()</v>
+ <v>GroupName = atom()</v>
<v>Reason = {tc_auto_skip | tc_user_skip, term()}</v>
<v>CTHState = NewCTHState = term()</v>
</type>
diff --git a/lib/common_test/src/ct_framework.erl b/lib/common_test/src/ct_framework.erl
index 276f902b05..e81b69a1b5 100644
--- a/lib/common_test/src/ct_framework.erl
+++ b/lib/common_test/src/ct_framework.erl
@@ -69,13 +69,13 @@ init_tc(Mod,Func,Config) ->
andalso Func=/=end_per_group
andalso ct_util:get_testdata(skip_rest) of
true ->
- {skip,"Repeated test stopped by force_stop option"};
+ {auto_skip,"Repeated test stopped by force_stop option"};
_ ->
case ct_util:get_testdata(curr_tc) of
{Suite,{suite0_failed,{require,Reason}}} ->
- {skip,{require_failed_in_suite0,Reason}};
+ {auto_skip,{require_failed_in_suite0,Reason}};
{Suite,{suite0_failed,_}=Failure} ->
- {skip,Failure};
+ {fail,Failure};
_ ->
ct_util:update_testdata(curr_tc,
fun(undefined) ->
@@ -103,7 +103,7 @@ init_tc(Mod,Func,Config) ->
end,
init_tc1(Mod,Suite,Func,Config);
{failed,Seq,BadFunc} ->
- {skip,{sequence_failed,Seq,BadFunc}}
+ {auto_skip,{sequence_failed,Seq,BadFunc}}
end
end
end.
@@ -115,9 +115,9 @@ init_tc1(?MODULE,_,error_in_suite,[Config0]) when is_list(Config0) ->
data={?MODULE,error_in_suite}}),
case ?val(error, Config0) of
undefined ->
- {skip,"unknown_error_in_suite"};
+ {fail,"unknown_error_in_suite"};
Reason ->
- {skip,Reason}
+ {fail,Reason}
end;
init_tc1(Mod,Suite,Func,[Config0]) when is_list(Config0) ->
@@ -159,22 +159,28 @@ init_tc1(Mod,Suite,Func,[Config0]) when is_list(Config0) ->
true ->
ct_config:delete_default_config(testcase)
end,
+ Initialize = fun() ->
+ ct_logs:init_tc(false),
+ ct_event:notify(#event{name=tc_start,
+ node=node(),
+ data={Mod,FuncSpec}})
+ end,
case add_defaults(Mod,Func,AllGroups) of
Error = {suite0_failed,_} ->
- ct_logs:init_tc(false),
- ct_event:notify(#event{name=tc_start,
- node=node(),
- data={Mod,FuncSpec}}),
+ Initialize(),
ct_util:set_testdata({curr_tc,{Suite,Error}}),
{error,Error};
+ Error = {group0_failed,_} ->
+ Initialize(),
+ {auto_skip,Error};
+ Error = {testcase0_failed,_} ->
+ Initialize(),
+ {auto_skip,Error};
{SuiteInfo,MergeResult} ->
case MergeResult of
{error,Reason} ->
- ct_logs:init_tc(false),
- ct_event:notify(#event{name=tc_start,
- node=node(),
- data={Mod,FuncSpec}}),
- {skip,Reason};
+ Initialize(),
+ {fail,Reason};
_ ->
init_tc2(Mod,Suite,Func,SuiteInfo,MergeResult,Config)
end
@@ -222,11 +228,11 @@ init_tc2(Mod,Suite,Func,SuiteInfo,MergeResult,Config) ->
{suite0_failed,Reason} ->
ct_util:set_testdata({curr_tc,{Mod,{suite0_failed,
{require,Reason}}}}),
- {skip,{require_failed_in_suite0,Reason}};
+ {auto_skip,{require_failed_in_suite0,Reason}};
{error,Reason} ->
{auto_skip,{require_failed,Reason}};
{'EXIT',Reason} ->
- {auto_skip,Reason};
+ {fail,Reason};
{ok,PostInitHook,Config1} ->
case get('$test_server_framework_test') of
undefined ->
@@ -272,6 +278,8 @@ add_defaults(Mod,Func, GroupPath) ->
SuiteInfo = merge_with_suite_defaults(Suite,[]),
SuiteInfoNoCTH = [I || I <- SuiteInfo, element(1,I) =/= ct_hooks],
case add_defaults1(Mod,Func, GroupPath, SuiteInfoNoCTH) of
+ Error = {group0_failed,_} -> Error;
+ Error = {testcase0_failed,_} -> Error;
Error = {error,_} -> {SuiteInfo,Error};
MergedInfo -> {SuiteInfo,MergedInfo}
end;
@@ -292,13 +300,16 @@ add_defaults(Mod,Func, GroupPath) ->
element(1,I) =/= ct_hooks],
case add_defaults1(Mod,Func, GroupPath,
SuiteInfoNoCTH) of
+ Error = {group0_failed,_} -> Error;
+ Error = {testcase0_failed,_} -> Error;
Error = {error,_} -> {SuiteInfo1,Error};
MergedInfo -> {SuiteInfo1,MergedInfo}
end;
false ->
ErrStr = io_lib:format("~n*** ERROR *** "
"Invalid return value from "
- "~w:suite/0: ~p~n", [Suite,SuiteInfo]),
+ "~w:suite/0: ~p~n",
+ [Suite,SuiteInfo]),
io:format(ErrStr, []),
io:format(user, ErrStr, []),
{suite0_failed,bad_return_value}
@@ -318,36 +329,69 @@ add_defaults1(Mod,Func, GroupPath, SuiteInfo) ->
%% [LevelXGroupInfo, LevelX-1GroupInfo, ..., TopLevelGroupInfo]
GroupPathInfo =
lists:map(fun(GroupProps) ->
- Name = ?val(name, GroupProps),
- case catch Suite:group(Name) of
- GrInfo when is_list(GrInfo) -> GrInfo;
- _ -> []
+ case ?val(name, GroupProps) of
+ undefined ->
+ [];
+ Name ->
+ case catch Suite:group(Name) of
+ GrInfo when is_list(GrInfo) -> GrInfo;
+ {'EXIT',{undef,_}} -> [];
+ BadGr0 -> {error,BadGr0,Name}
+ end
end
end, GroupPath),
- Args = if Func == init_per_group ; Func == end_per_group ->
- [?val(name, hd(GroupPath))];
- true ->
- []
- end,
- TestCaseInfo =
- case catch apply(Mod,Func,Args) of
- TCInfo when is_list(TCInfo) -> TCInfo;
- _ -> []
- end,
- %% let test case info (also for all config funcs) override group info,
- %% and lower level group info override higher level info
- TCAndGroupInfo = [TestCaseInfo | remove_info_in_prev(TestCaseInfo,
- GroupPathInfo)],
- %% find and save require terms found in suite info
- SuiteReqs =
- [SDDef || SDDef <- SuiteInfo,
- ((require == element(1,SDDef)) or
- (default_config == element(1,SDDef)))],
- case check_for_clashes(TestCaseInfo, GroupPathInfo, SuiteReqs) of
- [] ->
- add_defaults2(Mod,Func, TCAndGroupInfo,SuiteInfo,SuiteReqs);
- Clashes ->
- {error,{config_name_already_in_use,Clashes}}
+ case lists:keysearch(error, 1, GroupPathInfo) of
+ {value,{error,BadGr0Val,GrName}} ->
+ Gr0ErrStr = io_lib:format("~n*** ERROR *** "
+ "Invalid return value from "
+ "~w:group(~w): ~p~n",
+ [Mod,GrName,BadGr0Val]),
+ io:format(Gr0ErrStr, []),
+ io:format(user, Gr0ErrStr, []),
+ {group0_failed,bad_return_value};
+ _ ->
+ Args = if Func == init_per_group ; Func == end_per_group ->
+ [?val(name, hd(GroupPath))];
+ true ->
+ []
+ end,
+ TestCaseInfo =
+ case catch apply(Mod,Func,Args) of
+ TCInfo when is_list(TCInfo) -> TCInfo;
+ {'EXIT',{undef,_}} -> [];
+ BadTC0 -> {error,BadTC0}
+ end,
+
+ case TestCaseInfo of
+ {error,BadTC0Val} ->
+ TC0ErrStr = io_lib:format("~n*** ERROR *** "
+ "Invalid return value from "
+ "~w:~w/0: ~p~n",
+ [Mod,Func,BadTC0Val]),
+ io:format(TC0ErrStr, []),
+ io:format(user, TC0ErrStr, []),
+ {testcase0_failed,bad_return_value};
+ _ ->
+ %% let test case info (also for all config funcs) override
+ %% group info, and lower level group info override higher
+ %% level info
+ TCAndGroupInfo =
+ [TestCaseInfo | remove_info_in_prev(TestCaseInfo,
+ GroupPathInfo)],
+ %% find and save require terms found in suite info
+ SuiteReqs =
+ [SDDef || SDDef <- SuiteInfo,
+ ((require == element(1,SDDef))
+ or (default_config == element(1,SDDef)))],
+ case check_for_clashes(TestCaseInfo, GroupPathInfo,
+ SuiteReqs) of
+ [] ->
+ add_defaults2(Mod,Func, TCAndGroupInfo,
+ SuiteInfo,SuiteReqs);
+ Clashes ->
+ {error,{config_name_already_in_use,Clashes}}
+ end
+ end
end.
get_suite_name(?MODULE, [Cfg|_]) when is_list(Cfg), Cfg /= [] ->
@@ -621,31 +665,34 @@ end_tc(Mod,Func,TCPid,Result,Args,Return) ->
_ -> Func
end,
- case get('$test_server_framework_test') of
- undefined ->
- {FinalResult,FinalNotify} =
- case ct_hooks:end_tc(
- Suite, FuncSpec, Args, Result, Return) of
- '$ct_no_change' ->
- {ok,Result};
- FinalResult1 ->
- {FinalResult1,FinalResult1}
- end,
- %% send sync notification so that event handlers may print
- %% in the log file before it gets closed
- ct_event:sync_notify(#event{name=tc_done,
- node=node(),
- data={Mod,FuncSpec,
- tag_cth(FinalNotify)}});
- Fun ->
- %% send sync notification so that event handlers may print
- %% in the log file before it gets closed
- ct_event:sync_notify(#event{name=tc_done,
- node=node(),
- data={Mod,FuncSpec,tag(Result)}}),
- FinalResult = Fun(end_tc, Return)
- end,
-
+ {Result1,FinalNotify} =
+ case ct_hooks:end_tc(
+ Suite, FuncSpec, Args, Result, Return) of
+ '$ct_no_change' ->
+ {ok,Result};
+ HookResult ->
+ {HookResult,HookResult}
+ end,
+ FinalResult =
+ case get('$test_server_framework_test') of
+ undefined ->
+ %% send sync notification so that event handlers may print
+ %% in the log file before it gets closed
+ ct_event:sync_notify(#event{name=tc_done,
+ node=node(),
+ data={Mod,FuncSpec,
+ tag_cth(FinalNotify)}}),
+ Result1;
+ Fun ->
+ %% send sync notification so that event handlers may print
+ %% in the log file before it gets closed
+ ct_event:sync_notify(#event{name=tc_done,
+ node=node(),
+ data={Mod,FuncSpec,
+ tag(FinalNotify)}}),
+ Fun(end_tc, Return)
+ end,
+
case FuncSpec of
{_,GroupName,_Props} ->
if Func == end_per_group ->
@@ -685,10 +732,10 @@ end_tc(Mod,Func,TCPid,Result,Args,Return) ->
(Unexpected) ->
exit({error,{reset_curr_tc,{Mod,Func},Unexpected}})
end,
- ct_util:update_testdata(curr_tc,ClearCurrTC),
+ ct_util:update_testdata(curr_tc, ClearCurrTC),
case FinalResult of
- {skip,{sequence_failed,_,_}} ->
+ {auto_skip,{sequence_failed,_,_}} ->
%% ct_logs:init_tc is never called for a skipped test case
%% in a failing sequence, so neither should end_tc
ok;
@@ -711,8 +758,13 @@ end_tc(Mod,Func,TCPid,Result,Args,Return) ->
%% {error,Reason} | {skip,Reason} | {timetrap_timeout,TVal} |
%% {testcase_aborted,Reason} | testcase_aborted_or_killed |
%% {'EXIT',Reason} | Other (ignored return value, e.g. 'ok')
-tag({STag,Reason}) when STag == skip; STag == skipped ->
- {skipped,Reason};
+tag({STag,Reason}) when STag == skip; STag == skipped ->
+ case Reason of
+ {failed,{_,init_per_testcase,_}} -> {auto_skipped,Reason};
+ _ -> {skipped,Reason}
+ end;
+tag({auto_skip,Reason}) ->
+ {auto_skipped,Reason};
tag(E = {ETag,_}) when ETag == error; ETag == 'EXIT';
ETag == timetrap_timeout;
ETag == testcase_aborted ->
@@ -722,13 +774,20 @@ tag(E = testcase_aborted_or_killed) ->
tag(Other) ->
Other.
+tag_cth({skipped,Reason={failed,{_,init_per_testcase,_}}}) ->
+ {auto_skipped,Reason};
tag_cth({STag,Reason}) when STag == skip; STag == skipped ->
- {skipped,Reason};
-tag_cth({fail, Reason}) ->
- {failed, {error,Reason}};
+ case Reason of
+ {failed,{_,init_per_testcase,_}} -> {auto_skipped,Reason};
+ _ -> {skipped,Reason}
+ end;
+tag_cth({auto_skip,Reason}) ->
+ {auto_skipped,Reason};
+tag_cth({fail,Reason}) ->
+ {failed,{error,Reason}};
tag_cth(E = {ETag,_}) when ETag == error; ETag == 'EXIT';
- ETag == timetrap_timeout;
- ETag == testcase_aborted ->
+ ETag == timetrap_timeout;
+ ETag == testcase_aborted ->
{failed,E};
tag_cth(E = testcase_aborted_or_killed) ->
{failed,E};
@@ -1229,6 +1288,8 @@ report(What,Data) ->
ct_hooks:on_tc_skip(tc_auto_skip, Data);
{skipped,_} ->
ct_hooks:on_tc_skip(tc_user_skip, Data);
+ {auto_skipped,_} ->
+ ct_hooks:on_tc_skip(tc_auto_skip, Data);
_Else ->
ok
end,
@@ -1253,31 +1314,54 @@ report(What,Data) ->
add_to_stats(auto_skipped);
{_,{skipped,_}} ->
add_to_stats(user_skipped);
+ {_,{auto_skipped,_}} ->
+ add_to_stats(auto_skipped);
{_,{SkipOrFail,_Reason}} ->
add_to_stats(SkipOrFail)
end;
- tc_user_skip ->
- %% test case specified as skipped in testspec
- %% Data = {Suite,Case,Comment}
+ tc_user_skip ->
+ %% test case or config function specified as skipped in testspec,
+ %% or init config func for suite/group has returned {skip,Reason}
+ %% Data = {Suite,Case,Comment} |
+ %% {Suite,{GroupConfigFunc,GroupName},Comment}
+ {Func,Data1} = case Data of
+ {Suite,{ConfigFunc,undefined},Cmt} ->
+ {ConfigFunc,{Suite,ConfigFunc,Cmt}};
+ {_,{ConfigFunc,_},_} -> {ConfigFunc,Data};
+ {_,Case,_} -> {Case,Data}
+ end,
+
ct_event:sync_notify(#event{name=tc_user_skip,
node=node(),
- data=Data}),
- ct_hooks:on_tc_skip(What, Data),
- add_to_stats(user_skipped);
- tc_auto_skip ->
- %% test case skipped because of error in init_per_suite
- %% Data = {Suite,Case,Comment}
-
- {_Suite,Case,_Result} = Data,
+ data=Data1}),
+ ct_hooks:on_tc_skip(What, Data1),
+ if Func /= init_per_suite, Func /= init_per_group,
+ Func /= end_per_suite, Func /= end_per_group ->
+ add_to_stats(user_skipped);
+ true ->
+ ok
+ end;
+ tc_auto_skip ->
+ %% test case skipped because of error in config function, or
+ %% config function skipped because of error in info function
+ %% Data = {Suite,Case,Comment} |
+ %% {Suite,{GroupConfigFunc,GroupName},Comment}
+ {Func,Data1} = case Data of
+ {Suite,{ConfigFunc,undefined},Cmt} ->
+ {ConfigFunc,{Suite,ConfigFunc,Cmt}};
+ {_,{ConfigFunc,_},_} -> {ConfigFunc,Data};
+ {_,Case,_} -> {Case,Data}
+ end,
%% this test case does not have a log, so printouts
%% from event handlers should end up in the main log
ct_event:sync_notify(#event{name=tc_auto_skip,
node=node(),
- data=Data}),
- ct_hooks:on_tc_skip(What, Data),
- if Case /= end_per_suite,
- Case /= end_per_group ->
+ data=Data1}),
+ ct_hooks:on_tc_skip(What, Data1),
+
+ if Func /= end_per_suite,
+ Func /= end_per_group ->
add_to_stats(auto_skipped);
true ->
ok
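
A condensed sketch (not part of the patch) of the result classification that the tag/tag_cth changes above implement: skips caused by a failing init_per_testcase, and explicit auto_skip results, are reported as auto_skipped, while fail results map to failed. classify/1 is a hypothetical stand-in that folds both functions into one:

    classify({skip, {failed, {_Suite, init_per_testcase, _Info}} = Reason}) ->
        {auto_skipped, Reason};    %% skip caused by failed init_per_testcase
    classify({skip, Reason}) ->
        {skipped, Reason};         %% ordinary user skip
    classify({auto_skip, Reason}) ->
        {auto_skipped, Reason};
    classify({fail, Reason}) ->
        {failed, {error, Reason}}.
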
diff --git a/lib/common_test/src/ct_logs.erl b/lib/common_test/src/ct_logs.erl
index 1a6e4d31a8..a7fb45a4e4 100644
--- a/lib/common_test/src/ct_logs.erl
+++ b/lib/common_test/src/ct_logs.erl
@@ -1249,7 +1249,8 @@ make_one_index_entry1(SuiteName, Link, Label, Success, Fail, UserSkip, AutoSkip,
integer_to_list(NotBuilt),"</a></td>\n"]
end,
FailStr =
- if Fail > 0 ->
+ if (Fail > 0) or (NotBuilt > 0) or
+ ((Success+Fail+UserSkip+AutoSkip) == 0) ->
["<font color=\"red\">",
integer_to_list(Fail),"</font>"];
true ->
@@ -1904,7 +1905,8 @@ runentry(Dir, undefined, _) ->
runentry(Dir, Totals={Node,Label,Logs,
{TotSucc,TotFail,UserSkip,AutoSkip,NotBuilt}}, Index) ->
TotFailStr =
- if TotFail > 0 ->
+ if (TotFail > 0) or (NotBuilt > 0) or
+ ((TotSucc+TotFail+UserSkip+AutoSkip) == 0) ->
["<font color=\"red\">",
integer_to_list(TotFail),"</font>"];
true ->
diff --git a/lib/common_test/src/ct_netconfc.erl b/lib/common_test/src/ct_netconfc.erl
index 7f10e1db09..64fe8b4bb0 100644
--- a/lib/common_test/src/ct_netconfc.erl
+++ b/lib/common_test/src/ct_netconfc.erl
@@ -1129,10 +1129,14 @@ handle_msg({Ref,timeout},
ct_gen_conn:return(Caller,{error,{hello_session_failed,timeout}}),
{stop,State#state{hello_status={error,timeout}}};
handle_msg({Ref,timeout},#state{pending=Pending} = State) ->
- {value,#pending{caller=Caller},Pending1} =
+ {value,#pending{op=Op,caller=Caller},Pending1} =
lists:keytake(Ref,#pending.ref,Pending),
ct_gen_conn:return(Caller,{error,timeout}),
- {noreply,State#state{pending=Pending1}}.
+ R = case Op of
+ close_session -> stop;
+ _ -> noreply
+ end,
+ {R,State#state{pending=Pending1}}.
%% @private
%% Called by ct_util_server to close registered connections before terminate.
diff --git a/lib/common_test/src/cth_log_redirect.erl b/lib/common_test/src/cth_log_redirect.erl
index 4ee7e48a67..8fed341600 100644
--- a/lib/common_test/src/cth_log_redirect.erl
+++ b/lib/common_test/src/cth_log_redirect.erl
@@ -34,13 +34,15 @@
%% Event handler Callbacks
-export([init/1,
handle_event/2, handle_call/2, handle_info/2,
- terminate/1]).
+ terminate/1, terminate/2, code_change/3]).
%% Other
-export([handle_remote_events/1]).
-include("ct.hrl").
+-behaviour(gen_event).
+
-record(eh_state, {log_func,
curr_suite,
curr_group,
@@ -184,10 +186,13 @@ handle_call({handle_remote_events,Bool}, State) ->
handle_call(_Query, _State) ->
{error, bad_query}.
-terminate(_State) ->
+terminate(_) ->
error_logger:delete_report_handler(?MODULE),
[].
+terminate(_Arg, _State) ->
+ ok.
+
tag_event(Event) ->
{calendar:local_time(), Event}.
@@ -236,3 +241,6 @@ format_header(#eh_state{curr_suite = Suite,
curr_func = TC}) ->
io_lib:format("System report during ~w:~w/1 in ~w",
[Suite,TC,Group]).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/lib/common_test/src/cth_surefire.erl b/lib/common_test/src/cth_surefire.erl
index 1a38b6584b..7ed2018bdf 100644
--- a/lib/common_test/src/cth_surefire.erl
+++ b/lib/common_test/src/cth_surefire.erl
@@ -138,6 +138,9 @@ on_tc_fail(_TC, Res, State) ->
{fail,lists:flatten(io_lib:format("~p",[Res]))} },
State#state{ test_cases = [NewTC | tl(TCs)]}.
+on_tc_skip({ConfigFunc,_GrName},{Type,_Reason} = Res, State0)
+ when Type == tc_auto_skip; Type == tc_user_skip ->
+ on_tc_skip(ConfigFunc, Res, State0);
on_tc_skip(Tc,{Type,_Reason} = Res, State0) when Type == tc_auto_skip ->
TcStr = atom_to_list(Tc),
State =
@@ -330,5 +333,7 @@ count_tcs([#testcase{result={fail,_}}|TCs],Ok,F,S) ->
count_tcs(TCs,Ok,F+1,S);
count_tcs([#testcase{result={skipped,_}}|TCs],Ok,F,S) ->
count_tcs(TCs,Ok,F,S+1);
+count_tcs([#testcase{result={auto_skipped,_}}|TCs],Ok,F,S) ->
+ count_tcs(TCs,Ok,F,S+1);
count_tcs([],Ok,F,S) ->
{Ok+F+S,F,S}.
diff --git a/lib/common_test/test/common_test.cover b/lib/common_test/test/common_test.cover
index 3aa49623e7..87d00c420f 100644
--- a/lib/common_test/test/common_test.cover
+++ b/lib/common_test/test/common_test.cover
@@ -4,7 +4,6 @@
test_server,
test_server_ctrl,
test_server_gl,
- test_server_h,
test_server_io,
test_server_node,
test_server_sup]}]}.
diff --git a/lib/common_test/test/ct_config_SUITE.erl b/lib/common_test/test/ct_config_SUITE.erl
index d92be9ec6e..284f836517 100644
--- a/lib/common_test/test/ct_config_SUITE.erl
+++ b/lib/common_test/test/ct_config_SUITE.erl
@@ -234,25 +234,25 @@ expected_events(config_static_SUITE)->
?sok(test_get_config_deep_nested,{3,0,{0,0}}),
?sok(test_default_suitewide,{4,0,{0,0}}),
?snok(test_config_name_already_in_use1,
- {skipped,{config_name_already_in_use,[x1]}},{4,0,{1,0}}),
- ?sok(test_default_tclocal,{5,0,{1,0}}),
+ {failed,{error,{config_name_already_in_use,[x1]}}},{4,1,{0,0}}),
+ ?sok(test_default_tclocal,{5,1,{0,0}}),
?snok(test_config_name_already_in_use2,
- {skipped,{config_name_already_in_use,[alias,x1]}},{5,0,{2,0}}),
- ?sok(test_alias_tclocal,{6,0,{2,0}}),
- ?sok(test_get_config_undefined,{7,0,{2,0}}),
- ?sok(test_require_subvals,{8,0,{2,0}}),
+ {failed,{error,{config_name_already_in_use,[alias,x1]}}},{5,2,{0,0}}),
+ ?sok(test_alias_tclocal,{6,2,{0,0}}),
+ ?sok(test_get_config_undefined,{7,2,{0,0}}),
+ ?sok(test_require_subvals,{8,2,{0,0}}),
?snok(test_require_subvals2,
- {skipped,{require_failed,
- {not_available,{gen_cfg,[a,b,c,d]}}}},{8,0,{2,1}}),
- ?sok(test_require_deep_config,{9,0,{2,1}}),
- ?sok(test_shadow_all,{10,0,{2,1}}),
- ?sok(test_element,{11,0,{2,1}}),
- ?sok(test_shadow_all_element,{12,0,{2,1}}),
- ?sok(test_internal_deep,{13,0,{2,1}}),
- ?sok(test_alias_tclocal_nested,{14,0,{2,1}}),
- ?sok(test_alias_tclocal_nested_backward_compat,{15,0,{2,1}}),
- ?sok(test_alias_tclocal_nested_backward_compat_subvals,{16,0,{2,1}}),
- ?sok(test_config_same_name_already_in_use,{17,0,{2,1}}),
+ {auto_skipped,{require_failed,
+ {not_available,{gen_cfg,[a,b,c,d]}}}},{8,2,{0,1}}),
+ ?sok(test_require_deep_config,{9,2,{0,1}}),
+ ?sok(test_shadow_all,{10,2,{0,1}}),
+ ?sok(test_element,{11,2,{0,1}}),
+ ?sok(test_shadow_all_element,{12,2,{0,1}}),
+ ?sok(test_internal_deep,{13,2,{0,1}}),
+ ?sok(test_alias_tclocal_nested,{14,2,{0,1}}),
+ ?sok(test_alias_tclocal_nested_backward_compat,{15,2,{0,1}}),
+ ?sok(test_alias_tclocal_nested_backward_compat_subvals,{16,2,{0,1}}),
+ ?sok(test_config_same_name_already_in_use,{17,2,{0,1}}),
{?eh,tc_start,{config_static_SUITE,end_per_suite}},
{?eh,tc_done,{config_static_SUITE,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
diff --git a/lib/common_test/test/ct_config_info_SUITE.erl b/lib/common_test/test/ct_config_info_SUITE.erl
index 10fe8286dd..8f2f0eb75f 100644
--- a/lib/common_test/test/ct_config_info_SUITE.erl
+++ b/lib/common_test/test/ct_config_info_SUITE.erl
@@ -127,7 +127,7 @@ test_events(config_info) ->
{failed,{timetrap_timeout,350}}}},
{?eh,tc_auto_skip,{config_info_1_SUITE,t11,
{failed,{config_info_1_SUITE,init_per_group,{timetrap_timeout,350}}}}},
- {?eh,tc_auto_skip,{config_info_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{config_info_1_SUITE,{end_per_group,g1},
{failed,{config_info_1_SUITE,init_per_group,
{timetrap_timeout,350}}}}}],
@@ -145,12 +145,12 @@ test_events(config_info) ->
{?eh,tc_auto_skip,{config_info_1_SUITE,t41,
{failed,{config_info_1_SUITE,init_per_group,
{timetrap_timeout,400}}}}},
- {?eh,tc_auto_skip,{config_info_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{config_info_1_SUITE,{end_per_group,g4},
{failed,{config_info_1_SUITE,init_per_group,
{timetrap_timeout,400}}}}}],
{?eh,tc_start,{config_info_1_SUITE,t31}},
{?eh,tc_done,{config_info_1_SUITE,t31,
- {skipped,{failed,{config_info_1_SUITE,init_per_testcase,
+ {auto_skipped,{failed,{config_info_1_SUITE,init_per_testcase,
{timetrap_timeout,250}}}}}},
{?eh,tc_start,{config_info_1_SUITE,t32}},
{?eh,tc_done,{config_info_1_SUITE,t32,
diff --git a/lib/common_test/test/ct_error_SUITE.erl b/lib/common_test/test/ct_error_SUITE.erl
index 3881ced17d..28f0494d20 100644
--- a/lib/common_test/test/ct_error_SUITE.erl
+++ b/lib/common_test/test/ct_error_SUITE.erl
@@ -465,7 +465,7 @@ test_events(cfg_error) ->
{'EXIT',{init_per_group_fails,g1}}}}}},
{?eh,test_stats,{4,0,{0,11}}},
{?eh,tc_auto_skip,
- {cfg_error_8_SUITE,end_per_group,
+ {cfg_error_8_SUITE,{end_per_group,g1},
{failed,{cfg_error_8_SUITE,init_per_group,
{'EXIT',{init_per_group_fails,g1}}}}}}],
@@ -476,7 +476,7 @@ test_events(cfg_error) ->
{failed,{cfg_error_8_SUITE,init_per_group,
{timetrap_timeout,2000}}}}},
{?eh,test_stats,{4,0,{0,12}}},
- {?eh,tc_auto_skip,{cfg_error_8_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{cfg_error_8_SUITE,{end_per_group,g2},
{failed,{cfg_error_8_SUITE,init_per_group,
{timetrap_timeout,2000}}}}}],
@@ -490,7 +490,7 @@ test_events(cfg_error) ->
{'EXIT',{{badmatch,42},'_'}}}}}},
{?eh,test_stats,{4,0,{0,13}}},
{?eh,tc_auto_skip,
- {cfg_error_8_SUITE,end_per_group,
+ {cfg_error_8_SUITE,{end_per_group,g3},
{failed,{cfg_error_8_SUITE,init_per_group,
{'EXIT',{{badmatch,42},'_'}}}}}}],
@@ -516,7 +516,7 @@ test_events(cfg_error) ->
{'EXIT',{sub_group_failed,g6}}}}}},
{?eh,test_stats,{6,0,{0,14}}},
{?eh,tc_auto_skip,
- {cfg_error_8_SUITE,end_per_group,
+ {cfg_error_8_SUITE,{end_per_group,g6},
{failed,{cfg_error_8_SUITE,init_per_group,
{'EXIT',{sub_group_failed,g6}}}}}}],
{?eh,tc_start,{cfg_error_8_SUITE,tc3}},
@@ -550,23 +550,23 @@ test_events(cfg_error) ->
{?eh,tc_done,{cfg_error_9_SUITE,init_per_suite,ok}},
{?eh,tc_start,{cfg_error_9_SUITE,tc1}},
{?eh,tc_done,{cfg_error_9_SUITE,tc1,
- {skipped,{failed,{cfg_error_9_SUITE,init_per_testcase,
- {tc1_should_be_skipped,'_'}}}}}},
+ {auto_skipped,{failed,{cfg_error_9_SUITE,init_per_testcase,
+ {tc1_should_be_skipped,'_'}}}}}},
{?eh,test_stats,{9,0,{0,15}}},
{?eh,tc_start,{cfg_error_9_SUITE,tc2}},
{?eh,tc_done,{cfg_error_9_SUITE,tc2,
- {skipped,{failed,{cfg_error_9_SUITE,init_per_testcase,
- {timetrap_timeout,2000}}}}}},
+ {auto_skipped,{failed,{cfg_error_9_SUITE,init_per_testcase,
+ {timetrap_timeout,2000}}}}}},
{?eh,test_stats,{9,0,{0,16}}},
{?eh,tc_start,{cfg_error_9_SUITE,tc3}},
{?eh,tc_done,{cfg_error_9_SUITE,tc3,
- {skipped,{failed,{cfg_error_9_SUITE,init_per_testcase,
- {{badmatch,undefined},'_'}}}}}},
+ {auto_skipped,{failed,{cfg_error_9_SUITE,init_per_testcase,
+ {{badmatch,undefined},'_'}}}}}},
{?eh,test_stats,{9,0,{0,17}}},
{?eh,tc_start,{cfg_error_9_SUITE,tc4}},
{?eh,tc_done,
{cfg_error_9_SUITE,tc4,
- {skipped,{failed,{cfg_error_9_SUITE,init_per_testcase,bad_return}}}}},
+ {auto_skipped,{failed,{cfg_error_9_SUITE,init_per_testcase,bad_return}}}}},
{?eh,test_stats,{9,0,{0,18}}},
{?eh,tc_start,{cfg_error_9_SUITE,tc5}},
{?eh,tc_done,
@@ -622,41 +622,41 @@ test_events(cfg_error) ->
{?eh,tc_done,{cfg_error_11_SUITE,init_per_suite,ok}},
{?eh,tc_start,{cfg_error_11_SUITE,tc1}},
{?eh,tc_done, {cfg_error_11_SUITE,tc1,
- {skipped,{config_name_already_in_use,[dummy_alias]}}}},
- {?eh,test_stats,{12,6,{1,19}}},
+ {failed,{error,{config_name_already_in_use,[dummy_alias]}}}}},
+ {?eh,test_stats,{12,7,{0,19}}},
{?eh,tc_start,{cfg_error_11_SUITE,tc2}},
{?eh,tc_done,{cfg_error_11_SUITE,tc2,ok}},
- {?eh,test_stats,{13,6,{1,19}}},
+ {?eh,test_stats,{13,7,{0,19}}},
{?eh,tc_start,{cfg_error_11_SUITE,end_per_suite}},
{?eh,tc_done,{cfg_error_11_SUITE,end_per_suite,ok}},
{?eh,tc_start,{cfg_error_12_SUITE,tc1}},
{?eh,tc_done,{ct_framework,init_tc,{framework_error,{timetrap,500}}}},
- {?eh,test_stats,{13,7,{1,19}}},
+ {?eh,test_stats,{13,8,{0,19}}},
{?eh,tc_start,{cfg_error_12_SUITE,tc2}},
{?eh,tc_done,{cfg_error_12_SUITE,tc2,{failed,
{cfg_error_12_SUITE,end_per_testcase,
{timetrap_timeout,500}}}}},
- {?eh,test_stats,{14,7,{1,19}}},
+ {?eh,test_stats,{14,8,{0,19}}},
{?eh,tc_start,{cfg_error_12_SUITE,tc3}},
{?eh,tc_done,{cfg_error_12_SUITE,tc3,ok}},
- {?eh,test_stats,{15,7,{1,19}}},
+ {?eh,test_stats,{15,8,{0,19}}},
{?eh,tc_start,{cfg_error_12_SUITE,tc4}},
{?eh,tc_done,{cfg_error_12_SUITE,tc4,{failed,
{cfg_error_12_SUITE,end_per_testcase,
{timetrap_timeout,500}}}}},
- {?eh,test_stats,{16,7,{1,19}}},
+ {?eh,test_stats,{16,8,{0,19}}},
{?eh,tc_start,{cfg_error_13_SUITE,init_per_suite}},
{?eh,tc_done,{cfg_error_13_SUITE,init_per_suite,ok}},
{?eh,tc_start,{cfg_error_13_SUITE,tc1}},
{?eh,tc_done,{cfg_error_13_SUITE,tc1,ok}},
- {?eh,test_stats,{17,7,{1,19}}},
+ {?eh,test_stats,{17,8,{0,19}}},
{?eh,tc_start,{cfg_error_13_SUITE,end_per_suite}},
{?eh,tc_done,{cfg_error_13_SUITE,end_per_suite,ok}},
{?eh,tc_start,{cfg_error_14_SUITE,init_per_suite}},
{?eh,tc_done,{cfg_error_14_SUITE,init_per_suite,ok}},
{?eh,tc_start,{cfg_error_14_SUITE,tc1}},
{?eh,tc_done,{cfg_error_14_SUITE,tc1,ok}},
- {?eh,test_stats,{18,7,{1,19}}},
+ {?eh,test_stats,{18,8,{0,19}}},
{?eh,tc_start,{cfg_error_14_SUITE,end_per_suite}},
{?eh,tc_done,{cfg_error_14_SUITE,end_per_suite,
{comment,
@@ -1102,7 +1102,7 @@ test_events(timetrap_fun_group) ->
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{0,11,{0,2}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{end_per_group,g4},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}}],
@@ -1117,7 +1117,7 @@ test_events(timetrap_fun_group) ->
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{0,11,{0,4}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{end_per_group,g5},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}}],
@@ -1132,7 +1132,7 @@ test_events(timetrap_fun_group) ->
{failed,{timetrap_8_SUITE,init_per_group,
{timetrap_timeout,'_'}}}}},
{?eh,test_stats,{0,11,{0,6}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{end_per_group,g6},
{failed,{timetrap_8_SUITE,init_per_group,
{timetrap_timeout,'_'}}}}}],
@@ -1285,7 +1285,7 @@ test_events(timetrap_fun_group) ->
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{4,26,{0,8}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{end_per_group,pg4},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}}]},
@@ -1301,7 +1301,7 @@ test_events(timetrap_fun_group) ->
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}},
{?eh,test_stats,{4,26,{0,10}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{end_per_group,pg5},
{failed,{timetrap_8_SUITE,init_per_group,
{user_timetrap_error,{kaboom,'_'}}}}}}]},
@@ -1317,7 +1317,7 @@ test_events(timetrap_fun_group) ->
{failed,{timetrap_8_SUITE,init_per_group,
{timetrap_timeout,'_'}}}}},
{?eh,test_stats,{4,26,{0,12}}},
- {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,{end_per_group,pg6},
{failed,{timetrap_8_SUITE,init_per_group,
{timetrap_timeout,'_'}}}}}]},
diff --git a/lib/common_test/test/ct_group_info_SUITE.erl b/lib/common_test/test/ct_group_info_SUITE.erl
index c56fa952e8..e7bc5baaa1 100644
--- a/lib/common_test/test/ct_group_info_SUITE.erl
+++ b/lib/common_test/test/ct_group_info_SUITE.erl
@@ -269,13 +269,19 @@ test_events(timetrap_all) ->
{?eh,tc_done,{group_timetrap_1_SUITE,{end_per_group,g10,[]},ok}}],
[{?eh,tc_start,{group_timetrap_1_SUITE,{init_per_group,g11,[]}}},
- {?eh,tc_done,{group_timetrap_1_SUITE,{init_per_group,g11,[]},ok}},
- {?eh,tc_done,{group_timetrap_1_SUITE,t111,{failed,{timetrap_timeout,1000}}}},
- {?eh,test_stats,{0,14,{0,0}}},
- {?eh,tc_start,{group_timetrap_1_SUITE,{end_per_group,g11,[]}}},
- {?eh,tc_done,{group_timetrap_1_SUITE,{end_per_group,g11,[]},ok}}],
-
+ {?eh,tc_done,{group_timetrap_1_SUITE,
+ {init_per_group,g11,[]},
+ {auto_skipped,{group0_failed,bad_return_value}}}},
+ {?eh,tc_auto_skip,
+ {group_timetrap_1_SUITE,t111,{group0_failed,bad_return_value}}},
+ {?eh,test_stats,{0,13,{0,1}}},
+ {?eh,tc_auto_skip,{group_timetrap_1_SUITE,
+ {end_per_group,g11},
+ {group0_failed,bad_return_value}}}],
+
+ {?eh,tc_start,{group_timetrap_1_SUITE,end_per_suite}},
{?eh,tc_done,{group_timetrap_1_SUITE,end_per_suite,ok}},
+
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
];
@@ -422,13 +428,15 @@ test_events(timetrap_all_no_ips) ->
{?eh,tc_done,{group_timetrap_2_SUITE,{end_per_group,g10,[]},ok}}],
[{?eh,tc_start,{group_timetrap_2_SUITE,{init_per_group,g11,[]}}},
- {?eh,tc_done,{group_timetrap_2_SUITE,{init_per_group,g11,[]},ok}},
- {?eh,tc_done,{group_timetrap_2_SUITE,t111,{failed,{timetrap_timeout,1000}}}},
- {?eh,test_stats,{0,14,{0,0}}},
- {?eh,tc_start,{group_timetrap_2_SUITE,{end_per_group,g11,[]}}},
- {?eh,tc_done,{group_timetrap_2_SUITE,{end_per_group,g11,[]},ok}}],
-
- {?eh,test_done,{'DEF','STOP_TIME'}},
+ {?eh,tc_done,{group_timetrap_2_SUITE,
+ {init_per_group,g11,[]},
+ {auto_skipped,{group0_failed,bad_return_value}}}},
+ {?eh,tc_auto_skip,{group_timetrap_2_SUITE,t111,
+ {group0_failed,bad_return_value}}},
+ {?eh,test_stats,{0,13,{0,1}}},
+ {?eh,tc_auto_skip,{group_timetrap_2_SUITE,
+ {end_per_group,g11},
+ {group0_failed,bad_return_value}}}],
{?eh,stop_logging,[]}
];
@@ -501,11 +509,13 @@ test_events(timetrap_all_no_ipg) ->
{?eh,tc_done,{ct_framework,{end_per_group,g10,[{suite,group_timetrap_3_SUITE}]},ok}}],
[{?eh,tc_start,{ct_framework,{init_per_group,g11,[{suite,group_timetrap_3_SUITE}]}}},
- {?eh,tc_done,{ct_framework,{init_per_group,g11,[{suite,group_timetrap_3_SUITE}]},ok}},
- {?eh,tc_done,{group_timetrap_3_SUITE,t111,{failed,{timetrap_timeout,1000}}}},
- {?eh,test_stats,{0,14,{0,0}}},
- {?eh,tc_start,{ct_framework,{end_per_group,g11,[{suite,group_timetrap_3_SUITE}]}}},
- {?eh,tc_done,{ct_framework,{end_per_group,g11,[{suite,group_timetrap_3_SUITE}]},ok}}],
+ {?eh,tc_done,{ct_framework,
+ {init_per_group,g11,[{suite,group_timetrap_3_SUITE}]},
+ {auto_skipped,{group0_failed,bad_return_value}}}},
+ {?eh,tc_auto_skip,{group_timetrap_3_SUITE,t111,{group0_failed,bad_return_value}}},
+ {?eh,test_stats,{0,13,{0,1}}},
+ {?eh,tc_auto_skip,{ct_framework,{end_per_group,g11},
+ {group0_failed,bad_return_value}}}],
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
@@ -539,11 +549,13 @@ test_events(require) ->
[{?eh,tc_start,{group_require_1_SUITE,{init_per_group,g4,[]}}},
{?eh,tc_done,{group_require_1_SUITE,{init_per_group,g4,[]},
- {skipped,{require_failed,{name_in_use,common2_alias,common2}}}}},
+ {auto_skipped,{require_failed,
+ {name_in_use,common2_alias,common2}}}}},
{?eh,tc_auto_skip,{group_require_1_SUITE,t41,
- {require_failed,{name_in_use,common2_alias,common2}}}},
+ {require_failed,
+ {name_in_use,common2_alias,common2}}}},
{?eh,test_stats,{4,0,{0,1}}},
- {?eh,tc_auto_skip,{group_require_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{group_require_1_SUITE,{end_per_group,g4},
{require_failed,{name_in_use,common2_alias,common2}}}}],
[{?eh,tc_start,{group_require_1_SUITE,{init_per_group,g5,[]}}},
@@ -566,16 +578,18 @@ test_events(require) ->
[{?eh,tc_start,{group_require_1_SUITE,{init_per_group,g8,[]}}},
{?eh,tc_done,{group_require_1_SUITE,
{init_per_group,g8,[]},
- {skipped,{require_failed,{not_available,non_existing}}}}},
+ {auto_skipped,{require_failed,
+ {not_available,non_existing}}}}},
{?eh,tc_auto_skip,{group_require_1_SUITE,t81,
{require_failed,{not_available,non_existing}}}},
{?eh,test_stats,{8,0,{0,2}}},
- {?eh,tc_auto_skip,{group_require_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{group_require_1_SUITE,{end_per_group,g8},
{require_failed,{not_available,non_existing}}}}],
[{?eh,tc_start,{group_require_1_SUITE,{init_per_group,g9,[]}}},
{?eh,tc_done,{group_require_1_SUITE,{init_per_group,g9,[]},ok}},
{?eh,tc_done,{group_require_1_SUITE,t91,
- {skipped,{require_failed,{not_available,non_existing}}}}},
+ {auto_skipped,{require_failed,
+ {not_available,non_existing}}}}},
{?eh,test_stats,{8,0,{0,3}}},
{?eh,tc_start,{group_require_1_SUITE,{end_per_group,g9,[]}}},
{?eh,tc_done,{group_require_1_SUITE,{end_per_group,g9,[]},ok}}],
@@ -587,12 +601,16 @@ test_events(require) ->
{?eh,tc_done,{group_require_1_SUITE,{end_per_group,g10,[]},ok}}],
[{?eh,tc_start,{group_require_1_SUITE,{init_per_group,g11,[]}}},
- {?eh,tc_done,{group_require_1_SUITE,{init_per_group,g11,[]},ok}},
- {?eh,tc_done,{group_require_1_SUITE,t111,ok}},
- {?eh,test_stats,{10,0,{0,3}}},
- {?eh,tc_start,{group_require_1_SUITE,{end_per_group,g11,[]}}},
- {?eh,tc_done,{group_require_1_SUITE,{end_per_group,g11,[]},ok}}],
-
+ {?eh,tc_done,{group_require_1_SUITE,
+ {init_per_group,g11,[]},
+ {auto_skipped,{group0_failed,bad_return_value}}}},
+ {?eh,tc_auto_skip,{group_require_1_SUITE,t111,
+ {group0_failed,bad_return_value}}},
+ {?eh,test_stats,{9,0,{0,4}}},
+ {?eh,tc_auto_skip,{group_require_1_SUITE,
+ {end_per_group,g11},
+ {group0_failed,bad_return_value}}}],
+
{?eh,tc_done,{group_require_1_SUITE,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
@@ -627,11 +645,11 @@ test_events(require_default) ->
[{?eh,tc_start,{group_require_1_SUITE,{init_per_group,g4,[]}}},
{?eh,tc_done,{group_require_1_SUITE,
{init_per_group,g4,[]},
- {skipped,{require_failed,{not_available,common3}}}}},
+ {auto_skipped,{require_failed,{not_available,common3}}}}},
{?eh,tc_auto_skip,{group_require_1_SUITE,t41,
{require_failed,{not_available,common3}}}},
{?eh,test_stats,{4,0,{0,1}}},
- {?eh,tc_auto_skip,{group_require_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{group_require_1_SUITE,{end_per_group,g4},
{require_failed,{not_available,common3}}}}],
[{?eh,tc_start,{group_require_1_SUITE,{init_per_group,g5,[]}}},
@@ -654,17 +672,19 @@ test_events(require_default) ->
[{?eh,tc_start,{group_require_1_SUITE,{init_per_group,g8,[]}}},
{?eh,tc_done,{group_require_1_SUITE,
{init_per_group,g8,[]},
- {skipped,{require_failed,{not_available,non_existing}}}}},
+ {auto_skipped,{require_failed,
+ {not_available,non_existing}}}}},
{?eh,tc_auto_skip,{group_require_1_SUITE,t81,
{require_failed,{not_available,non_existing}}}},
{?eh,test_stats,{8,0,{0,2}}},
- {?eh,tc_auto_skip,{group_require_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{group_require_1_SUITE,{end_per_group,g8},
{require_failed,{not_available,non_existing}}}}],
[{?eh,tc_start,{group_require_1_SUITE,{init_per_group,g9,[]}}},
{?eh,tc_done,{group_require_1_SUITE,{init_per_group,g9,[]},ok}},
{?eh,tc_done,{group_require_1_SUITE,t91,
- {skipped,{require_failed,{not_available,non_existing}}}}},
+ {auto_skipped,{require_failed,
+ {not_available,non_existing}}}}},
{?eh,test_stats,{8,0,{0,3}}},
{?eh,tc_start,{group_require_1_SUITE,{end_per_group,g9,[]}}},
{?eh,tc_done,{group_require_1_SUITE,{end_per_group,g9,[]},ok}}],
@@ -676,11 +696,15 @@ test_events(require_default) ->
{?eh,tc_done,{group_require_1_SUITE,{end_per_group,g10,[]},ok}}],
[{?eh,tc_start,{group_require_1_SUITE,{init_per_group,g11,[]}}},
- {?eh,tc_done,{group_require_1_SUITE,{init_per_group,g11,[]},ok}},
- {?eh,tc_done,{group_require_1_SUITE,t111,ok}},
- {?eh,test_stats,{10,0,{0,3}}},
- {?eh,tc_start,{group_require_1_SUITE,{end_per_group,g11,[]}}},
- {?eh,tc_done,{group_require_1_SUITE,{end_per_group,g11,[]},ok}}],
+ {?eh,tc_done,{group_require_1_SUITE,
+ {init_per_group,g11,[]},
+ {auto_skipped,{group0_failed,bad_return_value}}}},
+ {?eh,tc_auto_skip,{group_require_1_SUITE,t111,
+ {group0_failed,bad_return_value}}},
+ {?eh,test_stats,{9,0,{0,4}}},
+ {?eh,tc_auto_skip,{group_require_1_SUITE,
+ {end_per_group,g11},
+ {group0_failed,bad_return_value}}}],
{?eh,tc_done,{group_require_1_SUITE,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
@@ -714,11 +738,12 @@ test_events(require_no_ips) ->
[{?eh,tc_start,{group_require_2_SUITE,{init_per_group,g4,[]}}},
{?eh,tc_done,{group_require_2_SUITE,{init_per_group,g4,[]},
- {skipped,{require_failed,{name_in_use,common2_alias,common2}}}}},
+ {auto_skipped,{require_failed,
+ {name_in_use,common2_alias,common2}}}}},
{?eh,tc_auto_skip,{group_require_2_SUITE,t41,
{require_failed,{name_in_use,common2_alias,common2}}}},
{?eh,test_stats,{4,0,{0,1}}},
- {?eh,tc_auto_skip,{group_require_2_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{group_require_2_SUITE,{end_per_group,g4},
{require_failed,{name_in_use,common2_alias,common2}}}}],
[{?eh,tc_start,{group_require_2_SUITE,{init_per_group,g5,[]}}},
@@ -741,16 +766,18 @@ test_events(require_no_ips) ->
[{?eh,tc_start,{group_require_2_SUITE,{init_per_group,g8,[]}}},
{?eh,tc_done,{group_require_2_SUITE,
{init_per_group,g8,[]},
- {skipped,{require_failed,{not_available,non_existing}}}}},
+ {auto_skipped,{require_failed,
+ {not_available,non_existing}}}}},
{?eh,tc_auto_skip,{group_require_2_SUITE,t81,
{require_failed,{not_available,non_existing}}}},
{?eh,test_stats,{8,0,{0,2}}},
- {?eh,tc_auto_skip,{group_require_2_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{group_require_2_SUITE,{end_per_group,g8},
{require_failed,{not_available,non_existing}}}}],
[{?eh,tc_start,{group_require_2_SUITE,{init_per_group,g9,[]}}},
{?eh,tc_done,{group_require_2_SUITE,{init_per_group,g9,[]},ok}},
{?eh,tc_done,{group_require_2_SUITE,t91,
- {skipped,{require_failed,{not_available,non_existing}}}}},
+ {auto_skipped,{require_failed,
+ {not_available,non_existing}}}}},
{?eh,test_stats,{8,0,{0,3}}},
{?eh,tc_start,{group_require_2_SUITE,{end_per_group,g9,[]}}},
{?eh,tc_done,{group_require_2_SUITE,{end_per_group,g9,[]},ok}}],
@@ -760,13 +787,17 @@ test_events(require_no_ips) ->
{?eh,tc_done,{group_require_2_SUITE,t101,ok}},
{?eh,tc_start,{group_require_2_SUITE,{end_per_group,g10,[]}}},
{?eh,tc_done,{group_require_2_SUITE,{end_per_group,g10,[]},ok}}],
-
+
[{?eh,tc_start,{group_require_2_SUITE,{init_per_group,g11,[]}}},
- {?eh,tc_done,{group_require_2_SUITE,{init_per_group,g11,[]},ok}},
- {?eh,tc_done,{group_require_2_SUITE,t111,ok}},
- {?eh,test_stats,{10,0,{0,3}}},
- {?eh,tc_start,{group_require_2_SUITE,{end_per_group,g11,[]}}},
- {?eh,tc_done,{group_require_2_SUITE,{end_per_group,g11,[]},ok}}],
+ {?eh,tc_done,{group_require_2_SUITE,
+ {init_per_group,g11,[]},
+ {auto_skipped,{group0_failed,bad_return_value}}}},
+ {?eh,tc_auto_skip,{group_require_2_SUITE,t111,
+ {group0_failed,bad_return_value}}},
+ {?eh,test_stats,{9,0,{0,4}}},
+ {?eh,tc_auto_skip,{group_require_2_SUITE,
+ {end_per_group,g11},
+ {group0_failed,bad_return_value}}}],
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
@@ -799,11 +830,11 @@ test_events(require_no_ipg) ->
[{?eh,tc_start,{ct_framework,{init_per_group,g4,[{suite,group_require_3_SUITE}]}}},
{?eh,tc_done,{ct_framework,{init_per_group,g4,[{suite,group_require_3_SUITE}]},
- {skipped,{require_failed,{name_in_use,common2_alias,common2}}}}},
+ {auto_skipped,{require_failed,{name_in_use,common2_alias,common2}}}}},
{?eh,tc_auto_skip,{group_require_3_SUITE,t41,
{require_failed,{name_in_use,common2_alias,common2}}}},
{?eh,test_stats,{4,0,{0,1}}},
- {?eh,tc_auto_skip,{ct_framework,end_per_group,
+ {?eh,tc_auto_skip,{ct_framework,{end_per_group,g4},
{require_failed,{name_in_use,common2_alias,common2}}}}],
[{?eh,tc_start,{ct_framework,{init_per_group,g5,[{suite,group_require_3_SUITE}]}}},
@@ -825,16 +856,16 @@ test_events(require_no_ipg) ->
[{?eh,tc_start,{ct_framework,{init_per_group,g8,[{suite,group_require_3_SUITE}]}}},
{?eh,tc_done,{ct_framework,{init_per_group,g8,[{suite,group_require_3_SUITE}]},
- {skipped,{require_failed,{not_available,non_existing}}}}},
+ {auto_skipped,{require_failed,{not_available,non_existing}}}}},
{?eh,tc_auto_skip,{group_require_3_SUITE,t81,
{require_failed,{not_available,non_existing}}}},
{?eh,test_stats,{8,0,{0,2}}},
- {?eh,tc_auto_skip,{ct_framework,end_per_group,
+ {?eh,tc_auto_skip,{ct_framework,{end_per_group,g8},
{require_failed,{not_available,non_existing}}}}],
[{?eh,tc_start,{ct_framework,{init_per_group,g9,[{suite,group_require_3_SUITE}]}}},
{?eh,tc_done,{ct_framework,{init_per_group,g9,[{suite,group_require_3_SUITE}]},ok}},
{?eh,tc_done,{group_require_3_SUITE,t91,
- {skipped,{require_failed,{not_available,non_existing}}}}},
+ {auto_skipped,{require_failed,{not_available,non_existing}}}}},
{?eh,test_stats,{8,0,{0,3}}},
{?eh,tc_start,{ct_framework,{end_per_group,g9,[{suite,group_require_3_SUITE}]}}},
{?eh,tc_done,{ct_framework,{end_per_group,g9,[{suite,group_require_3_SUITE}]},ok}}],
@@ -844,13 +875,14 @@ test_events(require_no_ipg) ->
{?eh,tc_done,{group_require_3_SUITE,t101,ok}},
{?eh,tc_start,{ct_framework,{end_per_group,g10,[{suite,group_require_3_SUITE}]}}},
{?eh,tc_done,{ct_framework,{end_per_group,g10,[{suite,group_require_3_SUITE}]},ok}}],
-
+
[{?eh,tc_start,{ct_framework,{init_per_group,g11,[{suite,group_require_3_SUITE}]}}},
- {?eh,tc_done,{ct_framework,{init_per_group,g11,[{suite,group_require_3_SUITE}]},ok}},
- {?eh,tc_done,{group_require_3_SUITE,t111,ok}},
- {?eh,test_stats,{10,0,{0,3}}},
- {?eh,tc_start,{ct_framework,{end_per_group,g11,[{suite,group_require_3_SUITE}]}}},
- {?eh,tc_done,{ct_framework,{end_per_group,g11,[{suite,group_require_3_SUITE}]},ok}}],
+ {?eh,tc_done,{ct_framework,{init_per_group,g11,[{suite,group_require_3_SUITE}]},
+ {auto_skipped,{group0_failed,bad_return_value}}}},
+ {?eh,tc_auto_skip,{group_require_3_SUITE,t111,{group0_failed,bad_return_value}}},
+ {?eh,test_stats,{9,0,{0,4}}},
+ {?eh,tc_auto_skip,{ct_framework,{end_per_group,g11},
+ {group0_failed,bad_return_value}}}],
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
diff --git a/lib/common_test/test/ct_groups_test_2_SUITE.erl b/lib/common_test/test/ct_groups_test_2_SUITE.erl
index 1b2ad12e2f..8b0de98709 100644
--- a/lib/common_test/test/ct_groups_test_2_SUITE.erl
+++ b/lib/common_test/test/ct_groups_test_2_SUITE.erl
@@ -171,16 +171,20 @@ test_events(missing_conf) ->
{?eh,start_logging,{'DEF','RUNDIR'}},
{?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
{?eh,start_info,{1,1,2}},
- {?eh,tc_start,{ct_framework,{init_per_group,group1,[]}}},
- {?eh,tc_done,{ct_framework,{init_per_group,group1,[]},ok}},
+ {?eh,tc_start,{ct_framework,{init_per_group,group1,
+ [{suite,missing_conf_SUITE}]}}},
+ {?eh,tc_done,{ct_framework,{init_per_group,group1,
+ [{suite,missing_conf_SUITE}]},ok}},
{?eh,tc_start,{missing_conf_SUITE,tc1}},
{?eh,tc_done,{missing_conf_SUITE,tc1,ok}},
{?eh,test_stats,{1,0,{0,0}}},
{?eh,tc_start,{missing_conf_SUITE,tc2}},
{?eh,tc_done,{missing_conf_SUITE,tc2,ok}},
{?eh,test_stats,{2,0,{0,0}}},
- {?eh,tc_start,{ct_framework,{end_per_group,group1,[]}}},
- {?eh,tc_done,{ct_framework,{end_per_group,group1,[]},ok}},
+ {?eh,tc_start,{ct_framework,{end_per_group,group1,
+ [{suite,missing_conf_SUITE}]}}},
+ {?eh,tc_done,{ct_framework,{end_per_group,group1,
+ [{suite,missing_conf_SUITE}]},ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
];
diff --git a/lib/common_test/test/ct_hooks_SUITE.erl b/lib/common_test/test/ct_hooks_SUITE.erl
index 596bfe3ff0..083c87b49e 100644
--- a/lib/common_test/test/ct_hooks_SUITE.erl
+++ b/lib/common_test/test/ct_hooks_SUITE.erl
@@ -749,11 +749,10 @@ test_events(skip_pre_suite_cth) ->
{?eh,cth,{'_',on_tc_skip,
[init_per_suite,{tc_user_skip,{skipped,"Test skip"}},[]]}},
- {?eh,tc_auto_skip,{ct_cth_empty_SUITE,test_case,"Test skip"}},
- {?eh,cth,{'_',on_tc_skip,[test_case,{tc_auto_skip,"Test skip"},[]]}},
+ {?eh,tc_user_skip,{ct_cth_empty_SUITE,test_case,"Test skip"}},
+ {?eh,cth,{'_',on_tc_skip,[test_case,{tc_user_skip,"Test skip"},[]]}},
- {?eh,tc_auto_skip, {ct_cth_empty_SUITE, end_per_suite,"Test skip"}},
- {?eh,cth,{'_',on_tc_skip,[end_per_suite,{tc_auto_skip,"Test skip"},[]]}},
+ {?eh,tc_user_skip, {ct_cth_empty_SUITE, end_per_suite,"Test skip"}},
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,cth, {'_',terminate,[[]]}},
@@ -773,11 +772,10 @@ test_events(skip_post_suite_cth) ->
{?eh,cth,{'_',on_tc_skip,
[init_per_suite,{tc_user_skip,{skipped,"Test skip"}},[]]}},
- {?eh,tc_auto_skip,{ct_cth_empty_SUITE,test_case,"Test skip"}},
- {?eh,cth,{'_',on_tc_skip,[test_case,{tc_auto_skip,"Test skip"},[]]}},
+ {?eh,tc_user_skip,{ct_cth_empty_SUITE,test_case,"Test skip"}},
+ {?eh,cth,{'_',on_tc_skip,[test_case,{tc_user_skip,"Test skip"},[]]}},
- {?eh,tc_auto_skip, {ct_cth_empty_SUITE, end_per_suite,"Test skip"}},
- {?eh,cth,{'_',on_tc_skip,[end_per_suite,{tc_auto_skip,"Test skip"},[]]}},
+ {?eh,tc_user_skip, {ct_cth_empty_SUITE, end_per_suite,"Test skip"}},
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,cth,{'_',terminate,[[]]}},
@@ -1219,11 +1217,15 @@ test_events(cth_log) ->
{?eh,tc_start,{cth_log_SUITE,init_per_suite}},
{parallel,
- [{?eh,tc_start,{ct_framework,{init_per_group,g1,[parallel]}}},
- {?eh,tc_done,{ct_framework,{init_per_group,g1,[parallel]},ok}},
+ [{?eh,tc_start,{ct_framework,{init_per_group,g1,
+ [{suite,cth_log_SUITE},parallel]}}},
+ {?eh,tc_done,{ct_framework,{init_per_group,g1,
+ [{suite,cth_log_SUITE},parallel]},ok}},
{?eh,test_stats,{30,0,{0,0}}},
- {?eh,tc_start,{ct_framework,{end_per_group,g1,[parallel]}}},
- {?eh,tc_done,{ct_framework,{end_per_group,g1,[parallel]},ok}}]},
+ {?eh,tc_start,{ct_framework,{end_per_group,g1,
+ [{suite,cth_log_SUITE},parallel]}}},
+ {?eh,tc_done,{ct_framework,{end_per_group,g1,
+ [{suite,cth_log_SUITE},parallel]},ok}}]},
{?eh,tc_done,{cth_log_SUITE,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
diff --git a/lib/common_test/test/ct_netconfc_SUITE_data/netconfc1_SUITE.erl b/lib/common_test/test/ct_netconfc_SUITE_data/netconfc1_SUITE.erl
index 6ee7fdd6f6..2bcfeeec0c 100644
--- a/lib/common_test/test/ct_netconfc_SUITE_data/netconfc1_SUITE.erl
+++ b/lib/common_test/test/ct_netconfc_SUITE_data/netconfc1_SUITE.erl
@@ -84,7 +84,9 @@ all() ->
no_host,
no_port,
invalid_opt,
+ timeout_close_session,
get,
+ timeout_get,
get_xpath,
get_config,
get_config_xpath,
@@ -143,11 +145,10 @@ init_per_suite(Config) ->
end.
end_per_suite(Config) ->
- PrivDir = ?config(priv_dir, Config),
?NS:stop(?config(server,Config)),
ssh:stop(),
crypto:stop(),
- remove_id_keys(PrivDir),
+ remove_id_keys(Config),
Config.
hello(Config) ->
@@ -344,6 +345,15 @@ invalid_opt(Config) ->
{error,{invalid_option,{some_other_opt,true}}} = ct_netconfc:open(Opts2),
ok.
+timeout_close_session(Config) ->
+ DataDir = ?config(data_dir,Config),
+ {ok,Client} = open_success(DataDir),
+ ?NS:expect('close-session'),
+ true = erlang:is_process_alive(Client),
+ {error,timeout} = ct_netconfc:close_session(Client,1000),
+ false = erlang:is_process_alive(Client),
+ ok.
+
get(Config) ->
DataDir = ?config(data_dir,Config),
{ok,Client} = open_success(DataDir),
@@ -354,6 +364,15 @@ get(Config) ->
?ok = ct_netconfc:close_session(Client),
ok.
+timeout_get(Config) ->
+ DataDir = ?config(data_dir,Config),
+ {ok,Client} = open_success(DataDir),
+ ?NS:expect('get'),
+ {error,timeout} = ct_netconfc:get(Client,{server,[{xmlns,"myns"}],[]},1000),
+ ?NS:expect_do_reply('close-session',close,ok),
+ ?ok = ct_netconfc:close_session(Client),
+ ok.
+
get_xpath(Config) ->
DataDir = ?config(data_dir,Config),
{ok,Client} = open_success(DataDir),
@@ -698,7 +717,7 @@ timeout_receive_chunked_data(Config) ->
?ok = ct_netconfc:close_session(Client),
ok.
-%% Same as receive_chunked_data, but timeout waiting for last part.
+%% Same as receive_chunked_data, but close while waiting for last part.
close_while_waiting_for_chunked_data(Config) ->
DataDir = ?config(data_dir,Config),
{ok,Client} = open_success(DataDir),
diff --git a/lib/common_test/test/ct_netconfc_SUITE_data/ns.erl b/lib/common_test/test/ct_netconfc_SUITE_data/ns.erl
index 09217f60a3..fb0734d48e 100644
--- a/lib/common_test/test/ct_netconfc_SUITE_data/ns.erl
+++ b/lib/common_test/test/ct_netconfc_SUITE_data/ns.erl
@@ -98,8 +98,9 @@ start(Dir) ->
%% Stop the netconf server
stop(Pid) ->
- Pid ! {stop,self()},
- receive stopped -> ok end.
+ Ref = erlang:monitor(process,Pid),
+ Pid ! stop,
+ receive {'DOWN',Ref,process,Pid,_} -> ok end.
%% Set the session id for the hello message.
%% If this is not called prior to starting the session, no hello
@@ -177,9 +178,9 @@ init_server(Dir) ->
loop(Daemon) ->
receive
- {stop,From} ->
+ stop ->
ssh:stop_daemon(Daemon),
- From ! stopped;
+ ok;
{table_trans,Fun,Args,From} ->
%% Simple transaction mechanism for ets table
R = apply(Fun,Args),
diff --git a/lib/common_test/test/ct_pre_post_test_io_SUITE.erl b/lib/common_test/test/ct_pre_post_test_io_SUITE.erl
index 84341a0b99..5de1ecc2bd 100644
--- a/lib/common_test/test/ct_pre_post_test_io_SUITE.erl
+++ b/lib/common_test/test/ct_pre_post_test_io_SUITE.erl
@@ -89,28 +89,31 @@ pre_post_io(Config) ->
%%!--------------------------------------------------------------------
spawn(fun() ->
+ ct:pal("CONTROLLER: Started!", []),
%% --- test run 1 ---
- ct:sleep(3000),
- ct_test_support:ct_rpc({cth_log_redirect,
- handle_remote_events,
- [true]}, Config),
- ct:sleep(2000),
- io:format(user, "Starting test run!~n", []),
- ct_test_support:ct_rpc({cth_ctrl,proceed,[]}, Config),
- ct:sleep(6000),
- io:format(user, "Finishing off!~n", []),
- ct_test_support:ct_rpc({cth_ctrl,proceed,[]}, Config),
+ timer:sleep(3000),
+ ct:pal("CONTROLLER: Handle remote events = true", []),
+ ok = ct_test_support:ct_rpc({cth_log_redirect,
+ handle_remote_events,
+ [true]}, Config),
+ timer:sleep(2000),
+ ct:pal("CONTROLLER: Proceeding with test run #1!", []),
+ ok = ct_test_support:ct_rpc({cth_ctrl,proceed,[]}, Config),
+ timer:sleep(6000),
+ ct:pal("CONTROLLER: Proceeding with shutdown #1!", []),
+ ok = ct_test_support:ct_rpc({cth_ctrl,proceed,[]}, Config),
%% --- test run 2 ---
- ct:sleep(3000),
- ct_test_support:ct_rpc({cth_log_redirect,
- handle_remote_events,
- [true]}, Config),
- ct:sleep(2000),
- io:format(user, "Starting test run!~n", []),
- ct_test_support:ct_rpc({cth_ctrl,proceed,[]}, Config),
- ct:sleep(6000),
- io:format(user, "Finishing off!~n", []),
- ct_test_support:ct_rpc({cth_ctrl,proceed,[]}, Config)
+ timer:sleep(3000),
+ ct:pal("CONTROLLER: Handle remote events = true", []),
+ ok = ct_test_support:ct_rpc({cth_log_redirect,
+ handle_remote_events,
+ [true]}, Config),
+ timer:sleep(2000),
+ ct:pal("CONTROLLER: Proceeding with test run #2!", []),
+ ok = ct_test_support:ct_rpc({cth_ctrl,proceed,[]}, Config),
+ timer:sleep(6000),
+ ct:pal("CONTROLLER: Proceeding with shutdown #2!", []),
+ ok = ct_test_support:ct_rpc({cth_ctrl,proceed,[]}, Config)
end),
ct_test_support:run(Opts, Config),
Events = ct_test_support:get_events(ERPid, Config),
diff --git a/lib/common_test/test/ct_pre_post_test_io_SUITE_data/cth_ctrl.erl b/lib/common_test/test/ct_pre_post_test_io_SUITE_data/cth_ctrl.erl
index c8c08a5735..2ba991fc61 100644
--- a/lib/common_test/test/ct_pre_post_test_io_SUITE_data/cth_ctrl.erl
+++ b/lib/common_test/test/ct_pre_post_test_io_SUITE_data/cth_ctrl.erl
@@ -28,7 +28,8 @@
%%%===================================================================
proceed() ->
- ?MODULE ! proceed.
+ ?MODULE ! proceed,
+ ok.
%%--------------------------------------------------------------------
%% Hook functions
diff --git a/lib/common_test/test/ct_repeat_1_SUITE.erl b/lib/common_test/test/ct_repeat_1_SUITE.erl
index 090002d0c2..98eaa28763 100644
--- a/lib/common_test/test/ct_repeat_1_SUITE.erl
+++ b/lib/common_test/test/ct_repeat_1_SUITE.erl
@@ -229,7 +229,7 @@ test_events(repeat_cs_and_grs) ->
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}},
{?eh,test_stats,{3,1,{0,1}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}}],
{?eh,test_stats,{4,1,{0,1}}},
@@ -251,7 +251,7 @@ test_events(repeat_cs_and_grs) ->
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}},
{?eh,test_stats,{7,2,{0,2}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}}],
{?eh,test_stats,{8,2,{0,2}}},
@@ -311,7 +311,7 @@ test_events(repeat_seq) ->
{init_per_group,gr_fail_init,[]},
{failed,{error,fails_on_purpose}}}},
{?eh,test_stats,{7,2,{0,5}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_fail_init},
{failed,
{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}}],
@@ -410,7 +410,7 @@ test_events(repeat_gr_until_any_ok) ->
{init_per_group,gr_fail_init,[]},
{failed,{error,fails_on_purpose}}}},
{?eh,test_stats,{1,1,{0,1}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}}],
{?eh,test_stats,{1,2,{0,1}}},
@@ -452,7 +452,7 @@ test_events(repeat_gr_until_any_ok) ->
{init_per_group,gr_fail_init,[]},
{failed,{error,fails_on_purpose}}}},
{?eh,test_stats,{5,6,{0,3}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}}],
{?eh,tc_done,{repeat_1_SUITE,
@@ -469,7 +469,7 @@ test_events(repeat_gr_until_any_ok) ->
{init_per_group,gr_fail_init,[]},
{failed,{error,fails_on_purpose}}}},
{?eh,test_stats,{7,7,{0,4}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}}],
{?eh,tc_done,{repeat_1_SUITE,
@@ -770,7 +770,7 @@ test_events(repeat_gr_until_any_fail) ->
{'EXIT',failing_this_time}}}}},
{?eh,test_stats,{14,0,{0,1}}},
{?eh,tc_auto_skip,
- {repeat_1_SUITE,end_per_group,
+ {repeat_1_SUITE,{end_per_group,gr_ok_then_fail_init},
{failed,
{repeat_1_SUITE,init_per_group,
{'EXIT',failing_this_time}}}}}],
@@ -967,7 +967,7 @@ test_events(repeat_gr_until_all_ok) ->
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',failing_this_time}}}}},
{?eh,test_stats,{7,1,{0,1}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_fail_init_then_ok},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',failing_this_time}}}}}],
{?eh,tc_done,{repeat_1_SUITE,tc_ok_1,ok}},
@@ -1083,7 +1083,7 @@ test_events(repeat_gr_until_all_fail) ->
{init_per_group,gr_fail_init,[]},
{failed,{error,fails_on_purpose}}}},
{?eh,test_stats,{0,1,{0,1}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}}],
{?eh,tc_done,{repeat_1_SUITE,tc_ok_then_fail_1,ok}},
@@ -1102,7 +1102,7 @@ test_events(repeat_gr_until_all_fail) ->
{init_per_group,gr_fail_init,[]},
{failed,{error,fails_on_purpose}}}},
{?eh,test_stats,{2,2,{0,2}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',fails_on_purpose}}}}}],
{?eh,tc_done,{repeat_1_SUITE,tc_ok_then_fail_1,
@@ -1134,7 +1134,7 @@ test_events(repeat_gr_until_all_fail) ->
{init_per_group,gr_ok_then_fail_init,[]},
{failed,{error,failing_this_time}}}},
{?eh,test_stats,{4,4,{0,3}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_ok_then_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',failing_this_time}}}}}],
{?eh,tc_start,{repeat_1_SUITE,tc_fail_1}},
@@ -1293,7 +1293,7 @@ test_events(repeat_seq_until_any_fail) ->
{init_per_group,gr_ok_then_fail_init,[]},
{failed,{error,failing_this_time}}}},
{?eh,test_stats,{24,1,{0,5}}},
- {?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_ok_then_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',failing_this_time}}}}}],
{?eh,tc_auto_skip,{repeat_1_SUITE,tc_ok_1,
@@ -1516,7 +1516,7 @@ test_events(repeat_shuffled_seq_until_any_fail) ->
{init_per_group,repeat_shuffled_seq_until_any_fail_5,
[{shuffle,'_'},{repeat_until_any_fail,2},
sequence]},ok}},
- [{?eh,tc_auto_skip,{repeat_1_SUITE,end_per_group,
+ [{?eh,tc_auto_skip,{repeat_1_SUITE,{end_per_group,gr_ok_then_fail_init},
{failed,{repeat_1_SUITE,init_per_group,
{'EXIT',failing_this_time}}}}}],
{?eh,tc_start,{repeat_1_SUITE,
diff --git a/lib/common_test/test/ct_repeat_testrun_SUITE.erl b/lib/common_test/test/ct_repeat_testrun_SUITE.erl
index 35d67a10f2..bb2aba2c5a 100644
--- a/lib/common_test/test/ct_repeat_testrun_SUITE.erl
+++ b/lib/common_test/test/ct_repeat_testrun_SUITE.erl
@@ -36,7 +36,7 @@
-define(eh, ct_test_support_eh).
-define(skip_reason, "Repeated test stopped by force_stop option").
--define(skipped, {skipped, ?skip_reason}).
+-define(skipped, {auto_skipped, ?skip_reason}).
%% Timers used in this test.
@@ -78,7 +78,7 @@ init_per_suite(Config0) ->
{1,0,{0,0}} = ct_test_support:run(ct,run_test,[Opts2],Config),
%% Time the shortest testcase to use for offset
- {T0,{1,0,{0,0}}} = timer:tc(ct_test_support,run,[ct,run_test,[Opts1],Config]),
+ {_T0,{1,0,{0,0}}} = timer:tc(ct_test_support,run,[ct,run_test,[Opts1],Config]),
%% -2 is to ensure we hit inside the target test case and not after
% T = round(T0/1000000)-2,
@@ -222,7 +222,7 @@ until_force_stop_skip_rest_group(Config) when is_list(Config) ->
fun() ->
[_] = ct_test_support:run_ct_run_test(
Opts++[{until,until_str(?t3,1,Config)}],Config),
- 0 = ct_test_support:run_ct_script_start(
+ 1 = ct_test_support:run_ct_script_start(
Opts++[{until,until_str(?t3,1,Config)}],Config)
end,
ok = execute(ExecuteFun,
@@ -341,19 +341,18 @@ skip_first_tc1(Suite) ->
{?eh,tc_done,{Suite,tc1,ok}},
{?eh,test_stats,{'_',0,{0,0}}},
{?eh,tc_done,{Suite,tc2,?skipped}},
- {?eh,test_stats,{'_',0,{1,0}}},
+ {?eh,test_stats,{'_',0,{0,1}}},
{?eh,tc_done,{Suite,{init_per_group,g,[]},?skipped}},
{?eh,tc_auto_skip,{Suite,tc1,?skip_reason}},
- {?eh,test_stats,{'_',0,{1,1}}},
+ {?eh,test_stats,{'_',0,{0,2}}},
{?eh,tc_auto_skip,{Suite,tc2,?skip_reason}},
- {?eh,test_stats,{'_',0,{1,2}}},
- {?eh,tc_auto_skip,{Suite,end_per_group,?skip_reason}},
+ {?eh,test_stats,{'_',0,{0,3}}},
+ {?eh,tc_auto_skip,{Suite,{end_per_group,g},?skip_reason}},
{?eh,tc_done,{Suite,tc2,?skipped}},
- {?eh,test_stats,{'_',0,{2,2}}},
+ {?eh,test_stats,{'_',0,{0,4}}},
{?eh,tc_start,{Suite,end_per_suite}},
{?eh,tc_done,{Suite,end_per_suite,ok}}].
-
skip_tc1_in_group(Suite) ->
[{?eh,tc_start,{Suite,init_per_suite}},
{?eh,tc_done,{Suite,init_per_suite,ok}},
@@ -369,10 +368,10 @@ skip_tc1_in_group(Suite) ->
{?eh,tc_done,{Suite,tc1,ok}},
{?eh,test_stats,{'_',0,{0,0}}},
{?eh,tc_done,{Suite,tc2,?skipped}},
- {?eh,test_stats,{'_',0,{1,0}}},
+ {?eh,test_stats,{'_',0,{0,1}}},
{?eh,tc_start,{Suite,{end_per_group,g,[]}}},
{?eh,tc_done,{Suite,{end_per_group,g,[]},ok}}],
{?eh,tc_done,{Suite,tc2,?skipped}},
- {?eh,test_stats,{'_',0,{2,0}}},
+ {?eh,test_stats,{'_',0,{0,2}}},
{?eh,tc_start,{Suite,end_per_suite}},
{?eh,tc_done,{Suite,end_per_suite,ok}}].
diff --git a/lib/common_test/test/ct_sequence_1_SUITE.erl b/lib/common_test/test/ct_sequence_1_SUITE.erl
index 5facf90656..8c87236838 100644
--- a/lib/common_test/test/ct_sequence_1_SUITE.erl
+++ b/lib/common_test/test/ct_sequence_1_SUITE.erl
@@ -212,7 +212,7 @@ test_events(subgroup_init_fail) ->
{failed,{subgroups_1_SUITE,init_per_group,
{'EXIT',init_per_group_fails_on_purpose}}}}},
{?eh,test_stats,{0,0,{0,1}}},
- {?eh,tc_auto_skip,{subgroups_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{subgroups_1_SUITE,{end_per_group,fail_init},
{failed,{subgroups_1_SUITE,init_per_group,
{'EXIT',init_per_group_fails_on_purpose}}}}}],
{?eh,tc_auto_skip,{subgroups_1_SUITE,ok_tc,{group_result,fail_init,failed}}},
@@ -294,7 +294,7 @@ test_events(case_after_subgroup_fail_init) ->
{subgroups_1_SUITE,init_per_group,
{'EXIT',init_per_group_fails_on_purpose}}}}},
{?eh,test_stats,{0,0,{0,1}}},
- {?eh,tc_auto_skip,{subgroups_1_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{subgroups_1_SUITE,{end_per_group,fail_init},
{failed,
{subgroups_1_SUITE,init_per_group,
{'EXIT',init_per_group_fails_on_purpose}}}}}],
diff --git a/lib/common_test/test/ct_skip_SUITE.erl b/lib/common_test/test/ct_skip_SUITE.erl
index b8be55f43a..b0a6c839a2 100644
--- a/lib/common_test/test/ct_skip_SUITE.erl
+++ b/lib/common_test/test/ct_skip_SUITE.erl
@@ -59,7 +59,7 @@ end_per_testcase(TestCase, Config) ->
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
- [auto_skip, user_skip].
+ [auto_skip, user_skip, testspec_skip].
groups() ->
[].
@@ -91,7 +91,8 @@ auto_skip(Config) when is_list(Config) ->
Join(DataDir, "auto_skip_8_SUITE"),
Join(DataDir, "auto_skip_9_SUITE"),
Join(DataDir, "auto_skip_10_SUITE"),
- Join(DataDir, "auto_skip_11_SUITE")
+ Join(DataDir, "auto_skip_11_SUITE"),
+ Join(DataDir, "auto_skip_12_SUITE")
],
{Opts,ERPid} = setup({suite,Suites}, Config),
@@ -116,7 +117,8 @@ user_skip(Config) when is_list(Config) ->
Join(DataDir, "user_skip_2_SUITE"),
Join(DataDir, "user_skip_3_SUITE"),
Join(DataDir, "user_skip_4_SUITE"),
- Join(DataDir, "user_skip_5_SUITE")],
+ Join(DataDir, "user_skip_5_SUITE"),
+ Join(DataDir, "user_skip_6_SUITE")],
{Opts,ERPid} = setup({suite,Suites}, Config),
ok = ct_test_support:run(Opts, Config),
@@ -131,8 +133,56 @@ user_skip(Config) when is_list(Config) ->
ok = ct_test_support:verify_events(TestEvents, Events, Config).
%%%-----------------------------------------------------------------
+%%%
+testspec_skip(Config) when is_list(Config) ->
+ TestDir = filename:join(?config(data_dir, Config),
+ filename:join("skip", "test")),
+ TestSpec1 = [{suites, TestDir, user_skip_7_SUITE},
+ {skip_cases, TestDir, user_skip_7_SUITE, [tc1,tc3], "SKIPPED"}],
+
+ TestSpec2 = [{suites, TestDir, user_skip_7_SUITE},
+ {skip_groups, TestDir, user_skip_7_SUITE, ptop1, "SKIPPED"}],
+
+ TestSpec3 = [{suites, TestDir, user_skip_7_SUITE},
+ {skip_groups, TestDir, user_skip_7_SUITE, psub1, "SKIPPED"}],
+
+ TestSpec4 = [{suites, TestDir, user_skip_7_SUITE},
+ {skip_suites, TestDir, user_skip_7_SUITE, "SKIPPED"}],
+
+ TestSpec5 = [{groups, TestDir, user_skip_6_SUITE, ptop1},
+ {skip_groups, TestDir, user_skip_6_SUITE, psub1, "SKIPPED"}],
+
+ {Opts,ERPid} = setup_testspec([{ts1,TestSpec1},
+ {ts2,TestSpec2},
+ {ts3,TestSpec3},
+ {ts4,TestSpec4},
+ {ts5,TestSpec5}], Config),
+
+ ok = ct_test_support:run(Opts, Config),
+
+ Events = ct_test_support:get_events(ERPid, Config),
+
+ ct_test_support:log_events(testspec_skip,
+ reformat(Events, ?eh),
+ ?config(priv_dir, Config),
+ Opts),
+
+ TestEvents = events_to_check(testspec_skip),
+ ok = ct_test_support:verify_events(TestEvents, Events, Config).
+
+%%%-----------------------------------------------------------------
%%% HELP FUNCTIONS
%%%-----------------------------------------------------------------
+setup_testspec(TestSpecs, Config) ->
+ SpecFiles =
+ [begin SpecFile = filename:join(?config(priv_dir, Config),
+ atom_to_list(SpecName)++".spec"),
+ {ok,Dev} = file:open(SpecFile, [write]),
+ [io:format(Dev, "~p.~n", [Term]) || Term <- TestSpec],
+ file:close(Dev),
+ SpecFile
+ end || {SpecName,TestSpec} <- TestSpecs],
+ setup({spec,SpecFiles}, Config).
setup(Test, Config) ->
Opts0 = ct_test_support:get_opts(Config),
@@ -163,7 +213,7 @@ test_events(auto_skip) ->
[
{?eh,start_logging,{'DEF','RUNDIR'}},
{?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
- {?eh,start_info,{11,11,34}},
+ {?eh,start_info,{12,12,43}},
{?eh,tc_start,{auto_skip_1_SUITE,init_per_suite}},
{?eh,tc_done,
@@ -196,8 +246,8 @@ test_events(auto_skip) ->
{?eh,tc_start,{auto_skip_3_SUITE,tc1}},
{?eh,tc_done,
{auto_skip_3_SUITE,tc1,
- {skipped,{failed,{auto_skip_3_SUITE,init_per_testcase,
- {{init_per_testcase,tc1,failed},'_'}}}}}},
+ {auto_skipped,{failed,{auto_skip_3_SUITE,init_per_testcase,
+ {'init_per_testcase for tc1 failed','_'}}}}}},
{?eh,test_stats,{0,0,{0,4}}},
{?eh,tc_start,{auto_skip_3_SUITE,tc2}},
{?eh,tc_done,{auto_skip_3_SUITE,tc2,ok}},
@@ -209,7 +259,7 @@ test_events(auto_skip) ->
{?eh,tc_done,{auto_skip_4_SUITE,init_per_suite,ok}},
{?eh,tc_start,{auto_skip_4_SUITE,tc1}},
{?eh,tc_done,{auto_skip_4_SUITE,tc1,
- {skipped,{failed,{auto_skip_4_SUITE,init_per_testcase,
+ {auto_skipped,{failed,{auto_skip_4_SUITE,init_per_testcase,
{timetrap_timeout,1000}}}}}},
{?eh,test_stats,{1,0,{0,5}}},
{?eh,tc_start,{auto_skip_4_SUITE,tc2}},
@@ -232,8 +282,9 @@ test_events(auto_skip) ->
{'EXIT',{group,g1,failed}}}}}},
{?eh,test_stats,{2,0,{0,7}}},
{?eh,tc_auto_skip,
- {auto_skip_5_SUITE,end_per_group,{failed,{auto_skip_5_SUITE,init_per_group,
- {'EXIT',{group,g1,failed}}}}}}],
+ {auto_skip_5_SUITE,{end_per_group,g1},
+ {failed,{auto_skip_5_SUITE,init_per_group,
+ {'EXIT',{group,g1,failed}}}}}}],
{?eh,tc_start,{auto_skip_5_SUITE,end_per_suite}},
{?eh,tc_done,{auto_skip_5_SUITE,end_per_suite,ok}},
@@ -260,8 +311,9 @@ test_events(auto_skip) ->
{'EXIT',{group,g1,failed}}}}}},
{?eh,test_stats,{2,0,{0,11}}},
{?eh,tc_auto_skip,
- {auto_skip_6_SUITE,end_per_group,{failed,{auto_skip_6_SUITE,init_per_group,
- {'EXIT',{group,g1,failed}}}}}}],
+ {auto_skip_6_SUITE,{end_per_group,g1},
+ {failed,{auto_skip_6_SUITE,init_per_group,
+ {'EXIT',{group,g1,failed}}}}}}],
[{?eh,tc_start,{auto_skip_6_SUITE,{init_per_group,g3,[]}}},
{?eh,tc_done,{auto_skip_6_SUITE,{init_per_group,g3,[]},ok}},
@@ -280,8 +332,9 @@ test_events(auto_skip) ->
{'EXIT',{group,g4,failed}}}}}},
{?eh,test_stats,{3,0,{0,13}}},
{?eh,tc_auto_skip,
- {auto_skip_6_SUITE,end_per_group,{failed,{auto_skip_6_SUITE,init_per_group,
- {'EXIT',{group,g4,failed}}}}}}],
+ {auto_skip_6_SUITE,{end_per_group,g4},
+ {failed,{auto_skip_6_SUITE,init_per_group,
+ {'EXIT',{group,g4,failed}}}}}}],
{?eh,tc_start,{auto_skip_6_SUITE,tc2}},
{?eh,tc_done,{auto_skip_6_SUITE,tc2,ok}},
{?eh,test_stats,{4,0,{0,13}}},
@@ -320,7 +373,7 @@ test_events(auto_skip) ->
{?eh,test_stats,{5,0,{0,17}}},
{?eh,tc_start,{auto_skip_9_SUITE,tc2}},
{?eh,tc_done,{auto_skip_9_SUITE,tc2,
- {skipped,{failed,{auto_skip_9_SUITE,init_per_testcase,bad_return}}}}},
+ {auto_skipped,{failed,{auto_skip_9_SUITE,init_per_testcase,bad_return}}}}},
{?eh,test_stats,{5,0,{0,18}}},
[{?eh,tc_start,{auto_skip_9_SUITE,{init_per_group,g1,[]}}},
@@ -343,7 +396,7 @@ test_events(auto_skip) ->
{?eh,tc_done,{auto_skip_9_SUITE,{init_per_group,g3,[]},ok}},
{?eh,tc_start,{auto_skip_9_SUITE,tc5}},
{?eh,tc_done,{auto_skip_9_SUITE,tc5,
- {skipped,{failed,{auto_skip_9_SUITE,init_per_testcase,bad_return}}}}},
+ {auto_skipped,{failed,{auto_skip_9_SUITE,init_per_testcase,bad_return}}}}},
{?eh,test_stats,{7,0,{0,19}}},
{?eh,tc_start,{auto_skip_9_SUITE,{end_per_group,g3,[]}}},
{?eh,tc_done,{auto_skip_9_SUITE,{end_per_group,g3,[]},ok}}],
@@ -363,7 +416,7 @@ test_events(auto_skip) ->
{?eh,tc_start,{auto_skip_9_SUITE,tc8}},
{?eh,tc_done,
{auto_skip_9_SUITE,tc8,
- {skipped,{failed,{auto_skip_9_SUITE,init_per_testcase,
+ {auto_skipped,{failed,{auto_skip_9_SUITE,init_per_testcase,
{{badmatch,undefined},'_'}}}}}},
{?eh,tc_start,
{auto_skip_9_SUITE,{end_per_group,g5,[parallel]}}},
@@ -383,26 +436,26 @@ test_events(auto_skip) ->
{?eh,tc_start,{auto_skip_10_SUITE,init_per_suite}},
{?eh,tc_done,{auto_skip_10_SUITE,init_per_suite,
- {skipped,
- {require_failed_in_suite0,
- {not_available,undefined_config_variable}}}}},
- {?eh,tc_auto_skip,
- {auto_skip_10_SUITE,tc1,
- {require_failed_in_suite0,{not_available,undefined_config_variable}}}},
+ {auto_skipped,{require_failed_in_suite0,
+ {not_available,undefined_config_variable}}}}},
+ {?eh,tc_auto_skip,{auto_skip_10_SUITE,tc1,
+ {require_failed_in_suite0,
+ {not_available,undefined_config_variable}}}},
{?eh,test_stats,{9,0,{0,21}}},
- {?eh,tc_auto_skip,
- {auto_skip_10_SUITE,tc2,
- {require_failed_in_suite0,{not_available,undefined_config_variable}}}},
+ {?eh,tc_auto_skip,{auto_skip_10_SUITE,tc2,
+ {require_failed_in_suite0,
+ {not_available,undefined_config_variable}}}},
{?eh,test_stats,{9,0,{0,22}}},
- {?eh,tc_auto_skip,
- {auto_skip_10_SUITE,end_per_suite,
- {require_failed_in_suite0,{not_available,undefined_config_variable}}}},
+ {?eh,tc_auto_skip,{auto_skip_10_SUITE,end_per_suite,
+ {require_failed_in_suite0,
+ {not_available,undefined_config_variable}}}},
{?eh,tc_start,{auto_skip_11_SUITE,init_per_suite}},
{?eh,tc_done,{auto_skip_11_SUITE,init_per_suite,ok}},
{?eh,tc_start,{auto_skip_11_SUITE,tc1}},
{?eh,tc_done,{auto_skip_11_SUITE,tc1,
- {skipped,{require_failed,{not_available,undefined_config_variable}}}}},
+ {auto_skipped,{require_failed,
+ {not_available,undefined_config_variable}}}}},
{?eh,test_stats,{9,0,{0,23}}},
{?eh,tc_start,{auto_skip_11_SUITE,tc2}},
{?eh,tc_done,{auto_skip_11_SUITE,tc2,ok}},
@@ -421,7 +474,8 @@ test_events(auto_skip) ->
{?eh,tc_start,{auto_skip_11_SUITE,tc3}},
{?eh,tc_done,
{auto_skip_11_SUITE,tc3,
- {skipped,{require_failed,{not_available,undefined_config_variable}}}}},
+ {auto_skipped,{require_failed,
+ {not_available,undefined_config_variable}}}}},
{?eh,test_stats,{10,0,{0,24}}},
{?eh,tc_start,
{auto_skip_11_SUITE,{end_per_group,g2,[parallel]}}},
@@ -434,6 +488,75 @@ test_events(auto_skip) ->
{?eh,tc_start,{auto_skip_11_SUITE,end_per_suite}},
{?eh,tc_done,{auto_skip_11_SUITE,end_per_suite,ok}},
+
+ {?eh,tc_start,{auto_skip_12_SUITE,init_per_suite}},
+ {?eh,tc_done,{auto_skip_12_SUITE,init_per_suite,ok}},
+
+ [{?eh,tc_start,{ct_framework,{init_per_group,g1,
+ [{suite,auto_skip_12_SUITE}]}}},
+ {?eh,tc_done,{ct_framework,{init_per_group,g1,
+ [{suite,auto_skip_12_SUITE}]},
+ {auto_skipped,
+ {require_failed,{not_available,unknown_variable_g1}}}}},
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc1,
+ {require_failed,{not_available,unknown_variable_g1}}}},
+ {?eh,test_stats,{10,0,{0,25}}},
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc2,
+ {require_failed,{not_available,unknown_variable_g1}}}},
+ {?eh,test_stats,{10,0,{0,26}}},
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc3,
+ {require_failed,{not_available,unknown_variable_g1}}}},
+ {?eh,test_stats,{10,0,{0,27}}},
+ {?eh,tc_auto_skip,{ct_framework,{end_per_group,g1},
+ {require_failed,{not_available,unknown_variable_g1}}}}],
+
+ [{?eh,tc_start,{ct_framework,{init_per_group,g1,
+ [{suite,auto_skip_12_SUITE}]}}},
+ {?eh,tc_done,{ct_framework,{init_per_group,g1,
+ [{suite,auto_skip_12_SUITE}]},
+ {auto_skipped,
+ {require_failed,{not_available,unknown_variable_g1}}}}},
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc1,
+ {require_failed,{not_available,unknown_variable_g1}}}},
+ {?eh,test_stats,{10,0,{0,28}}},
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc2,
+ {require_failed,{not_available,unknown_variable_g1}}}},
+ {?eh,test_stats,{10,0,{0,29}}},
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc3,
+ {require_failed,{not_available,unknown_variable_g1}}}},
+ {?eh,test_stats,{10,0,{0,30}}},
+ {?eh,tc_auto_skip,{ct_framework,{end_per_group,g1},
+ {require_failed,{not_available,unknown_variable_g1}}}}],
+
+ [{?eh,tc_start,{ct_framework,{init_per_group,g3,
+ [{suite,auto_skip_12_SUITE}]}}},
+ {?eh,tc_done,{ct_framework,{init_per_group,g3,
+ [{suite,auto_skip_12_SUITE}]},ok}},
+ {?eh,tc_start,{auto_skip_12_SUITE,tc1}},
+ {?eh,tc_done,{auto_skip_12_SUITE,tc1,ok}},
+ {?eh,test_stats,{11,0,{0,30}}},
+ {?eh,tc_start,{auto_skip_12_SUITE,tc2}},
+ {?eh,tc_done,{auto_skip_12_SUITE,tc2,ok}},
+ {?eh,test_stats,{12,0,{0,30}}},
+ [{?eh,tc_start,{ct_framework,{init_per_group,g4,
+ [{suite,auto_skip_12_SUITE}]}}},
+ {?eh,tc_done,{ct_framework,{init_per_group,g4,
+ [{suite,auto_skip_12_SUITE}]},
+ {auto_skipped,
+ {require_failed,{not_available,unknown_variable_g4}}}}},
+ {?eh,tc_auto_skip,{auto_skip_12_SUITE,tc3,
+ {require_failed,{not_available,unknown_variable_g4}}}},
+ {?eh,test_stats,{12,0,{0,31}}},
+ {?eh,tc_auto_skip,{ct_framework,{end_per_group,g4},
+ {require_failed,{not_available,unknown_variable_g4}}}}],
+
+ {?eh,tc_start,{ct_framework,{end_per_group,g3,
+ [{suite,auto_skip_12_SUITE}]}}},
+ {?eh,tc_done,{ct_framework,{end_per_group,g3,
+ [{suite,auto_skip_12_SUITE}]},ok}}],
+
+ {?eh,tc_start,{auto_skip_12_SUITE,end_per_suite}},
+ {?eh,tc_done,{auto_skip_12_SUITE,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
@@ -442,46 +565,46 @@ test_events(auto_skip) ->
test_events(user_skip) ->
[{?eh,start_logging,{'DEF','RUNDIR'}},
{?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
- {?eh,start_info,{5,5,27}},
+ {?eh,start_info,{6,6,35}},
{?eh,tc_start,{user_skip_1_SUITE,init_per_suite}},
{?eh,tc_done,
{user_skip_1_SUITE,init_per_suite,{skipped,"Whole suite skipped"}}},
- {?eh,tc_auto_skip,
+ {?eh,tc_user_skip,
{user_skip_1_SUITE,tc1,"Whole suite skipped"}},
- {?eh,test_stats,{0,0,{0,1}}},
- {?eh,tc_auto_skip,
+ {?eh,test_stats,{0,0,{1,0}}},
+ {?eh,tc_user_skip,
{user_skip_1_SUITE,tc2,"Whole suite skipped"}},
- {?eh,test_stats,{0,0,{0,2}}},
- {?eh,tc_auto_skip,
+ {?eh,test_stats,{0,0,{2,0}}},
+ {?eh,tc_user_skip,
{user_skip_1_SUITE,tc3,"Whole suite skipped"}},
- {?eh,test_stats,{0,0,{0,3}}},
- {?eh,tc_auto_skip,
+ {?eh,test_stats,{0,0,{3,0}}},
+ {?eh,tc_user_skip,
{user_skip_1_SUITE,tc4,"Whole suite skipped"}},
- {?eh,test_stats,{0,0,{0,4}}},
- {?eh,tc_auto_skip,
+ {?eh,test_stats,{0,0,{4,0}}},
+ {?eh,tc_user_skip,
{user_skip_1_SUITE,end_per_suite,"Whole suite skipped"}},
{?eh,tc_start,{user_skip_2_SUITE,init_per_suite}},
{?eh,tc_done,{user_skip_2_SUITE,init_per_suite,ok}},
{?eh,tc_start,{user_skip_2_SUITE,tc1}},
{?eh,tc_done,{user_skip_2_SUITE,tc1,{skipped,{tc1,skipped}}}},
- {?eh,test_stats,{0,0,{1,4}}},
+ {?eh,test_stats,{0,0,{5,0}}},
[{?eh,tc_start,{user_skip_2_SUITE,{init_per_group,g1,[]}}},
{?eh,tc_done,{user_skip_2_SUITE,{init_per_group,g1,[]},ok}},
{?eh,tc_start,{user_skip_2_SUITE,tc2}},
{?eh,tc_done,{user_skip_2_SUITE,tc2,ok}},
- {?eh,test_stats,{1,0,{1,4}}},
+ {?eh,test_stats,{1,0,{5,0}}},
{?eh,tc_start,{user_skip_2_SUITE,tc3}},
{?eh,tc_done,{user_skip_2_SUITE,tc3,{skipped,{tc3,skipped}}}},
- {?eh,test_stats,{1,0,{2,4}}},
+ {?eh,test_stats,{1,0,{6,0}}},
{?eh,tc_start,{user_skip_2_SUITE,{end_per_group,g1,[]}}},
{?eh,tc_done,{user_skip_2_SUITE,{end_per_group,g1,[]},ok}}],
{?eh,tc_start,{user_skip_2_SUITE,tc4}},
{?eh,tc_done,{user_skip_2_SUITE,tc4,ok}},
- {?eh,test_stats,{2,0,{2,4}}},
+ {?eh,test_stats,{2,0,{6,0}}},
{?eh,tc_start,{user_skip_2_SUITE,end_per_suite}},
{?eh,tc_done,{user_skip_2_SUITE,end_per_suite,ok}},
@@ -489,16 +612,16 @@ test_events(user_skip) ->
{?eh,tc_done,{user_skip_3_SUITE,init_per_suite,ok}},
{?eh,tc_start,{user_skip_3_SUITE,tc1}},
{?eh,tc_done,{user_skip_3_SUITE,tc1,{skipped,"Test case skipped"}}},
- {?eh,test_stats,{2,0,{3,4}}},
+ {?eh,test_stats,{2,0,{7,0}}},
[{?eh,tc_start,{user_skip_3_SUITE,{init_per_group,g1,[]}}},
{?eh,tc_done,{user_skip_3_SUITE,{init_per_group,g1,[]},ok}},
{?eh,tc_start,{user_skip_3_SUITE,tc2}},
{?eh,tc_done,{user_skip_3_SUITE,tc2,ok}},
- {?eh,test_stats,{3,0,{3,4}}},
+ {?eh,test_stats,{3,0,{7,0}}},
{?eh,tc_start,{user_skip_3_SUITE,tc3}},
{?eh,tc_done,{user_skip_3_SUITE,tc3,{skipped,"Test case skipped"}}},
- {?eh,test_stats,{3,0,{4,4}}},
+ {?eh,test_stats,{3,0,{8,0}}},
{?eh,tc_start,{user_skip_3_SUITE,{end_per_group,g1,[]}}},
{?eh,tc_done,{user_skip_3_SUITE,{end_per_group,g1,[]},ok}}],
@@ -506,7 +629,7 @@ test_events(user_skip) ->
{?eh,tc_done,{user_skip_3_SUITE,tc4,
{skipped,{proc_info,{{current_function,{user_skip_3_SUITE,tc4,1}},
{initial_call,{erlang,apply,2}}}}}}},
- {?eh,test_stats,{3,0,{5,4}}},
+ {?eh,test_stats,{3,0,{9,0}}},
{?eh,tc_start,{user_skip_3_SUITE,end_per_suite}},
{?eh,tc_done,{user_skip_3_SUITE,end_per_suite,ok}},
@@ -515,63 +638,219 @@ test_events(user_skip) ->
[{?eh,tc_start,{user_skip_4_SUITE,{init_per_group,g1,[]}}},
{?eh,tc_done,{user_skip_4_SUITE,{init_per_group,g1,[]},{skipped,"Group skipped"}}},
- {?eh,tc_auto_skip,{user_skip_4_SUITE,tc1,"Group skipped"}},
- {?eh,test_stats,{3,0,{5,5}}},
- {?eh,tc_auto_skip,{user_skip_4_SUITE,tc2,"Group skipped"}},
- {?eh,test_stats,{3,0,{5,6}}},
- {?eh,tc_auto_skip,{user_skip_4_SUITE,end_per_group,"Group skipped"}}],
+ {?eh,tc_user_skip,{user_skip_4_SUITE,tc1,"Group skipped"}},
+ {?eh,test_stats,{3,0,{10,0}}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,tc2,"Group skipped"}},
+ {?eh,test_stats,{3,0,{11,0}}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,{end_per_group,g1},"Group skipped"}}],
[{?eh,tc_start,{user_skip_4_SUITE,{init_per_group,g2,[]}}},
{?eh,tc_done,{user_skip_4_SUITE,{init_per_group,g2,[]},ok}},
{?eh,tc_start,{user_skip_4_SUITE,tc3}},
{?eh,tc_done,{user_skip_4_SUITE,tc3,ok}},
- {?eh,test_stats,{4,0,{5,6}}},
+ {?eh,test_stats,{4,0,{11,0}}},
{?eh,tc_start,{user_skip_4_SUITE,tc4}},
{?eh,tc_done,{user_skip_4_SUITE,tc4,ok}},
- {?eh,test_stats,{5,0,{5,6}}},
+ {?eh,test_stats,{5,0,{11,0}}},
{?eh,tc_start,{user_skip_4_SUITE,{end_per_group,g2,[]}}},
{?eh,tc_done,{user_skip_4_SUITE,{end_per_group,g2,[]},ok}}],
[{?eh,tc_start,{user_skip_4_SUITE,{init_per_group,g3,[]}}},
{?eh,tc_done,{user_skip_4_SUITE,{init_per_group,g3,[]},{skipped,"Group skipped"}}},
- {?eh,tc_auto_skip,{user_skip_4_SUITE,tc5,"Group skipped"}},
- {?eh,tc_auto_skip,{user_skip_4_SUITE,tc6,"Group skipped"}},
- {?eh,tc_auto_skip,{user_skip_4_SUITE,tc7,"Group skipped"}},
- {?eh,tc_auto_skip,{user_skip_4_SUITE,tc8,"Group skipped"}},
- {?eh,test_stats,{5,0,{5,10}}},
- {?eh,tc_auto_skip,{user_skip_4_SUITE,end_per_group,"Group skipped"}}],
+ {?eh,tc_user_skip,{user_skip_4_SUITE,tc5,"Group skipped"}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,tc6,"Group skipped"}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,tc7,"Group skipped"}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,tc8,"Group skipped"}},
+ {?eh,test_stats,{5,0,{15,0}}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,{end_per_group,g3},"Group skipped"}}],
[{?eh,tc_start,{user_skip_4_SUITE,{init_per_group,g5,[]}}},
{?eh,tc_done,{user_skip_4_SUITE,{init_per_group,g5,[]},ok}},
{?eh,tc_start,{user_skip_4_SUITE,tc9}},
{?eh,tc_done,{user_skip_4_SUITE,tc9,ok}},
- {?eh,test_stats,{6,0,{5,10}}},
+ {?eh,test_stats,{6,0,{15,0}}},
[{?eh,tc_start,{user_skip_4_SUITE,{init_per_group,g6,[]}}},
{?eh,tc_done,{user_skip_4_SUITE,{init_per_group,g6,[]},{skipped,"Group skipped"}}},
- {?eh,tc_auto_skip,{user_skip_4_SUITE,tc10,"Group skipped"}},
- {?eh,test_stats,{6,0,{5,11}}},
- {?eh,tc_auto_skip,{user_skip_4_SUITE,tc11,"Group skipped"}},
- {?eh,test_stats,{6,0,{5,12}}},
- {?eh,tc_auto_skip,{user_skip_4_SUITE,end_per_group,"Group skipped"}}],
+ {?eh,tc_user_skip,{user_skip_4_SUITE,tc10,"Group skipped"}},
+ {?eh,test_stats,{6,0,{16,0}}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,tc11,"Group skipped"}},
+ {?eh,test_stats,{6,0,{17,0}}},
+ {?eh,tc_user_skip,{user_skip_4_SUITE,{end_per_group,g6},"Group skipped"}}],
{?eh,tc_start,{user_skip_4_SUITE,{end_per_group,g5,[]}}},
{?eh,tc_done,{user_skip_4_SUITE,{end_per_group,g5,[]},ok}}],
{?eh,tc_start,{user_skip_4_SUITE,end_per_suite}},
{?eh,tc_done,{user_skip_4_SUITE,end_per_suite,ok}},
- {ct_test_support_eh,tc_start,{user_skip_5_SUITE,init_per_suite}},
+ {?eh,tc_start,{user_skip_5_SUITE,init_per_suite}},
{?eh,tc_done,{user_skip_5_SUITE,init_per_suite,
{skipped,{bad,'Whole suite skipped'}}}},
- {?eh,tc_auto_skip,{user_skip_5_SUITE,tc1,{bad,'Whole suite skipped'}}},
- {?eh,test_stats,{6,0,{5,13}}},
- {?eh,tc_auto_skip,{user_skip_5_SUITE,tc2,{bad,'Whole suite skipped'}}},
- {?eh,test_stats,{6,0,{5,14}}},
- {?eh,tc_auto_skip,{user_skip_5_SUITE,tc3,{bad,'Whole suite skipped'}}},
- {?eh,test_stats,{6,0,{5,15}}},
- {?eh,tc_auto_skip,{user_skip_5_SUITE,tc4,{bad,'Whole suite skipped'}}},
- {?eh,test_stats,{6,0,{5,16}}},
- {?eh,tc_auto_skip,{user_skip_5_SUITE,end_per_suite,{bad,'Whole suite skipped'}}},
+ {?eh,tc_user_skip,{user_skip_5_SUITE,tc1,{bad,'Whole suite skipped'}}},
+ {?eh,test_stats,{6,0,{18,0}}},
+ {?eh,tc_user_skip,{user_skip_5_SUITE,tc2,{bad,'Whole suite skipped'}}},
+ {?eh,test_stats,{6,0,{19,0}}},
+ {?eh,tc_user_skip,{user_skip_5_SUITE,tc3,{bad,'Whole suite skipped'}}},
+ {?eh,test_stats,{6,0,{20,0}}},
+ {?eh,tc_user_skip,{user_skip_5_SUITE,tc4,{bad,'Whole suite skipped'}}},
+ {?eh,test_stats,{6,0,{21,0}}},
+ {?eh,tc_user_skip,{user_skip_5_SUITE,end_per_suite,{bad,'Whole suite skipped'}}},
+
+ {parallel,
+ [{?eh,tc_start,{user_skip_6_SUITE,{init_per_group,ptop1,[parallel]}}},
+ {?eh,tc_done,{user_skip_6_SUITE,
+ {init_per_group,ptop1,[parallel]},
+ {skipped,"Top group skipped"}}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,tc1,"Top group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,tc3,"Top group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,tc4,"Top group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,tc2,"Top group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{end_per_group,ptop1},
+ "Top group skipped"}}]},
+
+ {parallel,
+ [{?eh,tc_start,{user_skip_6_SUITE,{init_per_group,ptop2,[parallel]}}},
+ {?eh,tc_done,{user_skip_6_SUITE,{init_per_group,ptop2,[parallel]},ok}},
+ {?eh,tc_start,{user_skip_6_SUITE,tc1}},
+ {?eh,tc_done,{user_skip_6_SUITE,tc1,ok}},
+
+ {parallel,
+ [{?eh,tc_start,{user_skip_6_SUITE,{init_per_group,psub2,[parallel]}}},
+ {?eh,tc_done,{user_skip_6_SUITE,
+ {init_per_group,psub2,[parallel]},
+ {skipped,"Sub group skipped"}}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,tc3,"Sub group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,tc4,"Sub group skipped"}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,{end_per_group,psub2},
+ "Sub group skipped"}}]},
+
+ {?eh,tc_start,{user_skip_6_SUITE,tc2}},
+ {?eh,tc_done,{user_skip_6_SUITE,tc2,ok}},
+ {?eh,test_stats,{8,0,{27,0}}},
+ {?eh,tc_start,{user_skip_6_SUITE,{end_per_group,ptop2,[parallel]}}},
+ {?eh,tc_done,{user_skip_6_SUITE,{end_per_group,ptop2,[parallel]},ok}}]},
+
+ {?eh,test_done,{'DEF','STOP_TIME'}},
+ {?eh,stop_logging,[]}
+ ];
+
+test_events(testspec_skip) ->
+ [
+ {?eh,start_logging,{'DEF','RUNDIR'}},
+ {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
+ {?eh,start_info,{1,1,4}},
+ {?eh,tc_start,{ct_framework,init_per_suite}},
+ {?eh,tc_done,{ct_framework,init_per_suite,ok}},
+ {parallel,
+ [{?eh,tc_start,
+ {user_skip_7_SUITE,{init_per_group,ptop1,[parallel]}}},
+ {?eh,tc_done,
+ {user_skip_7_SUITE,{init_per_group,ptop1,[parallel]},ok}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,tc1,"SKIPPED"}},
+ {?eh,test_stats,{0,0,{1,0}}},
+ {parallel,
+ [{?eh,tc_start,
+ {user_skip_7_SUITE,{init_per_group,psub1,[parallel]}}},
+ {?eh,tc_done,
+ {user_skip_7_SUITE,{init_per_group,psub1,[parallel]},ok}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,tc3,"SKIPPED"}},
+ {?eh,tc_start,{user_skip_7_SUITE,tc4}},
+ {?eh,tc_done,{user_skip_7_SUITE,tc4,ok}},
+ {?eh,test_stats,{1,0,{2,0}}},
+ {?eh,tc_start,
+ {user_skip_7_SUITE,{end_per_group,psub1,[parallel]}}},
+ {?eh,tc_done,
+ {user_skip_7_SUITE,{end_per_group,psub1,[parallel]},ok}}]},
+ {?eh,tc_start,{user_skip_7_SUITE,tc2}},
+ {?eh,tc_done,{user_skip_7_SUITE,tc2,ok}},
+ {?eh,test_stats,{2,0,{2,0}}},
+ {?eh,tc_start,
+ {user_skip_7_SUITE,{end_per_group,ptop1,[parallel]}}},
+ {?eh,tc_done,
+ {user_skip_7_SUITE,{end_per_group,ptop1,[parallel]},ok}}]},
+ {?eh,tc_start,{ct_framework,end_per_suite}},
+ {?eh,tc_done,{ct_framework,end_per_suite,ok}},
+ {?eh,test_done,{'DEF','STOP_TIME'}},
+ {?eh,stop_logging,[]},
+
+ {?eh,start_logging,{'DEF','RUNDIR'}},
+ {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
+ {?eh,start_info,{1,1,4}},
+ {?eh,tc_start,{ct_framework,init_per_suite}},
+ {?eh,tc_done,{ct_framework,init_per_suite,ok}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,{init_per_group,ptop1},"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,tc1,"SKIPPED"}},
+ {?eh,test_stats,{0,0,{1,0}}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,tc3,"SKIPPED"}},
+ {?eh,test_stats,{0,0,{2,0}}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,tc4,"SKIPPED"}},
+ {?eh,test_stats,{0,0,{3,0}}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,tc2,"SKIPPED"}},
+ {?eh,test_stats,{0,0,{4,0}}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,{end_per_group,ptop1},"SKIPPED"}},
+ {?eh,tc_start,{ct_framework,end_per_suite}},
+ {?eh,tc_done,{ct_framework,end_per_suite,ok}},
+ {?eh,test_done,{'DEF','STOP_TIME'}},
+ {?eh,stop_logging,[]},
+ {?eh,start_logging,{'DEF','RUNDIR'}},
+ {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
+ {?eh,start_info,{1,1,4}},
+ {?eh,tc_start,{ct_framework,init_per_suite}},
+ {?eh,tc_done,{ct_framework,init_per_suite,ok}},
+ {parallel,
+ [{?eh,tc_start,
+ {user_skip_7_SUITE,{init_per_group,ptop1,[parallel]}}},
+ {?eh,tc_done,
+ {user_skip_7_SUITE,{init_per_group,ptop1,[parallel]},ok}},
+ {?eh,tc_user_skip,
+ {user_skip_7_SUITE,{init_per_group,psub1},"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,tc3,"SKIPPED"}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,tc4,"SKIPPED"}},
+ {?eh,test_stats,{0,0,{2,0}}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,{end_per_group,psub1},"SKIPPED"}},
+ {?eh,tc_start,{user_skip_7_SUITE,tc1}},
+ {?eh,tc_done,{user_skip_7_SUITE,tc1,ok}},
+ {?eh,tc_start,{user_skip_7_SUITE,tc2}},
+ {?eh,tc_done,{user_skip_7_SUITE,tc2,ok}},
+ {?eh,test_stats,{2,0,{2,0}}},
+ {?eh,tc_start,{user_skip_7_SUITE,{end_per_group,ptop1,[parallel]}}},
+ {?eh,tc_done,{user_skip_7_SUITE,{end_per_group,ptop1,[parallel]},ok}}]},
+ {?eh,tc_start,{ct_framework,end_per_suite}},
+ {?eh,tc_done,{ct_framework,end_per_suite,ok}},
+ {?eh,test_done,{'DEF','STOP_TIME'}},
+ {?eh,stop_logging,[]},
+
+ {?eh,start_logging,{'DEF','RUNDIR'}},
+ {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
+ {?eh,start_info,{1,1,0}},
+ {?eh,tc_user_skip,{user_skip_7_SUITE,all,"SKIPPED"}},
+ {?eh,test_done,{'DEF','STOP_TIME'}},
+ {?eh,stop_logging,[]},
+
+ {?eh,start_logging,{'DEF','RUNDIR'}},
+ {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
+ {?eh,start_info,{1,1,4}},
+ {?eh,tc_start,{ct_framework,init_per_suite}},
+ {?eh,tc_done,{ct_framework,init_per_suite,ok}},
+ {parallel,
+ [{?eh,tc_start,{user_skip_6_SUITE,{init_per_group,ptop1,[parallel]}}},
+ {?eh,tc_done,{user_skip_6_SUITE,
+ {init_per_group,ptop1,[parallel]},
+ {skipped,"Top group skipped"}}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,tc1,"Top group skipped"}},
+ {?eh,test_stats,{0,0,{1,0}}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,tc3,"SKIPPED"}},
+ {?eh,test_stats,{0,0,{2,0}}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,tc4,"SKIPPED"}},
+ {?eh,test_stats,{0,0,{3,0}}},
+ {?eh,tc_user_skip,{user_skip_6_SUITE,tc2,"Top group skipped"}},
+ {?eh,test_stats,{0,0,{4,0}}},
+ {?eh,tc_user_skip,
+ {user_skip_6_SUITE,{end_per_group,ptop1},"Top group skipped"}}]},
+ {?eh,tc_start,{ct_framework,end_per_suite}},
+ {?eh,tc_done,{ct_framework,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
].
+
+
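The skip terms exercised by the new testspec_skip case above are ordinary Common Test test specification terms. A minimal sketch of a spec file combining them, assuming a hypothetical test directory "/tmp/my_tests" (the suite, case and group names are taken from the patch; the path and file name are made up):

    %% my.spec -- illustrative only
    {suites, "/tmp/my_tests", user_skip_7_SUITE}.
    {skip_cases, "/tmp/my_tests", user_skip_7_SUITE, [tc1,tc3], "SKIPPED"}.
    {skip_groups, "/tmp/my_tests", user_skip_7_SUITE, ptop1, "SKIPPED"}.

    %% Run from the Erlang shell:
    %% ct:run_test([{spec, "my.spec"}]).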
diff --git a/lib/common_test/test/ct_skip_SUITE_data/skip/test/auto_skip_12_SUITE.erl b/lib/common_test/test/ct_skip_SUITE_data/skip/test/auto_skip_12_SUITE.erl
new file mode 100644
index 0000000000..a168afd386
--- /dev/null
+++ b/lib/common_test/test/ct_skip_SUITE_data/skip/test/auto_skip_12_SUITE.erl
@@ -0,0 +1,121 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(auto_skip_12_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+%%--------------------------------------------------------------------
+%% Function: suite() -> Info
+%% Info = [tuple()]
+%%--------------------------------------------------------------------
+suite() ->
+ [].
+
+%%--------------------------------------------------------------------
+%% Function: init_per_suite(Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_suite(Config0) -> void() | {save_config,Config1}
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_suite(_Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: group(Name) -> Info
+%% Info = [tuple()]
+%%--------------------------------------------------------------------
+group(g1) ->
+ [{require,unknown_variable_g1}];
+group(g4) ->
+ [{require,unknown_variable_g4}];
+group(_) ->
+ [].
+
+%%--------------------------------------------------------------------
+%% Function: init_per_testcase(TestCase, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_testcase(TestCase, Config0) ->
+%% void() | {save_config,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: groups() -> [Group]
+%% Group = {GroupName,Properties,GroupsAndTestCases}
+%% GroupName = atom()
+%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
+%% TestCase = atom()
+%% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}}
+%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+%% repeat_until_any_ok | repeat_until_any_fail
+%% N = integer() | forever
+%%--------------------------------------------------------------------
+groups() ->
+ [{g1,[],[tc1,tc2,{g2,[],[tc3]}]},
+ {g1,[],[tc1,tc2,{g2,[],[tc3]}]},
+ {g3,[],[tc1,tc2,{g4,[],[tc3]}]}].
+
+%%--------------------------------------------------------------------
+%% Function: all() -> GroupsAndTestCases | {skip,Reason}
+%% GroupsAndTestCases = [{group,GroupName} | TestCase]
+%% GroupName = atom()
+%% TestCase = atom()
+%% Reason = term()
+%%--------------------------------------------------------------------
+all() ->
+ [{group,g1},
+ {group,g3}].
+
+%%--------------------------------------------------------------------
+%% Function: TestCase(Config0) ->
+%% ok | exit() | {skip,Reason} | {comment,Comment} |
+%% {save_config,Config1} | {skip_and_save,Reason,Config1}
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% Comment = term()
+%%--------------------------------------------------------------------
+tc1(_Config) ->
+ ok.
+
+tc2(_Config) ->
+ ok.
+
+tc3(_Config) ->
+ ok.
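The new auto_skip_12_SUITE relies on the group/1 info function: a {require,Name} that cannot be satisfied makes ct_framework auto-skip the whole group, which is what the g1/g4 events expected above assert. The same mechanism works per test case via the TestCase/0 info function; a minimal sketch, assuming a config variable ftp_host that would only exist in a user-supplied config file:

    %% Illustrative only. Without ftp_host in any config file, my_tc is
    %% auto-skipped with {require_failed,{not_available,ftp_host}}.
    my_tc() ->
        [{require, ftp_host}].

    my_tc(_Config) ->
        Host = ct:get_config(ftp_host),
        ct:log("connecting to ~p", [Host]),
        ok.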
diff --git a/lib/common_test/test/ct_skip_SUITE_data/skip/test/auto_skip_3_SUITE.erl b/lib/common_test/test/ct_skip_SUITE_data/skip/test/auto_skip_3_SUITE.erl
index cb64cb76c5..4ef9f50514 100644
--- a/lib/common_test/test/ct_skip_SUITE_data/skip/test/auto_skip_3_SUITE.erl
+++ b/lib/common_test/test/ct_skip_SUITE_data/skip/test/auto_skip_3_SUITE.erl
@@ -72,7 +72,7 @@ end_per_group(_GroupName, _Config) ->
%% Reason = term()
%%--------------------------------------------------------------------
init_per_testcase(tc1, _Config) ->
- exit({init_per_testcase,tc1,failed});
+ exit('init_per_testcase for tc1 failed');
init_per_testcase(_TestCase, Config) ->
Config.
diff --git a/lib/common_test/test/ct_skip_SUITE_data/skip/test/user_skip_6_SUITE.erl b/lib/common_test/test/ct_skip_SUITE_data/skip/test/user_skip_6_SUITE.erl
new file mode 100644
index 0000000000..5f0e5db6f2
--- /dev/null
+++ b/lib/common_test/test/ct_skip_SUITE_data/skip/test/user_skip_6_SUITE.erl
@@ -0,0 +1,118 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(user_skip_6_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+%%--------------------------------------------------------------------
+%% Function: suite() -> Info
+%% Info = [tuple()]
+%%--------------------------------------------------------------------
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+%%--------------------------------------------------------------------
+%% Function: init_per_group(GroupName, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_group(ptop1, Config) ->
+ {skip,"Top group skipped"};
+init_per_group(psub2, Config) ->
+ {skip,"Sub group skipped"};
+init_per_group(_GroupName, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_group(GroupName, Config0) ->
+%% void() | {save_config,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_group(_GroupName, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: init_per_testcase(TestCase, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_testcase(TestCase, Config0) ->
+%% void() | {save_config,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: groups() -> [Group]
+%% Group = {GroupName,Properties,GroupsAndTestCases}
+%% GroupName = atom()
+%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
+%% TestCase = atom()
+%% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}}
+%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+%% repeat_until_any_ok | repeat_until_any_fail
+%% N = integer() | forever
+%%--------------------------------------------------------------------
+groups() ->
+ [{ptop1,[parallel],[tc1,{psub1,[parallel],[tc3,tc4]},tc2]},
+ {ptop2,[parallel],[tc1,{psub2,[parallel],[tc3,tc4]},tc2]}].
+
+%%--------------------------------------------------------------------
+%% Function: all() -> GroupsAndTestCases | {skip,Reason}
+%% GroupsAndTestCases = [{group,GroupName} | TestCase]
+%% GroupName = atom()
+%% TestCase = atom()
+%% Reason = term()
+%%--------------------------------------------------------------------
+all() ->
+ [{group,ptop1},{group,ptop2}].
+
+%%--------------------------------------------------------------------
+%% Function: TestCase(Config0) ->
+%% ok | exit() | {skip,Reason} | {comment,Comment} |
+%% {save_config,Config1} | {skip_and_save,Reason,Config1}
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% Comment = term()
+%%--------------------------------------------------------------------
+tc1(_) ->
+ ok.
+
+tc2(_) ->
+ ok.
+
+tc3(_) ->
+ ok.
+
+tc4(_) ->
+ ok.
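user_skip_6_SUITE above demonstrates user skips from init_per_group: returning {skip,Reason} marks every case and subgroup in that group as user-skipped (tc_user_skip) rather than auto-skipped. A minimal sketch of the same idea guarded by a host check (the unix_only group name and the os:type/0 condition are illustrative, not from the patch):

    %% Illustrative only: skip a whole group unless it can run on this host.
    init_per_group(unix_only, Config) ->
        case os:type() of
            {unix, _} -> Config;
            _ -> {skip, "requires a unix host"}
        end;
    init_per_group(_Group, Config) ->
        Config.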
diff --git a/lib/common_test/test/ct_skip_SUITE_data/skip/test/user_skip_7_SUITE.erl b/lib/common_test/test/ct_skip_SUITE_data/skip/test/user_skip_7_SUITE.erl
new file mode 100644
index 0000000000..82ce536a79
--- /dev/null
+++ b/lib/common_test/test/ct_skip_SUITE_data/skip/test/user_skip_7_SUITE.erl
@@ -0,0 +1,113 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(user_skip_7_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+%%--------------------------------------------------------------------
+%% Function: suite() -> Info
+%% Info = [tuple()]
+%%--------------------------------------------------------------------
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+%%--------------------------------------------------------------------
+%% Function: init_per_group(GroupName, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_group(_GroupName, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_group(GroupName, Config0) ->
+%% void() | {save_config,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_group(_GroupName, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: init_per_testcase(TestCase, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_testcase(TestCase, Config0) ->
+%% void() | {save_config,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: groups() -> [Group]
+%% Group = {GroupName,Properties,GroupsAndTestCases}
+%% GroupName = atom()
+%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
+%% TestCase = atom()
+%% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}}
+%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+%% repeat_until_any_ok | repeat_until_any_fail
+%% N = integer() | forever
+%%--------------------------------------------------------------------
+groups() ->
+ [{ptop1,[parallel],[tc1,{psub1,[parallel],[tc3,tc4]},tc2]}].
+
+%%--------------------------------------------------------------------
+%% Function: all() -> GroupsAndTestCases | {skip,Reason}
+%% GroupsAndTestCases = [{group,GroupName} | TestCase]
+%% GroupName = atom()
+%% TestCase = atom()
+%% Reason = term()
+%%--------------------------------------------------------------------
+all() ->
+ [{group,ptop1}].
+
+%%--------------------------------------------------------------------
+%% Function: TestCase(Config0) ->
+%% ok | exit() | {skip,Reason} | {comment,Comment} |
+%% {save_config,Config1} | {skip_and_save,Reason,Config1}
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% Comment = term()
+%%--------------------------------------------------------------------
+tc1(_) ->
+ ok.
+
+tc2(_) ->
+ ok.
+
+tc3(_) ->
+ ok.
+
+tc4(_) ->
+ ok.
diff --git a/lib/common_test/test/ct_surefire_SUITE.erl b/lib/common_test/test/ct_surefire_SUITE.erl
index b86b47f0a2..c5e44682b0 100644
--- a/lib/common_test/test/ct_surefire_SUITE.erl
+++ b/lib/common_test/test/ct_surefire_SUITE.erl
@@ -182,7 +182,7 @@ test_events(_) ->
{?eh,test_stats,{1,1,{1,0}}},
{?eh,tc_start,{surefire_SUITE,tc_autoskip_require}},
{?eh,tc_done,{surefire_SUITE,tc_autoskip_require,
- {skipped,{require_failed,'_'}}}},
+ {auto_skipped,{require_failed,'_'}}}},
{?eh,test_stats,{1,1,{1,1}}},
[{?eh,tc_start,{surefire_SUITE,{init_per_group,g,[]}}},
{?eh,tc_done,{surefire_SUITE,{init_per_group,g,[]},ok}},
@@ -198,7 +198,7 @@ test_events(_) ->
{?eh,test_stats,{2,2,{2,1}}},
{?eh,tc_start,{surefire_SUITE,tc_autoskip_require}},
{?eh,tc_done,{surefire_SUITE,tc_autoskip_require,
- {skipped,{require_failed,'_'}}}},
+ {auto_skipped,{require_failed,'_'}}}},
{?eh,test_stats,{2,2,{2,2}}},
{?eh,tc_start,{surefire_SUITE,{end_per_group,g,[]}}},
{?eh,tc_done,{surefire_SUITE,{end_per_group,g,[]},ok}}],
@@ -210,7 +210,7 @@ test_events(_) ->
{surefire_SUITE,init_per_group,
{'EXIT',all_cases_should_be_skipped}}}}},
{?eh,test_stats,{2,2,{2,3}}},
- {?eh,tc_auto_skip,{surefire_SUITE,end_per_group,
+ {?eh,tc_auto_skip,{surefire_SUITE,{end_per_group,g_fail},
{failed,
{surefire_SUITE,init_per_group,
{'EXIT',all_cases_should_be_skipped}}}}}],
@@ -328,6 +328,7 @@ events_to_result([]) ->
result(ok) ->[];
result({skipped,_}) -> [s];
+result({auto_skipped,_}) -> [s];
result({failed,_}) -> [f].
%% Using the expected events' last test_stats element to produce the
diff --git a/lib/common_test/test/ct_test_server_if_1_SUITE.erl b/lib/common_test/test/ct_test_server_if_1_SUITE.erl
index 8e4852369d..9882fa980c 100644
--- a/lib/common_test/test/ct_test_server_if_1_SUITE.erl
+++ b/lib/common_test/test/ct_test_server_if_1_SUITE.erl
@@ -65,12 +65,12 @@ groups() ->
[].
init_per_group(_GroupName, Config) ->
- Config.
+ Config.
end_per_group(_GroupName, Config) ->
- Config.
+ Config.
+
-
%%--------------------------------------------------------------------
%% TEST CASES
@@ -104,7 +104,7 @@ ts_if_1(Config) when is_list(Config) ->
TestEvents = events_to_check(ts_if_1),
ok = ct_test_support:verify_events(TestEvents, Events, Config).
-
+
%%%-----------------------------------------------------------------
%%% HELP FUNCTIONS
@@ -112,17 +112,17 @@ ts_if_1(Config) when is_list(Config) ->
setup(Test, Config) ->
Opts0 = ct_test_support:get_opts(Config),
-% Level = ?config(trace_level, Config),
-% EvHArgs = [{cbm,ct_test_support},{trace_level,Level}],
-% Opts = Opts0 ++ [Test,{event_handler,{?eh,EvHArgs}}],
+ % Level = ?config(trace_level, Config),
+ % EvHArgs = [{cbm,ct_test_support},{trace_level,Level}],
+ % Opts = Opts0 ++ [Test,{event_handler,{?eh,EvHArgs}}],
Opts = [Test | Opts0],
ERPid = ct_test_support:start_event_receiver(Config),
{Opts,ERPid}.
reformat(Events, EH) ->
ct_test_support:reformat(Events, EH).
-%reformat(Events, _EH) ->
-% Events.
+ %reformat(Events, _EH) ->
+ % Events.
%%%-----------------------------------------------------------------
%%% TEST EVENTS
@@ -140,14 +140,15 @@ test_events(ts_if_1) ->
[
{?eh,start_logging,{'DEF','RUNDIR'}},
{?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
- {?eh,start_info,{10,6,26}},
+ {?eh,start_info,{10,8,25}},
+
{?eh,tc_start,{ts_if_1_SUITE,init_per_suite}},
{?eh,tc_done,{ts_if_1_SUITE,init_per_suite,ok}},
{?eh,tc_start,{ts_if_1_SUITE,tc1}},
- {?eh,tc_done,{ts_if_1_SUITE,tc1,{skipped,
- {failed,
- {ts_if_1_SUITE,init_per_testcase,
- {timetrap_timeout,2000}}}}}},
+ {?eh,tc_done,{ts_if_1_SUITE,tc1,
+ {auto_skipped,
+ {failed,
+ {ts_if_1_SUITE,init_per_testcase,{timetrap_timeout,2000}}}}}},
{?eh,test_stats,{0,0,{0,1}}},
{?eh,tc_start,{ts_if_1_SUITE,tc2}},
{?eh,tc_done,{ts_if_1_SUITE,tc2,
@@ -159,115 +160,162 @@ test_events(ts_if_1) ->
{?eh,tc_start,{ts_if_1_SUITE,tc4}},
{?eh,tc_done,{ts_if_1_SUITE,tc4,{failed,{error,failed_on_purpose}}}},
{?eh,test_stats,{1,2,{0,1}}},
- {?eh,tc_done,{ts_if_1_SUITE,tc5,{skipped,{sequence_failed,seq1,tc4}}}},
- {?eh,test_stats,{1,2,{1,1}}},
+ {?eh,tc_done,{ts_if_1_SUITE,tc5,{auto_skipped,{sequence_failed,seq1,tc4}}}},
+ {?eh,test_stats,{1,2,{0,2}}},
[{?eh,tc_start,{ts_if_1_SUITE,{init_per_group,seq2,[sequence]}}},
{?eh,tc_done,{ts_if_1_SUITE,{init_per_group,seq2,[sequence]},ok}},
{?eh,tc_start,{ts_if_1_SUITE,tc4}},
{?eh,tc_done,{ts_if_1_SUITE,tc4,{failed,{error,failed_on_purpose}}}},
- {?eh,test_stats,{1,3,{1,1}}},
+ {?eh,test_stats,{1,3,{0,2}}},
{?eh,tc_auto_skip,{ts_if_1_SUITE,tc5,{failed,{ts_if_1_SUITE,tc4}}}},
- {?eh,test_stats,{1,3,{1,2}}},
+ {?eh,test_stats,{1,3,{0,3}}},
{?eh,tc_start,{ts_if_1_SUITE,{end_per_group,seq2,[sequence]}}},
{?eh,tc_done,{ts_if_1_SUITE,{end_per_group,seq2,[sequence]},ok}}],
{?eh,tc_start,{ts_if_1_SUITE,tc6}},
- {?eh,tc_done,{ts_if_1_SUITE,tc6,{skipped,{require_failed,{not_available,void}}}}},
- {?eh,test_stats,{1,3,{1,3}}},
- {?eh,tc_start,{ts_if_1_SUITE,tc7}},
- {?eh,tc_done,{ts_if_1_SUITE,tc7,ok}},
- {?eh,test_stats,{2,3,{1,3}}},
+ {?eh,tc_done,{ts_if_1_SUITE,tc6,{auto_skipped,{require_failed,
+ {not_available,void}}}}},
+ {?eh,test_stats,{1,3,{0,4}}},
+ {?eh,tc_done,{ts_if_1_SUITE,tc7,{auto_skipped,
+ {testcase0_failed,bad_return_value}}}},
+ {?eh,test_stats,{1,3,{0,5}}},
{?eh,tc_start,{ts_if_1_SUITE,tc8}},
{?eh,tc_done,{ts_if_1_SUITE,tc8,{skipped,"tc8 skipped"}}},
- {?eh,test_stats,{2,3,{2,3}}},
+ {?eh,test_stats,{1,3,{1,5}}},
{?eh,tc_start,{ts_if_1_SUITE,tc9}},
{?eh,tc_done,{ts_if_1_SUITE,tc9,{skipped,'tc9 skipped'}}},
- {?eh,test_stats,{2,3,{3,3}}},
+ {?eh,test_stats,{1,3,{2,5}}},
{?eh,tc_start,{ts_if_1_SUITE,tc10}},
- {?eh,tc_done,{ts_if_1_SUITE,tc10,{failed,{error,{function_clause,'_'}}}}},
- {?eh,test_stats,{2,4,{3,3}}},
+ {?eh,tc_done,{ts_if_1_SUITE,tc10,
+ {failed,{error,{function_clause,'_'}}}}},
+ {?eh,test_stats,{1,4,{2,5}}},
{?eh,tc_start,{ts_if_1_SUITE,tc11}},
{?eh,tc_done,{ts_if_1_SUITE,tc11,
- {skipped,{failed,{ts_if_1_SUITE,init_per_testcase,bad_return}}}}},
- {?eh,test_stats,{2,4,{3,4}}},
+ {auto_skipped,
+ {failed,{ts_if_1_SUITE,init_per_testcase,bad_return}}}}},
+ {?eh,test_stats,{1,4,{2,6}}},
[{?eh,tc_start,{ts_if_1_SUITE,{init_per_group,g1,[]}}},
- {?eh,tc_done,{ts_if_1_SUITE,{init_per_group,g1,[]},{skipped,g1_got_skipped}}},
- {?eh,tc_auto_skip,{ts_if_1_SUITE,gtc1,g1_got_skipped}},
- {?eh,test_stats,{2,4,{3,5}}},
- {?eh,tc_auto_skip,{ts_if_1_SUITE,end_per_group,g1_got_skipped}}],
-
+ {?eh,tc_done,{ts_if_1_SUITE,{init_per_group,g1,[]},
+ {skipped,g1_got_skipped}}},
+ {?eh,tc_user_skip,{ts_if_1_SUITE,gtc1,g1_got_skipped}},
+ {?eh,test_stats,{1,4,{3,6}}},
+ {?eh,tc_user_skip,{ts_if_1_SUITE,{end_per_group,g1},g1_got_skipped}}],
+
{parallel,
[{?eh,tc_start,{ts_if_1_SUITE,{init_per_group,g2,[parallel]}}},
{?eh,tc_done,{ts_if_1_SUITE,{init_per_group,g2,[parallel]},ok}},
[{?eh,tc_start,{ts_if_1_SUITE,{init_per_group,g3,[]}}},
- {?eh,tc_done,{ts_if_1_SUITE,{init_per_group,g3,[]},{skipped,g3_got_skipped}}},
- {?eh,tc_auto_skip,{ts_if_1_SUITE,gtc2,g3_got_skipped}},
- {?eh,test_stats,{2,4,{3,6}}},
- {?eh,tc_auto_skip,{ts_if_1_SUITE,end_per_group,g3_got_skipped}}],
+ {?eh,tc_done,{ts_if_1_SUITE,{init_per_group,g3,[]},{skipped,g3_got_skipped}}},
+ {?eh,tc_user_skip,{ts_if_1_SUITE,gtc2,g3_got_skipped}},
+ {?eh,test_stats,{1,4,{4,6}}},
+ {?eh,tc_user_skip,{ts_if_1_SUITE,{end_per_group,g3},g3_got_skipped}}],
{?eh,tc_start,{ts_if_1_SUITE,{end_per_group,g2,[parallel]}}},
{?eh,tc_done,{ts_if_1_SUITE,{end_per_group,g2,[parallel]},ok}}]},
{?eh,tc_start,{ts_if_1_SUITE,tc12}},
{?eh,tc_done,{ts_if_1_SUITE,tc12,{failed,{testcase_aborted,'stopping tc12'}}}},
- {?eh,test_stats,{2,5,{3,6}}},
+ {?eh,test_stats,{1,5,{4,6}}},
{?eh,tc_start,{ts_if_1_SUITE,tc13}},
{?eh,tc_done,{ts_if_1_SUITE,tc13,ok}},
- {?eh,test_stats,{3,5,{3,6}}},
+ {?eh,test_stats,{2,5,{4,6}}},
{?eh,tc_start,{ts_if_1_SUITE,end_per_suite}},
{?eh,tc_done,{ts_if_1_SUITE,end_per_suite,ok}},
+
{?eh,tc_start,{ts_if_2_SUITE,init_per_suite}},
- {?eh,tc_done,{ts_if_2_SUITE,init_per_suite,
- {failed,{error,{suite0_failed,{exited,suite0_goes_boom}}}}}},
- {?eh,tc_auto_skip,{ts_if_2_SUITE,my_test_case,
- {failed,{error,{suite0_failed,{exited,suite0_goes_boom}}}}}},
- {?eh,test_stats,{3,5,{3,7}}},
- {?eh,tc_auto_skip,{ts_if_2_SUITE,end_per_suite,
- {failed,{error,{suite0_failed,{exited,suite0_goes_boom}}}}}},
+ {?eh,tc_done,
+ {ts_if_2_SUITE,init_per_suite,
+ {failed,{error,{suite0_failed,{exited,suite0_goes_boom}}}}}},
+ {?eh,tc_auto_skip,
+ {ts_if_2_SUITE,my_test_case,
+ {failed,{error,{suite0_failed,{exited,suite0_goes_boom}}}}}},
+ {?eh,test_stats,{2,5,{4,7}}},
+ {?eh,tc_auto_skip,
+ {ts_if_2_SUITE,end_per_suite,
+ {failed,{error,{suite0_failed,{exited,suite0_goes_boom}}}}}},
+
{?eh,tc_start,{ct_framework,error_in_suite}},
- {?eh,test_stats,{3,5,{4,7}}},
+ {?eh,test_stats,{2,6,{4,7}}},
+
{?eh,tc_start,{ct_framework,error_in_suite}},
- {?eh,test_stats,{3,5,{5,7}}},
+ {?eh,test_stats,{2,7,{4,7}}},
+
{?eh,tc_start,{ts_if_5_SUITE,init_per_suite}},
{?eh,tc_done,{ts_if_5_SUITE,init_per_suite,
- {skipped,{require_failed_in_suite0,{not_available,undef_variable}}}}},
- {?eh,tc_auto_skip,{ts_if_5_SUITE,my_test_case,
- {require_failed_in_suite0,{not_available,undef_variable}}}},
- {?eh,test_stats,{3,5,{5,8}}},
- {?eh,tc_auto_skip,{ts_if_5_SUITE,end_per_suite,
- {require_failed_in_suite0,{not_available,undef_variable}}}},
+ {auto_skipped,
+ {require_failed_in_suite0,{not_available,undef_variable}}}}},
+ {?eh,tc_auto_skip,
+ {ts_if_5_SUITE,my_test_case,
+ {require_failed_in_suite0,{not_available,undef_variable}}}},
+ {?eh,test_stats,{2,7,{4,8}}},
+ {?eh,tc_auto_skip,
+ {ts_if_5_SUITE,end_per_suite,
+ {require_failed_in_suite0,{not_available,undef_variable}}}},
+
{?eh,tc_start,{ct_framework,init_per_suite}},
{?eh,tc_done,{ct_framework,init_per_suite,
{failed,{error,{suite0_failed,{exited,suite0_byebye}}}}}},
- {?eh,tc_auto_skip,{ts_if_6_SUITE,tc1,
- {failed,{error,{suite0_failed,{exited,suite0_byebye}}}}}},
- {?eh,test_stats,{3,5,{5,9}}},
+ {?eh,tc_auto_skip,
+ {ts_if_6_SUITE,tc1,
+ {failed,{error,{suite0_failed,{exited,suite0_byebye}}}}}},
+ {?eh,test_stats,{2,7,{4,9}}},
+ {?eh,tc_auto_skip,
+ {ct_framework,end_per_suite,
+ {failed,{error,{suite0_failed,{exited,suite0_byebye}}}}}},
- {?eh,tc_start,{ts_if_7_SUITE,tc1}},
- {?eh,tc_done,{ts_if_7_SUITE,tc1,ok}},
- {?eh,test_stats,{4,5,{5,9}}},
+ {?eh,tc_start,{ct_framework,init_per_suite}},
+ {?eh,tc_done,{ct_framework,init_per_suite,ok}},
+ {?eh,tc_done,
+ {ts_if_7_SUITE,tc1,{auto_skipped,{testcase0_failed,bad_return_value}}}},
+ {?eh,test_stats,{2,7,{4,10}}},
+ {?eh,tc_done,{ts_if_7_SUITE,
+ {init_per_group,g1,[]},
+ {auto_skipped,{group0_failed,bad_return_value}}}},
+ {?eh,tc_auto_skip,
+ {ts_if_7_SUITE,tc2,{group0_failed,bad_return_value}}},
+ {?eh,test_stats,{2,7,{4,11}}},
+ {?eh,tc_auto_skip,
+ {ts_if_7_SUITE,{end_per_group,g1},{group0_failed,bad_return_value}}},
+
+ [{?eh,tc_start,{ts_if_7_SUITE,{init_per_group,g2,[]}}},
+ {?eh,tc_done,{ts_if_7_SUITE,{init_per_group,g2,[]},ok}},
+ {?eh,tc_done,{ts_if_7_SUITE,tc2,
+ {auto_skipped,{testcase0_failed,bad_return_value}}}},
+ {?eh,test_stats,{2,7,{4,12}}},
+ {?eh,tc_start,{ts_if_7_SUITE,{end_per_group,g2,[]}}},
+ {?eh,tc_done,{ts_if_7_SUITE,{end_per_group,g2,[]},ok}}],
+
+ {?eh,tc_start,{ct_framework,end_per_suite}},
+ {?eh,tc_done,{ct_framework,end_per_suite,ok}},
+
+
+ {?eh,tc_start,{ct_framework,init_per_suite}},
+ {?eh,tc_done,{ct_framework,init_per_suite,ok}},
{?eh,tc_start,{ts_if_8_SUITE,tc1}},
{?eh,tc_done,{ts_if_8_SUITE,tc1,{failed,{error,failed_on_purpose}}}},
- {?eh,test_stats,{4,6,{5,9}}},
-
+ {?eh,test_stats,{2,8,{4,12}}},
+ {?eh,tc_start,{ct_framework,end_per_suite}},
+ {?eh,tc_done,{ct_framework,end_per_suite,ok}},
+
+
{?eh,tc_user_skip,{skipped_by_spec_1_SUITE,all,"should be skipped"}},
- {?eh,test_stats,{4,6,{6,9}}},
-
+ {?eh,test_stats,{2,8,{5,12}}},
{?eh,tc_start,{skipped_by_spec_2_SUITE,init_per_suite}},
{?eh,tc_done,{skipped_by_spec_2_SUITE,init_per_suite,ok}},
{?eh,tc_user_skip,{skipped_by_spec_2_SUITE,tc1,"should be skipped"}},
- {?eh,test_stats,{4,6,{7,9}}},
+ {?eh,test_stats,{2,8,{6,12}}},
{?eh,tc_start,{skipped_by_spec_2_SUITE,end_per_suite}},
{?eh,tc_done,{skipped_by_spec_2_SUITE,end_per_suite,ok}},
-
+
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
].
+
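Note on the counters above: the rewritten expectations rely on the test_stats event distinguishing user skips from auto skips; the tuples appear to follow the {Ok,Failed,{UserSkipped,AutoSkipped}} layout used by common_test events. A minimal sketch of how such a counter would be advanced from tc_done results (the function name and clause set are illustrative, not part of the patch):

    update_stats(ok, {Ok,Failed,{US,AS}})               -> {Ok+1,Failed,{US,AS}};
    update_stats({failed,_}, {Ok,Failed,{US,AS}})       -> {Ok,Failed+1,{US,AS}};
    update_stats({skipped,_}, {Ok,Failed,{US,AS}})      -> {Ok,Failed,{US+1,AS}};
    update_stats({auto_skipped,_}, {Ok,Failed,{US,AS}}) -> {Ok,Failed,{US,AS+1}}.

For example, tc6 changing from {skipped,...} to {auto_skipped,...} moves its count from the third to the fourth position, which is why {1,3,{1,3}} becomes {1,3,{0,4}} in the events above.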
diff --git a/lib/common_test/test/ct_test_server_if_1_SUITE_data/test_server_if/test/ts_if_7_SUITE.erl b/lib/common_test/test/ct_test_server_if_1_SUITE_data/test_server_if/test/ts_if_7_SUITE.erl
index a2254848d0..20e04c464b 100644
--- a/lib/common_test/test/ct_test_server_if_1_SUITE_data/test_server_if/test/ts_if_7_SUITE.erl
+++ b/lib/common_test/test/ct_test_server_if_1_SUITE_data/test_server_if/test/ts_if_7_SUITE.erl
@@ -73,7 +73,8 @@ end_per_testcase(_TestCase, _Config) ->
%% N = integer() | forever
%%--------------------------------------------------------------------
groups() ->
- [].
+ [{g1,[],[tc2]},
+ {g2,[],[tc2]}].
%%--------------------------------------------------------------------
%% Function: all() -> GroupsAndTestCases | {skip,Reason}
@@ -83,7 +84,12 @@ groups() ->
%% Reason = term()
%%--------------------------------------------------------------------
all() ->
- [tc1].
+ [tc1,{group,g1},{group,g2}].
+
+group(g1) ->
+ exit(g1_byebye);
+group(_) ->
+ [].
tc1() ->
exit(tc1_byebye).
@@ -91,3 +97,9 @@ tc1() ->
tc1(_) ->
done.
+tc2() ->
+ exit(tc2_byebye).
+
+tc2(_) ->
+ done.
+
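The crashing group/1 and testcase/0 info functions added above are what produce the {group0_failed,bad_return_value} and {testcase0_failed,bad_return_value} auto-skip results expected earlier in this patch. For contrast, a minimal sketch of well-formed info functions, with property values chosen purely as examples:

    group(g2) ->
        [{timetrap, {seconds, 30}}].

    tc2() ->
        [{timetrap, {seconds, 5}}].

A crash in these callbacks, as in group(g1) and tc1/0 above, makes common_test auto-skip the affected group or test case.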
diff --git a/lib/common_test/test/ct_test_support.erl b/lib/common_test/test/ct_test_support.erl
index 67e430f821..772274ce7e 100644
--- a/lib/common_test/test/ct_test_support.erl
+++ b/lib/common_test/test/ct_test_support.erl
@@ -692,8 +692,10 @@ locate({parallel,TEvs}, Node, Evs, Config) ->
test_server:format("Found ~p!", [TEv]),
{Done,RemEvs2,length(RemEvs2)}
end;
- %% end_per_group auto skipped
- (TEv={TEH,tc_auto_skip,{M,end_per_group,R}}, {Done,RemEvs,_RemSize}) ->
+ %% end_per_group auto- or user skipped
+ (TEv={TEH,AutoOrUserSkip,{M,end_per_group,R}}, {Done,RemEvs,_RemSize})
+ when AutoOrUserSkip == tc_auto_skip;
+ AutoOrUserSkip == tc_user_skip ->
RemEvs1 =
lists:dropwhile(
fun({EH,#event{name=tc_auto_skip,
@@ -704,10 +706,18 @@ locate({parallel,TEvs}, Node, Evs, Config) ->
match -> false;
_ -> true
end;
+ ({EH,#event{name=tc_user_skip,
+ node=EvNode,
+ data={Mod,end_per_group,Reason}}}) when
+ EH == TEH, EvNode == Node, Mod == M ->
+ case match_data(R, Reason) of
+ match -> false;
+ _ -> true
+ end;
({EH,#event{name=stop_logging,
node=EvNode,data=_}}) when
EH == TEH, EvNode == Node ->
- exit({tc_auto_skip_not_found,TEv});
+ exit({tc_auto_or_user_skip_not_found,TEv});
(_) ->
true
end, RemEvs),
@@ -925,8 +935,10 @@ locate({shuffle,TEvs}, Node, Evs, Config) ->
test_server:format("Found ~p!", [TEv]),
{Done,RemEvs2,length(RemEvs2)}
end;
- %% end_per_group auto skipped
- (TEv={TEH,tc_auto_skip,{M,end_per_group,R}}, {Done,RemEvs,_RemSize}) ->
+      %% end_per_group auto- or user skipped
+ (TEv={TEH,AutoOrUserSkip,{M,end_per_group,R}}, {Done,RemEvs,_RemSize})
+ when AutoOrUserSkip == tc_auto_skip;
+ AutoOrUserSkip == tc_user_skip ->
RemEvs1 =
lists:dropwhile(
fun({EH,#event{name=tc_auto_skip,
@@ -934,6 +946,11 @@ locate({shuffle,TEvs}, Node, Evs, Config) ->
data={Mod,end_per_group,Reason}}}) when
EH == TEH, EvNode == Node, Mod == M, Reason == R ->
false;
+ ({EH,#event{name=tc_user_skip,
+ node=EvNode,
+ data={Mod,end_per_group,Reason}}}) when
+ EH == TEH, EvNode == Node, Mod == M, Reason == R ->
+ false;
({EH,#event{name=stop_logging,
node=EvNode,data=_}}) when
EH == TEH, EvNode == Node ->
@@ -1178,6 +1195,9 @@ log_events1([E={_EH,tc_done,{_M,{end_per_group,_GrName,Props},_R}} | Evs], Dev,
log_events1([E={_EH,tc_auto_skip,{_M,end_per_group,_Reason}} | Evs], Dev, Ind) ->
io:format(Dev, "~s~p],~n", [Ind,E]),
log_events1(Evs, Dev, Ind--" ");
+log_events1([E={_EH,tc_user_skip,{_M,end_per_group,_Reason}} | Evs], Dev, Ind) ->
+ io:format(Dev, "~s~p],~n", [Ind,E]),
+ log_events1(Evs, Dev, Ind--" ");
log_events1([E], Dev, Ind) ->
io:format(Dev, "~s~p~n].~n", [Ind,E]),
ok;
diff --git a/lib/common_test/test/ct_testspec_1_SUITE.erl b/lib/common_test/test/ct_testspec_1_SUITE.erl
index 6a4a4acd80..187b5e6d3a 100644
--- a/lib/common_test/test/ct_testspec_1_SUITE.erl
+++ b/lib/common_test/test/ct_testspec_1_SUITE.erl
@@ -760,16 +760,42 @@ test_events(all_groups) ->
test_events(skip_all_groups) ->
[
{?eh,start_logging,'_'},
+ {?eh,start_info,{1,1,12}},
{?eh,tc_start,{groups_11_SUITE,init_per_suite}},
- {?eh,tc_user_skip, {groups_11_SUITE,{group,test_group_1a},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_1a},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_1a,"SKIPPED!"}},
{?eh,test_stats,{0,0,{1,0}}},
- {?eh,tc_user_skip, {groups_11_SUITE,{group,test_group_1b},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_1b,"SKIPPED!"}},
{?eh,test_stats,{0,0,{2,0}}},
- {?eh,tc_user_skip, {groups_11_SUITE,{group,test_group_2},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_1a},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_1b},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_1a,"SKIPPED!"}},
{?eh,test_stats,{0,0,{3,0}}},
- {?eh,tc_user_skip, {groups_11_SUITE,{group,test_group_4},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_1b,"SKIPPED!"}},
{?eh,test_stats,{0,0,{4,0}}},
- {?eh,tc_done,{groups_11_SUITE,end_per_suite,'_'}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_1b},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_2},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_2a,"SKIPPED!"}},
+ {?eh,test_stats,{0,0,{5,0}}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_3a,"SKIPPED!"}},
+ {?eh,test_stats,{0,0,{6,0}}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_3b,"SKIPPED!"}},
+ {?eh,test_stats,{0,0,{7,0}}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_2b,"SKIPPED!"}},
+ {?eh,test_stats,{0,0,{8,0}}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_2},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_4},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_5a,"SKIPPED!"}},
+ {?eh,test_stats,{0,0,{9,0}}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_7a,"SKIPPED!"}},
+ {?eh,test_stats,{0,0,{10,0}}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_7b,"SKIPPED!"}},
+ {?eh,test_stats,{0,0,{11,0}}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_5b,"SKIPPED!"}},
+ {?eh,test_stats,{0,0,{12,0}}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_4},"SKIPPED!"}},
+ {?eh,tc_start,{groups_11_SUITE,end_per_suite}},
+ {?eh,tc_done,{groups_11_SUITE,end_per_suite,init}},
{negative,{?eh,tc_start,'_'},{?eh,stop_logging,'_'}}
];
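The skip_all_groups expectations assume that every group in groups_11_SUITE is skipped via the test specification, so each init_per_group, test case and end_per_group appears as a tc_user_skip carrying the spec's comment. A sketch of the kind of specification terms that would drive this (the directory name is an assumption; the group names are those appearing in the events):

    {suites, "groups_1", [groups_11_SUITE]}.
    {skip_groups, "groups_1", groups_11_SUITE,
     [test_group_1a, test_group_1b, test_group_2, test_group_4], "SKIPPED!"}.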
@@ -788,21 +814,35 @@ test_events(group) ->
test_events(skip_group) ->
[
- {?eh,start_logging,'_'},
- {?eh,tc_start,{groups_11_SUITE,init_per_suite}},
-
- {?eh,tc_start,{groups_11_SUITE,{init_per_group,test_group_1a,[]}}},
- {?eh,tc_start,{groups_11_SUITE,testcase_1a}},
- {?eh,tc_start,{groups_11_SUITE,testcase_1b}},
- {?eh,test_stats,{2,0,{0,0}}},
- {?eh,tc_done,{groups_11_SUITE,{end_per_group,test_group_1a,[]},'_'}},
-
- {?eh,tc_user_skip, {groups_11_SUITE,{group,test_group_1b},"SKIPPED!"}},
- {?eh,tc_user_skip, {groups_11_SUITE,{group,test_group_2},"SKIPPED!"}},
- %%! But not test_group_7 since it's a sub-group!
- {?eh,test_stats,{2,0,{2,0}}},
- {negative,{?eh,tc_user_skip,'_'},{?eh,stop_logging,'_'}}
- ];
+ {?eh,start_logging,'_'},
+ {?eh,start_info,{1,1,8}},
+ {?eh,tc_start,{groups_11_SUITE,init_per_suite}},
+ [{?eh,tc_start,{groups_11_SUITE,{init_per_group,test_group_1a,[]}}},
+ {?eh,tc_done,{groups_11_SUITE,{init_per_group,test_group_1a,[]},ok}},
+ {?eh,tc_start,{groups_11_SUITE,testcase_1a}},
+ {?eh,tc_start,{groups_11_SUITE,testcase_1b}},
+ {?eh,test_stats,{2,0,{0,0}}},
+ {?eh,tc_start,{groups_11_SUITE,{end_per_group,test_group_1a,[]}}},
+ {?eh,tc_done,{groups_11_SUITE,{end_per_group,test_group_1a,[]},ok}}],
+ {?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_1b},
+ "SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_1a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_1b,"SKIPPED!"}},
+ {?eh,test_stats,{2,0,{2,0}}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_1b},
+ "SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_2},
+ "SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_2a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_3a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_3b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_2b,"SKIPPED!"}},
+ {?eh,test_stats,{2,0,{6,0}}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_2},
+ "SKIPPED!"}},
+ {?eh,tc_done,{groups_11_SUITE,end_per_suite,init}},
+ {negative,{?eh,tc_start,'_'},{?eh,stop_logging,'_'}}
+ ];
test_events(group_all_testcases) ->
[
@@ -820,11 +860,23 @@ test_events(group_all_testcases) ->
test_events(skip_group_all_testcases) ->
[
{?eh,start_logging,'_'},
+ {?eh,start_info,{1,1,4}},
{?eh,tc_start,{groups_11_SUITE,init_per_suite}},
- {?eh,tc_user_skip, {groups_11_SUITE,{group,test_group_1a},"SKIPPED!"}},
- {?eh,tc_user_skip, {groups_11_SUITE,{group,test_group_1b},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_1a},
+ "SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_1a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_1b,"SKIPPED!"}},
{?eh,test_stats,{0,0,{2,0}}},
- {?eh,tc_start,{groups_11_SUITE,end_per_suite}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_1a},
+ "SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{init_per_group,test_group_1b},
+ "SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_1a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_11_SUITE,testcase_1b,"SKIPPED!"}},
+ {?eh,test_stats,{0,0,{4,0}}},
+ {?eh,tc_user_skip,{groups_11_SUITE,{end_per_group,test_group_1b},
+ "SKIPPED!"}},
+ {?eh,tc_done,{groups_11_SUITE,end_per_suite,init}},
{negative,{?eh,tc_start,'_'},{?eh,stop_logging,'_'}}
];
@@ -934,7 +986,7 @@ test_events(subgroup) ->
[
{?eh,start_logging,'_'},
{?eh,tc_start,{groups_12_SUITE,init_per_suite}},
-
+
{parallel,
[{?eh,tc_start,
{groups_12_SUITE,{init_per_group,test_group_2,[parallel]}}},
@@ -962,36 +1014,58 @@ test_events(subgroup) ->
test_events(skip_subgroup) ->
[
{?eh,start_logging,'_'},
+ {?eh,start_info,{1,1,6}},
{?eh,tc_start,{groups_12_SUITE,init_per_suite}},
- [{?eh,tc_start,
- {groups_12_SUITE,{init_per_group,test_group_4,[]}}},
+ [{?eh,tc_start,{groups_12_SUITE,{init_per_group,test_group_4,[]}}},
+ {?eh,tc_done,{groups_12_SUITE,{init_per_group,test_group_4,[]},ok}},
+
{parallel,
- [{?eh,tc_start,
- {groups_12_SUITE,{init_per_group,test_group_5,[parallel]}}},
- {?eh,tc_done,
- {groups_12_SUITE,{init_per_group,test_group_5,[parallel]},ok}},
- {parallel,
- [{?eh,tc_start,
- {groups_12_SUITE,{init_per_group,test_group_6,[parallel]}}},
- {?eh,tc_done,
- {groups_12_SUITE,{init_per_group,test_group_6,[parallel]},ok}},
- [{?eh,tc_start,{groups_12_SUITE,{init_per_group,test_group_7,'_'}}},
- {?eh,tc_start,{groups_12_SUITE,{end_per_group,test_group_7,'_'}}}],
- {?eh,tc_user_skip,
- {groups_12_SUITE,{group,test_group_8},"SKIPPED!"}},
- {?eh,tc_start,
- {groups_12_SUITE,{end_per_group,test_group_6,[parallel]}}},
- {?eh,tc_done,
- {groups_12_SUITE,{end_per_group,test_group_6,[parallel]},ok}}
- ]},
- {?eh,tc_start,
- {groups_12_SUITE,{end_per_group,test_group_5,[parallel]}}},
- {?eh,tc_done,
- {groups_12_SUITE,{end_per_group,test_group_5,[parallel]},ok}}]},
- {?eh,tc_start,{groups_12_SUITE,{end_per_group,test_group_4,[]}}}],
+ [{?eh,tc_start,{groups_12_SUITE,
+ {init_per_group,test_group_5,[parallel]}}},
+ {?eh,tc_done,{groups_12_SUITE,
+ {init_per_group,test_group_5,[parallel]},ok}},
+
+ {parallel,
+ [{?eh,tc_start,{groups_12_SUITE,
+ {init_per_group,test_group_6,[parallel]}}},
+ {?eh,tc_done,{groups_12_SUITE,
+ {init_per_group,test_group_6,[parallel]},ok}},
+
+ [{?eh,tc_start,{groups_12_SUITE,
+ {init_per_group,test_group_7,[sequence]}}},
+ {?eh,tc_done,{groups_12_SUITE,
+ {init_per_group,test_group_7,[sequence]},ok}},
+ {?eh,tc_done,{groups_12_SUITE,testcase_7a,ok}},
+ {?eh,tc_done,{groups_12_SUITE,testcase_7b,ok}},
+ {?eh,tc_start,{groups_12_SUITE,
+ {end_per_group,test_group_7,[sequence]}}},
+ {?eh,tc_done,{groups_12_SUITE,
+ {end_per_group,test_group_7,[sequence]},ok}}],
+
+ {?eh,tc_user_skip,{groups_12_SUITE,
+ {init_per_group,test_group_8},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_8a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_8b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,
+ {end_per_group,test_group_8},"SKIPPED!"}},
+
+ {?eh,tc_start,{groups_12_SUITE,
+ {end_per_group,test_group_6,[parallel]}}},
+ {?eh,tc_done,{groups_12_SUITE,
+ {end_per_group,test_group_6,[parallel]},ok}}]},
+
+ {?eh,test_stats,{4,0,{2,0}}},
+ {?eh,tc_start,{groups_12_SUITE,
+ {end_per_group,test_group_5,[parallel]}}},
+ {?eh,tc_done,{groups_12_SUITE,
+ {end_per_group,test_group_5,[parallel]},ok}}]},
- {?eh,tc_done,{groups_12_SUITE,end_per_suite,'_'}},
+ {?eh,tc_start,{groups_12_SUITE,{end_per_group,test_group_4,[]}}},
+ {?eh,tc_done,{groups_12_SUITE,{end_per_group,test_group_4,[]},ok}}],
+
+ {?eh,tc_start,{groups_12_SUITE,end_per_suite}},
+ {?eh,tc_done,{groups_12_SUITE,end_per_suite,init}},
{negative,{?eh,tc_start,'_'},{?eh,stop_logging,'_'}}
];
@@ -1066,17 +1140,26 @@ test_events(subgroup_all_testcases) ->
test_events(skip_subgroup_all_testcases) ->
[
{?eh,start_logging,'_'},
+ {?eh,start_info,{1,1,6}},
{?eh,tc_start,{groups_12_SUITE,init_per_suite}},
-
- [{?eh,tc_start,
- {groups_12_SUITE,{init_per_group,test_group_4,[]}}},
- {?eh,tc_done,
- {groups_12_SUITE,{init_per_group,test_group_4,[]},ok}},
- {?eh,tc_user_skip,{groups_12_SUITE,{group,test_group_5},"SKIPPED!"}},
- {?eh,tc_start,{groups_12_SUITE,{end_per_group,test_group_4,[]}}},
- {?eh,tc_done,{groups_12_SUITE,{end_per_group,test_group_4,[]},ok}}
- ],
-
+ [{?eh,tc_start,{groups_12_SUITE,{init_per_group,test_group_4,[]}}},
+ {?eh,tc_done,{groups_12_SUITE,{init_per_group,test_group_4,[]},ok}},
+ {?eh,tc_user_skip,{groups_12_SUITE,
+ {init_per_group,test_group_5},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_5a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_7a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_7b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_8a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_8b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_5b,"SKIPPED!"}},
+ {?eh,test_stats,{0,0,{6,0}}},
+ {?eh,tc_user_skip,{groups_12_SUITE,
+ {end_per_group,test_group_5},"SKIPPED!"}},
+ {?eh,tc_start,{groups_12_SUITE,
+ {end_per_group,test_group_4,[]}}},
+ {?eh,tc_done,{groups_12_SUITE,
+ {end_per_group,test_group_4,[]},ok}}],
+ {?eh,tc_start,{groups_12_SUITE,end_per_suite}},
{?eh,tc_done,{groups_12_SUITE,end_per_suite,'_'}},
{negative,{?eh,tc_start,'_'},{?eh,stop_logging,'_'}}
];
@@ -1194,13 +1277,30 @@ test_events(skip_subgroup_testcase) ->
test_events(sub_skipped_by_top) ->
[
{?eh,start_logging,'_'},
+ {?eh,start_info,{1,1,12}},
{?eh,tc_start,{groups_12_SUITE,init_per_suite}},
-
- {?eh,tc_user_skip,{groups_12_SUITE,{group,test_group_4},"SKIPPED!"}},
- {?eh,tc_user_skip,{groups_12_SUITE,{group,test_group_4},"SKIPPED!"}},
-
+ {?eh,tc_user_skip,{groups_12_SUITE,{init_per_group,test_group_4},
+ "SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_5a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_7a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_7b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_8a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_8b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_5b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,
+ {end_per_group,test_group_4},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,
+ {init_per_group,test_group_4},"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_5a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_7a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_7b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_8a,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_8b,"SKIPPED!"}},
+ {?eh,tc_user_skip,{groups_12_SUITE,testcase_5b,"SKIPPED!"}},
+ {?eh,test_stats,{0,0,{12,0}}},
+ {?eh,tc_user_skip,{groups_12_SUITE,
+ {end_per_group,test_group_4},"SKIPPED!"}},
{?eh,tc_done,{groups_12_SUITE,end_per_suite,'_'}},
-
{negative,{?eh,tc_start,'_'},{?eh,stop_logging,'_'}}
];
diff --git a/lib/common_test/test/ct_testspec_3_SUITE.erl b/lib/common_test/test/ct_testspec_3_SUITE.erl
index 5fa187e5b4..9f8ca84e45 100644
--- a/lib/common_test/test/ct_testspec_3_SUITE.erl
+++ b/lib/common_test/test/ct_testspec_3_SUITE.erl
@@ -1539,7 +1539,7 @@ flat_spec1_events() ->
{?eh,test_stats,{1,2,{0,0}}},
{?eh,tc_start,{t11_SUITE,autoskip_tc}},
{?eh,tc_done,
- {t11_SUITE,autoskip_tc,{skipped,
+ {t11_SUITE,autoskip_tc,{auto_skipped,
{failed,
{t11_SUITE,init_per_testcase,
{kaboom,'_'}}}}}},
@@ -1562,7 +1562,7 @@ flat_spec1_events() ->
{?eh,test_stats,{2,4,{1,1}}},
{?eh,tc_start,{t21_SUITE,autoskip_tc}},
{?eh,tc_done,
- {t21_SUITE,autoskip_tc,{skipped,
+ {t21_SUITE,autoskip_tc,{auto_skipped,
{failed,
{t21_SUITE,init_per_testcase,
{kaboom,'_'}}}}}},
@@ -1589,7 +1589,7 @@ flat_spec2_events() ->
{?eh,test_stats,{1,2,{0,0}}},
{?eh,tc_start,{t12_SUITE,autoskip_tc}},
{?eh,tc_done,
- {t12_SUITE,autoskip_tc,{skipped,
+ {t12_SUITE,autoskip_tc,{auto_skipped,
{failed,
{t12_SUITE,init_per_testcase,
{kaboom,'_'}}}}}},
@@ -1612,7 +1612,7 @@ flat_spec2_events() ->
{?eh,test_stats,{2,4,{1,1}}},
{?eh,tc_start,{t12_SUITE,autoskip_tc}},
{?eh,tc_done,
- {t12_SUITE,autoskip_tc,{skipped,
+ {t12_SUITE,autoskip_tc,{auto_skipped,
{failed,
{t12_SUITE,init_per_testcase,
{kaboom,'_'}}}}}},
@@ -1635,7 +1635,7 @@ flat_spec2_events() ->
{?eh,test_stats,{3,6,{2,2}}},
{?eh,tc_start,{t22_SUITE,autoskip_tc}},
{?eh,tc_done,
- {t22_SUITE,autoskip_tc,{skipped,
+ {t22_SUITE,autoskip_tc,{auto_skipped,
{failed,
{t22_SUITE,init_per_testcase,
{kaboom,'_'}}}}}},
diff --git a/lib/compiler/src/beam_clean.erl b/lib/compiler/src/beam_clean.erl
index e208ffec1f..9d89e21a4e 100644
--- a/lib/compiler/src/beam_clean.erl
+++ b/lib/compiler/src/beam_clean.erl
@@ -86,7 +86,7 @@ add_to_work_list(F, {Fs,Used}=Sets) ->
false -> {[F|Fs],sets:add_element(F, Used)}
end.
-
+
%%%
%%% Coalesce adjacent labels. Renumber all labels to eliminate gaps.
%%% This cleanup will slightly reduce file size and slightly speed up loading.
diff --git a/lib/compiler/src/beam_type.erl b/lib/compiler/src/beam_type.erl
index 3b51216a6c..3ec57a67da 100644
--- a/lib/compiler/src/beam_type.erl
+++ b/lib/compiler/src/beam_type.erl
@@ -600,7 +600,7 @@ checkerror_1([], OrigIs) -> OrigIs.
checkerror_2(OrigIs) -> [{set,[],[],fcheckerror}|OrigIs].
-
+
%%% Routines for maintaining a type database. The type database
%%% associates type information with registers.
%%%
diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl
index 70279ab658..48f5135aca 100644
--- a/lib/compiler/src/beam_validator.erl
+++ b/lib/compiler/src/beam_validator.erl
@@ -530,7 +530,7 @@ valfun_2(I, #vst{current=#st{ct=[[Fail]|_]}}=Vst) when is_integer(Fail) ->
%% Update branched state
valfun_3(I, branch_state(Fail, Vst));
valfun_2(_, _) ->
- error(ambigous_catch_try_state).
+ error(ambiguous_catch_try_state).
%% Handle the remaining floating point instructions here.
%% Floating point.
diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl
index 802e3dfa2f..47d446273b 100644
--- a/lib/compiler/src/compile.erl
+++ b/lib/compiler/src/compile.erl
@@ -1556,7 +1556,7 @@ restore_expand_module([F|Fs]) ->
[F|restore_expand_module(Fs)];
restore_expand_module([]) -> [].
-
+
-spec options() -> 'ok'.
options() ->
@@ -1593,7 +1593,7 @@ help([_|T]) ->
help(_) ->
ok.
-
+
%% compile(AbsFileName, Outfilename, Options)
%% Compile entry point for erl_compile.
diff --git a/lib/compiler/src/core_scan.erl b/lib/compiler/src/core_scan.erl
index 0ca2f57dde..c0dfecd1dc 100644
--- a/lib/compiler/src/core_scan.erl
+++ b/lib/compiler/src/core_scan.erl
@@ -96,7 +96,7 @@ format_error(Other) -> io_lib:write(Other).
string_thing($') -> "atom"; %' stupid emacs
string_thing($") -> "string". %" stupid emacs
-
+
%% Re-entrant pre-scanner.
%%
%% If the input list of characters is insufficient to build a term the
@@ -214,7 +214,7 @@ pre_comment(eof, Sofar, Pos) ->
pre_error(E, Epos, Pos) ->
{error,{Epos,core_scan,E}, Pos}.
-
+
%% scan(CharList, StartPos)
%% This takes a list of characters and tries to tokenise them.
%%
diff --git a/lib/compiler/src/v3_core.erl b/lib/compiler/src/v3_core.erl
index 1195937d91..01bb8635cd 100644
--- a/lib/compiler/src/v3_core.erl
+++ b/lib/compiler/src/v3_core.erl
@@ -1912,7 +1912,7 @@ new_in_all([Le|Les]) ->
foldl(fun (L, Ns) -> intersection((get_anno(L))#a.ns, Ns) end,
(get_anno(Le))#a.ns, Les);
new_in_all([]) -> [].
-
+
%% The AfterVars are the variables which are used afterwards. We need
%% this to work out which variables are actually exported and used
%% from case/receive. In subblocks/clauses the AfterVars of the block
diff --git a/lib/compiler/src/v3_kernel.erl b/lib/compiler/src/v3_kernel.erl
index 2b2b8bf550..65f1251099 100644
--- a/lib/compiler/src/v3_kernel.erl
+++ b/lib/compiler/src/v3_kernel.erl
@@ -160,8 +160,8 @@ function({#c_var{name={F,Arity}=FA},Body}, St0) ->
io:fwrite("Function: ~w/~w\n", [F,Arity]),
erlang:raise(Class, Error, Stack)
end.
-
-
+
+
%% body(Cexpr, Sub, State) -> {Kexpr,[PreKepxr],State}.
%% Do the main sequence of a body. A body ends in an atomic value or
%% values. Must check if vector first so do expr.
@@ -834,7 +834,7 @@ last([_|T]) -> last(T).
first([_]) -> [];
first([H|T]) -> [H|first(T)].
-
+
%% This code implements the algorithm for an optimizing compiler for
%% pattern matching given "The Implementation of Functional
%% Programming Languages" by Simon Peyton Jones. The code is much
@@ -1428,7 +1428,7 @@ arg_val(Arg, C) ->
{set_kanno(S, []),U,T,Fs}
end
end.
-
+
%% ubody_used_vars(Expr, State) -> [UsedVar]
%% Return all used variables for the body sequence. Much more
%% efficient than using ubody/3 if the body contains nested letrecs.
diff --git a/lib/compiler/test/compilation_SUITE.erl b/lib/compiler/test/compilation_SUITE.erl
index 8e7b7292cc..d517029b1b 100644
--- a/lib/compiler/test/compilation_SUITE.erl
+++ b/lib/compiler/test/compilation_SUITE.erl
@@ -50,7 +50,7 @@ groups() ->
trycatch_4,opt_crash,otp_5404,otp_5436,otp_5481,
otp_5553,otp_5632,otp_5714,otp_5872,otp_6121,
otp_6121a,otp_6121b,otp_7202,otp_7345,on_load,
- string_table,otp_8949_a,otp_8949_a,split_cases,
+ string_table,otp_8949_a,otp_8949_b,split_cases,
beam_utils_liveopt]}].
init_per_suite(Config) ->
@@ -630,13 +630,13 @@ string_table(Config) when is_list(Config) ->
ok.
otp_8949_a(Config) when is_list(Config) ->
- value = otp_8949_a(),
+ value = do_otp_8949_a(),
ok.
-record(cs, {exs,keys = [],flags = 1}).
-record(exs, {children = []}).
-otp_8949_a() ->
+do_otp_8949_a() ->
case id([#cs{}]) of
[#cs{}=Cs] ->
SomeVar = id(value),
diff --git a/lib/crypto/test/crypto_SUITE.erl b/lib/crypto/test/crypto_SUITE.erl
index 58aaa78d28..645b1203ef 100644
--- a/lib/crypto/test/crypto_SUITE.erl
+++ b/lib/crypto/test/crypto_SUITE.erl
@@ -143,7 +143,8 @@ app(Config) when is_list(Config) ->
hash() ->
[{doc, "Test all different hash functions"}].
hash(Config) when is_list(Config) ->
- {Type, Msgs, Digests} = proplists:get_value(hash, Config),
+ {Type, MsgsLE, Digests} = proplists:get_value(hash, Config),
+ Msgs = lists:map(fun lazy_eval/1, MsgsLE),
[LongMsg | _] = lists:reverse(Msgs),
Inc = iolistify(LongMsg),
[IncrDigest | _] = lists:reverse(Digests),
@@ -154,7 +155,8 @@ hash(Config) when is_list(Config) ->
hmac() ->
[{doc, "Test all different hmac functions"}].
hmac(Config) when is_list(Config) ->
- {Type, Keys, Data, Expected} = proplists:get_value(hmac, Config),
+ {Type, Keys, DataLE, Expected} = proplists:get_value(hmac, Config),
+ Data = lists:map(fun lazy_eval/1, DataLE),
hmac(Type, Keys, Data, Expected),
hmac(Type, lists:map(fun iolistify/1, Keys), lists:map(fun iolistify/1, Data), Expected),
hmac_increment(Type).
@@ -795,7 +797,13 @@ rfc_4634_sha512_digests() ->
hexstr2bin("8E959B75DAE313DA8CF4F72814FC143F8F7779C6EB9F7FA17299AEADB6889018501D289E4900F7E4331B99DEC4B5433AC7D329EEB6DD26545E96E55B874BE909")].
long_msg() ->
- lists:duplicate(1000000, $a).
+ fun() -> lists:duplicate(1000000, $a) end.
+
+%% Building huge terms (like long_msg/0) in init_per_group seems to cause
+%% test_server to crash with 'no_answer_from_tc_supervisor' on some machines.
+%% The terms are therefore evaluated lazily, once the test case has started.
+lazy_eval(F) when is_function(F) -> F();
+lazy_eval(Term) -> Term.
long_sha_digest() ->
hexstr2bin("34aa973c" "d4c4daa4" "f61eeb2b" "dbad2731" "6534016f").
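The lazy_eval/1 helper above lets the test data carry a fun instead of the fully built million-character message, so the heavy term is only constructed once the test case itself is running. A shell-style sketch of the same pattern (illustrative only):

    1> LazyEval = fun(F) when is_function(F) -> F(); (Term) -> Term end.
    2> Msgs0 = [<<"abc">>, fun() -> lists:duplicate(1000000, $a) end].
    3> [_Small, Long] = lists:map(LazyEval, Msgs0).
    4> length(Long).
    1000000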
diff --git a/lib/debugger/test/dbg_ui_SUITE_data/manual_data/src/lists1.erl b/lib/debugger/test/dbg_ui_SUITE_data/manual_data/src/lists1.erl
index 0214983c11..db84ee5fc8 100644
--- a/lib/debugger/test/dbg_ui_SUITE_data/manual_data/src/lists1.erl
+++ b/lib/debugger/test/dbg_ui_SUITE_data/manual_data/src/lists1.erl
@@ -236,7 +236,7 @@ flatlength([H|T], L) when list(H) ->
flatlength([H|T], L) ->
flatlength(T, L + 1);
flatlength([], L) -> L.
-
+
%% keymember(Key, Index, [Tuple])
%% keysearch(Key, Index, [Tuple])
%% keydelete(Key, Index, [Tuple])
@@ -298,7 +298,7 @@ keymap(Fun, ExtraArgs, Index, [Tup|Tail]) ->
[setelement(Index, Tup, apply(Fun, [element(Index, Tup)|ExtraArgs]))|
keymap(Fun, ExtraArgs, Index, Tail)];
keymap( _, _ , _, []) -> [].
-
+
%% all(Predicate, List)
%% any(Predicate, List)
%% map(Function, List)
diff --git a/lib/debugger/test/int_SUITE_data/lists1.erl b/lib/debugger/test/int_SUITE_data/lists1.erl
index 0214983c11..db84ee5fc8 100644
--- a/lib/debugger/test/int_SUITE_data/lists1.erl
+++ b/lib/debugger/test/int_SUITE_data/lists1.erl
@@ -236,7 +236,7 @@ flatlength([H|T], L) when list(H) ->
flatlength([H|T], L) ->
flatlength(T, L + 1);
flatlength([], L) -> L.
-
+
%% keymember(Key, Index, [Tuple])
%% keysearch(Key, Index, [Tuple])
%% keydelete(Key, Index, [Tuple])
@@ -298,7 +298,7 @@ keymap(Fun, ExtraArgs, Index, [Tup|Tail]) ->
[setelement(Index, Tup, apply(Fun, [element(Index, Tup)|ExtraArgs]))|
keymap(Fun, ExtraArgs, Index, Tail)];
keymap( _, _ , _, []) -> [].
-
+
%% all(Predicate, List)
%% any(Predicate, List)
%% map(Function, List)
diff --git a/lib/debugger/test/int_SUITE_data/my_lists.erl b/lib/debugger/test/int_SUITE_data/my_lists.erl
index 98eb4396e3..f9399b1085 100644
--- a/lib/debugger/test/int_SUITE_data/my_lists.erl
+++ b/lib/debugger/test/int_SUITE_data/my_lists.erl
@@ -237,7 +237,7 @@ flatlength([H|T], L) when list(H) ->
flatlength([H|T], L) ->
flatlength(T, L + 1);
flatlength([], L) -> L.
-
+
%% keymember(Key, Index, [Tuple])
%% keysearch(Key, Index, [Tuple])
%% keydelete(Key, Index, [Tuple])
@@ -299,7 +299,7 @@ keymap(Fun, ExtraArgs, Index, [Tup|Tail]) ->
[setelement(Index, Tup, apply(Fun, [element(Index, Tup)|ExtraArgs]))|
keymap(Fun, ExtraArgs, Index, Tail)];
keymap( _, _ , _, []) -> [].
-
+
%% all(Predicate, List)
%% any(Predicate, List)
%% map(Function, List)
@@ -698,7 +698,7 @@ flatlength_1([H|T], L) when list(H) ->
flatlength_1([H|T], L) ->
flatlength_1(T, L + 1);
flatlength_1([], L) -> L.
-
+
%% keymember(Key, Index, [Tuple])
%% keysearch(Key, Index, [Tuple])
%% keydelete(Key, Index, [Tuple])
@@ -760,7 +760,7 @@ keymap_1(Fun, ExtraArgs, Index, [Tup|Tail]) ->
[setelement(Index, Tup, apply(Fun, [element(Index, Tup)|ExtraArgs]))|
keymap_1(Fun, ExtraArgs, Index, Tail)];
keymap_1( _, _ , _, []) -> [].
-
+
%% all(Predicate, List)
%% any(Predicate, List)
%% map(Function, List)
@@ -1162,7 +1162,7 @@ flatlength_2([H|T], L) when list(H) ->
flatlength_2([H|T], L) ->
flatlength_2(T, L + 1);
flatlength_2([], L) -> L.
-
+
%% keymember_2(Key, Index, [Tuple])
%% keysearch_2(Key, Index, [Tuple])
%% keydelete_2(Key, Index, [Tuple])
@@ -1224,7 +1224,7 @@ keymap_2(Fun, ExtraArgs, Index, [Tup|Tail]) ->
[setelement(Index, Tup, apply(Fun, [element(Index, Tup)|ExtraArgs]))|
keymap_2(Fun, ExtraArgs, Index, Tail)];
keymap_2( _, _ , _, []) -> [].
-
+
%% all_2(Predicate, List)
%% any_2(Predicate, List)
%% map_2(Function, List)
@@ -1624,7 +1624,7 @@ flatlength_3([H|T], L) when list(H) ->
flatlength_3([H|T], L) ->
flatlength_3(T, L + 1);
flatlength_3([], L) -> L.
-
+
%% keymember_3(Key, Index, [Tuple])
%% keysearch_3(Key, Index, [Tuple])
%% keydelete_3(Key, Index, [Tuple])
@@ -1686,7 +1686,7 @@ keymap_3(Fun, ExtraArgs, Index, [Tup|Tail]) ->
[setelement(Index, Tup, apply(Fun, [element(Index, Tup)|ExtraArgs]))|
keymap_3(Fun, ExtraArgs, Index, Tail)];
keymap_3( _, _ , _, []) -> [].
-
+
%% all_3(Predicate, List)
%% any_3(Predicate, List)
%% map_3(Function, List)
diff --git a/lib/dialyzer/test/options1_SUITE_data/src/compiler/beam_clean.erl b/lib/dialyzer/test/options1_SUITE_data/src/compiler/beam_clean.erl
index 04225e9bd0..4fc4e89ce9 100644
--- a/lib/dialyzer/test/options1_SUITE_data/src/compiler/beam_clean.erl
+++ b/lib/dialyzer/test/options1_SUITE_data/src/compiler/beam_clean.erl
@@ -80,7 +80,7 @@ add_to_work_list(F, {Fs,Used}=Sets) ->
false -> {[F|Fs],sets:add_element(F, Used)}
end.
-
+
%%%
%%% Coalesce adjacent labels. Renumber all labels to eliminate gaps.
%%% This cleanup will slightly reduce file size and slightly speed up loading.
diff --git a/lib/dialyzer/test/options1_SUITE_data/src/compiler/beam_type.erl b/lib/dialyzer/test/options1_SUITE_data/src/compiler/beam_type.erl
index d2ac3fcd99..f59cc897d6 100644
--- a/lib/dialyzer/test/options1_SUITE_data/src/compiler/beam_type.erl
+++ b/lib/dialyzer/test/options1_SUITE_data/src/compiler/beam_type.erl
@@ -456,7 +456,7 @@ are_live_regs_determinable([{'%live',_}|_]) -> true;
are_live_regs_determinable([_|Is]) -> are_live_regs_determinable(Is);
are_live_regs_determinable([]) -> false.
-
+
%%% Routines for maintaining a type database. The type database
%%% associates type information with registers.
%%%
diff --git a/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl b/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl
index 2b6d14e300..9b56d384ab 100644
--- a/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl
+++ b/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl
@@ -990,7 +990,7 @@ listing(LFun, Ext, St) ->
Es = [{Lfile,[{none,compile,write_error}]}],
{error,St#compile{errors=St#compile.errors ++ Es}}
end.
-
+
options() ->
help(standard_passes()).
@@ -1022,7 +1022,7 @@ help([_|T]) ->
help(_) ->
ok.
-
+
%% compile(AbsFileName, Outfilename, Options)
%% Compile entry point for erl_compile.
diff --git a/lib/dialyzer/test/options1_SUITE_data/src/compiler/core_scan.erl b/lib/dialyzer/test/options1_SUITE_data/src/compiler/core_scan.erl
index f4e609bf5b..879af3efea 100644
--- a/lib/dialyzer/test/options1_SUITE_data/src/compiler/core_scan.erl
+++ b/lib/dialyzer/test/options1_SUITE_data/src/compiler/core_scan.erl
@@ -123,7 +123,7 @@ format_error(Other) -> io_lib:write(Other).
string_thing($') -> "atom";
string_thing($") -> "string".
-
+
%% Re-entrant pre-scanner.
%%
%% If the input list of characters is insufficient to build a term the
@@ -241,7 +241,7 @@ pre_comment(eof, Sofar, Pos) ->
pre_error(E, Epos, Pos) ->
{error,{Epos,core_scan,E}, Pos}.
-
+
%% scan(CharList, StartPos)
%% This takes a list of characters and tries to tokenise them.
%%
diff --git a/lib/dialyzer/test/options1_SUITE_data/src/compiler/sys_pre_expand.erl b/lib/dialyzer/test/options1_SUITE_data/src/compiler/sys_pre_expand.erl
index 41b7cb248d..590cc682c9 100644
--- a/lib/dialyzer/test/options1_SUITE_data/src/compiler/sys_pre_expand.erl
+++ b/lib/dialyzer/test/options1_SUITE_data/src/compiler/sys_pre_expand.erl
@@ -647,7 +647,7 @@ new_fun_name(#expand{func=F,arity=A,fcount=I}=St) ->
++ "-fun-" ++ integer_to_list(I) ++ "-",
{list_to_atom(Name),St#expand{fcount=I+1}}.
-
+
%% normalise_fields([RecDef]) -> [Field].
%% Normalise the field definitions to always have a default value. If
%% none has been given then use 'undefined'.
@@ -881,7 +881,7 @@ bin_expand_strings(Es) ->
end, Es1, S);
(E, Es1) -> [E|Es1]
end, [], Es).
-
+
%% new_var_name(State) -> {VarName,State}.
new_var_name(St) ->
diff --git a/lib/dialyzer/test/options1_SUITE_data/src/compiler/v3_core.erl b/lib/dialyzer/test/options1_SUITE_data/src/compiler/v3_core.erl
index c96837ab5e..45a8bc4ad9 100644
--- a/lib/dialyzer/test/options1_SUITE_data/src/compiler/v3_core.erl
+++ b/lib/dialyzer/test/options1_SUITE_data/src/compiler/v3_core.erl
@@ -1127,7 +1127,7 @@ new_in_all([Le|Les]) ->
foldl(fun (L, Ns) -> intersection((core_lib:get_anno(L))#a.ns, Ns) end,
(core_lib:get_anno(Le))#a.ns, Les);
new_in_all([]) -> [].
-
+
%% The AfterVars are the variables which are used afterwards. We need
%% this to work out which variables are actually exported and used
%% from case/receive. In subblocks/clauses the AfterVars of the block
diff --git a/lib/dialyzer/test/options1_SUITE_data/src/compiler/v3_kernel.erl b/lib/dialyzer/test/options1_SUITE_data/src/compiler/v3_kernel.erl
index d7c3e1add9..ecba19b1d1 100644
--- a/lib/dialyzer/test/options1_SUITE_data/src/compiler/v3_kernel.erl
+++ b/lib/dialyzer/test/options1_SUITE_data/src/compiler/v3_kernel.erl
@@ -129,7 +129,7 @@ function(#c_def{anno=Af,name=#c_fname{id=F,arity=Arity},val=Body}, St0) ->
%%B1 = B0, St3 = St2, %Null second pass
{#k_fdef{anno=#k{us=[],ns=[],a=Af ++ Ab},
func=F,arity=Arity,vars=Kvs,body=B1},St3}.
-
+
%% body(Cexpr, Sub, State) -> {Kexpr,[PreKepxr],State}.
%% Do the main sequence of a body. A body ends in an atomic value or
%% values. Must check if vector first so do expr.
@@ -719,7 +719,7 @@ last([_|T]) -> last(T).
first([_]) -> [];
first([H|T]) -> [H|first(T)].
-
+
%% This code implements the algorithm for an optimizing compiler for
%% pattern matching given "The Implementation of Functional
%% Programming Languages" by Simon Peyton Jones. The code is much
@@ -1143,7 +1143,7 @@ arg_val(Arg) ->
#k_bin_end{} -> 0;
#k_binary{} -> 0
end.
-
+
%% ubody(Expr, Break, State) -> {Expr,[UsedVar],State}.
%% Tag the body sequence with its used variables. These bodies
%% either end with a #k_break{}, or with #k_return{} or an expression
diff --git a/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_cgi.erl b/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_cgi.erl
index d3f67eb77a..8c91b6f430 100644
--- a/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_cgi.erl
+++ b/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_cgi.erl
@@ -338,7 +338,7 @@ exec_script(false,Info,Script,_AfterScript,_RequestURI) ->
%%
proxy(#mod{config_db = ConfigDb} = Info, Port) ->
- Timeout = httpd_util:lookup(ConfigDb, cgi_timeout, ?DEFAULT_CGI_TIMEOUT),
+ Timeout = httpd_util:lookup(ConfigDb, script_timeout, ?DEFAULT_CGI_TIMEOUT),
proxy(Info, Port, 0, undefined,[], Timeout).
proxy(Info, Port, Size, StatusCode, AccResponse, Timeout) ->
diff --git a/lib/dialyzer/test/small_SUITE_data/results/trec b/lib/dialyzer/test/small_SUITE_data/results/trec
index 01ccc63761..b95df1e6ef 100644
--- a/lib/dialyzer/test/small_SUITE_data/results/trec
+++ b/lib/dialyzer/test/small_SUITE_data/results/trec
@@ -1,7 +1,7 @@
-trec.erl:26: Function test/0 has no local return
-trec.erl:27: The call trec:mk_foo_loc(42,any()) will never return since it differs in the 1st argument from the success typing arguments: ('undefined',atom())
-trec.erl:29: Function mk_foo_loc/2 has no local return
-trec.erl:30: Record construction violates the declared type for #foo{} since variable A cannot be of type atom()
-trec.erl:36: Function mk_foo_exp/2 has no local return
-trec.erl:37: Record construction violates the declared type for #foo{} since variable A cannot be of type atom()
+trec.erl:28: Function test/0 has no local return
+trec.erl:29: The call trec:mk_foo_loc(42,any()) will never return since it differs in the 1st argument from the success typing arguments: ('undefined',atom())
+trec.erl:31: Function mk_foo_loc/2 has no local return
+trec.erl:32: Record construction violates the declared type for #foo{} since variable A cannot be of type atom()
+trec.erl:38: Function mk_foo_exp/2 has no local return
+trec.erl:39: Record construction violates the declared type for #foo{} since variable A cannot be of type atom()
diff --git a/lib/dialyzer/test/small_SUITE_data/src/appmon_place.erl b/lib/dialyzer/test/small_SUITE_data/src/appmon_place.erl
index 60ffbe818f..ddb97796fb 100644
--- a/lib/dialyzer/test/small_SUITE_data/src/appmon_place.erl
+++ b/lib/dialyzer/test/small_SUITE_data/src/appmon_place.erl
@@ -1,6 +1,6 @@
%%---------------------------------------------------------------------
%% This is added as a test because it was giving a false positive
-%% (function move/4 will nevr be called) due to the strange use of
+%% (function move/4 will never be called) due to the strange use of
%% self-recursive fun construction in placex/3.
%%
%% The analysis was getting confused that the foldl call will never
diff --git a/lib/dialyzer/test/small_SUITE_data/src/contract3.erl b/lib/dialyzer/test/small_SUITE_data/src/contract3.erl
index 5b0bee9694..a6ce91882e 100644
--- a/lib/dialyzer/test/small_SUITE_data/src/contract3.erl
+++ b/lib/dialyzer/test/small_SUITE_data/src/contract3.erl
@@ -18,16 +18,23 @@ t(X, Y, Z) ->
(atom()|list()) -> atom().
t1(X) ->
- foo:bar(X).
+ f(X).
-spec t2(atom(), integer()) -> integer();
(atom(), list()) -> atom().
t2(X, Y) ->
- foo:bar(X, Y).
+ g(X, Y).
-spec t3(atom(), integer(), list()) -> integer();
(X, integer(), list()) -> X.
t3(X, Y, Z) ->
X.
+
+%% dummy functions below
+
+f(X) -> X.
+
+g(X, Y) when is_atom(X), is_integer(Y) -> Y;
+g(X, Y) when is_atom(X), is_list(Y) -> X.
diff --git a/lib/dialyzer/test/small_SUITE_data/src/empty_list_infimum.erl b/lib/dialyzer/test/small_SUITE_data/src/empty_list_infimum.erl
index b58fa732cb..3acc5ca065 100644
--- a/lib/dialyzer/test/small_SUITE_data/src/empty_list_infimum.erl
+++ b/lib/dialyzer/test/small_SUITE_data/src/empty_list_infimum.erl
@@ -1,6 +1,6 @@
%%
-%% The Original Code is RabbitMQ.
-%%
+%% This is stripped down code from RabbitMQ. It is used to report an
+%% invalid type specification for function list_vhost_permissions/1.
%% The Initial Developer of the Original Code is VMware, Inc.
%%
@@ -38,7 +38,7 @@ vhost_perms_info_keys() ->
-spec list_vhost_permissions(vhost()) -> infos().
list_vhost_permissions(VHostPath) ->
- list_permissions(vhost_perms_info_keys(), rabbit_foo:some_list()).
+ list_permissions(vhost_perms_info_keys(), some_mod:some_function()).
filter_props(Keys, Props) ->
[T || T = {K, _} <- Props, lists:member(K, Keys)].
diff --git a/lib/dialyzer/test/small_SUITE_data/src/gencall.erl b/lib/dialyzer/test/small_SUITE_data/src/gencall.erl
index d2875c9df1..762be55007 100644
--- a/lib/dialyzer/test/small_SUITE_data/src/gencall.erl
+++ b/lib/dialyzer/test/small_SUITE_data/src/gencall.erl
@@ -7,6 +7,6 @@
f() ->
gen_server:call(1,2,3),
ets:lookup(1,2,3),
- gencall2:foo(),
+ some_mod:some_function(),
gencall:foo(),
gen_server:handle_cast(1,2).
diff --git a/lib/dialyzer/test/small_SUITE_data/src/maybe_improper.erl b/lib/dialyzer/test/small_SUITE_data/src/maybe_improper.erl
index 1743d81493..6d2a35b7c8 100644
--- a/lib/dialyzer/test/small_SUITE_data/src/maybe_improper.erl
+++ b/lib/dialyzer/test/small_SUITE_data/src/maybe_improper.erl
@@ -1,7 +1,18 @@
+%%%========================================================================
+%%% Tests handling of maybe improper lists
+%%%========================================================================
-module(maybe_improper).
--export([s/1]).
+-export([s/1, t/0]).
-spec s(maybe_improper_list(X,Y)) -> {[X], maybe_improper_list(X,Y)}.
-s(A) ->
- lists:split(2,A).
+s(L) ->
+ lists:split(2, L).
+
+%% Having a remote type in the 'tail' of the list crashed dialyzer.
+%% The problem was fixed for R16B03.
+-type t_mil() :: maybe_improper_list(integer(), orddict:orddict()).
+
+-spec t() -> t_mil().
+t() ->
+ [42 | []].
diff --git a/lib/dialyzer/test/small_SUITE_data/src/overloaded1.erl b/lib/dialyzer/test/small_SUITE_data/src/overloaded1.erl
index 0af4f7446f..074a93e2fe 100644
--- a/lib/dialyzer/test/small_SUITE_data/src/overloaded1.erl
+++ b/lib/dialyzer/test/small_SUITE_data/src/overloaded1.erl
@@ -1,5 +1,5 @@
%%-----------------------------------------------------------------------------
-%% Test that tests overloaded contratcs.
+%% Test that tests overloaded contracts.
%% In December 2008 it works as far as intersection types are concerned (test1)
%% However, it does NOT work as far as type variables are concerned (test2)
%%-----------------------------------------------------------------------------
@@ -16,13 +16,13 @@ test2() ->
-type mod() :: atom().
--spec foo(ATM, list()) -> {'ok', ATM} | {'error', _} when is_subtype(ATM, mod())
- ; (MFA, list()) -> {'ok', MFA} | {'error', _} when is_subtype(MFA, mfa()).
+-spec foo(ATM, list()) -> {'ok', ATM} | {'error', _} when ATM :: mod()
+ ; (MFA, list()) -> {'ok', MFA} | {'error', _} when MFA :: mfa().
foo(F, _) when is_atom(F) ->
case atom_to_list(F) of
- [42|_] -> {ok, F};
- _Other -> {error, mod:bar(F)}
+ [42|_] -> {ok, F};
+ _Other -> {error, some_mod:some_function()}
end;
foo({M,F,A}, _) ->
case A =:= 0 of
diff --git a/lib/dialyzer/test/small_SUITE_data/src/trec.erl b/lib/dialyzer/test/small_SUITE_data/src/trec.erl
index ba50c3b401..06706162c1 100644
--- a/lib/dialyzer/test/small_SUITE_data/src/trec.erl
+++ b/lib/dialyzer/test/small_SUITE_data/src/trec.erl
@@ -18,20 +18,22 @@
%% ('undefined',atom())
%% 3. Function mk_foo_loc/2 has no local return
%%
-%% Arguably, the second warning is not what most users have in mind
-%% when they wrote the type declarations in the 'foo' record, so no
-%% doubt they'll find it confusing. But note that it is also inconsistent!
-%% How come there is a success typing for a function that has no local return?
+%% Arguably, the second warning is not what most users have in mind when
+%% they wrote the type declarations in the 'foo' record, so no doubt
+%% they'll find it confusing. But note that it is also quite confusing!
+%% Many users may be wondering: How come there is a success typing for a
+%% function that has no local return? Running typer on this module
+%% reveals a success typing for this function that is interesting indeed.
%%
test() ->
- mk_foo_loc(42, bar:f()).
+ mk_foo_loc(42, some_mod:some_function()).
mk_foo_loc(A, B) ->
#foo{a = A, b = [A,B]}.
%%
-%% For this function we currently get "has no local return" but we get
-%% no reason; I want us to get a reason.
+%% For this function we used to get a "has no local return" warning
+%% but we got no reason. This has now been fixed.
%%
mk_foo_exp(A, B) when is_integer(A) ->
#foo{a = A, b = [A,B]}.
diff --git a/lib/diameter/bin/diameterc b/lib/diameter/bin/diameterc
index 2f5834d359..d31f341c36 100755
--- a/lib/diameter/bin/diameterc
+++ b/lib/diameter/bin/diameterc
@@ -50,13 +50,10 @@ usage() ->
" -i dir = set an include directory for inherited beams~n"
" -E = no .erl output~n"
" -H = no .hrl output~n"
- " -d = write intermediate files (.spec and .forms)~n",
+ " -d = write intermediate files (.D and .F)~n",
[?MODULE]).
main(Args) ->
- %% Add the ebin directory relative to the script path.
- BinDir = filename:dirname(escript:script_name()),
- code:add_path(filename:join([BinDir, "..", "ebin"])),
halt(gen(Args)).
gen(Args) ->
@@ -72,15 +69,12 @@ gen(Args) ->
1
end.
-compile(#argv{file = File, options = Opts} = A) ->
- try diameter_dict_util:parse({path, File}, Opts) of
- {ok, Spec} ->
- maybe_output(A, Spec, Opts, spec), %% the spec file
- maybe_output(A, Spec, Opts, erl), %% the erl file
- maybe_output(A, Spec, Opts, hrl), %% The hrl file
+compile(#argv{file = File, options = Opts, output = Out}) ->
+ try diameter_make:codec({path, File}, Opts ++ Out) of
+ ok ->
0;
{error, Reason} ->
- error_msg(diameter_dict_util:format_error(Reason), []),
+ error_msg(Reason, []),
1
catch
error: Reason ->
@@ -88,10 +82,6 @@ compile(#argv{file = File, options = Opts} = A) ->
2
end.
-maybe_output(#argv{file = File, output = Output}, Spec, Opts, Mode) ->
- lists:member(Mode, Output)
- andalso diameter_codegen:from_dict(File, Spec, Opts, Mode).
-
error_msg({Fmt, Args}) ->
error_msg(Fmt, Args).
@@ -119,8 +109,9 @@ arg(["-o", Dir | Args], #argv{options = Opts} = A) ->
true = dir_exists(Dir),
arg(Args, A#argv{options = [{outdir, Dir} | Opts]});
-arg(["-i", Dir | Args], #argv{options = Opts} = A) ->
- arg(Args, A#argv{options = Opts ++ [{include, Dir}]});
+arg(["-i", Dir | Args], #argv{} = A) ->
+ code:add_patha(Dir), %% Set path here instead of passing an include
+ arg(Args, A); %% option so it's set before calling diameter_make.
arg(["--name", Name | Args], #argv{options = Opts} = A) ->
arg(Args, A#argv{options = [{name, Name} | Opts]});
@@ -137,9 +128,8 @@ arg(["-E" | Args], #argv{output = Output} = A) ->
arg(["-H" | Args], #argv{output = Output} = A) ->
arg(Args, A#argv{output = lists:delete(hrl, Output)});
-arg(["-d" | Args], #argv{options = Opts, output = Output} = A) ->
- arg(Args, A#argv{options = [debug | Opts],
- output = [spec | Output]});
+arg(["-d" | Args], #argv{output = Output} = A) ->
+ arg(Args, A#argv{output = [parse, forms | Output -- [parse, forms]]});
arg([[$- = M, C, H | T] | Args], A) %% clustered options
when C /= $i, C /= $o, C /= $- ->
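With the changes above, diameterc becomes a thin wrapper around diameter_make:codec/2, which returns ok on success and {error, Reason} otherwise. A sketch of the equivalent direct call (the dictionary file and output directory are assumptions):

    ok = diameter_make:codec({path, "my_dictionary.dia"},
                             [{outdir, "gen"}, erl, hrl]).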
diff --git a/lib/diameter/doc/src/diameter.xml b/lib/diameter/doc/src/diameter.xml
index db19fbb271..726343abb2 100644
--- a/lib/diameter/doc/src/diameter.xml
+++ b/lib/diameter/doc/src/diameter.xml
@@ -575,7 +575,7 @@ The RFC 3539 watchdog state machine has
transitioned into (<c>up</c>) or out of (<c>down</c>) the OKAY
state.
If a <c>#diameter_packet{}</c> is present in an <c>up</c> event
-then there has been a capabilties exchange on a newly established
+then there has been a capabilities exchange on a newly established
transport connection and the record contains the received CER or CEA.
Otherwise a connection has been reestablished without loss of
connectivity.</p>
@@ -584,7 +584,7 @@ connectivity.</p>
Note that a single <c>up</c> or <c>down</c> event for a given peer
corresponds to multiple &app_peer_up; or &app_peer_down;
callbacks, one for each of the Diameter applications negotiated during
-capablilities exchange.
+capabilities exchange.
That is, the event communicates connectivity with the
peer as a whole while the callbacks communicate connectivity with
respect to individual Diameter applications.</p>
@@ -599,7 +599,7 @@ Opts = [&transport_opt;]
<p>
A connecting transport is attempting to establish/reestablish a
-transport connection with a peer following &reconnect_timer; or
+transport connection with a peer following &connect_timer; or
&watchdog_timer; expiry.</p>
</item>
@@ -765,7 +765,7 @@ the application's &dictionary; file.</p>
The capabilities advertised by a node must match its configured
applications. In particular, <c>application</c> configuration must
be matched by corresponding &capability; configuration, of
-Application-Id AVP's in particular.</p>
+*-Application-Id AVPs in particular.</p>
</warning>
</item>
@@ -804,7 +804,7 @@ Defaults to <c>nodes</c>.</p>
<p>
Specifies a constant value <c>H</c> for the topmost <c>32-N</c> bits of
32-bit End-to-End and Hop-by-Hop identifiers generated
-by the service, either explicity or as a return value of a function
+by the service, either explicitly or as a return value of a function
to be evaluated at &start_service;.
In particular, an identifier <c>Id</c> is mapped to a new identifier
as follows.</p>
@@ -946,7 +946,7 @@ Applications not configured on the service in question are ignored.</p>
The capabilities advertised by a node must match its configured
applications.
In particular, setting <c>applications</c> on a transport typically
-implies having to set matching Application-Id AVP's in a
+implies having to set matching *-Application-Id AVPs in a
&capabilities; tuple.</p>
</warning>
@@ -956,7 +956,7 @@ implies having to set matching Application-Id AVP's in a
<tag><c>{capabilities, [&capability;]}</c></tag>
<item>
<p>
-AVP's used to construct outgoing CER/CEA messages.
+AVPs used to construct outgoing CER/CEA messages.
Values take precedence over any specified on the service in
question.</p>
@@ -1022,13 +1022,43 @@ The number of milliseconds after which a transport process having an
established transport connection will be terminated if the expected
capabilities exchange message (CER or CEA) is not received from the peer.
For a connecting transport, the timing of reconnection attempts is
-governed by &watchdog_timer; or &reconnect_timer; expiry.
+governed by &watchdog_timer; or &connect_timer; expiry.
For a listening transport, the peer determines the timing.</p>
<p>
Defaults to 10000.</p>
</item>
+<marker id="connect_timer"/>
+<tag><c>{connect_timer, Tc}</c></tag>
+<item>
+<pre>
+Tc = &dict_Unsigned32;
+</pre>
+
+<p>
+For a connecting transport, the &the_rfc; Tc timer, in milliseconds.
+Note that this timer determines the frequency with which a transport
+will attempt to establish an initial connection with its peer
+following transport configuration: once an initial connection has been
+established it's &watchdog_timer; that determines the frequency of
+reconnection attempts, as required by RFC 3539.</p>
+
+<p>
+For a listening transport, the timer specifies the time after which a
+previously connected peer will be forgotten: a connection after this time is
+regarded as an initial connection rather than a reestablishment,
+causing the RFC 3539 state machine to pass to state OKAY rather than
+REOPEN.
+Note that these semantics are not governed by the RFC and
+that a listening transport's &connect_timer; should be greater
+than its peer's Tw plus jitter.</p>
+
+<p>
+Defaults to 30000 for a connecting transport and 60000 for a listening
+transport.</p>
+</item>
+
<marker id="disconnect_cb"/>
<tag><c>{disconnect_cb, &evaluable;}</c></tag>
@@ -1145,36 +1175,6 @@ See &man_tcp; for the behaviour of that module.</p>
</note>
</item>
-<marker id="reconnect_timer"/>
-<tag><c>{reconnect_timer, Tc}</c></tag>
-<item>
-<pre>
-Tc = &dict_Unsigned32;
-</pre>
-
-<p>
-For a connecting transport, the &the_rfc; Tc timer, in milliseconds.
-Note that this timer determines the frequency with which a transport
-will attempt to establish a connection with its peer only <em>before</em>
-an initial connection is established: once there is an initial
-connection it's &watchdog_timer; that determines the
-frequency of reconnection attempts, as required by RFC 3539.</p>
-
-<p>
-For a listening transport, the timer specifies the time after which a
-previously connected peer will be forgotten: a connection after this time is
-regarded as an initial connection rather than a reestablishment,
-causing the RFC 3539 state machine to pass to state OKAY rather than
-REOPEN.
-Note that these semantics are not governed by the RFC and
-that a listening transport's &reconnect_timer; should be greater
-than its peer's Tw plus jitter.</p>
-
-<p>
-Defaults to 30000 for a connecting transport and 60000 for a listening
-transport.</p>
-</item>
-
<marker id="spawn_opt"/>
<tag><c>{spawn_opt, [term()]}</c></tag>
<item>
@@ -1661,7 +1661,7 @@ R_Flag}</c>.</p>
Note that <c>watchdog</c>, <c>peer</c>, <c>apps</c>, <c>caps</c>
and <c>port</c> entries depend on connectivity
with the peer and may not be present.
-Note also that the <c>statistics</c> entry presents values acuumulated
+Note also that the <c>statistics</c> entry presents values accumulated
during the lifetime of the transport configuration.</p>
<p>
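The connect_timer documentation added above replaces the removed reconnect_timer item. A sketch of where the option sits in transport configuration (the service name, addresses and timer values are assumptions, not taken from the patch):

    diameter:add_transport(my_service,
                           {connect, [{transport_module, diameter_tcp},
                                      {transport_config, [{raddr, {127,0,0,1}},
                                                          {rport, 3868}]},
                                      {connect_timer, 30000},
                                      {watchdog_timer, 30000}]}).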
diff --git a/lib/diameter/doc/src/diameter_app.xml b/lib/diameter/doc/src/diameter_app.xml
index e6c9cc9a90..0b6839dcb2 100644
--- a/lib/diameter/doc/src/diameter_app.xml
+++ b/lib/diameter/doc/src/diameter_app.xml
@@ -308,7 +308,7 @@ The return value <c>{Peer, NewState}</c> is only allowed if
the Diameter application in question was configured with the
&mod_application_opt; <c>{call_mutates_state, true}</c>.
Otherwise, the <c>State</c> argument is always
-the intial value as configured on the application, not any subsequent
+the initial value as configured on the application, not any subsequent
value returned by a &peer_up;
or &peer_down; callback.</p>
</warning>
@@ -565,7 +565,7 @@ Equivalent to</p>
</pre>
<p>
where <c>Avps</c> sets the Origin-Host, Origin-Realm, the specified
-Result-Code and (if the request contained one) Session-Id AVP's, and
+Result-Code and (if the request contained one) Session-Id AVPs, and
possibly Failed-AVP as described below.</p>
<p>
diff --git a/lib/diameter/doc/src/diameter_codec.xml b/lib/diameter/doc/src/diameter_codec.xml
index 4a77d5435b..9d26466b25 100644
--- a/lib/diameter/doc/src/diameter_codec.xml
+++ b/lib/diameter/doc/src/diameter_codec.xml
@@ -13,7 +13,7 @@
<erlref>
<header>
<copyright>
-<year>2012</year>
+<year>2012</year><year>2013</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -73,7 +73,7 @@ are defined in diameter.hrl, which can be included as follows.</p>
</pre>
<p>
-Application-specific records are definied in the hrl
+Application-specific records are defined in the hrl
files resulting from dictionary file compilation.</p>
</description>
@@ -122,7 +122,7 @@ Fields have the following types.</p>
<item>
<p>
Values in the AVP header, corresponding to AVP Code, the M flag, P
-flags and Vendor-ID respectivelty.
+flags and Vendor-ID respectively.
A Vendor-ID other than <c>undefined</c> implies a set V flag.</p>
</item>
@@ -222,7 +222,7 @@ header.</p>
<tag><c>is_retransmitted = boolean()</c></tag>
<item>
<p>
-Values correspoding to the R(equest), P(roxiable), E(rror)
+Values corresponding to the R(equest), P(roxiable), E(rror)
and T(Potentially re-transmitted message) flags of the Diameter
header.</p>
</item>
diff --git a/lib/diameter/doc/src/diameter_dict.xml b/lib/diameter/doc/src/diameter_dict.xml
index 8bf4a14240..4f51a12ebc 100644
--- a/lib/diameter/doc/src/diameter_dict.xml
+++ b/lib/diameter/doc/src/diameter_dict.xml
@@ -431,7 +431,7 @@ equivalent to specifying it with <c>@avp_vendor_id</c>.</p>
Defines values of AVP Name having type Enumerated.
Section content consists of names and corresponding integer values.
Integer values can be prefixed with 0x to be interpreted as
-hexidecimal.</p>
+hexadecimal.</p>
<p>
Note that the AVP in question can be defined in an inherited
diff --git a/lib/diameter/doc/src/diameter_intro.xml b/lib/diameter/doc/src/diameter_intro.xml
index 288ebc0c7c..6c1d1910d2 100644
--- a/lib/diameter/doc/src/diameter_intro.xml
+++ b/lib/diameter/doc/src/diameter_intro.xml
@@ -40,7 +40,7 @@ under the License.
The diameter application is an implementation of the Diameter protocol
as defined by &the_rfc;.
It supports arbitrary Diameter applications by way of a
-<em>dictionary</em> interface that allows messages and AVP's to be
+<em>dictionary</em> interface that allows messages and AVPs to be
defined and input into diameter as configuration.
It has support for all roles defined in the RFC: client, server and
agent.
@@ -69,7 +69,7 @@ interface</seealso>.</p>
<p>
While a service typically implements a single Diameter node (as
identified by an Origin-Host AVP), transports can themselves be
-associated with capabilities AVP's so that a single service can be
+associated with capabilities AVPs so that a single service can be
used to implement more than one Diameter node.</p>
<p>
diff --git a/lib/diameter/doc/src/diameter_make.xml b/lib/diameter/doc/src/diameter_make.xml
index ec71251be1..1c1eff6c6a 100644
--- a/lib/diameter/doc/src/diameter_make.xml
+++ b/lib/diameter/doc/src/diameter_make.xml
@@ -1,5 +1,7 @@
<?xml version="1.0" encoding="latin1" ?>
<!DOCTYPE erlref SYSTEM "erlref.dtd" [
+ <!ENTITY compile_forms2
+ '<seealso marker="compiler:compile#forms-2">compile:forms/2</seealso>'>
<!ENTITY filename
'<seealso marker="kernel:file#type-name">file:name()</seealso>'>
<!ENTITY dictionary
@@ -51,7 +53,7 @@ under the License.
The function &codec; is used to compile a diameter
&dictionary; into Erlang source.
The resulting source implements the interface diameter required
-to encode and decode the dictionary's messages and AVP's.</p>
+to encode and decode the dictionary's messages and AVPs.</p>
<p>
The utility &man_compile; provides an alternate compilation
@@ -64,16 +66,47 @@ interface.</p>
<funcs>
<func>
-<name>codec(Path::string(), [Opt]) -> ok | {error, Reason}</name>
+<name>codec(File :: iolist() | binary(), [Opt]) -> ok
+ | {ok, [Out]}
+ | {error, Reason}</name>
<fsummary>Compile a dictionary file into Erlang source.</fsummary>
<desc>
<p>
-Compile a single dictionary file to Erlang source.
-<c>Opt</c> can have the following types.</p>
+Compile a single dictionary file.
+The input <c>File</c> can be either a path or a literal dictionary,
+the occurrence of a newline (ASCII NL) or carriage return (ASCII CR)
+identifying the latter.
+<c>Opt</c> determines the format of the results and whether they are
+written to file or returned, and can have the following types.</p>
<taglist>
+<tag><c>parse | forms | erl | hrl</c></tag>
+<item>
+<p>
+Specifies an output format.
+Whether the output is returned or written to file depends on whether
+or not option <c>return</c> is specified.
+When written to file, the resulting file(s) will have extensions
+<c>.D</c>, <c>.F</c>, <c>.erl</c>, and <c>.hrl</c>
+respectively, basenames defaulting to <c>dictionary</c> if the input
+dictionary is literal and does not specify <c>&dict_name;</c>.
+When returned, results are in the order of the corresponding format
+options.
+Format options default to <c>erl</c> and <c>hrl</c> (in this order) if
+unspecified.</p>
+
+<p>
+The <c>parse</c> format is an internal representation that can be
+passed to &flatten; and &format;, while the <c>forms</c> format can be
+passed to &compile_forms2;.
+The <c>erl</c> and <c>hrl</c> formats are returned as
+iolists.</p>
+<!-- That codec/2 can take the parsed format is undocumented, and
+ options name and inherits have no effect in this case. -->
+</item>
+
<tag><c>{include, string()}</c></tag>
<item>
<p>
@@ -90,7 +123,15 @@ Multiple <c>include</c> options can be specified.</p>
<item>
<p>
Write generated source to the specified directory.
-Defaults to the current working directory.</p>
+Defaults to the current working directory.
+Has no effect if option <c>return</c> is specified.</p>
+</item>
+
+<tag><c>return</c></tag>
+<item>
+<p>
+Return results in an <c>{ok, [Out]}</c> tuple instead of writing to
+file and returning <c>ok</c>.</p>
</item>
<tag><c>{name|prefix, string()}</c></tag>
@@ -108,7 +149,7 @@ Transform the input dictionary before compilation, appending
<c>&dict_inherits;</c> of the specified string.</p>
<p>
-Two forms of <c>@inherits</c> have special meaning:</p>
+Two forms have special meaning:</p>
<pre>
{inherits, "-"}
@@ -127,6 +168,41 @@ Multiple <c>inherits</c> options can be specified.</p>
</taglist>
+<p>
+Note that a dictionary's <c>&dict_name;</c>, together with the
+<c>outdir</c> option, determine the output paths when the
+<c>return</c> option is not specified.
+The <c>&dict_name;</c> of a literal input dictionary defaults to
+<c>dictionary</c>.</p>
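+
+<p>
+A usage sketch (the dictionary path and its <c>&dict_name;</c> are
+hypothetical): with only <c>return</c> specified, the default
+<c>erl</c> and <c>hrl</c> results are returned as iolists, which can
+then be written to file.</p>
+
+<pre>
+{ok, [Erl, Hrl]} = diameter_make:codec("mydict.dia", [return]),
+ok = file:write_file("mydict.erl", Erl),
+ok = file:write_file("mydict.hrl", Hrl).
+</pre>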
+
+</desc>
+</func>
+
+<!-- ===================================================================== -->
+
+<func>
+<name>format(Parsed) -> iolist()</name>
+<fsummary>Format a parsed dictionary.</fsummary>
+<desc>
+<p>
+Turns a parsed dictionary, as returned by &codec;, back into the
+dictionary format.</p>
+</desc>
+</func>
+
+<!-- ===================================================================== -->
+
+<func>
+<name>flatten(Parsed) -> term()</name>
+<fsummary>Flatten a parsed dictionary.</fsummary>
+<desc>
+
+<p>
+Reconstitute a parsed dictionary, as returned by &codec;, without
+using <c>&dict_inherits;</c>.
+That is, construct an equivalent dictionary in which all AVPs are
+defined in the dictionary itself.
+The return value is also a parsed dictionary.</p>
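+
+<p>
+A round-trip sketch (the dictionary path is hypothetical): parse a
+dictionary, flatten the result, and render it back into the
+dictionary format with &format;.</p>
+
+<pre>
+{ok, [Parsed]} = diameter_make:codec("mydict.dia", [return, parse]),
+Flat = diameter_make:flatten(Parsed),
+Text = diameter_make:format(Flat).
+</pre>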
</desc>
</func>
@@ -138,11 +214,7 @@ Multiple <c>inherits</c> options can be specified.</p>
<title>BUGS</title>
<p>
-All options are string-valued.
-In particular, it is not currently possible to specify
-an &dict_inherits; module as an atom(), or a path as an arbitrary
-&filename;</p>
-
+Unrecognized options are silently ignored.</p>
</section>
<!-- ===================================================================== -->
diff --git a/lib/diameter/doc/src/diameter_sctp.xml b/lib/diameter/doc/src/diameter_sctp.xml
index 5fe14b1ef6..2be77e3dfd 100644
--- a/lib/diameter/doc/src/diameter_sctp.xml
+++ b/lib/diameter/doc/src/diameter_sctp.xml
@@ -90,7 +90,7 @@ Options <c>raddr</c> and <c>rport</c> specify the remote address
and port for a connecting transport and are not valid for a listening
transport: the former is required while the latter defaults to 3868 if
unspecified.
-Mupltiple <c>raddr</c> options can be specified, in which case the
+Multiple <c>raddr</c> options can be specified, in which case the
connecting transport in question attempts each in sequence until
an association is established.</p>
diff --git a/lib/diameter/doc/src/diameter_soc_rfc6733.xml b/lib/diameter/doc/src/diameter_soc_rfc6733.xml
index 8d85569650..deb4d05b0f 100644
--- a/lib/diameter/doc/src/diameter_soc_rfc6733.xml
+++ b/lib/diameter/doc/src/diameter_soc_rfc6733.xml
@@ -1272,7 +1272,7 @@ during capabilities exchange.)</p>
<p>
The frequency of reconnection attempts is configured with the
-&mod_transport_opt; <c>reconnect_timer</c> and
+&mod_transport_opt; <c>connect_timer</c> and
<c>watchdog_timer</c>.</p>
<pre>
diff --git a/lib/diameter/doc/src/diameter_compile.xml b/lib/diameter/doc/src/diameterc.xml
index 6630019e5c..039f4f9cdd 100644
--- a/lib/diameter/doc/src/diameter_compile.xml
+++ b/lib/diameter/doc/src/diameterc.xml
@@ -29,7 +29,7 @@ supplied.
<docno></docno>
<date></date>
<rev></rev>
-<file>diameter_compile.xml</file>
+<file>diameterc.xml</file>
</header>
<com>diameterc</com>
@@ -41,7 +41,7 @@ supplied.
The diameterc utility is used to compile a diameter
&dictionary; into Erlang source.
The resulting source implements the interface diameter required
-to encode and decode the dictionary's messages and AVP's.</p>
+to encode and decode the dictionary's messages and AVPs.</p>
<p>
The module &man_make; provides an alternate compilation interface.</p>
@@ -83,7 +83,7 @@ Defaults to the current working directory.</p>
<tag><![CDATA[-H]]></tag>
<item>
<p>
-Supress erl and hrl generation, respectively.</p>
+Suppress erl and hrl generation, respectively.</p>
</item>
<tag><![CDATA[--name <name>]]></tag>
diff --git a/lib/diameter/doc/src/files.mk b/lib/diameter/doc/src/files.mk
index 510786a7fb..6e8b1f9068 100644
--- a/lib/diameter/doc/src/files.mk
+++ b/lib/diameter/doc/src/files.mk
@@ -21,7 +21,7 @@ XML_APPLICATION_FILES = \
ref_man.xml
XML_REF1_FILES = \
- diameter_compile.xml
+ diameterc.xml
XML_REF3_FILES = \
diameter.xml \
diff --git a/lib/diameter/doc/src/notes.xml b/lib/diameter/doc/src/notes.xml
index 32082e565d..cf87a13225 100644
--- a/lib/diameter/doc/src/notes.xml
+++ b/lib/diameter/doc/src/notes.xml
@@ -42,6 +42,36 @@ first.</p>
<!-- ===================================================================== -->
+<section><title>diameter 1.4.4</title>
+
+  <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix setting of End-to-End and Hop-by-Hop Identifiers in
+ outgoing DWA.</p>
+ <p>
+ Broken by OTP-11184, which caused the identifiers to be
+ set anew, discarding the values from the incoming DWR.</p>
+ <p>
+ Own Id: OTP-11367</p>
+ </item>
+ <item>
+ <p>
+ Fix handling of 5014, DIAMETER_INVALID_AVP_LENGTH.</p>
+ <p>
+ The error was detected as 5004,
+ DIAMETER_INVALID_AVP_VALUE, for some Diameter types, in
+ which case an AVP length that pointed past the end of a
+ message resulted in encode failure.</p>
+ <p>
+ Own Id: OTP-11395</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>diameter 1.4.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/diameter/doc/src/ref_man.xml b/lib/diameter/doc/src/ref_man.xml
index 4b99fe716d..1095887144 100644
--- a/lib/diameter/doc/src/ref_man.xml
+++ b/lib/diameter/doc/src/ref_man.xml
@@ -6,7 +6,7 @@
<header>
<copyright>
<year>2011</year>
-<year>2012</year>
+<year>2013</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -39,7 +39,7 @@ applications on top of the Diameter protocol. </p>
</description>
<xi:include href="diameter.xml"/>
-<xi:include href="diameter_compile.xml"/>
+<xi:include href="diameterc.xml"/>
<xi:include href="diameter_app.xml"/>
<xi:include href="diameter_codec.xml"/>
<xi:include href="diameter_dict.xml"/>
diff --git a/lib/diameter/doc/src/seealso.ent b/lib/diameter/doc/src/seealso.ent
index 76b9823f79..7bf7460351 100644
--- a/lib/diameter/doc/src/seealso.ent
+++ b/lib/diameter/doc/src/seealso.ent
@@ -66,7 +66,7 @@ significant.
<!ENTITY disconnect_cb '<seealso marker="#disconnect_cb">disconnect_cb</seealso>'>
<!ENTITY transport_config '<seealso marker="#transport_config">transport_config</seealso>'>
<!ENTITY transport_module '<seealso marker="#transport_module">transport_module</seealso>'>
-<!ENTITY reconnect_timer '<seealso marker="#reconnect_timer">reconnect_timer</seealso>'>
+<!ENTITY connect_timer '<seealso marker="#connect_timer">connect_timer</seealso>'>
<!ENTITY watchdog_timer '<seealso marker="#watchdog_timer">watchdog_timer</seealso>'>
<!-- diameter_app -->
@@ -115,6 +115,8 @@ significant.
<!-- diameter_make -->
<!ENTITY make_codec '<seealso marker="diameter_make#codec-2">diameter_make:codec/2</seealso>'>
+<!ENTITY make_format '<seealso marker="diameter_make#format-1">diameter_make:format/1</seealso>'>
+<!ENTITY make_flatten '<seealso marker="diameter_make#flatten-1">diameter_make:flatten/1</seealso>'>
<!-- diameter_transport -->
diff --git a/lib/diameter/doc/standard/rfc7068.txt b/lib/diameter/doc/standard/rfc7068.txt
new file mode 100644
index 0000000000..70fc24fab0
--- /dev/null
+++ b/lib/diameter/doc/standard/rfc7068.txt
@@ -0,0 +1,1627 @@
+Internet Engineering Task Force (IETF) E. McMurry
+Request for Comments: 7068 B. Campbell
+Category: Informational Oracle
+ISSN: 2070-1721 November 2013
+
+
+ Diameter Overload Control Requirements
+
+Abstract
+
+ When a Diameter server or agent becomes overloaded, it needs to be
+ able to gracefully reduce its load, typically by advising clients to
+ reduce traffic for some period of time. Otherwise, it must continue
+ to expend resources parsing and responding to Diameter messages,
+ possibly resulting in a progressively severe overload condition. The
+ existing Diameter mechanisms are not sufficient for managing overload
+ conditions. This document describes the limitations of the existing
+ mechanisms. Requirements for new overload management mechanisms are
+ also provided.
+
+Status of This Memo
+
+ This document is not an Internet Standards Track specification; it is
+ published for informational purposes.
+
+ This document is a product of the Internet Engineering Task Force
+ (IETF). It represents the consensus of the IETF community. It has
+ received public review and has been approved for publication by the
+ Internet Engineering Steering Group (IESG). Not all documents
+ approved by the IESG are a candidate for any level of Internet
+ Standard; see Section 2 of RFC 5741.
+
+ Information about the current status of this document, any errata,
+ and how to provide feedback on it may be obtained at
+ http://www.rfc-editor.org/info/rfc7068.
+
+
+Copyright Notice
+
+ Copyright (c) 2013 IETF Trust and the persons identified as the
+ document authors. All rights reserved.
+
+ This document is subject to BCP 78 and the IETF Trust's Legal
+ Provisions Relating to IETF Documents
+ (http://trustee.ietf.org/license-info) in effect on the date of
+ publication of this document. Please review these documents
+ carefully, as they describe your rights and restrictions with respect
+ to this document. Code Components extracted from this document must
+ include Simplified BSD License text as described in Section 4.e of
+ the Trust Legal Provisions and are provided without warranty as
+ described in the Simplified BSD License.
+
+
+Table of Contents
+
+ 1. Introduction ....................................................4
+ 1.1. Documentation Conventions ..................................4
+ 1.2. Causes of Overload .........................................5
+ 1.3. Effects of Overload ........................................6
+ 1.4. Overload vs. Network Congestion ............................6
+ 1.5. Diameter Applications in a Broader Network .................7
+ 2. Overload Control Scenarios ......................................7
+ 2.1. Peer-to-Peer Scenarios .....................................8
+ 2.2. Agent Scenarios ...........................................10
+ 2.3. Interconnect Scenario .....................................14
+ 3. Diameter Overload Case Studies .................................15
+ 3.1. Overload in Mobile Data Networks ..........................15
+ 3.2. 3GPP Study on Core Network Overload .......................16
+ 4. Existing Mechanisms ............................................17
+ 5. Issues with the Current Mechanisms .............................18
+ 5.1. Problems with Implicit Mechanism ..........................18
+ 5.2. Problems with Explicit Mechanisms .........................18
+ 6. Extensibility and Application Independence .....................19
+ 7. Solution Requirements ..........................................20
+ 7.1. General ...................................................20
+ 7.2. Performance ...............................................21
+ 7.3. Heterogeneous Support for Solution ........................22
+ 7.4. Granular Control ..........................................23
+ 7.5. Priority and Policy .......................................23
+ 7.6. Security ..................................................23
+ 7.7. Flexibility and Extensibility .............................24
+ 8. Security Considerations ........................................25
+ 8.1. Access Control ............................................25
+ 8.2. Denial-of-Service Attacks .................................26
+ 8.3. Replay Attacks ............................................26
+ 8.4. Man-in-the-Middle Attacks .................................26
+ 8.5. Compromised Hosts .........................................27
+ 9. References .....................................................27
+ 9.1. Normative References ......................................27
+ 9.2. Informative References ....................................27
+ Appendix A. Contributors ..........................................29
+ Appendix B. Acknowledgements ......................................29
+
+1. Introduction
+
+ A Diameter [RFC6733] node is said to be overloaded when it has
+ insufficient resources to successfully process all of the Diameter
+ requests that it receives. When a node becomes overloaded, it needs
+ to be able to gracefully reduce its load, typically by advising
+ clients to reduce traffic for some period of time. Otherwise, it
+ must continue to expend resources parsing and responding to Diameter
+ messages, possibly resulting in a progressively severe overload
+ condition. The existing mechanisms provided by Diameter are not
+ sufficient for managing overload conditions. This document describes
+ the limitations of the existing mechanisms and provides requirements
+ for new overload management mechanisms.
+
+ This document draws on the work done on SIP overload control
+ ([RFC5390], [RFC6357]) as well as on experience gained via overload
+ handling in Signaling System No. 7 (SS7) networks and studies done by
+ the Third Generation Partnership Project (3GPP) (Section 3).
+
+ Diameter is not typically an end-user protocol; rather, it is
+ generally used as one component in support of some end-user activity.
+
+ For example, a SIP server might use Diameter to authenticate and
+ authorize user access. Overload in the Diameter backend
+ infrastructure will likely impact the experience observed by the end
+ user in the SIP application.
+
+ The impact of Diameter overload on the client application (a client
+ application may use the Diameter protocol and other protocols to do
+ its job) is beyond the scope of this document.
+
+ This document presents non-normative descriptions of causes of
+ overload, along with related scenarios and studies. Finally, it
+ offers a set of normative requirements for an improved overload
+ indication mechanism.
+
+1.1. Documentation Conventions
+
+ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
+ "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
+ document are to be interpreted as defined in [RFC2119], with the
+ exception that they are not intended for interoperability of
+ implementations. Rather, they are used to describe requirements
+ towards future specifications where the interoperability requirements
+ will be defined.
+
+ The terms "client", "server", "agent", "node", "peer", "upstream",
+ and "downstream" are used as defined in [RFC6733].
+
+1.2. Causes of Overload
+
+ Overload occurs when an element, such as a Diameter server or agent,
+ has insufficient resources to successfully process all of the traffic
+ it is receiving. Resources include all of the capabilities of the
+ element used to process a request, including CPU processing, memory,
+ I/O, and disk resources. It can also include external resources such
+ as a database or DNS server, in which case the CPU, processing,
+ memory, I/O, and disk resources of those elements are effectively
+ part of the logical element processing the request.
+
+ External resources can include upstream Diameter nodes; for example,
+ a Diameter agent can become effectively overloaded if one or more
+ upstream nodes are overloaded.
+
+ A Diameter node can become overloaded due to request levels that
+ exceed its capacity, a reduction of available resources (for example,
+ a local or upstream hardware failure), or a combination of the two.
+
+ Overload can occur for many reasons, including:
+
+ Inadequate capacity: When designing Diameter networks, that is,
+ application-layer multi-node Diameter deployments, it can be very
+ difficult to predict all scenarios that may cause elevated
+ traffic. It may also be more costly to implement support for some
+ scenarios than a network operator may deem worthwhile. This
+ results in the likelihood that a Diameter network will not have
+ adequate capacity to handle all situations.
+
+ Dependency failures: A Diameter node can become overloaded because a
+ resource on which it depends has failed or become overloaded,
+ greatly reducing the logical capacity of the node. In these
+ cases, even minimal traffic might cause the node to go into
+ overload. Examples of such dependency overloads include DNS
+ servers, databases, disks, and network interfaces that have failed
+ or become overloaded.
+
+ Component failures: A Diameter node can become overloaded when it is
+ a member of a cluster of servers that each share the load of
+ traffic and one or more of the other members in the cluster fail.
+ In this case, the remaining nodes take over the work of the failed
+ nodes. Normally, capacity planning takes such failures into
+ account, and servers are typically run with enough spare capacity
+ to handle failure of another node. However, unusual failure
+ conditions can cause many nodes to fail at once. This is often
+ the case with software failures, where a bad packet or bad
+ database entry hits the same bug in a set of nodes in a cluster.
+
+ Network-initiated traffic flood: Certain access network events can
+ precipitate floods of Diameter signaling traffic. For example,
+ operational changes can trigger avalanche restarts, or frequent
+ radio overlay handovers can generate excessive authorization
+ requests. Failure of a Diameter proxy may also result in a large
+ amount of signaling as connections and sessions are reestablished.
+
+ Subscriber-initiated traffic flood: Large gatherings of subscribers
+ or events that result in many subscribers interacting with the
+ network in close time proximity can result in Diameter signaling
+ traffic floods. For example, the finale of a large fireworks show
+ could be immediately followed by many subscribers posting
+ messages, pictures, and videos concentrated on one portion of a
+ network. Subscriber devices such as smartphones may use
+ aggressive registration strategies that generate unusually high
+ Diameter traffic loads.
+
+ DoS attacks: An attacker wishing to disrupt service in the network
+ can cause a large amount of traffic to be launched at a target
+ element. This can be done from a central source of traffic or
+ through a distributed DoS attack. In all cases, the volume of
+ traffic well exceeds the capacity of the element, sending the
+ system into overload.
+
+1.3. Effects of Overload
+
+ Modern Diameter networks, composed of application-layer multi-node
+ deployments of Diameter elements, may operate at very large
+ transaction volumes. If a Diameter node becomes overloaded or, even
+ worse, fails completely, a large number of messages may be lost very
+ quickly. Even with redundant servers, many messages can be lost in
+ the time it takes for failover to complete. While a Diameter client
+ or agent should be able to retry such requests, an overloaded peer
+ may cause a sudden large increase in the number of transactions
+ needing to be retried, rapidly filling local queues or otherwise
+ contributing to local overload. Therefore, Diameter devices need to
+ be able to shed load before critical failures can occur.
+
+1.4. Overload vs. Network Congestion
+
+ This document uses the term "overload" to refer to application-layer
+ overload at Diameter nodes. This is distinct from "network
+ congestion", that is, congestion that occurs at the lower networking
+ layers that may impact the delivery of Diameter messages between
+ nodes. This document recognizes that element overload and network
+ congestion are interrelated, and that overload can contribute to
+ network congestion and vice versa.
+
+ Network congestion issues are better handled by the transport
+ protocols. Diameter uses TCP and the Stream Control Transmission
+ Protocol (SCTP), both of which include congestion management
+ features. Analysis of whether those features are sufficient for
+ transport-level congestion between Diameter nodes and of any work to
+ further mitigate network congestion is out of scope for both this
+ document and the work proposed by it.
+
+1.5. Diameter Applications in a Broader Network
+
+ Most elements using Diameter applications do not use Diameter
+ exclusively. It is important to realize that overload of an element
+ can be caused by a number of factors that may be unrelated to the
+ processing of Diameter or Diameter applications.
+
+ An element that doesn't use Diameter exclusively needs to be able to
+ signal to Diameter peers that it is experiencing overload regardless
+ of the cause of the overload, since the overload will affect that
+ element's ability to process Diameter transactions. If the element
+ communicates with protocols other than Diameter, it may also need to
+ signal the overload situation on these protocols, depending on its
+ function and the architecture of the network and application for
+ which it is providing services. Whether that is necessary can only
+ be decided within the context of that architecture and use cases.
+ This specification details the requirements for a mechanism for
+ signaling overload with Diameter; this mechanism provides Diameter
+ nodes the ability to inform their Diameter peers of overload,
+ mitigating that part of the issue. Diameter nodes may need to use
+ this, as well as other mechanisms, to solve their broader overload
+ issues. Indicating overload on protocols other than Diameter is out
+ of scope for this document and for the work proposed by it.
+
+2. Overload Control Scenarios
+
+ Several Diameter deployment scenarios exist that may impact overload
+ management. The following scenarios help motivate the requirements
+ for an overload management mechanism.
+
+ These scenarios are by no means exhaustive and are in general
+ simplified for the sake of clarity. In particular, this document
+ assumes for the sake of clarity that the client sends Diameter
+ requests to the server, and the server sends responses to the client,
+ even though Diameter supports bidirectional applications. Each
+ direction in such an application can be modeled separately.
+
+ In a large-scale deployment, many of the nodes represented in these
+ scenarios would be deployed as clusters of servers. This document
+ assumes that such a cluster is responsible for managing its own
+ internal load-balancing and overload management so that it appears as
+ a single Diameter node. That is, other Diameter nodes can treat it
+ as a single, monolithic node for the purposes of overload management.
+
+ These scenarios do not illustrate the client application. As
+ mentioned in Section 1, Diameter is not typically an end-user
+ protocol; rather, it is generally used in support of some other
+ client application. These scenarios do not consider the impact of
+ Diameter overload on the client application.
+
+2.1. Peer-to-Peer Scenarios
+
+ This section describes Diameter peer-to-peer scenarios, that is,
+ scenarios where a Diameter client talks directly with a Diameter
+ server, without the use of a Diameter agent.
+
+ Figure 1 illustrates the simplest possible Diameter relationship.
+ The client and server share a one-to-one peer-to-peer relationship.
+ If the server becomes overloaded, either because the client exceeds
+ the server's capacity or because the server's capacity is reduced due
+ to some resource dependency, the client needs to reduce the amount of
+ Diameter traffic it sends to the server. Since the client cannot
+ forward requests to another server, it must either queue requests
+ until the server recovers or itself become overloaded in the context
+ of the client application and other protocols it may also use.
+
+ +------------------+
+ | |
+ | |
+ | Server |
+ | |
+ +--------+---------+
+ |
+ |
+ +--------+---------+
+ | |
+ | |
+ | Client |
+ | |
+ +------------------+
+
+ Figure 1: Basic Peer-to-Peer Scenario
+
+ Figure 2 shows a similar scenario, except in this case the client has
+ multiple servers that can handle work for a specific realm and
+ application. If Server 1 becomes overloaded, the client can forward
+ traffic to Server 2. Assuming that Server 2 has sufficient reserve
+ capacity to handle the forwarded traffic, the client should be able
+ to continue serving client application protocol users. If Server 1
+ is approaching overload, but can still handle some number of new
+ requests, it needs to be able to instruct the client to forward a
+ subset of its traffic to Server 2.
+
+ +------------------+ +------------------+
+ | | | |
+ | | | |
+ | Server 1 | | Server 2 |
+ | | | |
+ +--------+-`.------+ +------.'+---------+
+ `. .'
+ `. .'
+ `. .'
+ `. .'
+ +-------`.'--------+
+ | |
+ | |
+ | Client |
+ | |
+ +------------------+
+
+ Figure 2: Multiple-Server Peer-to-Peer Scenario
+
+ Figure 3 illustrates a peer-to-peer scenario with multiple Diameter
+ realm and application combinations. In this example, Server 2 can
+ handle work for both applications. Each application might have
+ different resource dependencies. For example, a server might need to
+ access one database for Application A and another for Application B.
+ This creates a possibility that Server 2 could become overloaded for
+ Application A but not for Application B, in which case the client
+ would need to divert some part of its Application A requests to
+ Server 1, but the client should not divert any Application B
+ requests. This requires that Server 2 be able to distinguish between
+ applications when it indicates an overload condition to the client.
+
+ On the other hand, it's possible that the servers host many
+ applications. If Server 2 becomes overloaded for all applications,
+ it would be undesirable for it to have to notify the client
+ separately for each application. Therefore, it also needs a way to
+ indicate that it is overloaded for all possible applications.
+
+ +---------------------------------------------+
+ | Application A +----------------------+----------------------+
+ |+------------------+ | +----------------+ | +------------------+|
+ || | | | | | | ||
+ || | | | | | | ||
+ || Server 1 | | | Server 2 | | | Server 3 ||
+ || | | | | | | ||
+ |+--------+---------+ | +-------+--------+ | +-+----------------+|
+ | | | | | | |
+ +---------+-----------+----------+-----------+ | |
+ | | | | |
+ | | | | Application B |
+ | +----------+----------------+-----------------+
+ ``-.._ | |
+ `-..__ | _.-''
+ `--._ | _.-''
+ ``-._ | _.-''
+ +-----`-.-''-----+
+ | |
+ | |
+ | Client |
+ | |
+ +----------------+
+
+ Figure 3: Multiple-Application Peer-to-Peer Scenario
+
+2.2. Agent Scenarios
+
+ This section describes scenarios that include a Diameter agent, in
+ the form of either a Diameter relay or Diameter proxy. These
+ scenarios do not consider Diameter redirect agents, since they are
+ more readily modeled as end servers. The examples have been kept
+ simple deliberately, to illustrate basic concepts. Significantly
+ more complicated topologies are possible with Diameter, including
+ multiple intermediate agents in a path connected in a variety
+ of ways.
+
+ Figure 4 illustrates a simple Diameter agent scenario with a single
+ client, agent, and server. In this case, overload can occur at the
+ server, at the agent, or both. But in most cases, client behavior is
+ the same whether overload occurs at the server or at the agent. From
+ the client's perspective, server overload and agent overload are the
+ same thing.
+
+ +------------------+
+ | |
+ | |
+ | Server |
+ | |
+ +--------+---------+
+ |
+ |
+ +--------+---------+
+ | |
+ | |
+ | Agent |
+ | |
+ +--------+---------+
+ |
+ |
+ +--------+---------+
+ | |
+ | |
+ | Client |
+ | |
+ +------------------+
+
+ Figure 4: Basic Agent Scenario
+
+ Figure 5 shows an agent scenario with multiple servers. If Server 1
+ becomes overloaded but Server 2 has sufficient reserve capacity, the
+ agent may be able to transparently divert some or all Diameter
+ requests originally bound for Server 1 to Server 2.
+
+ In most cases, the client does not have detailed knowledge of the
+ Diameter topology upstream of the agent. If the agent uses dynamic
+ discovery to find eligible servers, the set of eligible servers may
+ not be enumerable from the perspective of the client. Therefore, in
+ most cases the agent needs to deal with any upstream overload issues
+ in a way that is transparent to the client. If one server notifies
+ the agent that it has become overloaded, the notification should not
+ be passed back to the client in a way that the client could
+ mistakenly perceive the agent itself as being overloaded. If the set
+ of all possible destinations upstream of the agent no longer has
+ sufficient capacity for incoming load, the agent itself becomes
+ effectively overloaded.
+
+ On the other hand, there are cases where the client needs to be able
+ to select a particular server from behind an agent. For example, if
+ a Diameter request is part of a multiple-round-trip authentication,
+ or is otherwise part of a Diameter "session", it may have a
+ Destination-Host Attribute-Value Pair (AVP) that requires that the
+ request be served by Server 1. Therefore, the agent may need to
+ inform a client that a particular upstream server is overloaded or
+ otherwise unavailable. Note that there can be many ways a server can
+ be specified, which may have different implications (e.g., by IP
+ address, by host name, etc).
+
+ +------------------+ +------------------+
+ | | | |
+ | | | |
+ | Server 1 | | Server 2 |
+ | | | |
+ +--------+-`.------+ +------.'+---------+
+ `. .'
+ `. .'
+ `. .'
+ `. .'
+ +-------`.'--------+
+ | |
+ | |
+ | Agent |
+ | |
+ +--------+---------+
+ |
+ |
+ |
+ +--------+---------+
+ | |
+ | |
+ | Client |
+ | |
+ +------------------+
+
+ Figure 5: Multiple-Server Agent Scenario
+
+ Figure 6 shows a scenario where an agent routes requests to a set of
+ servers for more than one Diameter realm and application. In this
+ scenario, if Server 1 becomes overloaded or unavailable while
+ Server 2 still has available capacity, the agent may effectively
+ operate at reduced capacity for Application A but at full capacity
+ for Application B. Therefore, the agent needs to be able to report
+ that it is overloaded for one application but not for another.
+
+ +--------------------------------------------+
+ | Application A +----------------------+----------------------+
+ |+------------------+ | +----------------+ | +------------------+|
+ || | | | | | | ||
+ || | | | | | | ||
+ || Server 1 | | | Server 2 | | | Server 3 ||
+ || | | | | | | ||
+ |+---------+--------+ | +-------+--------+ | +--+---------------+|
+ | | | | | | |
+ +----------+----------+----------+-----------+ | |
+ | | | | |
+ | | | | Application B |
+ | +----------+-----------------+----------------+
+ | | |
+ ``--.__ | _.
+ ``-.__ | __.--''
+ `--.._ | _..--'
+ +----``-+.''-----+
+ | |
+ | |
+ | Agent |
+ | |
+ +-------+--------+
+ |
+ |
+ +-------+--------+
+ | |
+ | |
+ | Client |
+ | |
+ +----------------+
+
+ Figure 6: Multiple-Application Agent Scenario
+
+2.3. Interconnect Scenario
+
+ Another scenario to consider when looking at Diameter overload is
+ that of multiple network operators using Diameter components
+ connected through an interconnect service, e.g., using IPX (IP Packet
+ eXchange). IPX [IR.34] is an Inter-Operator IP Backbone that
+ provides a roaming interconnection network between mobile operators
+ and service providers. IPX is also used to transport Diameter
+ signaling between operators [IR.88]. Figure 7 shows two network
+ operators with an interconnect network between them. There could be
+ any number of these networks between any two network operators'
+ networks.
+
+ +-------------------------------------------+
+ | Interconnect |
+ | |
+ | +--------------+ +--------------+ |
+ | | Server 3 |------| Server 4 | |
+ | +--------------+ +--------------+ |
+ | .' `. |
+ +------.-'--------------------------`.------+
+ .' `.
+ .-' `.
+ ------------.'-----+ +----`.-------------
+ +----------+ | | +----------+
+ | Server 1 | | | | Server 2 |
+ +----------+ | | +----------+
+ | |
+ Network Operator 1 | | Network Operator 2
+ -------------------+ +-------------------
+
+ Figure 7: Two-Network Interconnect Scenario
+
+ The characteristics of the information that an operator would want to
+ share over such a connection are different from the information
+ shared between components within a network operator's network. For
+ example, network operators may not want to convey topology or
+ operational information; this would in turn limit how much overload
+ and loading information can be sent. For the interconnect scenario
+ shown in Figure 7, Server 2 may want to signal overload to Server 1,
+ to affect traffic coming from Network Operator 1.
+
+ This case is distinct from those internal to a network operator's
+ network, where there may be many more elements in a more complicated
+ topology. Also, the elements in the interconnect network may not
+ support Diameter overload control, and the network operators may not
+ want the interconnect network to use overload or loading information.
+ They may only want the information to pass through the interconnect
+ network without further processing or action by the interconnect
+ network, even if the elements in the interconnect network do support
+ Diameter overload control.
+
+3. Diameter Overload Case Studies
+
+3.1. Overload in Mobile Data Networks
+
+ As the number of smartphone devices that are Third Generation (3G)
+ and Long Term Evolution (LTE) enabled continues to expand in mobile
+ networks, there have been situations where high signaling traffic
+ load led to overload events at the Diameter-based Home Location
+ Registers (HLRs) and/or Home Subscriber Servers (HSS) [TR23.843].
+ The root causes of the HLR overload events were manifold but included
+ hardware failure and procedural errors. The result was high
+ signaling traffic load on the HLR and HSS.
+
+ The 3GPP architecture [TS23.002] makes extensive use of Diameter. It
+ is used for mobility management [TS29.272], the IP Multimedia
+ Subsystem (IMS) [TS29.228], and policy and charging control
+ [TS29.212], as well as other functions. The details of the
+ architecture are out of scope for this document, but it is worth
+ noting that there are quite a few Diameter applications, some with
+ quite large amounts of Diameter signaling in deployed networks.
+
+ The 3GPP specifications do not currently address overload for
+ Diameter applications or provide a load control mechanism equivalent
+ to those provided in the more traditional SS7 elements in the Global
+ System for Mobile Communications (GSM); see [TS29.002]. The
+ capabilities specified in the 3GPP standards do not adequately
+ address the abnormal condition where excessively high signaling
+ traffic load situations are experienced.
+
+ Smartphones, which comprise an increasingly large percentage of
+ mobile devices, contribute much more heavily, relative to
+ non-smartphones, to the continuation of a registration surge, due to
+ their very aggressive registration algorithms. Smartphone behavior
+ contributes to network loading and can contribute to overload
+ conditions. The aggressive smartphone logic is designed to:
+
+ a. always have voice and data registration, and
+
+ b. constantly try to be on 3G or LTE data (and thus on 3G voice or
+ Voice over LTE (VoLTE) [IR.92]) for their added benefits.
+
+ Non-smartphones typically have logic to wait for a time period after
+ registering successfully on voice and data.
+
+ The aggressive smartphone registration is problematic in two ways:
+
+ o first, by generating excessive signaling load towards the HSS that
+ is ten times the load from a non-smartphone, and
+
+ o second, by causing continual registration attempts when a network
+ failure affects registrations through the 3G data network.
+
+3.2. 3GPP Study on Core Network Overload
+
+ A study in the 3GPP System Aspects working group 2 (SA2) on core
+ network overload has produced the technical report [TR23.843]. This
+ enumerates several causes of overload in mobile core networks,
+ including portions that are signaled using Diameter. [TR23.843] is a
+ work in progress and is not complete. However, it is useful for
+ pointing out scenarios and the general need for an overload control
+ mechanism for Diameter.
+
+ It is common for mobile networks to employ more than one radio
+ technology and to do so in an overlay fashion with multiple
+ technologies present in the same location (such as 2nd or 3rd
+ generation mobile technologies, along with LTE). This presents
+ opportunities for traffic storms when issues occur on one overlay and
+ not another as all devices that had been on the overlay with issues
+ switch. This causes a large amount of Diameter traffic as locations
+ and policies are updated.
+
+ Another scenario called out by this study is a flood of registration
+ and mobility management events caused by some element in the core
+ network failing. This flood of traffic from end nodes falls under
+ the network-initiated traffic flood category. There is likely to
+ also be traffic resulting directly from the component failure in this
+ case. A similar flood can occur when elements or components recover
+ as well.
+
+ Subscriber-initiated traffic floods are also indicated in this study
+ as an overload mechanism where a large number of mobile devices are
+ attempting to access services at the same time, such as in response
+ to an entertainment event or a catastrophic event.
+
+ While this 3GPP study is concerned with the broader effects of these
+ scenarios on wireless networks and their elements, they have
+ implications specifically for Diameter signaling. One of the goals
+ of this document is to provide guidance for a core mechanism that can
+ be used to mitigate the scenarios called out by this study.
+
+4. Existing Mechanisms
+
+ Diameter offers both implicit and explicit mechanisms for a Diameter
+ node to learn that a peer is overloaded or unreachable. The implicit
+ mechanism is simply the lack of responses to requests. If a client
+ fails to receive a response in a certain time period, it assumes that
+ the upstream peer is unavailable or is overloaded to the point of
+ effective unavailability. The watchdog mechanism [RFC3539] ensures
+ that transaction responses occur at a certain rate even when there is
+ otherwise little or no other Diameter traffic.
+
+ The explicit mechanism can involve specific protocol error responses,
+ where an agent or server tells a downstream peer that it is either
+ too busy to handle a request (DIAMETER_TOO_BUSY) or unable to route a
+ request to an upstream destination (DIAMETER_UNABLE_TO_DELIVER)
+ perhaps because that destination itself is overloaded to the point of
+ unavailability.
+
+ Another explicit mechanism, a DPR (Disconnect-Peer-Request) message,
+ can be sent with a Disconnect-Cause of BUSY. This signals the
+ sender's intent to close the transport connection and requests that
+ the client not reconnect.
+
+ Once a Diameter node learns via one of these mechanisms that an
+ upstream peer has become overloaded, it can then attempt to take
+ action to reduce the load. This usually means forwarding traffic to
+ an alternate destination, if available. If no alternate destination
+ is available, the node must either reduce the number of messages it
+ originates (in the case of a client) or inform the client to reduce
+ traffic (in the case of an agent).
+
+ Diameter requires the use of a congestion-managed transport layer,
+ currently TCP or SCTP, to mitigate network congestion. It is
+ expected that these transports manage network congestion and that
+ issues with transport (e.g., congestion propagation and window
+ management) are managed at that level. But even with a congestion-
+ managed transport, a Diameter node can become overloaded at the
+ Diameter protocol or application layers due to the causes described
+ in Section 1.2, and congestion-managed transports do not provide
+ facilities (and are at the wrong level) to handle server overload.
+ Transport-level congestion management is also not sufficient to
+ address overload in cases of multi-hop and multi-destination
+ signaling.
+
+5. Issues with the Current Mechanisms
+
+ The currently available Diameter mechanisms for indicating an
+ overload condition are not adequate to avoid service outages due to
+ overload. This inadequacy may, in turn, contribute to broader
+ impacts resulting from overload due to unresponsive Diameter nodes
+ causing application-layer or transport-layer retransmissions. In
+ particular, they do not allow a Diameter agent or server to shed load
+ as it approaches overload. At best, a node can only indicate that it
+ needs to entirely stop receiving requests, i.e., that it has
+ effectively failed. Even that is problematic due to the inability to
+ indicate durational validity on the transient errors available in the
+ base Diameter protocol. Diameter offers no mechanism to allow a node
+ to indicate different overload states for different categories of
+ messages, for example, if it is overloaded for one Diameter
+ application but not another.
+
+5.1. Problems with Implicit Mechanism
+
+ The implicit mechanism doesn't allow an agent or server to inform the
+ client of a problem until it is effectively too late to do anything
+ about it. The client does not know that it needs to take action
+ until the upstream node has effectively failed. A Diameter node has
+ no opportunity to shed load early to avoid collapse in the first
+ place.
+
+ Additionally, the implicit mechanism cannot distinguish between
+ overload of a Diameter node and network congestion. Diameter treats
+ the failure to receive an answer as a transport failure.
+
+5.2. Problems with Explicit Mechanisms
+
+ The Diameter specification is ambiguous on how a client should handle
+ receipt of a DIAMETER_TOO_BUSY response. The base specification
+ [RFC6733] indicates that the sending client should attempt to send
+ the request to a different peer. It makes no suggestion that the
+ receipt of a DIAMETER_TOO_BUSY response should affect future Diameter
+ messages in any way.
+
+ The Authentication, Authorization, and Accounting (AAA) Transport
+ Profile [RFC3539] recommends that a AAA node that receives a "Busy"
+ response failover all remaining requests to a different agent or
+ server. But while the Diameter base specification explicitly depends
+ on [RFC3539] to define transport behavior, it does not refer to
+ [RFC3539] in the description of behavior on receipt of a
+ DIAMETER_TOO_BUSY error. There's a strong likelihood that at least
+ some implementations will continue to send Diameter requests to an
+ upstream peer even after receiving a DIAMETER_TOO_BUSY error.
+
+ BCP 41 [RFC2914] describes, among other things, how end-to-end
+ application behavior can help avoid congestion collapse. In
+ particular, an application should avoid sending messages that will
+ never be delivered or processed. The DIAMETER_TOO_BUSY behavior as
+ described in the Diameter base specification fails at this, since if
+ an upstream node becomes overloaded, a client attempts each request
+ and does not discover the need to failover the request until the
+ initial attempt fails.
+
+ The situation is improved if implementations follow the [RFC3539]
+ recommendation to keep state about upstream peer overload. But even
+ then, the Diameter specification offers no guidance on how long a
+ client should wait before retrying the overloaded destination. If an
+ agent or server supports multiple realms and/or applications,
+ DIAMETER_TOO_BUSY offers no way to indicate that it is overloaded for
+ one application but not another. A DIAMETER_TOO_BUSY error can only
+ indicate overload at a "whole server" scope.
+
+ Agent processing of a DIAMETER_TOO_BUSY response is also problematic
+ as described in the base specification. DIAMETER_TOO_BUSY is defined
+ as a protocol error. If an agent receives a protocol error, it may
+ either handle it locally or forward the response back towards the
+ downstream peer. If a downstream peer receives the DIAMETER_TOO_BUSY
+ response, it may stop sending all requests to the agent for some
+ period of time, even though the agent may still be able to deliver
+ requests to other upstream peers.
+
+ DIAMETER_UNABLE_TO_DELIVER errors, or using DPR with cause code BUSY,
+ also have no mechanisms for specifying the scope or cause of the
+ failure, or the durational validity.
+
+ The issues with error responses described in [RFC6733] extend beyond
+ the particular issues for overload control and have been addressed in
+ an ad hoc fashion by various implementations. Addressing these in a
+ standard way would be a useful exercise, but it is beyond the scope
+ of this document.
+
+6. Extensibility and Application Independence
+
+ Given the variety of scenarios in which Diameter elements can be
+ deployed and the variety of roles they can fulfill with Diameter and
+ other technologies, a single algorithm for handling overload may not
+ be sufficient. For purposes of this discussion, an algorithm is
+ inclusive of behavior for control of overload but does not encompass
+ the general mechanism for transporting control information. This
+ effort cannot anticipate all possible future scenarios and roles.
+ Extensibility, particularly of algorithms used to deal with overload,
+ will be important to cover these cases.
+
+ Similarly, the scopes to which overload information may apply may
+ include cases that have not yet been considered. Extensibility in
+ this area will also be important.
+
+ The basic mechanism is intended to be application independent, that
+ is, a Diameter node can use it across any existing and future
+ Diameter applications and expect reasonable results. Certain
+ Diameter applications might, however, benefit from application-
+ specific behavior over and above the mechanism's defaults. For
+ example, an application specification might specify relative
+ priorities of messages or selection of a specific overload control
+ algorithm.
+
+7. Solution Requirements
+
+ This section proposes requirements for an improved mechanism to
+ control Diameter overload, with the goals of addressing the issues
+ described in Section 5 and supporting the scenarios described in
+ Section 2. These requirements are stated primarily in terms of
+ individual node behavior to inform the design of the improved
+ mechanism; solution designers should keep in mind that the overall
+ goal is improved overall system behavior across all the nodes
+ involved, not just improved behavior from specific individual nodes.
+
+7.1. General
+
+ REQ 1: The solution MUST provide a communication method for Diameter
+ nodes to exchange load and overload information.
+
+ REQ 2: The solution MUST allow Diameter nodes to support overload
+ control regardless of which Diameter applications they
+ support. Diameter clients and agents must be able to use the
+ received load and overload information to support graceful
+ behavior during an overload condition. Graceful behavior
+ under overload conditions is best described by REQ 3.
+
+ REQ 3: The solution MUST limit the impact of overload on the overall
+ useful throughput of a Diameter server, even when the
+ incoming load on the network is far in excess of its
+ capacity. The overall useful throughput under load is the
+ ultimate measure of the value of a solution.
+
+ REQ 4: Diameter allows requests to be sent from either side of a
+ connection, and either side of a connection may have need to
+ provide its overload status. The solution MUST allow each
+ side of a connection to independently inform the other of its
+ overload status.
+
+ REQ 5: Diameter allows nodes to determine their peers via dynamic
+ discovery or manual configuration. The solution MUST work
+ consistently without regard to how peers are determined.
+
+ REQ 6: The solution designers SHOULD seek to minimize the amount of
+ new configuration required in order to work. For example, it
+ is better to allow peers to advertise or negotiate support
+ for the solution, rather than to require that this knowledge
+ be configured at each node.
+
+7.2. Performance
+
+ REQ 7: The solution and any associated default algorithm(s) MUST
+ ensure that the system remains stable. At some point after
+ an overload condition has ended, the solution MUST enable
+ capacity to stabilize and become equal to what it would be in
+ the absence of an overload condition. Note that this also
+ requires that the solution MUST allow nodes to shed load
+ without introducing non-converging oscillations during or
+ after an overload condition.
+
+ REQ 8: Supporting nodes MUST be able to distinguish current overload
+ information from stale information.
+
+ REQ 9: The solution MUST function across fully loaded as well as
+ quiescent transport connections. This is partially derived
+ from the requirement for stability in REQ 7.
+
+ REQ 10: Consumers of overload information MUST be able to determine
+ when the overload condition improves or ends.
+
+ REQ 11: The solution MUST be able to operate in networks of different
+ sizes.
+
+ REQ 12: When a single network node fails, goes into overload, or
+ suffers from reduced processing capacity, the solution MUST
+ make it possible to limit the impact of the affected node on
+ other nodes in the network. This helps to prevent a small-
+ scale failure from becoming a widespread outage.
+
+ REQ 13: The solution MUST NOT introduce substantial additional work
+ for a node in an overloaded state. For example, a
+ requirement for an overloaded node to send overload
+ information every time it received a new request would
+ introduce substantial work.
+
+
+
+
+
+
+McMurry & Campbell Informational [Page 21]
+
+RFC 7068 Diameter Overload Control Requirements November 2013
+
+
+ REQ 14: Some scenarios that result in overload involve a rapid
+ increase of traffic with little time between normal levels
+ and levels that induce overload. The solution SHOULD provide
+ for rapid feedback when traffic levels increase.
+
+ REQ 15: The solution MUST NOT interfere with the congestion control
+ mechanisms of underlying transport protocols. For example, a
+ solution that opened additional TCP connections when the
+ network is congested would reduce the effectiveness of the
+ underlying congestion control mechanisms.
+
+7.3. Heterogeneous Support for Solution
+
+ REQ 16: The solution is likely to be deployed incrementally. The
+ solution MUST support a mixed environment where some, but not
+ all, nodes implement it.
+
+ REQ 17: In a mixed environment with nodes that support the solution
+ and nodes that do not, the solution MUST NOT result in
+ materially less useful throughput during overload than would
+ have resulted if the solution were not present. It SHOULD
+ result in less severe overload in this environment.
+
+ REQ 18: In a mixed environment of nodes that support the solution and
+ nodes that do not, the solution MUST NOT preclude elements
+ that support overload control from treating elements that do
+ not support overload control in an equitable fashion relative
+ to those that do. Users and operators of nodes that do not
+ support the solution MUST NOT unfairly benefit from the
+ solution. The solution specification SHOULD provide guidance
+ to implementors for dealing with elements not supporting
+ overload control.
+
+ REQ 19: It MUST be possible to use the solution between nodes in
+ different realms and in different administrative domains.
+
+ REQ 20: Any explicit overload indication MUST be clearly
+ distinguishable from other errors reported via Diameter.
+
+ REQ 21: In cases where a network node fails, is so overloaded that it
+ cannot process messages, or cannot communicate due to a
+ network failure, it may not be able to provide explicit
+ indications of the nature of the failure or its levels of
+ overload. The solution MUST result in at least as much
+ useful throughput as would have resulted if the solution were
+ not in place.
+
+
+
+
+
+McMurry & Campbell Informational [Page 22]
+
+RFC 7068 Diameter Overload Control Requirements November 2013
+
+
+7.4. Granular Control
+
+ REQ 22: The solution MUST provide a way for a node to throttle the
+ amount of traffic it receives from a peer node. This
+ throttling SHOULD be graded so that it can be applied
+ gradually as offered load increases. Overload is not a
+ binary state; there may be degrees of overload.
+
+ REQ 23: The solution MUST provide sufficient information to enable a
+ load-balancing node to divert messages that are rejected or
+ otherwise throttled by an overloaded upstream node to other
+ upstream nodes that are the most likely to have sufficient
+ capacity to process them.
+
+ REQ 24: The solution MUST provide a mechanism for indicating load
+ levels, even when not in an overload condition, to assist
+ nodes in making decisions to prevent overload conditions from
+ occurring.
+
+7.5. Priority and Policy
+
+ REQ 25: The base specification for the solution SHOULD offer general
+ guidance on which message types might be desirable to send or
+ process over others during times of overload, based on
+ application-specific considerations. For example, it may be
+ more beneficial to process messages for existing sessions
+ ahead of new sessions. Some networks may have a requirement
+ to give priority to requests associated with emergency
+ sessions. Any normative or otherwise detailed definition of
+ the relative priorities of message types during an overload
+ condition will be the responsibility of the application
+ specification.
+
+ REQ 26: The solution MUST NOT prevent a node from prioritizing
+ requests based on any local policy, so that certain requests
+ are given preferential treatment, given additional
+ retransmission, not throttled, or processed ahead of others.
+
+7.6. Security
+
+ REQ 27: The solution MUST NOT provide new vulnerabilities to
+ malicious attack or increase the severity of any existing
+ vulnerabilities. This includes vulnerabilities to DoS and
+ DDoS attacks as well as replay and man-in-the-middle attacks.
+ Note that the Diameter base specification [RFC6733] lacks
+ end-to-end security, and this must be considered (see
+ Security Considerations in this document (Section 8)). Note
+
+
+
+
+McMurry & Campbell Informational [Page 23]
+
+RFC 7068 Diameter Overload Control Requirements November 2013
+
+
+ that this requirement was expressed at a high level so as to
+ not preclude any particular solution.  It is expected that
+ the solution will address this in more detail.
+
+ REQ 28: The solution MUST NOT depend on being deployed in
+ environments where all Diameter nodes are completely trusted.
+ It SHOULD operate as effectively as possible in environments
+ where other nodes are malicious; this includes preventing
+ malicious nodes from obtaining more than a fair share of
+ service. Note that this does not imply any responsibility on
+ the solution to detect, or take countermeasures against,
+ malicious nodes.
+
+ REQ 29: It MUST be possible for a supporting node to make
+ authorization decisions about what information will be sent
+ to peer nodes based on the identity of those nodes. This
+ allows a domain administrator who considers the load of their
+ nodes to be sensitive information to restrict access to that
+ information. Of course, in such cases, there is no
+ expectation that the solution itself will help prevent
+ overload from that peer node.
+
+ REQ 30: The solution MUST NOT interfere with any Diameter-compliant
+ method that a node may use to protect itself from overload
+ from non-supporting nodes or from denial-of-service attacks.
+
+7.7. Flexibility and Extensibility
+
+ REQ 31: There are multiple situations where a Diameter node may be
+ overloaded for some purposes but not others. For example,
+ this can happen to an agent or server that supports multiple
+ applications, or when a server depends on multiple external
+ resources, some of which may become overloaded while others
+ are fully available. The solution MUST allow Diameter nodes
+ to indicate overload with sufficient granularity to allow
+ clients to take action based on the overloaded resources
+ without unreasonably forcing available capacity to go unused.
+ The solution MUST support specification of overload
+ information with granularities of at least "Diameter node",
+ "realm", and "Diameter application" and MUST allow
+ extensibility for others to be added in the future.
+
+ REQ 32: The solution MUST provide a method for extending the
+ information communicated and the algorithms used for overload
+ control.
+
+
+
+
+
+
+McMurry & Campbell Informational [Page 24]
+
+RFC 7068 Diameter Overload Control Requirements November 2013
+
+
+ REQ 33: The solution MUST provide a default algorithm that is
+ mandatory to implement.
+
+ REQ 34: The solution SHOULD provide a method for exchanging overload
+ and load information between elements that are connected by
+ intermediaries that do not support the solution.
+
+8. Security Considerations
+
+ A Diameter overload control mechanism is primarily concerned with the
+ load-related and overload-related behavior of nodes in a Diameter
+ network, and the information used to affect that behavior. Load and
+ overload information is shared between nodes and directly affects the
+ behavior, and thus the information is potentially vulnerable to a
+ number of methods of attack.
+
+ Load and overload information may also be sensitive from both
+ business and network protection viewpoints. Operators of Diameter
+ equipment want to control the visibility of load and overload
+ information to keep it from being used for competitive intelligence
+ or for targeting attacks. It is also important that the Diameter
+ overload control mechanism not introduce any way in which any other
+ information carried by Diameter is sent inappropriately.
+
+ Note that the Diameter base specification [RFC6733] lacks end-to-end
+ security, making it difficult for non-adjacent nodes to verify the
+ authenticity and ownership of load and overload information.
+ Authentication of load and overload information helps to alleviate
+ several of the security issues listed in this section.
+
+ This document includes requirements intended to mitigate the effects
+ of attacks and to protect the information used by the mechanism.
+ This section discusses potential security considerations for overload
+ control solutions. This discussion provides the motivation for
+ several normative requirements described in Section 7. The
+ discussion includes specific references to the normative requirements
+ that apply for each issue.
+
+8.1. Access Control
+
+ To control the visibility of load and overload information, sending
+ should be subject to some form of authentication and authorization of
+ the receiver. It is also important to the receivers that they are
+ confident the load and overload information they receive is from a
+ legitimate source. REQ 28 requires that the solution work without
+ assuming that all Diameter nodes in a network are trusted for the
+ purposes of exchanging overload and load information. REQ 29
+ requires that the solution let nodes restrict unauthorized parties
+
+
+
+McMurry & Campbell Informational [Page 25]
+
+RFC 7068 Diameter Overload Control Requirements November 2013
+
+
+ from seeing overload information. Note that this implies a certain
+ amount of configurability on the nodes supporting the Diameter
+ overload control mechanism.
+
+8.2. Denial-of-Service Attacks
+
+ An overload control mechanism provides a very attractive target for
+ denial-of-service attacks. A small number of messages may effect a
+ large service disruption by falsely reporting overload conditions.
+ Alternately, attacking servers nearing, or in, overload may also be
+ facilitated by disrupting their overload indications, potentially
+ preventing them from mitigating their overload condition.
+
+ A design goal for the Diameter overload control mechanism is to
+ minimize or eliminate the possibility of using the mechanism for this
+ type of attack. More strongly, REQ 27 forbids the solution from
+ introducing new vulnerabilities to malicious attack. Additionally,
+ REQ 30 stipulates that the solution not interfere with other
+ mechanisms used for protection against denial-of-service attacks.
+
+ As the intent of some denial-of-service attacks is to induce overload
+ conditions, an effective overload control mechanism should help to
+ mitigate the effects of such an attack.
+
+8.3. Replay Attacks
+
+ An attacker that has managed to obtain some messages from the
+ overload control mechanism may attempt to affect the behavior of
+ nodes supporting the mechanism by sending those messages at
+ potentially inopportune times. In addition to time shifting, replay
+ attacks may send messages to other nodes as well (target shifting).
+
+ A design goal for the Diameter overload control solution is to
+ minimize or eliminate the possibility of causing disruption by using
+ a replay attack on the Diameter overload control mechanism.
+ (Allowing a replay attack using the overload control solution would
+ violate REQ 27.)
+
+8.4. Man-in-the-Middle Attacks
+
+ By inserting themselves between two nodes supporting the Diameter
+ overload control mechanism, an attacker may potentially both access
+ and alter the information sent between those nodes. This can be used
+ for information gathering for business intelligence and attack
+ targeting, as well as direct attacks.
+
+
+
+
+
+
+McMurry & Campbell Informational [Page 26]
+
+RFC 7068 Diameter Overload Control Requirements November 2013
+
+
+ REQs 27, 28, and 29 imply a need to prevent man-in-the-middle attacks
+ on the overload control solution. A transport using Transport Layer
+ Security (TLS) and/or IPsec may be desirable for this purpose.
+
+8.5. Compromised Hosts
+
+ A compromised host that supports the Diameter overload control
+ mechanism could be used for information gathering as well as for
+ sending malicious information to any Diameter node that would
+ normally accept information from it. While it is beyond the scope of
+ the Diameter overload control mechanism to mitigate any operational
+ interruption to the compromised host, REQs 28 and 29 imply a need to
+ minimize the impact that a compromised host can have on other nodes
+ through the use of the Diameter overload control mechanism. Of
+ course, a compromised host could be used to cause damage in a number
+ of other ways. This is out of scope for a Diameter overload control
+ mechanism.
+
+9. References
+
+9.1. Normative References
+
+ [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate
+ Requirement Levels", BCP 14, RFC 2119, March 1997.
+
+ [RFC6733] Fajardo, V., Arkko, J., Loughney, J., and G. Zorn,
+ "Diameter Base Protocol", RFC 6733, October 2012.
+
+ [RFC2914] Floyd, S., "Congestion Control Principles", BCP 41,
+ RFC 2914, September 2000.
+
+ [RFC3539] Aboba, B. and J. Wood, "Authentication, Authorization and
+ Accounting (AAA) Transport Profile", RFC 3539, June 2003.
+
+9.2. Informative References
+
+ [RFC5390] Rosenberg, J., "Requirements for Management of Overload
+ in the Session Initiation Protocol", RFC 5390,
+ December 2008.
+
+ [RFC6357] Hilt, V., Noel, E., Shen, C., and A. Abdelal, "Design
+ Considerations for Session Initiation Protocol (SIP)
+ Overload Control", RFC 6357, August 2011.
+
+ [TR23.843] 3GPP, "Study on Core Network (CN) overload solutions",
+ TR 23.843 1.2.0, Work in Progress, October 2013.
+
+
+
+
+
+McMurry & Campbell Informational [Page 27]
+
+RFC 7068 Diameter Overload Control Requirements November 2013
+
+
+ [IR.34] GSMA, "Inter-Service Provider IP Backbone Guidelines",
+ IR 34 9.1, May 2013.
+
+ [IR.88] GSMA, "LTE Roaming Guidelines", IR 88 9.0, January 2013.
+
+ [IR.92] GSMA, "IMS Profile for Voice and SMS", IR 92 7.0,
+ March 2013.
+
+ [TS23.002] 3GPP, "Network Architecture", TS 23.002 12.2.0,
+ June 2013.
+
+ [TS29.272] 3GPP, "Evolved Packet System (EPS); Mobility Management
+ Entity (MME) and Serving GPRS Support Node (SGSN) related
+ interfaces based on Diameter protocol", TS 29.272 12.2.0,
+ September 2013.
+
+ [TS29.212] 3GPP, "Policy and Charging Control (PCC) over Gx/Sd
+ reference point", TS 29.212 12.2.0, September 2013.
+
+ [TS29.228] 3GPP, "IP Multimedia (IM) Subsystem Cx and Dx interfaces;
+ Signalling flows and message contents", TS 29.228 12.0.0,
+ September 2013.
+
+ [TS29.002] 3GPP, "Mobile Application Part (MAP) specification",
+ TS 29.002 12.2.0, September 2013.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+McMurry & Campbell Informational [Page 28]
+
+RFC 7068 Diameter Overload Control Requirements November 2013
+
+
+Appendix A. Contributors
+
+ Significant contributions to this document were made by Adam Roach
+ and Eric Noel.
+
+Appendix B. Acknowledgements
+
+ Review of, and contributions to, this specification by Martin Dolly,
+ Carolyn Johnson, Jianrong Wang, Imtiaz Shaikh, Jouni Korhonen, Robert
+ Sparks, Dieter Jacobsohn, Janet Gunn, Jean-Jacques Trottin, Laurent
+ Thiebaut, Andrew Booth, and Lionel Morand were most appreciated. We
+ would like to thank them for their time and expertise.
+
+Authors' Addresses
+
+ Eric McMurry
+ Oracle
+ 17210 Campbell Rd.
+ Suite 250
+ Dallas, TX 75252
+ US
+
+ EMail: emcmurry@computer.org
+
+
+ Ben Campbell
+ Oracle
+ 17210 Campbell Rd.
+ Suite 250
+ Dallas, TX 75252
+ US
+
+ EMail: ben@nostrum.com
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+McMurry & Campbell Informational [Page 29]
+
diff --git a/lib/diameter/doc/standard/rfc7075.txt b/lib/diameter/doc/standard/rfc7075.txt
new file mode 100644
index 0000000000..f5fd905e72
--- /dev/null
+++ b/lib/diameter/doc/standard/rfc7075.txt
@@ -0,0 +1,563 @@
+
+
+
+
+
+
+Internet Engineering Task Force (IETF) T. Tsou
+Request for Comments: 7075 Huawei Technologies (USA)
+Updates: 6733 R. Hao
+Category: Standards Track Comcast Cable
+ISSN: 2070-1721 T. Taylor, Ed.
+ Huawei Technologies
+ November 2013
+
+
+ Realm-Based Redirection In Diameter
+
+Abstract
+
+ The Diameter protocol includes a capability for message redirection,
+ controlled by an application-independent "redirect agent". In some
+ circumstances, an operator may wish to redirect messages to an
+ alternate domain without specifying individual hosts. This document
+ specifies an application-specific mechanism by which a Diameter
+ server or proxy (node) can perform such a redirection when the
+ Straightforward-Naming Authority Pointer (S-NAPTR) is not used for
+ dynamic peer discovery. A node performing this new function is
+ referred to as a "Realm-based Redirect Server".
+
+ This memo updates Sections 6.13 and 6.14 of RFC 6733 with respect to
+ the usage of the Redirect-Host-Usage and Redirect-Max-Cache-Time
+ Attribute-Value Pairs (AVPs).
+
+Status of This Memo
+
+ This is an Internet Standards Track document.
+
+ This document is a product of the Internet Engineering Task Force
+ (IETF). It represents the consensus of the IETF community. It has
+ received public review and has been approved for publication by the
+ Internet Engineering Steering Group (IESG). Further information on
+ Internet Standards is available in Section 2 of RFC 5741.
+
+ Information about the current status of this document, any errata,
+ and how to provide feedback on it may be obtained at
+ http://www.rfc-editor.org/info/rfc7075.
+
+
+
+
+
+
+
+
+
+
+
+Tsou, et al. Standards Track [Page 1]
+
+RFC 7075 Realm-Based Redirection In Diameter November 2013
+
+
+Copyright Notice
+
+ Copyright (c) 2013 IETF Trust and the persons identified as the
+ document authors. All rights reserved.
+
+ This document is subject to BCP 78 and the IETF Trust's Legal
+ Provisions Relating to IETF Documents
+ (http://trustee.ietf.org/license-info) in effect on the date of
+ publication of this document. Please review these documents
+ carefully, as they describe your rights and restrictions with respect
+ to this document. Code Components extracted from this document must
+ include Simplified BSD License text as described in Section 4.e of
+ the Trust Legal Provisions and are provided without warranty as
+ described in the Simplified BSD License.
+
+Table of Contents
+
+ 1. Introduction . . . . . . . . . . . . . . . . . . . . . . . . 3
+ 1.1. Terminology . . . . . . . . . . . . . . . . . . . . . . . 3
+ 2. Support of Realm-Based Redirection Within Applications . . . 4
+ 3. Realm-Based Redirection . . . . . . . . . . . . . . . . . . . 5
+ 3.1. Configuration of the Realm-Based Redirect Server . . . . 5
+ 3.2. Behavior of Diameter Nodes . . . . . . . . . . . . . . . 6
+ 3.2.1. Behavior at the Realm-Based Redirect Server . . . . . 6
+ 3.2.2. Proxy Behavior . . . . . . . . . . . . . . . . . . . 6
+ 3.2.3. Client Behavior . . . . . . . . . . . . . . . . . . . 7
+ 3.3. The Redirect-Realm AVP . . . . . . . . . . . . . . . . . 7
+ 3.4. DIAMETER_REALM_REDIRECT_INDICATION Protocol Error Code . 7
+ 4. Security Considerations . . . . . . . . . . . . . . . . . . . 8
+ 5. IANA Considerations . . . . . . . . . . . . . . . . . . . . . 8
+ 6. Acknowledgements . . . . . . . . . . . . . . . . . . . . . . 9
+ 7. References . . . . . . . . . . . . . . . . . . . . . . . . . 9
+ 7.1. Normative References . . . . . . . . . . . . . . . . . . 9
+ 7.2. Informative References . . . . . . . . . . . . . . . . . 9
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Tsou, et al. Standards Track [Page 2]
+
+RFC 7075 Realm-Based Redirection In Diameter November 2013
+
+
+1. Introduction
+
+ The Diameter base protocol [RFC6733] specifies a basic redirection
+ service provided by a redirect agent. The redirect indication
+ returned by the redirect agent is described in Section 6.1.8 and
+ Sections 6.12 through 6.14 of [RFC6733]. It provides one or more
+ individual hosts to the message sender as the destination of the
+ redirected message.
+
+ However, consider the case where an operator has offered a specific
+ service but no longer wishes to do so. The operator has arranged for
+ an alternative domain to provide the service. To aid in the
+ transition to the new arrangement, the original operator maintains a
+ redirect server to indicate to the message sender the alternative
+ domain to which the request should be sent.  However,
+ the original operator should not have to configure the redirect
+ server with a list of hosts to contact in the alternative operator's
+ domain; the original operator should simply be able to provide
+ redirect indications to the domain as a whole.
+
+1.1. Terminology
+
+ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
+ "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
+ document are to be interpreted as described in [RFC2119].
+
+ Within this specification, the term "realm-based redirection" is used
+ to refer to a mode of operation where a realm, rather than an
+ individual host, is returned as the redirect indication.
+
+ The term "Realm-based Redirect Server" denotes the Diameter node
+ (Diameter server or proxy) that returns the realm-based redirection.
+ The behavior of the Realm-based Redirect Server itself is a slight
+ modification to the behavior of a basic redirect agent as described
+ in Section 6.1.8 of [RFC6733].
+
+ The use of a number of terms in this document is consistent with the
+ usage in [RFC6733]: "Diameter client", "Diameter node", "Diameter
+ peer", "Diameter server", "proxy", "realm" or "domain", "redirect
+ agent", and "session" as defined in Section 1.2, and "application" as
+ defined implicitly by Sections 1.3.4, 2.3, and 2.4.
+
+
+
+
+
+
+
+
+
+
+Tsou, et al. Standards Track [Page 3]
+
+RFC 7075 Realm-Based Redirection In Diameter November 2013
+
+
+2. Support of Realm-Based Redirection Within Applications
+
+ The DNS-based dynamic peer discovery mechanism defined in the
+ Diameter base protocol [RFC6733] provides a simple mechanism for
+ realm-based redirection using the S-NAPTR DDDS application [RFC3958].
+ When S-NAPTR is used for peer discovery, redirection of Diameter
+ requests from the original realm to a new realm may be performed by
+ updating the existing NAPTR resource records (RRs) for the original
+ realm as follows: the NAPTR RR for the desired application(s) and
+ supported application protocol(s) provided by the new realm will have
+ an empty FLAG field and the REPLACEMENT field will contain the new
+ realm to use for the next DNS lookup. The new realm can be
+ arbitrary; the restriction in [RFC6733] that the NAPTR replacement
+ field match the domain of the original query does not apply for
+ realm-based redirect purposes.
+
+ However, the use of DNS-based dynamic peer discovery is optional for
+ Diameter implementations. For deployments that do not make use of
+ S-NAPTR peer discovery, support of realm-based redirection needs to
+ be specified as part of the functionality supported by a Diameter
+ application. In this way, support of the considered Diameter
+ application (discovered during capabilities exchange phase as defined
+ in Diameter base protocol [RFC6733]) indicates implicit support of
+ the realm-based redirection mechanism. A new application
+ specification can incorporate the mechanism specified here by making
+ it mandatory to implement for the application and referencing this
+ specification normatively.
+
+ The result of making realm-based redirection an application-specific
+ behavior is that it cannot be performed by a redirect agent as
+ defined in [RFC6733], but MUST be performed instead by an
+ application-aware Diameter node (Diameter server or proxy) (hereafter
+ called a "Realm-based Redirect Server").
+
+ An application can specify that realm-based redirection operates only
+ on complete sessions beginning with the initial message or on every
+ message within the application, even if earlier messages of the same
+ session were not redirected. This distinction matters only when
+ realm-based redirection is first initiated. In the former case,
+ existing sessions will not be disrupted by the deployment of realm-
+ based redirection. In the latter case, existing sessions will be
+ disrupted if they are stateful.
+
+
+
+
+
+
+
+
+
+Tsou, et al. Standards Track [Page 4]
+
+RFC 7075 Realm-Based Redirection In Diameter November 2013
+
+
+3. Realm-Based Redirection
+
+ This section specifies an extension of the Diameter base protocol
+ [RFC6733] to achieve realm-based redirection. The elements of this
+ solution are:
+
+ o a new result code, DIAMETER_REALM_REDIRECT_INDICATION (3011);
+
+ o a new attribute-value pair (AVP), Redirect-Realm (620); and
+
+ o associated behavior at Diameter nodes implementing this
+ specification.
+
+ This behavior includes the optional use of the Redirect-Host-Usage
+ and Redirect-Max-Cache-Time AVPs. In this document, these AVPs apply
+ to the peer discovered by a node acting on the redirect server's
+ response, an extension to their normal usage as described in Sections
+ 6.13 and 6.14 of [RFC6733].
+
+ Section 3.2.2 and Section 3.2.3 describe how a proxy or client may
+ update its routing table for the application and initial realm as a
+ result of selecting a peer in the new realm after realm-based
+ redirection. Note that as a result, the proxy or client will
+ automatically route subsequent requests for that application to the
+ new realm (with the possible exception of requests within sessions
+ already established with the initial realm) until the cached routing
+ entry expires. This should be borne in mind if the rerouting is
+ intended to be temporary.
+
+3.1. Configuration of the Realm-Based Redirect Server
+
+ A Diameter node (Diameter server or proxy) acting as a Realm-based
+ Redirect Server MUST be configured as follows to execute realm-based
+ redirection:
+
+ o configured with an application that incorporates realm-based
+ redirection;
+
+ o the Local Action field of the routing table described in
+ Section 2.7 of [RFC6733] is set to LOCAL;
+
+ o an application-specific field is set to indicate that the required
+ local action is to perform realm-based redirection;
+
+ o an associated application-specific field is configured with the
+ identities of one or more realms to which the request should be
+ redirected.
+
+
+
+
+Tsou, et al. Standards Track [Page 5]
+
+RFC 7075 Realm-Based Redirection In Diameter November 2013
+
+
+3.2. Behavior of Diameter Nodes
+
+3.2.1. Behavior at the Realm-Based Redirect Server
+
+ As mentioned in Section 2, an application can specify that realm-
+ based redirection operates only on complete sessions beginning with
+ the initial message (i.e., to prevent disruption of established
+ sessions) or on every message within the application, even if earlier
+ messages of the same session were not redirected.
+
+ If a Realm-based Redirect Server configured as described in
+ Section 3.1 receives a request to which realm-based redirection
+ applies, the Realm-based Redirect Server MUST reply with an answer
+ message with the 'E' bit set, while maintaining the Hop-by-Hop
+ Identifier in the header. The Realm-based Redirect Server MUST
+ include the Result-Code AVP set to
+ DIAMETER_REALM_REDIRECT_INDICATION. The Realm-based Redirect Server
+ MUST also include the alternate realm identifier(s) with which it has
+ been configured, each in a separate Redirect-Realm AVP instance.
+
+ The Realm-based Redirect Server MAY include a copy of the Redirect-
+ Host-Usage AVP, which SHOULD be set to REALM_AND_APPLICATION. If
+ this AVP is added, the Redirect-Max-Cache-Time AVP MUST also be
+ included. Note that these AVPs apply to the peer discovered by a
+ node acting on the Realm-based Redirect Server's response as
+ described in the next section. This is an extension of their normal
+ usage as described by Sections 6.13 and 6.14 of [RFC6733].
+
+ Realm-based redirection MAY be applied even if a Destination-Host
+ AVP is present in the request, depending on the operator-based
+ policy.
+
+3.2.2. Proxy Behavior
+
+ A proxy conforming to this specification that receives an answer
+ message with the Result-Code AVP set to
+ DIAMETER_REALM_REDIRECT_INDICATION MUST attempt to reroute the
+ original request to a server in a realm identified by a Redirect-
+ Realm AVP instance in the answer message, and if it fails MUST
+ forward the indication toward the client. To reroute the request, it
+ MUST take the following actions:
+
+ 1. Select a specific realm from amongst those identified in
+ instances of the Redirect-Realm AVP in the answer message.
+
+ 2. If successful, locate and establish a route to a peer in the
+ realm given by the Redirect-Realm AVP, using normal discovery
+ procedures as described in Section 5.2 of [RFC6733].
+
+
+
+Tsou, et al. Standards Track [Page 6]
+
+RFC 7075 Realm-Based Redirection In Diameter November 2013
+
+
+ 3. If again successful:
+
+ A. update its cache of routing entries for the realm and
+ application to which the original request was directed,
+ taking into account the Redirect-Host-Usage and Redirect-Max-
+ Cache-Time AVPs, if present in the answer.
+
+ B. Remove the Destination-Host (if present) and Destination-
+ Realm AVPs from the original request and add a new
+ Destination-Realm AVP containing the realm selected in the
+ initial step.
+
+ C. Forward the modified request.
+
+ 4. If either of the preceding steps 2-3 fail and additional realms
+ have been identified in the original answer, select another
+ instance of the Redirect-Realm AVP in that answer and repeat
+ steps 2-3 for the realm that it identifies.
+
+3.2.3. Client Behavior
+
+ A client conforming to this specification MUST be prepared to receive
+ either an answer message containing a Result-Code AVP set to
+ DIAMETER_REALM_REDIRECT_INDICATION, or, as the result of proxy
+ action, some other result from a realm differing from the one to
+ which it sent the original request. In the case where it receives
+ DIAMETER_REALM_REDIRECT_INDICATION, the client SHOULD follow the same
+ steps prescribed in the previous section for a proxy, in order to
+ both update its routing table and obtain service for the original
+ request.
+
+3.3. The Redirect-Realm AVP
+
+ The Redirect-Realm AVP (620) is of type DiameterIdentity. It
+ specifies a realm to which a node receiving a redirect indication
+ containing the result code value DIAMETER_REALM_REDIRECT_INDICATION
+ and the Redirect-Realm AVP SHOULD route the original request.
+
+3.4. DIAMETER_REALM_REDIRECT_INDICATION Protocol Error Code
+
+ The DIAMETER_REALM_REDIRECT_INDICATION (3011) Protocol error code
+ indicates that a server has determined that the request within an
+ application supporting realm-based redirection could not be satisfied
+ locally, and the initiator of the request SHOULD direct the request
+ directly to a peer within a realm that has been identified in the
+ response. When set, the Redirect-Realm AVP MUST be present.
+
+
+
+
+
+Tsou, et al. Standards Track [Page 7]
+
+RFC 7075 Realm-Based Redirection In Diameter November 2013
+
+
+4. Security Considerations
+
+ The general recommendations given in Section 13 of the Diameter base
+ protocol [RFC6733] apply. Specific security recommendations related
+ to the realm-based redirection defined in this document are described
+ below.
+
+ Realm-based redirection implies a change in the business relationship
+ between organizations. Before redirecting a request towards a realm
+ different from the initial realm, the client or proxy MUST ensure
+ that the authorization checks have been performed at each connection
+ along the path toward the realm identified in the realm-based
+ redirect indication. Details on Diameter authorization path set-up
+ are given in Section 2.9 of [RFC6733]. Section 13 of [RFC6733]
+ provides recommendations on how to authenticate and secure each peer-
+ to-peer connection (using TLS, DTLS, or IPsec) along the way, thus
+ permitting the necessary hop-by-hop authorization checks.
+
+ Although it is assumed that the administrative domains are secure, a
+ compromised Diameter node acting as a Realm-based Redirect Server
+ would be able to redirect a large number of Diameter requests towards
+ a victim domain that would then be flooded with undesired Diameter
+ requests. Such an attack is nevertheless discouraged by the use of
+ secure Diameter peer-to-peer connections and authorization checks,
+ since these would enable a potential victim domain to discover from
+ where an attack is coming. That in itself, however, does not prevent
+ such a DoS attack.
+
+ Because realm-based redirection defined in this document implies that
+ the Destination-Realm AVP in a client-initiated request can be
+ changed by a Diameter proxy in the path between the client and the
+ server, any cryptographic algorithm that would use the Destination-
+ Realm AVP as input to the calculation performed by the client and the
+ server would be broken by this form of redirection. Application
+ specifications that would rely on such cryptographic algorithms
+ SHOULD NOT incorporate this realm-based redirection.
+
+5. IANA Considerations
+
+ This specification allocates a new AVP code Redirect-Realm (620) in
+ the "AVP Codes" registry under "Authentication, Authorization, and
+ Accounting (AAA) Parameters".
+
+ This specification allocates a new Result-Code value
+ DIAMETER_REALM_REDIRECT_INDICATION (3011) in the "Result-Code AVP
+ Values (code 268) - Protocol Errors" registry under "Authentication,
+ Authorization, and Accounting (AAA) Parameters".
+
+
+
+
+Tsou, et al. Standards Track [Page 8]
+
+RFC 7075 Realm-Based Redirection In Diameter November 2013
+
+
+6. Acknowledgements
+
+ Glen Zorn, Sebastien Decugis, Wolfgang Steigerwald, Mark Jones,
+ Victor Fajardo, Jouni Korhonen, Avi Lior, and Lionel Morand
+ contributed comments that helped to shape this document. As
+ shepherd, Lionel contributed a second set of comments that added
+ polish to the document before it was submitted to the IESG. Benoit
+ Claise picked up additional points that were quickly resolved with
+ Lionel's help. During IETF Last Call Review, Enrico Marocco picked
+ up some important editorial corrections. Stefan Winter contributed
+ text on the use of S-NAPTR as an alternative method of realm-based
+ redirection already specified in [RFC6733]. Derek Atkins performed a
+ review on behalf of the Security Directorate. Lionel noted one more
+ correction.
+
+ Finally, this document benefited from comments and discussion raised
+ by IESG members Stewart Bryant, Stephen Farrell, Barry Leiba, Pete
+ Resnick, Jari Arkko, and Sean Turner during IESG review.
+
+ The authors are very grateful to Lionel Morand for his active role as
+ document shepherd. At each stage, he worked to summarize and resolve
+ comments, making the editor's role easy. Thank you.
+
+7. References
+
+7.1. Normative References
+
+ [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate
+ Requirement Levels", BCP 14, RFC 2119, March 1997.
+
+ [RFC6733] Fajardo, V., Arkko, J., Loughney, J., and G. Zorn,
+ "Diameter Base Protocol", RFC 6733, October 2012.
+
+7.2. Informative References
+
+ [RFC3958] Daigle, L. and A. Newton, "Domain-Based Application
+ Service Location Using SRV RRs and the Dynamic Delegation
+ Discovery Service (DDDS)", RFC 3958, January 2005.
+
+
+
+
+
+
+
+
+
+
+
+
+
+Tsou, et al. Standards Track [Page 9]
+
+RFC 7075 Realm-Based Redirection In Diameter November 2013
+
+
+Authors' Addresses
+
+ Tina Tsou
+ Huawei Technologies (USA)
+ 2330 Central Expressway
+ Santa Clara, CA 95050
+ USA
+
+ Phone: +1 408 330 4424
+ EMail: Tina.Tsou.Zouting@huawei.com
+ URI: http://tinatsou.weebly.com/contact.html
+
+
+ Ruibing Hao
+ Comcast Cable
+ One Comcast Center
+ Philadelphia, PA 19103
+ USA
+
+ Phone: +1 215 286 3991(O)
+ EMail: Ruibing_Hao@cable.comcast.com
+
+
+ Tom Taylor (editor)
+ Huawei Technologies
+ Ottawa
+ Canada
+
+ EMail: tom.taylor.stds@gmail.com
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Tsou, et al. Standards Track [Page 10]
+
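The redirection procedure in Sections 3.2.2 and 3.2.3 above reduces to: pick a realm from the Redirect-Realm AVPs of a DIAMETER_REALM_REDIRECT_INDICATION (3011) answer, replace the request's Destination-Realm, drop any Destination-Host, and resend. A minimal Erlang sketch of that rewrite, using plain {Name, Value} pairs as a stand-in for the diameter library's own AVP representation (illustrative only, not the library API):

    %% Sketch: AVPs modelled as {Name, Value} pairs.
    -module(realm_redirect_sketch).
    -export([reroute/2]).

    %% Per RFC 7075, Section 3.2.2, steps 1 and 3.B.
    reroute(ReqAvps, AnsAvps) ->
        case [R || {'Redirect-Realm', R} <- AnsAvps] of
            [] ->
                {error, no_redirect_realm};
            [Realm | _] ->
                Keep = [A || {N, _} = A <- ReqAvps,
                             N =/= 'Destination-Realm',
                             N =/= 'Destination-Host'],
                {ok, [{'Destination-Realm', Realm} | Keep]}
        end.

For example, reroute([{'Destination-Realm', <<"old.example">>}], [{'Result-Code', 3011}, {'Redirect-Realm', <<"new.example">>}]) returns {ok, [{'Destination-Realm', <<"new.example">>}]}.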
diff --git a/lib/diameter/examples/dict/.gitignore b/lib/diameter/examples/dict/.gitignore
new file mode 100644
index 0000000000..feeb378fd8
--- /dev/null
+++ b/lib/diameter/examples/dict/.gitignore
@@ -0,0 +1,2 @@
+
+/depend.mk
diff --git a/lib/diameter/examples/dict/GNUmakefile b/lib/diameter/examples/dict/GNUmakefile
new file mode 100644
index 0000000000..60c95c08f9
--- /dev/null
+++ b/lib/diameter/examples/dict/GNUmakefile
@@ -0,0 +1,60 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2013. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+#
+# Build example dictionaries. Assumes erlc and diameterc are on PATH.
+#
+
+DICTS = rfc4004_mip \
+ rfc4005_nas \
+ rfc4006_cc \
+ rfc4072_eap \
+ rfc4590_digest \
+ rfc4740_sip
+
+FILES = $(DICTS:%=%.dia)
+BEAMS = $(DICTS:%=%.beam)
+
+COMMON = diameter_gen_base_rfc6733
+
+%.erl: %.dia
+ diameterc --name $(basename $@) \
+ --prefix $(basename $@) \
+ --inherits common/$(COMMON) \
+ $<
+
+%.beam: %.erl
+ erlc -Wall +debug_info $<
+
+all: $(BEAMS)
+
+clean:
+ rm -f $(DICTS:%=%.erl) $(DICTS:%=%.hrl) $(BEAMS) depend.mk
+
+-include depend.mk
+
+depend.mk: depend.sed $(FILES) GNUmakefile
+ (for f in $(FILES); do \
+ (echo $$f; cat $$f) | sed -f depend.sed; \
+ done) \
+ > $@
+
+.PHONY: all clean
+
+.SECONDARY: $(DICTS:%=%.erl)
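Once make has run here, the generated dictionary modules can be sanity-checked from an Erlang shell started in this directory; id/0 is among the exports that diameter_codegen generates (see its export list later in this patch), and the expected values follow from the @id declarations in the .dia files:

    %% erl -pa .
    1 = rfc4005_nas:id(),   %% @id 1
    4 = rfc4006_cc:id().    %% @id 4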
diff --git a/lib/diameter/examples/dict/depend.sed b/lib/diameter/examples/dict/depend.sed
new file mode 100644
index 0000000000..fd9a38479c
--- /dev/null
+++ b/lib/diameter/examples/dict/depend.sed
@@ -0,0 +1,43 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2013. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+#
+
+#
+# Extract dependencies from .dia files. First line of input is the
+# dictionary's filename, the rest is its contents.
+#
+
+1{
+ s@\.[^.]*$@@
+ h
+ d
+}
+
+# Only interested in @inherits.
+/^@inherits */!d
+
+s///
+s/ .*//
+
+# Ignore the common application.
+/^common$/d
+
+# Retrieve the dictionary name from the hold space and output
+# a dependency.
+G
+s@^\(.*\)\n\(.*\)@\2.erl: \1.beam@
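As an illustration: rfc4006_cc.dia inherits rfc4005_nas (see its hunk below), so for that file the script emits the dependency below into depend.mk, while its "@inherits common" line is dropped by the /^common$/ rule:

    rfc4006_cc.erl: rfc4005_nas.beam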
diff --git a/lib/diameter/examples/dict/rfc4004_mip.dia b/lib/diameter/examples/dict/rfc4004_mip.dia
index 575ad4394a..0595cfe9ef 100644
--- a/lib/diameter/examples/dict/rfc4004_mip.dia
+++ b/lib/diameter/examples/dict/rfc4004_mip.dia
@@ -1,7 +1,7 @@
;;
;; %CopyrightBegin%
;;
-;; Copyright Ericsson AB 2010-2011. All Rights Reserved.
+;; Copyright Ericsson AB 2010-2013. All Rights Reserved.
;;
;; The contents of this file are subject to the Erlang Public License,
;; Version 1.1, (the "License"); you may not use this file except in
@@ -30,7 +30,7 @@
@id 2
-@inherits rfc3588_base
+@inherits common
;; ===========================================================================
diff --git a/lib/diameter/examples/dict/rfc4005_nas.dia b/lib/diameter/examples/dict/rfc4005_nas.dia
index a4b44e38bb..6f0e7c1ce5 100644
--- a/lib/diameter/examples/dict/rfc4005_nas.dia
+++ b/lib/diameter/examples/dict/rfc4005_nas.dia
@@ -1,7 +1,7 @@
;;
;; %CopyrightBegin%
;;
-;; Copyright Ericsson AB 2010-2011. All Rights Reserved.
+;; Copyright Ericsson AB 2010-2013. All Rights Reserved.
;;
;; The contents of this file are subject to the Erlang Public License,
;; Version 1.1, (the "License"); you may not use this file except in
@@ -37,7 +37,7 @@
@id 1
-@inherits rfc3588_base
+@inherits common
;; ===========================================================================
diff --git a/lib/diameter/examples/dict/rfc4006_cc.dia b/lib/diameter/examples/dict/rfc4006_cc.dia
index b723e4ddbb..b45ffc8090 100644
--- a/lib/diameter/examples/dict/rfc4006_cc.dia
+++ b/lib/diameter/examples/dict/rfc4006_cc.dia
@@ -1,7 +1,7 @@
;;
;; %CopyrightBegin%
;;
-;; Copyright Ericsson AB 2010-2011. All Rights Reserved.
+;; Copyright Ericsson AB 2010-2013. All Rights Reserved.
;;
;; The contents of this file are subject to the Erlang Public License,
;; Version 1.1, (the "License"); you may not use this file except in
@@ -23,7 +23,7 @@
@id 4
-@inherits rfc3588_base
+@inherits common
@inherits rfc4005_nas Filter-Id
;; ===========================================================================
diff --git a/lib/diameter/examples/dict/rfc4072_eap.dia b/lib/diameter/examples/dict/rfc4072_eap.dia
index 111516b347..676b1b8b9b 100644
--- a/lib/diameter/examples/dict/rfc4072_eap.dia
+++ b/lib/diameter/examples/dict/rfc4072_eap.dia
@@ -1,7 +1,7 @@
;;
;; %CopyrightBegin%
;;
-;; Copyright Ericsson AB 2010-2011. All Rights Reserved.
+;; Copyright Ericsson AB 2010-2013. All Rights Reserved.
;;
;; The contents of this file are subject to the Erlang Public License,
;; Version 1.1, (the "License"); you may not use this file except in
@@ -30,7 +30,7 @@
@id 5
-@inherits rfc3588_base
+@inherits common
@inherits rfc4005_nas
;; ===========================================================================
diff --git a/lib/diameter/examples/dict/rfc4590_digest.dia b/lib/diameter/examples/dict/rfc4590_digest.dia
index a4ebe0c456..de68a6ef7e 100644
--- a/lib/diameter/examples/dict/rfc4590_digest.dia
+++ b/lib/diameter/examples/dict/rfc4590_digest.dia
@@ -1,7 +1,7 @@
;;
;; %CopyrightBegin%
;;
-;; Copyright Ericsson AB 2010-2011. All Rights Reserved.
+;; Copyright Ericsson AB 2010-2013. All Rights Reserved.
;;
;; The contents of this file are subject to the Erlang Public License,
;; Version 1.1, (the "License"); you may not use this file except in
diff --git a/lib/diameter/examples/dict/rfc4740_sip.dia b/lib/diameter/examples/dict/rfc4740_sip.dia
index 8c21882649..cada3ac826 100644
--- a/lib/diameter/examples/dict/rfc4740_sip.dia
+++ b/lib/diameter/examples/dict/rfc4740_sip.dia
@@ -1,7 +1,7 @@
;;
;; %CopyrightBegin%
;;
-;; Copyright Ericsson AB 2010-2011. All Rights Reserved.
+;; Copyright Ericsson AB 2010-2013. All Rights Reserved.
;;
;; The contents of this file are subject to the Erlang Public License,
;; Version 1.1, (the "License"); you may not use this file except in
@@ -23,7 +23,7 @@
@id 6
-@inherits rfc3588_base
+@inherits common
@inherits rfc4590_digest
;; ===========================================================================
diff --git a/lib/diameter/src/base/diameter.erl b/lib/diameter/src/base/diameter.erl
index 77200cc7d0..d74e091e11 100644
--- a/lib/diameter/src/base/diameter.erl
+++ b/lib/diameter/src/base/diameter.erl
@@ -343,7 +343,7 @@ call(SvcName, App, Message) ->
| {capx_timeout, 'Unsigned32'()}
| {disconnect_cb, evaluable()}
| {length_errors, exit | handle | discard}
- | {reconnect_timer, 'Unsigned32'()}
+ | {connect_timer, 'Unsigned32'()}
| {watchdog_timer, 'Unsigned32'() | {module(), atom(), list()}}
| {watchdog_config, [{okay|suspect, non_neg_integer()}]}
| {spawn_opt, list()}
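connect_timer is now the documented name for this transport option; reconnect_timer remains accepted for backwards compatibility (see the diameter_config and diameter_service hunks below). A hedged example of supplying it for a connecting transport; the service name and transport_config values are placeholders:

    %% Sketch only: addresses and service name are made up.
    diameter:add_transport(SvcName,
                           {connect, [{transport_module, diameter_tcp},
                                      {transport_config, [{raddr, {127,0,0,1}},
                                                          {rport, 3868}]},
                                      {connect_timer, 30000}]}).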
diff --git a/lib/diameter/src/base/diameter_capx.erl b/lib/diameter/src/base/diameter_capx.erl
index 1a931a9854..93548ecafd 100644
--- a/lib/diameter/src/base/diameter_capx.erl
+++ b/lib/diameter/src/base/diameter_capx.erl
@@ -168,12 +168,13 @@ ipaddr(A) ->
%%
%% Build a CER record to send to a remote peer.
-%% Use the fact that diameter_caps has the same field names as CER.
+%% Use the fact that diameter_caps is expected to have the same field
+%% names as CER.
bCER(#diameter_caps{} = Rec, Dict) ->
- Values = lists:zip(Dict:'#info-'(diameter_base_CER, fields),
+ RecName = Dict:msg2rec('CER'),
+ Values = lists:zip(Dict:'#info-'(RecName, fields),
tl(tuple_to_list(Rec))),
- Dict:'#new-'(diameter_base_CER, [{K, map(K, V, Dict)}
- || {K,V} <- Values]).
+ Dict:'#new-'(RecName, [{K, map(K, V, Dict)} || {K,V} <- Values]).
%% map/3
%%
@@ -186,8 +187,9 @@ bCER(#diameter_caps{} = Rec, Dict) ->
%% since the corresponding dictionaries expect different values for a
%% 'Vendor-Id': a list for 3588, an integer for 6733.
-map('Vendor-Specific-Application-Id', L, Dict) ->
- Rec = Dict:'#new-'('diameter_base_Vendor-Specific-Application-Id', []),
+map('Vendor-Specific-Application-Id' = T, L, Dict) ->
+ RecName = Dict:name2rec(T),
+ Rec = Dict:'#new-'(RecName, []),
Def = Dict:'#get-'('Vendor-Id', Rec),
[vsa(V, Def) || V <- L];
map(_, V, _) ->
@@ -342,8 +344,9 @@ cs(LS, RS) ->
%% CER is a subset of CEA, the latter adding Result-Code and a few
%% more AVP's.
cea_from_cer(CER, Dict) ->
- [diameter_base_CER | Values] = Dict:'#get-'(CER),
- Dict:'#set-'(Values, Dict:'#new-'(diameter_base_CEA)).
+ RecName = Dict:msg2rec('CEA'),
+ [_ | Values] = Dict:'#get-'(CER),
+ Dict:'#set-'(Values, Dict:'#new-'(RecName)).
%% rCEA/3
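Routing the record names through Dict:msg2rec/1 and Dict:name2rec/1 removes the hard-coded RFC 3588 record names, so capabilities exchange works with whichever base dictionary the service is configured with. A rough check of the mapping, assuming the generated base dictionaries keep their usual diameter_base prefix (an assumption, not shown in this patch):

    diameter_base_CER = diameter_gen_base_rfc6733:msg2rec('CER'),
    diameter_base_CER = diameter_gen_base_rfc3588:msg2rec('CER').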
diff --git a/lib/diameter/src/base/diameter_codec.erl b/lib/diameter/src/base/diameter_codec.erl
index 1d647b8c87..0de4d53973 100644
--- a/lib/diameter/src/base/diameter_codec.erl
+++ b/lib/diameter/src/base/diameter_codec.erl
@@ -477,8 +477,11 @@ split_head(<<Code:32, 1:1, M:1, P:1, _:5, Len:24, V:32, _/bitstring>>) ->
split_head(<<Code:32, 0:1, M:1, P:1, _:5, Len:24, _/bitstring>>) ->
{Code, undefined, M, P, Len, 8};
-split_head(Bin) ->
- ?THROW({5014, #diameter_avp{data = Bin}}).
+%% Header is truncated: pack_avp/1 will pad to the minimum header
+%% length.
+split_head(B)
+ when is_bitstring(B) ->
+ ?THROW({5014, #diameter_avp{data = B}}).
%% 3588:
%%
@@ -523,9 +526,8 @@ split_data(_, _, _) ->
%% split_data/4
split_data(Bin, HdrLen, Len, Pad) ->
- <<_:HdrLen/binary, T/bitstring>> = Bin,
- case T of
- <<Data:Len/binary, _:Pad/binary, Rest/bitstring>> ->
+ case Bin of
+ <<_:HdrLen/binary, Data:Len/binary, _:Pad/binary, Rest/bitstring>> ->
{Data, Rest};
_ ->
invalid_avp_length()
@@ -573,15 +575,15 @@ pack_avp(#diameter_avp{data = {Dict, Name, Value}} = A) ->
{Name, Type} = Dict:avp_name(Code, Vid),
pack_avp(A#diameter_avp{data = {Hdr, {Type, Value}}});
-pack_avp(#diameter_avp{code = undefined, data = Bin})
- when is_binary(Bin) ->
+pack_avp(#diameter_avp{code = undefined, data = B})
+ when is_bitstring(B) ->
%% Reset the AVP Length of an AVP Header resulting from a 5014
%% error. The RFC doesn't explicitly say to do this but the
%% receiver can't correctly extract this and following AVP's
%% without a correct length. On the downside, the header doesn't
%% reveal if the received header has been padded.
- Pad = 8*header_length(Bin) - bit_size(Bin),
- Len = size(<<H:5/binary, _:24, T/binary>> = <<Bin/bitstring, 0:Pad>>),
+ Pad = 8*header_length(B) - bit_size(B),
+ Len = size(<<H:5/binary, _:24, T/binary>> = <<B/bitstring, 0:Pad>>),
<<H/binary, Len:24, T/binary>>;
%% ... or as an iolist.
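The padding arithmetic above can be checked by hand: assuming header_length/1 returns the 8-octet minimum for a fragment without the V flag, a 6-octet fragment gives Pad = 8*8 - 48 = 16 bits, and the Len written back into the rewritten header is then 8:

    Pad = 8*8 - 48,                  %% 16 bits of zero padding
    8 = size(<<0:48, 0:Pad>>).       %% the value re-encoded as AVP Length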
diff --git a/lib/diameter/src/base/diameter_config.erl b/lib/diameter/src/base/diameter_config.erl
index 34b40c3a29..f5ea459fd0 100644
--- a/lib/diameter/src/base/diameter_config.erl
+++ b/lib/diameter/src/base/diameter_config.erl
@@ -537,7 +537,9 @@ opt({capx_timeout, Tmo}) ->
opt({length_errors, T}) ->
lists:member(T, [exit, handle, discard]);
-opt({reconnect_timer, Tmo}) ->
+opt({K, Tmo})
+ when K == reconnect_timer; %% deprecated
+ K == connect_timer ->
?IS_UINT32(Tmo);
opt({watchdog_timer, {M,F,A}})
diff --git a/lib/diameter/src/base/diameter_service.erl b/lib/diameter/src/base/diameter_service.erl
index 47e03cd0a0..70e66537ed 100644
--- a/lib/diameter/src/base/diameter_service.erl
+++ b/lib/diameter/src/base/diameter_service.erl
@@ -1143,10 +1143,17 @@ q_restart(false, _) ->
%% communicate.
default_tc(connect, Opts) ->
- proplists:get_value(reconnect_timer, Opts, ?DEFAULT_TC);
+ connect_timer(Opts, ?DEFAULT_TC);
default_tc(accept, _) ->
0.
+%% Accept both connect_timer and the (older) reconnect_timer, the
+%% latter being a remnant from a time in which the timer did apply to
+%% reconnect attempts.
+connect_timer(Opts, Def0) ->
+ Def = proplists:get_value(reconnect_timer, Opts, Def0),
+ proplists:get_value(connect_timer, Opts, Def).
+
%% Bound tc below if the watchdog was restarted recently to avoid
%% continuous restarted in case of faulty config or other problems.
tc(Time, Tc) ->
@@ -1181,7 +1188,7 @@ tc(false = No, _, _) -> %% removed
%% another watchdog to be able to detect that it should transition
%% from initial into reopen rather than okay. That someone is either
%% the accepting watchdog upon reception of a CER from the previously
-%% connected peer, or us after reconnect_timer timeout.
+%% connected peer, or us after connect_timer timeout.
close(#watchdog{type = connect}, _) ->
ok;
@@ -1194,16 +1201,16 @@ close(#watchdog{type = accept,
%% Tell watchdog to (maybe) die later ...
c(Pid, true, Opts) ->
- Tc = proplists:get_value(reconnect_timer, Opts, 2*?DEFAULT_TC),
+ Tc = connect_timer(Opts, 2*?DEFAULT_TC),
erlang:send_after(Tc, Pid, close);
%% ... or now.
c(Pid, false, _Opts) ->
Pid ! close.
-%% The RFC's only document the behaviour of Tc, our reconnect_timer,
+%% The RFC's only document the behaviour of Tc, our connect_timer,
%% for the establishment of connections but we also give
-%% reconnect_timer semantics for a listener, being the time within
+%% connect_timer semantics for a listener, being the time within
%% which a new connection attempt is expected of a connecting peer.
%% The value should be greater than the peer's Tc + jitter.
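When both options are present, connect_timer wins: the reconnect_timer value only becomes the default for the connect_timer lookup in connect_timer/2 above. Inlining that logic:

    Opts = [{reconnect_timer, 10000}, {connect_timer, 20000}],
    20000 = proplists:get_value(connect_timer, Opts,
                                proplists:get_value(reconnect_timer, Opts, 30000)).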
diff --git a/lib/diameter/src/base/diameter_types.erl b/lib/diameter/src/base/diameter_types.erl
index 8c07e84777..ca3338be5f 100644
--- a/lib/diameter/src/base/diameter_types.erl
+++ b/lib/diameter/src/base/diameter_types.erl
@@ -92,6 +92,9 @@
when is_binary(Bin) ->
binary_to_list(Bin);
+'OctetString'(decode, B) ->
+ ?INVALID_LENGTH(B);
+
'OctetString'(encode = M, zero) ->
'OctetString'(M, []);
@@ -255,9 +258,7 @@
2 == A, 16 == size(B) ->
list_to_tuple([N || <<N:A/unit:8>> <= B]);
-'Address'(decode, <<A:16, _/binary>> = B)
- when 1 == A;
- 2 == A ->
+'Address'(decode, B) ->
?INVALID_LENGTH(B);
'Address'(encode, T) ->
@@ -278,7 +279,10 @@
<<_,_/binary>> = 'OctetString'(M, X);
'DiameterIdentity'(decode = M, <<_,_/binary>> = X) ->
- 'OctetString'(M, X).
+ 'OctetString'(M, X);
+
+'DiameterIdentity'(decode, X) ->
+ ?INVALID_LENGTH(X).
%% --------------------
@@ -286,6 +290,9 @@
when is_binary(Bin) ->
scan_uri(Bin);
+'DiameterURI'(decode, B) ->
+ ?INVALID_LENGTH(B);
+
%% The minimal DiameterURI is "aaa://x", 7 characters.
'DiameterURI'(encode = M, zero) ->
'OctetString'(M, lists:duplicate(0,7));
@@ -330,9 +337,13 @@
%% --------------------
-'UTF8String'(decode, Bin) ->
+'UTF8String'(decode, Bin)
+ when is_binary(Bin) ->
tl([0|_] = unicode:characters_to_list([0, Bin])); %% assert list return
+'UTF8String'(decode, B) ->
+ ?INVALID_LENGTH(B);
+
'UTF8String'(encode = M, zero) ->
'UTF8String'(M, []);
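The new catch-all decode clauses mean that a payload that cannot be legal for the type (a value that is not a binary, such as a stray bitstring, or an Address with an unrecognized family) is rejected through ?INVALID_LENGTH instead of failing with a function_clause error. A rough illustration; the exact term raised by the macro is not shown in this hunk, so the catch below is deliberately broad:

    %% A 9-bit bitstring cannot be a legal UTF8String payload.
    rejected = try diameter_types:'UTF8String'(decode, <<0:9>>) of
                   _ -> unexpected
               catch
                   _:_ -> rejected
               end.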
diff --git a/lib/diameter/src/base/diameter_watchdog.erl b/lib/diameter/src/base/diameter_watchdog.erl
index 7e75801718..9a1c8b6585 100644
--- a/lib/diameter/src/base/diameter_watchdog.erl
+++ b/lib/diameter/src/base/diameter_watchdog.erl
@@ -329,7 +329,7 @@ code_change(_, State, _) ->
%% the commentary is ours.
%% Service or watchdog is telling the watchdog of an accepting
-%% transport to die after reconnect_timer expiry or reestablished
+%% transport to die after connect_timer expiry or reestablished
%% connection (in another transport process) respectively.
transition(close, #watchdog{status = down}) ->
{{accept, _}, _, _} = getr(restart), %% assert
@@ -461,15 +461,28 @@ eraser(Key) ->
%% encode/3
-encode(Msg, Mask, Dict) ->
+encode(dwr = M, Dict0, Mask) ->
+ Msg = getr(M),
Seq = diameter_session:sequence(Mask),
Hdr = #diameter_header{version = ?DIAMETER_VERSION,
end_to_end_id = Seq,
hop_by_hop_id = Seq},
Pkt = #diameter_packet{header = Hdr,
msg = Msg},
- #diameter_packet{bin = Bin} = diameter_codec:encode(Dict, Pkt),
- Bin.
+ #diameter_packet{bin = Bin} = diameter_codec:encode(Dict0, Pkt),
+ Bin;
+
+
+encode(dwa, Dict0, #diameter_packet{header = H, transport_data = TD}
+ = ReqPkt) ->
+ AnsPkt = #diameter_packet{header
+ = H#diameter_header{is_request = false,
+ is_error = undefined,
+ is_retransmitted = false},
+ msg = dwa(ReqPkt),
+ transport_data = TD},
+
+ diameter_codec:encode(Dict0, AnsPkt).
%% okay/3
@@ -527,7 +540,7 @@ send_watchdog(#watchdog{pending = false,
dictionary = Dict0,
sequence = Mask}
= S) ->
- send(TPid, {send, encode(getr(dwr), Mask, Dict0)}),
+ send(TPid, {send, encode(dwr, Dict0, Mask)}),
?LOG(send, 'DWR'),
S#watchdog{pending = true}.
@@ -546,9 +559,8 @@ recv(Name, Pkt, S) ->
%% rcv/3
rcv('DWR', Pkt, #watchdog{transport = TPid,
- dictionary = Dict0,
- sequence = Mask}) ->
- send(TPid, {send, encode(dwa(Pkt), Mask, Dict0)}),
+ dictionary = Dict0}) ->
+ send(TPid, {send, encode(dwa, Dict0, Pkt)}),
?LOG(send, 'DWA');
rcv(N, _, _)
diff --git a/lib/diameter/src/compiler/diameter_codegen.erl b/lib/diameter/src/compiler/diameter_codegen.erl
index e687145263..22422f2ef2 100644
--- a/lib/diameter/src/compiler/diameter_codegen.erl
+++ b/lib/diameter/src/compiler/diameter_codegen.erl
@@ -33,11 +33,6 @@
-export([from_dict/4]).
-%% Internal exports (for test).
--export([file/1,
- file/2,
- file/3]).
-
-include("diameter_forms.hrl").
-include("diameter_vsn.hrl").
@@ -48,18 +43,61 @@
%% ===========================================================================
--spec from_dict(File, Spec, Opts, Mode)
+-spec from_dict(File, ParseD, Opts, Mode)
-> ok
+ | term()
when File :: string(),
- Spec :: orddict:orddict(),
+ ParseD :: orddict:orddict(),
Opts :: list(),
- Mode :: spec | erl | hrl.
+ Mode :: parse | forms | erl | hrl.
-from_dict(File, Spec, Opts, Mode) ->
+from_dict(File, ParseD, Opts, Mode) ->
Outdir = proplists:get_value(outdir, Opts, "."),
+ Return = proplists:get_value(return, Opts, false),
+ Mod = mod(File, orddict:find(name, ParseD)),
putr(verbose, lists:member(verbose, Opts)),
- putr(debug, lists:member(debug, Opts)),
- codegen(File, Spec, Outdir, Mode).
+ try
+ maybe_write(Return, Mode, Outdir, Mod, gen(Mode, ParseD, ?A(Mod)))
+ after
+ eraser(verbose)
+ end.
+
+mod(File, error) ->
+ filename:rootname(filename:basename(File));
+mod(_, {ok, Mod}) ->
+ Mod.
+
+maybe_write(true, _, _, _, T) ->
+ T;
+
+maybe_write(_, Mode, Outdir, Mod, T) ->
+ Path = filename:join(Outdir, Mod), %% minus extension
+ do_write(Mode, [Path, $., ext(Mode)], T).
+
+ext(parse) ->
+ "D";
+ext(forms) ->
+ "F";
+ext(T) ->
+ ?S(T).
+
+do_write(M, Path, T)
+ when M == parse;
+ M == forms ->
+ write_term(Path, T);
+do_write(_, Path, T) ->
+ write(Path, T).
+
+write(Path, T) ->
+ write(Path, "~s", T).
+
+write_term(Path, T) ->
+ write(Path, "~p.~n", T).
+
+write(Path, Fmt, T) ->
+ {ok, Fd} = file:open(Path, [write]),
+ io:fwrite(Fd, Fmt, [T]),
+ ok = file:close(Fd).
%% Optional reports when running verbosely.
report(What, Data) ->
@@ -77,20 +115,8 @@ putr(Key, Value) ->
getr(Key) ->
get({?MODULE, Key}).
-%% ===========================================================================
-%% ===========================================================================
-
-%% Generate from parsed dictionary in a file.
-
-file(F) ->
- file(F, spec).
-
-file(F, Mode) ->
- file(F, ".", Mode).
-
-file(F, Outdir, Mode) ->
- {ok, [Spec]} = file:consult(F),
- from_dict(F, Spec, Outdir, Mode).
+eraser(Key) ->
+ erase({?MODULE, Key}).
%% ===========================================================================
%% ===========================================================================
@@ -98,97 +124,68 @@ file(F, Outdir, Mode) ->
get_value(Key, Plist) ->
proplists:get_value(Key, Plist, []).
-write(Path, Str) ->
- w(Path, Str, "~s").
-
-write_term(Path, T) ->
- w(Path, T, "~p.").
-
-w(Path, T, Fmt) ->
- {ok, Fd} = file:open(Path, [write]),
- io:fwrite(Fd, Fmt ++ "~n", [T]),
- file:close(Fd).
-
-codegen(File, Spec, Outdir, Mode) ->
- Mod = mod(File, orddict:find(name, Spec)),
- Path = filename:join(Outdir, Mod), %% minus extension
- gen(Mode, Spec, ?A(Mod), Path),
- ok.
-
-mod(File, error) ->
- filename:rootname(filename:basename(File));
-mod(_, {ok, Mod}) ->
- Mod.
-
-gen(spec, Spec, _Mod, Path) ->
- write_term(Path ++ ".spec", [?VERSION | Spec]);
-
-gen(hrl, Spec, Mod, Path) ->
- gen_hrl(Path ++ ".hrl", Mod, Spec);
-
-gen(erl, Spec, Mod, Path) ->
- Forms = [{?attribute, module, Mod},
- {?attribute, compile, {parse_transform, diameter_exprecs}},
- {?attribute, compile, nowarn_unused_function},
- {?attribute, export, [{name, 0},
- {id, 0},
- {vendor_id, 0},
- {vendor_name, 0},
- {decode_avps, 2}, %% in diameter_gen.hrl
- {encode_avps, 2}, %%
- {msg_name, 2},
- {msg_header, 1},
- {rec2msg, 1},
- {msg2rec, 1},
- {name2rec, 1},
- {avp_name, 2},
- {avp_arity, 2},
- {avp_header, 1},
- {avp, 3},
- {grouped_avp, 3},
- {enumerated_avp, 3},
- {empty_value, 1},
- {dict, 0}]},
- %% diameter.hrl is included for #diameter_avp
- {?attribute, include_lib, "diameter/include/diameter.hrl"},
- {?attribute, include_lib, "diameter/include/diameter_gen.hrl"},
- f_name(Mod),
- f_id(Spec),
- f_vendor_id(Spec),
- f_vendor_name(Spec),
- f_msg_name(Spec),
- f_msg_header(Spec),
- f_rec2msg(Spec),
- f_msg2rec(Spec),
- f_name2rec(Spec),
- f_avp_name(Spec),
- f_avp_arity(Spec),
- f_avp_header(Spec),
- f_avp(Spec),
- f_enumerated_avp(Spec),
- f_empty_value(Spec),
- f_dict(Spec),
- {eof, ?LINE}],
-
- gen_erl(Path, insert_hrl_forms(Spec, Forms)).
-
-gen_erl(Path, Forms) ->
- getr(debug) andalso write_term(Path ++ ".forms", Forms),
- write(Path ++ ".erl",
- header() ++ erl_prettypr:format(erl_syntax:form_list(Forms))).
-
-insert_hrl_forms(Spec, Forms) ->
- {H,T} = lists:splitwith(fun is_header/1, Forms),
- H ++ make_hrl_forms(Spec) ++ T.
-
-is_header({attribute, _, export, _}) ->
- false;
-is_header(_) ->
- true.
-
-make_hrl_forms(Spec) ->
+gen(parse, ParseD, _Mod) ->
+ [?VERSION | ParseD];
+
+gen(forms, ParseD, Mod) ->
+ pp(erl_forms(Mod, ParseD));
+
+gen(hrl, ParseD, Mod) ->
+ gen_hrl(Mod, ParseD);
+
+gen(erl, ParseD, Mod) ->
+ [header(), prettypr(erl_forms(Mod, ParseD)), $\n].
+
+erl_forms(Mod, ParseD) ->
+ Forms = [[{?attribute, module, Mod},
+ {?attribute, compile, {parse_transform, diameter_exprecs}},
+ {?attribute, compile, nowarn_unused_function}],
+ make_hrl_forms(ParseD),
+ [{?attribute, export, [{name, 0},
+ {id, 0},
+ {vendor_id, 0},
+ {vendor_name, 0},
+ {decode_avps, 2}, %% in diameter_gen.hrl
+ {encode_avps, 2}, %%
+ {msg_name, 2},
+ {msg_header, 1},
+ {rec2msg, 1},
+ {msg2rec, 1},
+ {name2rec, 1},
+ {avp_name, 2},
+ {avp_arity, 2},
+ {avp_header, 1},
+ {avp, 3},
+ {grouped_avp, 3},
+ {enumerated_avp, 3},
+ {empty_value, 1},
+ {dict, 0}]},
+ %% diameter.hrl is included for #diameter_avp
+ {?attribute, include_lib, "diameter/include/diameter.hrl"},
+ {?attribute, include_lib, "diameter/include/diameter_gen.hrl"},
+ f_name(Mod),
+ f_id(ParseD),
+ f_vendor_id(ParseD),
+ f_vendor_name(ParseD),
+ f_msg_name(ParseD),
+ f_msg_header(ParseD),
+ f_rec2msg(ParseD),
+ f_msg2rec(ParseD),
+ f_name2rec(ParseD),
+ f_avp_name(ParseD),
+ f_avp_arity(ParseD),
+ f_avp_header(ParseD),
+ f_avp(ParseD),
+ f_enumerated_avp(ParseD),
+ f_empty_value(ParseD),
+ f_dict(ParseD),
+ {eof, ?LINE}]],
+
+ lists:append(Forms).
+
+make_hrl_forms(ParseD) ->
{_Prefix, MsgRecs, GrpRecs, ImportedGrpRecs}
- = make_record_forms(Spec),
+ = make_record_forms(ParseD),
RecordForms = MsgRecs ++ GrpRecs ++ lists:flatmap(fun({_,Fs}) -> Fs end,
ImportedGrpRecs),
@@ -199,16 +196,16 @@ make_hrl_forms(Spec) ->
%% export_records is used by the diameter_exprecs parse transform.
[{?attribute, export_records, RecNames} | RecordForms].
-make_record_forms(Spec) ->
- Prefix = prefix(Spec),
+make_record_forms(ParseD) ->
+ Prefix = prefix(ParseD),
- MsgRecs = a_record(Prefix, fun msg_proj/1, get_value(messages, Spec)),
- GrpRecs = a_record(Prefix, fun grp_proj/1, get_value(grouped, Spec)),
+ MsgRecs = a_record(Prefix, fun msg_proj/1, get_value(messages, ParseD)),
+ GrpRecs = a_record(Prefix, fun grp_proj/1, get_value(grouped, ParseD)),
ImportedGrpRecs = [{M, a_record(Prefix, fun grp_proj/1, Gs)}
- || {M,Gs} <- get_value(import_groups, Spec)],
+ || {M,Gs} <- get_value(import_groups, ParseD)],
- {Prefix, MsgRecs, GrpRecs, ImportedGrpRecs}.
+ {to_upper(Prefix), MsgRecs, GrpRecs, ImportedGrpRecs}.
msg_proj({Name, _, _, _, Avps}) ->
{Name, Avps}.
@@ -246,9 +243,9 @@ f_name(Name) ->
%%% # id/0
%%% ------------------------------------------------------------------------
-f_id(Spec) ->
+f_id(ParseD) ->
{?function, id, 0,
- [c_id(orddict:find(id, Spec))]}.
+ [c_id(orddict:find(id, ParseD))]}.
c_id({ok, Id}) ->
{?clause, [], [], [?INTEGER(Id)]};
@@ -260,9 +257,9 @@ c_id(error) ->
%%% # vendor_id/0
%%% ------------------------------------------------------------------------
-f_vendor_id(Spec) ->
+f_vendor_id(ParseD) ->
{?function, vendor_id, 0,
- [{?clause, [], [], [b_vendor_id(orddict:find(vendor, Spec))]}]}.
+ [{?clause, [], [], [b_vendor_id(orddict:find(vendor, ParseD))]}]}.
b_vendor_id({ok, {Id, _}}) ->
?INTEGER(Id);
@@ -273,9 +270,9 @@ b_vendor_id(error) ->
%%% # vendor_name/0
%%% ------------------------------------------------------------------------
-f_vendor_name(Spec) ->
+f_vendor_name(ParseD) ->
{?function, vendor_name, 0,
- [{?clause, [], [], [b_vendor_name(orddict:find(vendor, Spec))]}]}.
+ [{?clause, [], [], [b_vendor_name(orddict:find(vendor, ParseD))]}]}.
b_vendor_name({ok, {_, Name}}) ->
?Atom(Name);
@@ -286,15 +283,15 @@ b_vendor_name(error) ->
%%% # msg_name/1
%%% ------------------------------------------------------------------------
-f_msg_name(Spec) ->
- {?function, msg_name, 2, msg_name(Spec)}.
+f_msg_name(ParseD) ->
+ {?function, msg_name, 2, msg_name(ParseD)}.
%% Return the empty name for any unknown command to which
%% DIAMETER_COMMAND_UNSUPPORTED should be replied.
-msg_name(Spec) ->
+msg_name(ParseD) ->
lists:flatmap(fun c_msg_name/1, proplists:get_value(command_codes,
- Spec,
+ ParseD,
[]))
++ [{?clause, [?VAR('_'), ?VAR('_')], [], [?ATOM('')]}].
@@ -310,12 +307,12 @@ c_msg_name({Code, Req, Ans}) ->
%%% # msg2rec/1
%%% ------------------------------------------------------------------------
-f_msg2rec(Spec) ->
- {?function, msg2rec, 1, msg2rec(Spec)}.
+f_msg2rec(ParseD) ->
+ {?function, msg2rec, 1, msg2rec(ParseD)}.
-msg2rec(Spec) ->
- Pre = prefix(Spec),
- lists:map(fun(T) -> c_msg2rec(T, Pre) end, get_value(messages, Spec))
+msg2rec(ParseD) ->
+ Pre = prefix(ParseD),
+ lists:map(fun(T) -> c_msg2rec(T, Pre) end, get_value(messages, ParseD))
++ [?BADARG(1)].
c_msg2rec({N,_,_,_,_}, Pre) ->
@@ -325,12 +322,12 @@ c_msg2rec({N,_,_,_,_}, Pre) ->
%%% # rec2msg/1
%%% ------------------------------------------------------------------------
-f_rec2msg(Spec) ->
- {?function, rec2msg, 1, rec2msg(Spec)}.
+f_rec2msg(ParseD) ->
+ {?function, rec2msg, 1, rec2msg(ParseD)}.
-rec2msg(Spec) ->
- Pre = prefix(Spec),
- lists:map(fun(T) -> c_rec2msg(T, Pre) end, get_value(messages, Spec))
+rec2msg(ParseD) ->
+ Pre = prefix(ParseD),
+ lists:map(fun(T) -> c_rec2msg(T, Pre) end, get_value(messages, ParseD))
++ [?BADARG(1)].
c_rec2msg({N,_,_,_,_}, Pre) ->
@@ -340,13 +337,13 @@ c_rec2msg({N,_,_,_,_}, Pre) ->
%%% # name2rec/1
%%% ------------------------------------------------------------------------
-f_name2rec(Spec) ->
- {?function, name2rec, 1, name2rec(Spec)}.
+f_name2rec(ParseD) ->
+ {?function, name2rec, 1, name2rec(ParseD)}.
-name2rec(Spec) ->
- Pre = prefix(Spec),
- Groups = get_value(grouped, Spec)
- ++ lists:flatmap(fun avps/1, get_value(import_groups, Spec)),
+name2rec(ParseD) ->
+ Pre = prefix(ParseD),
+ Groups = get_value(grouped, ParseD)
+ ++ lists:flatmap(fun avps/1, get_value(import_groups, ParseD)),
lists:map(fun({N,_,_,_}) -> c_name2rec(N, Pre) end, Groups)
++ [{?clause, [?VAR('T')], [], [?CALL(msg2rec, [?VAR('T')])]}].
@@ -360,8 +357,8 @@ avps({_Mod, Avps}) ->
%%% # avp_name/1
%%% ------------------------------------------------------------------------
-f_avp_name(Spec) ->
- {?function, avp_name, 2, avp_name(Spec)}.
+f_avp_name(ParseD) ->
+ {?function, avp_name, 2, avp_name(ParseD)}.
%% 3588, 4.1:
%%
@@ -372,11 +369,11 @@ f_avp_name(Spec) ->
%% field. AVP numbers 256 and above are used for Diameter, which are
%% allocated by IANA (see Section 11.1).
-avp_name(Spec) ->
- Avps = get_value(avp_types, Spec),
- Imported = get_value(import_avps, Spec),
- Vid = orddict:find(vendor, Spec),
- Vs = vendor_id_map(Spec),
+avp_name(ParseD) ->
+ Avps = get_value(avp_types, ParseD),
+ Imported = get_value(import_avps, ParseD),
+ Vid = orddict:find(vendor, ParseD),
+ Vs = vendor_id_map(ParseD),
lists:map(fun(T) -> c_avp_name(T, Vs, Vid) end, Avps)
++ lists:flatmap(fun(T) -> c_imported_avp_name(T, Vs) end, Imported)
@@ -407,25 +404,25 @@ c_avp_name_(T, Code, Vid) ->
[],
[T]}.
-vendor_id_map(Spec) ->
+vendor_id_map(ParseD) ->
lists:flatmap(fun({V,Ns}) -> [{N,V} || N <- Ns] end,
- get_value(avp_vendor_id, Spec))
+ get_value(avp_vendor_id, ParseD))
++ lists:flatmap(fun({_,_,[],_}) -> [];
({N,_,[V],_}) -> [{N,V}]
end,
- get_value(grouped, Spec)).
+ get_value(grouped, ParseD)).
%%% ------------------------------------------------------------------------
%%% # avp_arity/2
%%% ------------------------------------------------------------------------
-f_avp_arity(Spec) ->
- {?function, avp_arity, 2, avp_arity(Spec)}.
+f_avp_arity(ParseD) ->
+ {?function, avp_arity, 2, avp_arity(ParseD)}.
-avp_arity(Spec) ->
- Msgs = get_value(messages, Spec),
- Groups = get_value(grouped, Spec)
- ++ lists:flatmap(fun avps/1, get_value(import_groups, Spec)),
+avp_arity(ParseD) ->
+ Msgs = get_value(messages, ParseD),
+ Groups = get_value(grouped, ParseD)
+ ++ lists:flatmap(fun avps/1, get_value(import_groups, ParseD)),
c_avp_arity(Msgs ++ Groups)
++ [{?clause, [?VAR('_'), ?VAR('_')], [], [?INTEGER(0)]}].
@@ -449,15 +446,15 @@ c_arity(Name, Avp) ->
%%% # avp/3
%%% ------------------------------------------------------------------------
-f_avp(Spec) ->
- {?function, avp, 3, avp(Spec) ++ [?BADARG(3)]}.
+f_avp(ParseD) ->
+ {?function, avp, 3, avp(ParseD) ++ [?BADARG(3)]}.
-avp(Spec) ->
- Native = get_value(avp_types, Spec),
- CustomMods = get_value(custom_types, Spec),
- TypeMods = get_value(codecs, Spec),
- Imported = get_value(import_avps, Spec),
- Enums = get_value(enum, Spec),
+avp(ParseD) ->
+ Native = get_value(avp_types, ParseD),
+ CustomMods = get_value(custom_types, ParseD),
+ TypeMods = get_value(codecs, ParseD),
+ Imported = get_value(import_avps, ParseD),
+ Enums = get_value(enum, ParseD),
Custom = lists:map(fun({M,As}) -> {M, custom_types, As} end,
CustomMods)
@@ -548,14 +545,14 @@ custom(codecs, AvpName, Type) ->
%%% # enumerated_avp/3
%%% ------------------------------------------------------------------------
-f_enumerated_avp(Spec) ->
- {?function, enumerated_avp, 3, enumerated_avp(Spec) ++ [?BADARG(3)]}.
+f_enumerated_avp(ParseD) ->
+ {?function, enumerated_avp, 3, enumerated_avp(ParseD) ++ [?BADARG(3)]}.
-enumerated_avp(Spec) ->
- Enums = get_value(enum, Spec),
+enumerated_avp(ParseD) ->
+ Enums = get_value(enum, ParseD),
lists:flatmap(fun cs_enumerated_avp/1, Enums)
++ lists:flatmap(fun({M,Es}) -> enumerated_avp(M, Es, Enums) end,
- get_value(import_enums, Spec)).
+ get_value(import_enums, ParseD)).
enumerated_avp(Mod, Es, Enums) ->
lists:flatmap(fun({N,_}) ->
@@ -585,16 +582,16 @@ c_enumerated_avp(AvpName, {_,I}) ->
%%% msg_header/1
%%% ------------------------------------------------------------------------
-f_msg_header(Spec) ->
- {?function, msg_header, 1, msg_header(Spec) ++ [?BADARG(1)]}.
+f_msg_header(ParseD) ->
+ {?function, msg_header, 1, msg_header(ParseD) ++ [?BADARG(1)]}.
-msg_header(Spec) ->
- msg_header(get_value(messages, Spec), Spec).
+msg_header(ParseD) ->
+ msg_header(get_value(messages, ParseD), ParseD).
msg_header([], _) ->
[];
-msg_header(Msgs, Spec) ->
- ApplId = orddict:fetch(id, Spec),
+msg_header(Msgs, ParseD) ->
+ ApplId = orddict:fetch(id, ParseD),
lists:map(fun({M,C,F,_,_}) -> c_msg_header(M, C, F, ApplId) end, Msgs).
@@ -616,14 +613,14 @@ emf('ERR', N) -> N bor 2#00100000.
%%% # avp_header/1
%%% ------------------------------------------------------------------------
-f_avp_header(Spec) ->
- {?function, avp_header, 1, avp_header(Spec) ++ [?BADARG(1)]}.
+f_avp_header(ParseD) ->
+ {?function, avp_header, 1, avp_header(ParseD) ++ [?BADARG(1)]}.
-avp_header(Spec) ->
- Native = get_value(avp_types, Spec),
- Imported = get_value(import_avps, Spec),
- Vid = orddict:find(vendor, Spec),
- Vs = vendor_id_map(Spec),
+avp_header(ParseD) ->
+ Native = get_value(avp_types, ParseD),
+ Imported = get_value(import_avps, ParseD),
+ Vid = orddict:find(vendor, ParseD),
+ Vs = vendor_id_map(ParseD),
lists:flatmap(fun(A) -> c_avp_header(A, Vs, Vid) end,
Native ++ Imported).
@@ -679,14 +676,14 @@ v(false, _, _, _) ->
%%% # empty_value/0
%%% ------------------------------------------------------------------------
-f_empty_value(Spec) ->
- {?function, empty_value, 1, empty_value(Spec)}.
+f_empty_value(ParseD) ->
+ {?function, empty_value, 1, empty_value(ParseD)}.
-empty_value(Spec) ->
- Imported = lists:flatmap(fun avps/1, get_value(import_enums, Spec)),
- Groups = get_value(grouped, Spec)
- ++ lists:flatmap(fun avps/1, get_value(import_groups, Spec)),
- Enums = [T || {N,_} = T <- get_value(enum, Spec),
+empty_value(ParseD) ->
+ Imported = lists:flatmap(fun avps/1, get_value(import_enums, ParseD)),
+ Groups = get_value(grouped, ParseD)
+ ++ lists:flatmap(fun avps/1, get_value(import_groups, ParseD)),
+ Enums = [T || {N,_} = T <- get_value(enum, ParseD),
not lists:keymember(N, 1, Imported)]
++ Imported,
lists:map(fun c_empty_value/1, Groups ++ Enums)
@@ -706,72 +703,52 @@ c_empty_value({Name, _}) ->
%%% # dict/0
%%% ------------------------------------------------------------------------
-f_dict(Spec) ->
+f_dict(ParseD) ->
{?function, dict, 0,
- [{?clause, [], [], [?TERM([?VERSION | Spec])]}]}.
+ [{?clause, [], [], [?TERM([?VERSION | ParseD])]}]}.
%%% ------------------------------------------------------------------------
-%%% # gen_hrl/3
+%%% # gen_hrl/2
%%% ------------------------------------------------------------------------
-gen_hrl(Path, Mod, Spec) ->
- {ok, Fd} = file:open(Path, [write]),
-
+gen_hrl(Mod, ParseD) ->
{Prefix, MsgRecs, GrpRecs, ImportedGrpRecs}
- = make_record_forms(Spec),
-
- file:write(Fd, hrl_header(Mod)),
-
- forms("Message records", Fd, MsgRecs),
- forms("Grouped AVP records", Fd, GrpRecs),
-
- lists:foreach(fun({M,Fs}) ->
- forms("Grouped AVP records from " ++ atom_to_list(M),
- Fd,
- Fs)
- end,
- ImportedGrpRecs),
-
- PREFIX = to_upper(Prefix),
-
- write("ENUM Macros",
- Fd,
- m_enums(PREFIX, false, get_value(enum, Spec))),
- write("DEFINE Macros",
- Fd,
- m_enums(PREFIX, false, get_value(define, Spec))),
-
- lists:foreach(fun({M,Es}) ->
- write("ENUM Macros from " ++ atom_to_list(M),
- Fd,
- m_enums(PREFIX, true, Es))
- end,
- get_value(import_enums, Spec)),
-
- file:close(Fd).
-
-forms(_, _, []) ->
- ok;
-forms(Banner, Fd, Forms) ->
- write(Banner, Fd, prettypr(Forms)).
-
-write(_, _, []) ->
- ok;
-write(Banner, Fd, Str) ->
- banner(Fd, Banner),
- io:fwrite(Fd, "~s~n", [Str]).
+ = make_record_forms(ParseD),
+
+ [hrl_header(Mod),
+ forms("Message records", MsgRecs),
+ forms("Grouped AVP records", GrpRecs),
+ lists:map(fun({M,Fs}) ->
+ forms("Grouped AVP records from " ++ atom_to_list(M),
+ Fs)
+ end,
+ ImportedGrpRecs),
+ format("ENUM Macros", m_enums(Prefix, false, get_value(enum, ParseD))),
+ format("DEFINE Macros", m_enums(Prefix, false, get_value(define, ParseD))),
+ lists:map(fun({M,Es}) ->
+ format("ENUM Macros from " ++ atom_to_list(M),
+ m_enums(Prefix, true, Es))
+ end,
+ get_value(import_enums, ParseD))].
+
+forms(_, [] = No) ->
+ No;
+forms(Banner, Forms) ->
+ format(Banner, prettypr(Forms)).
+
+format(_, [] = No) ->
+ No;
+format(Banner, Str) ->
+ [banner(Banner), Str, $\n].
prettypr(Forms) ->
erl_prettypr:format(erl_syntax:form_list(Forms)).
-banner(Fd, Heading) ->
- file:write(Fd, banner(Heading)).
-
banner(Heading) ->
- ("\n\n"
+ ["\n\n"
"%%% -------------------------------------------------------\n"
- "%%% " ++ Heading ++ ":\n"
- "%%% -------------------------------------------------------\n\n").
+ "%%% ", Heading, ":\n"
+ "%%% -------------------------------------------------------\n\n"].
z(S) ->
string:join(string:tokens(S, "\s\t"), "\s").
@@ -845,8 +822,8 @@ arity([_], '*' = Inf) -> {0, Inf};
arity({_}, '*' = Inf) -> {1, Inf};
arity(_, {_,_} = Q) -> Q.
-prefix(Spec) ->
- case orddict:find(prefix, Spec) of
+prefix(ParseD) ->
+ case orddict:find(prefix, ParseD) of
{ok, P} ->
P ++ "_";
error ->
@@ -855,3 +832,70 @@ prefix(Spec) ->
rec_name(Name, Prefix) ->
Prefix ++ Name.
+
+%% ===========================================================================
+%% pp/1
+%%
+%% Preprocess forms as generated by 'forms' option. In particular,
+%% replace the include_lib attributes in generated forms by the
+%% corresponding forms, extracting the latter from an existing
+%% dictionary (diameter_gen_relay). The resulting forms can be
+%% compiled to beam using compile:forms/2 (which does no preprocessing
+%% of its own; DIY currently appears to be the only way to preprocess
+%% a forms list).
+
+pp(Forms) ->
+ {_, Beam, _} = code:get_object_code(diameter_gen_relay),
+ pp(Forms, abstract_code(Beam)).
+
+pp(Forms, {ok, Code}) ->
+ Files = files(Code, []),
+ lists:flatmap(fun(T) -> include(T, Files) end, Forms);
+
+pp(Forms, {error, Reason}) ->
+ erlang:error({forms, Reason, Forms}).
+
+include({attribute, _, include_lib, Path}, Files) ->
+ Inc = filename:basename(Path),
+ [{Inc, Forms}] = [T || {F, _} = T <- Files, F == Inc], %% expect one
+ lists:flatmap(fun filter/1, Forms);
+
+include(T, _) ->
+ [T].
+
+abstract_code(Beam) ->
+ case beam_lib:chunks(Beam, [abstract_code]) of
+ {ok, {_Mod, [{abstract_code, {_Vsn, Code}}]}} ->
+ {ok, Code};
+ {ok, {_Mod, [{abstract_code, no_abstract_code = No}]}} ->
+ {error, No};
+ {error = E, beam_lib, Reason} ->
+ {E, Reason}
+ end.
+
+files([{attribute, _, file, {Path, _}} | T], Acc) ->
+ {Body, Rest} = lists:splitwith(fun({attribute, _, file, _}) -> false;
+ (_) -> true
+ end,
+ T),
+ files(Rest, [{filename:basename(Path), Body} | Acc]);
+
+files([], Acc) ->
+ Acc.
+
+%% Only retain record diameter_avp and functions not generated by
+%% diameter_exprecs.
+
+filter({attribute, _, record, {diameter_avp, _}} = T) ->
+ [T];
+
+filter({function, _, Name, _, _} = T) ->
+ case ?S(Name) of
+ [$#|_] -> %% generated by diameter_exprecs
+ [];
+ _ ->
+ [T]
+ end;
+
+filter(_) ->
+ [].
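As the pp/1 commentary above notes, forms generated with the 'forms' option are meant for compile:forms/2. A minimal sketch of that workflow, assuming ParseD is a parsed dictionary obtained from diameter_make:codec/2 with the 'parse' and 'return' options (the function name is illustrative, not part of the patch):

    %% Compile and load a dictionary module from its preprocessed forms.
    compile_dict(ParseD) ->
        {ok, [Forms]} = diameter_make:codec(ParseD, [forms, return]),
        {ok, Mod, Bin, _Warnings} = compile:forms(Forms, [return]),
        {module, Mod} = code:load_binary(Mod, atom_to_list(Mod), Bin),
        Mod.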
diff --git a/lib/diameter/src/compiler/diameter_dict_util.erl b/lib/diameter/src/compiler/diameter_dict_util.erl
index 36a6efa294..3941f30e03 100644
--- a/lib/diameter/src/compiler/diameter_dict_util.erl
+++ b/lib/diameter/src/compiler/diameter_dict_util.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -46,7 +46,7 @@
-spec parse(File, Opts)
-> {ok, orddict:orddict()}
| {error, term()}
- when File :: {path, string()}
+ when File :: {path, file:name_all()}
| iolist()
| binary(),
Opts :: list().
@@ -265,6 +265,9 @@ io(K, Id)
io(vendor = K, {Id, Name}) ->
[?NL, section(K) | [[?SP, tok(X)] || X <- [Id, Name]]];
+io(_, []) ->
+ [];
+
io(avp_types = K, Body) ->
[?NL, ?NL, section(K), ?NL, [body(K,A) || A <- Body]];
diff --git a/lib/diameter/src/compiler/diameter_make.erl b/lib/diameter/src/compiler/diameter_make.erl
index 16e30c1ffb..2f314b7e57 100644
--- a/lib/diameter/src/compiler/diameter_make.erl
+++ b/lib/diameter/src/compiler/diameter_make.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -30,102 +30,231 @@
-module(diameter_make).
--export([codec/1,
- codec/2,
- dict/1,
- dict/2,
+-export([codec/2,
+ codec/1,
format/1,
- reformat/1]).
+ flatten/1]).
-export_type([opt/0]).
+-include("diameter_vsn.hrl").
+
+%% Options passed to codec/2.
-type opt() :: {include|outdir|name|prefix|inherits, string()}
+ | return
| verbose
- | debug.
+ | parse %% internal parsed form
+ | forms %% abstract format for compile:forms/1,2
+ | erl
+ | hrl.
+
+%% Internal parsed format with a version tag.
+-type parsed() :: list().
+
+%% Literal dictionary or path. A NL or CR identifies the former.
+-type dict() :: iolist()
+ | binary()
+ | parsed(). %% as returned by codec/2
+
+%% Name of a literal dictionary if otherwise unspecified.
+-define(DEFAULT_DICT_FILE, "dictionary.dia").
%% ===========================================================================
%% codec/1-2
%%
-%% Parse a dictionary file and generate a codec module.
+%% Parse a dictionary file and generate a codec module. Input
+%% dictionary can be either a path or the dictionary itself: the
+%% occurrence of \n or \r in the argument is used to distinguish the
+%% two.
--spec codec(Path, [opt()])
+-spec codec(File, [opt()])
-> ok
+ | {ok, list()} %% with option 'return', one element for each output
| {error, Reason}
- when Path :: string(),
+ when File :: dict()
+ | {path, file:name_all()},
Reason :: string().
codec(File, Opts) ->
- case dict(File, Opts) of
- {ok, Dict} ->
- make(File,
- Opts,
- Dict,
- [spec || _ <- [1], lists:member(debug, Opts)] ++ [erl, hrl]);
- {error, _} = E ->
- E
+ {Dict, Path} = identify(File),
+ case parse(Dict, Opts) of
+ {ok, ParseD} ->
+ make(Path, default(Opts), ParseD);
+ {error = E, Reason} ->
+ {E, diameter_dict_util:format_error(Reason)}
end.
codec(File) ->
codec(File, []).
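For illustration, a hedged sketch of the calling convention (the file name is an assumption, not taken from the patch): without 'return' the generated outputs are written to the output directory, while with 'return' one result per requested mode is returned, in the order the modes are given.

    %% Generate .erl and .hrl on disk (the defaults), then regenerate
    %% everything in memory. "dict.dia" is a hypothetical dictionary file.
    make_example() ->
        ok = diameter_make:codec("dict.dia", []),
        {ok, [ParseD, Erl, Hrl]} =
            diameter_make:codec({path, "dict.dia"},
                                [parse, erl, hrl, return]),
        {ParseD, Erl, Hrl}.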
-%% dict/2
+%% format/1
%%
-%% Parse a dictionary file and return the orddict that a codec module
-%% returns from dict/0.
-
--spec dict(string(), [opt()])
- -> {ok, orddict:orddict()}
- | {error, string()}.
+%% Turn a parsed dictionary, as returned by codec/2 with option 'parse',
+%% back into the dictionary's textual form.
-dict(Path, Opts) ->
- case diameter_dict_util:parse({path, Path}, Opts) of
- {ok, _} = Ok ->
- Ok;
- {error = E, Reason} ->
- {E, diameter_dict_util:format_error(Reason)}
- end.
+-spec format(parsed())
+ -> iolist().
-dict(File) ->
- dict(File, []).
+format([?VERSION | Dict]) ->
+ diameter_dict_util:format(Dict).
-%% format/1
+%% flatten/1
%%
-%% Turn an orddict returned by dict/1-2 back into a dictionary file
-%% in the form of an iolist().
+%% Reconstitute a dictionary without @inherits.
--spec format(orddict:orddict())
- -> iolist().
+-spec flatten(parsed())
+ -> parsed().
-format(Dict) ->
- diameter_dict_util:format(Dict).
+flatten([?VERSION = V | Dict]) ->
+ [V | lists:foldl(fun flatten/2,
+ Dict,
+ [avp_vendor_id,
+ custom_types,
+ codecs,
+ [avp_types, import_avps],
+ [grouped, import_groups],
+ [enum, import_enums]])].
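A hedged example of how flatten/1 composes with codec/2 and format/1 (the one-line inherited dictionary is illustrative): after flattening, the formatted text no longer contains @inherits.

    %% Parse a dictionary that only inherits the RFC 6733 base, flatten
    %% it, and turn the result back into dictionary text.
    flatten_example() ->
        {ok, [ParseD]} =
            diameter_make:codec("@inherits diameter_gen_base_rfc6733\n",
                                [parse, return]),
        lists:flatten(diameter_make:format(diameter_make:flatten(ParseD))).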
+
+%% ===========================================================================
+
+%% flatten/2
+
+flatten([_,_] = Keys, Dict) ->
+ [Values, Imports] = [orddict:fetch(K, Dict) || K <- Keys],
+ Vs = lists:append([Values | [V || {_Mod, V} <- Imports]]),
+ lists:foldl(fun({K,V},D) -> orddict:store(K,V,D) end,
+ Dict,
+ lists:zip([inherits | Keys], [[], Vs, []]));
+
+%% Inherited AVPs setting the 'V' flag get their value either from
+%% @avp_vendor_id in the inheriting dictionary or from @vendor in the
+%% *inherited* (not inheriting) dictionary: add the latter to
+%% @avp_vendor_id as required.
+flatten(avp_vendor_id = Key, Dict) ->
+ Def = orddict:find(vendor, Dict),
+ ModD = imports(Dict),
+ Vids = orddict:fetch(Key, Dict),
+ Avps = lists:append([As || {_,As} <- Vids]),
+ orddict:store(Key,
+ dict:fold(fun(M, As, A) -> vid(M, As -- Avps, Def, A) end,
+ Vids,
+ ModD),
+ Dict);
+
+%% Import @codecs and @custom_types from inherited dictionaries as
+%% required.
+flatten(Key, Dict) ->
+ ImportAvps = orddict:fetch(import_avps, Dict),
+ ImportItems = [{M, As}
+ || {Mod, Avps} <- ImportAvps,
+ [_|D] <- [Mod:dict()],
+ {M,As0} <- orddict:fetch(Key, D),
+ F <- [fun(A) -> lists:keymember(A, 1, Avps) end],
+ [_|_] = As <- [lists:filter(F, As0)]],
+ orddict:store(Key,
+ lists:foldl(fun merge/2,
+ orddict:fetch(Key, Dict),
+ ImportItems),
+ Dict).
-%% reformat/1
+%% merge/2
+
+merge({Mod, _Avps} = T, Acc) ->
+ merge(lists:keyfind(Mod, 1, Acc), T, Acc).
+
+merge({Mod, Avps}, {Mod, As}, Acc) ->
+ lists:keyreplace(Mod, 1, Acc, {Mod, Avps ++ As});
+merge(false, T, Acc) ->
+ [T | Acc].
+
+%% imports/1
%%
-%% Parse a dictionary file and return its formatted equivalent.
+%% Return a module() -> [AVP] dict of inherited AVPs setting the V flag.
--spec reformat(File)
- -> {ok, iolist()}
- | {error, Reason}
- when File :: string(),
- Reason :: string().
+imports(Dict) ->
+ lists:foldl(fun imports/2,
+ dict:new(),
+ orddict:fetch(import_avps, Dict)).
+
+imports({Mod, Avps}, Dict) ->
+ dict:store(Mod,
+ [A || {A,_,_,Fs} <- Avps, lists:member($V, Fs)],
+ Dict).
-reformat(File) ->
- case dict(File) of
- {ok, Dict} ->
- {ok, format(Dict)};
- {error, _} = No ->
- No
+%% vid/4
+
+vid(_, [], _, Acc) ->
+ Acc;
+vid(Mod, Avps, Def, Acc) ->
+ v(Mod:vendor_id(), Avps, Def, Acc).
+
+v(Vid, _, {ok, {Vid, _}}, Acc) -> %% same id as inheriting dictionary's
+ Acc;
+v(Vid, Avps, _, Acc) ->
+ case lists:keyfind(Vid, 1, Acc) of
+ {Vid, As} ->
+ lists:keyreplace(Vid, 1, Acc, {Vid, As ++ Avps});
+ false ->
+ [{Vid, Avps} | Acc]
end.
%% ===========================================================================
-make(_, _, _, []) ->
+parse({dict, ParseD}, _) ->
+ {ok, ParseD};
+parse(File, Opts) ->
+ diameter_dict_util:parse(File, Opts).
+
+default(Opts) ->
+ def(modes(Opts), Opts).
+
+def([], Opts) ->
+ [erl, hrl | Opts];
+def(_, Opts) ->
+ Opts.
+
+modes(Opts) ->
+ lists:filter(fun is_mode/1, Opts).
+
+is_mode(T) ->
+ lists:member(T, [erl, hrl, parse, forms]).
+
+identify([Vsn | [T|_] = ParseD])
+ when is_tuple(T) ->
+ ?VERSION == Vsn orelse erlang:error({version, {Vsn, ?VERSION}}),
+ {{dict, ParseD}, ?DEFAULT_DICT_FILE};
+identify({path, File} = T) ->
+ {T, File};
+identify(File) ->
+ Bin = iolist_to_binary([File]),
+ case is_path(Bin) of
+ true -> {{path, File}, File};
+ false -> {Bin, ?DEFAULT_DICT_FILE}
+ end.
+
+%% Interpret anything containing \n or \r as a literal dictionary,
+%% otherwise a path. (Which might be the wrong guess in the worst case.)
+is_path(Bin) ->
+ try
+ [throw(C) || <<C>> <= Bin, $\n == C orelse $\r == C],
+ true
+ catch
+ throw:_ -> false
+ end.
+
+make(File, Opts, Dict) ->
+ ok(lists:foldl(fun(M,A) -> [make(File, Opts, Dict, M) | A] end,
+ [],
+ modes(Opts))).
+
+ok([ok|_]) ->
ok;
-make(File, Opts, Dict, [Mode | Rest]) ->
+ok([_|_] = L) ->
+ {ok, lists:reverse(L)}.
+
+make(File, Opts, Dict, Mode) ->
try
- ok = diameter_codegen:from_dict(File, Dict, Opts, Mode),
- make(File, Opts, Dict, Rest)
+ diameter_codegen:from_dict(File, Dict, Opts, Mode)
catch
error: Reason ->
erlang:error({Reason, Mode, erlang:get_stacktrace()})
diff --git a/lib/diameter/src/diameter.appup.src b/lib/diameter/src/diameter.appup.src
index c6828e6705..c7ae8a2828 100644
--- a/lib/diameter/src/diameter.appup.src
+++ b/lib/diameter/src/diameter.appup.src
@@ -31,11 +31,26 @@
{"1.4", [{restart_application, diameter}]}, %% R16A
{"1.4.1", [{restart_application, diameter}]}, %% R16B
{"1.4.1.1", [{restart_application, diameter}]},
- {"1.4.2", [{load_module, diameter_types}, %% R16B01
+ {"1.4.2", [{load_module, diameter_codec}, %% R16B01
+ {load_module, diameter_types},
{load_module, diameter_config},
+ {load_module, diameter_capx},
{load_module, diameter_service},
{load_module, diameter_peer_fsm},
- {load_module, diameter_watchdog}]}
+ {load_module, diameter_watchdog},
+ {load_module, diameter}]},
+ {"1.4.3", [{load_module, diameter_capx}, %% R16B02
+ {load_module, diameter_service},
+ {load_module, diameter_watchdog},
+ {load_module, diameter_codec},
+ {load_module, diameter_types},
+ {load_module, diameter_config},
+ {load_module, diameter}]},
+ {"1.4.4", [{load_module, diameter_capx},
+ {load_module, diameter_service},
+ {load_module, diameter_watchdog},
+ {load_module, diameter_config},
+ {load_module, diameter}]}
],
[
{"0.9", [{restart_application, diameter}]},
@@ -49,6 +64,18 @@
{"1.4", [{restart_application, diameter}]},
{"1.4.1", [{restart_application, diameter}]},
{"1.4.1.1", [{restart_application, diameter}]},
- {"1.4.2", [{restart_application, diameter}]}
+ {"1.4.2", [{restart_application, diameter}]},
+ {"1.4.3", [{load_module, diameter_types},
+ {load_module, diameter_config},
+ {load_module, diameter_codec},
+ {load_module, diameter_service},
+ {load_module, diameter_watchdog},
+ {load_module, diameter_capx},
+ {load_module, diameter}]},
+ {"1.4.4", [{load_module, diameter_capx},
+ {load_module, diameter_config},
+ {load_module, diameter_service},
+ {load_module, diameter_watchdog},
+ {load_module, diameter}]}
]
}.
diff --git a/lib/diameter/test/diameter_codec_test.erl b/lib/diameter/test/diameter_codec_test.erl
index 295d23912b..0b4568a9e5 100644
--- a/lib/diameter/test/diameter_codec_test.erl
+++ b/lib/diameter/test/diameter_codec_test.erl
@@ -473,9 +473,6 @@ pack(true, Arity, Avp, Value, Acc) ->
pack(false, Arity, Avp, Value, Acc) ->
min(Arity, Avp, Value, Acc).
-all(Mod, Name, Avp, V) ->
- all(Mod:avp_arity(Name, Avp), Avp, V).
-
all(1, Avp, V) ->
{Avp, V};
all({0,'*'}, Avp, V) ->
@@ -489,9 +486,6 @@ a(N, Avp, V)
when N /= 0 ->
{Avp, lists:duplicate(N,V)}.
-min(Mod, Name, Avp, V, Acc) ->
- min(Mod:avp_arity(Name, Avp), Avp, V, Acc).
-
min(1, Avp, V, Acc) ->
[{Avp, V} | Acc];
min({0,_}, _, _, Acc) ->
diff --git a/lib/diameter/test/diameter_compiler_SUITE.erl b/lib/diameter/test/diameter_compiler_SUITE.erl
index 81722c8dca..ed369e8af3 100644
--- a/lib/diameter/test/diameter_compiler_SUITE.erl
+++ b/lib/diameter/test/diameter_compiler_SUITE.erl
@@ -31,10 +31,15 @@
%% testcases
-export([format/1, format/2,
replace/1, replace/2,
- generate/1, generate/4]).
+ generate/1, generate/4,
+ flatten1/1, flatten1/3,
+ flatten2/1]).
-export([dict/0]). %% fake dictionary module
+%% dictionary callbacks for flatten2/1
+-export(['A1'/3, 'Unsigned32'/3]).
+
-define(base, "base_rfc3588.dia").
-define(util, diameter_util).
-define(S, atom_to_list).
@@ -45,7 +50,7 @@
%% RE/Replacement (in the sense of re:replace/4) pairs for morphing
%% base_rfc3588.dia. The key is 'ok' or the expected error as
%% returned in the first element of the error tuple returned by
-%% diameter_dict_util:parse/2.
+%% diameter_make:codec/2.
-define(REPLACE,
[{ok,
"",
@@ -335,7 +340,9 @@ suite() ->
all() ->
[format,
replace,
- generate].
+ generate,
+ flatten1,
+ flatten2].
%% Error handling testcases will make an erroneous dictionary out of
%% the base dictionary and check that the expected error results.
@@ -361,10 +368,18 @@ format(Config) ->
format(Mods, Bin) ->
B = modify(Bin, Mods),
- {ok, Dict} = diameter_dict_util:parse(B, []),
- {ok, D} = diameter_dict_util:parse(diameter_dict_util:format(Dict), []),
+ {ok, Dict} = parse(B, []),
+ {ok, D} = parse(diameter_make:format(Dict), []),
{Dict, Dict} = {Dict, D}.
+parse(File, Opts) ->
+ case diameter_make:codec(File, [parse, hrl, return | Opts]) of
+ {ok, [Dict, _]} ->
+ {ok, Dict};
+ {error, _} = E ->
+ E
+ end.
+
%% ===========================================================================
%% replace/1
%%
@@ -379,13 +394,10 @@ replace(Config) ->
replace({E, Mods}, Bin) ->
B = modify(Bin, Mods),
- case {E, diameter_dict_util:parse(B, [{include, here()}]), Mods} of
+ case {E, parse(B, [{include, here()}]), Mods} of
{ok, {ok, Dict}, _} ->
Dict;
- {_, {error, {E,_} = T}, _} ->
- S = diameter_dict_util:format_error(T),
- true = nochar($", S, E),
- true = nochar($', S, E),
+ {_, {error, S}, _} ->
S
end.
@@ -403,20 +415,127 @@ generate(Config) ->
[] = ?util:run([{?MODULE, [generate, M, Bin, N, T]}
|| {E,N} <- Rs,
{ok, M} <- [norm(E)],
- T <- [erl, hrl, spec]]).
+ T <- [erl, hrl, parse, forms]]).
generate(Mods, Bin, N, Mode) ->
B = modify(Bin, Mods ++ [{"@name .*", "@name dict" ++ ?L(N)}]),
- {ok, Dict} = diameter_dict_util:parse(B, []),
+ {ok, Dict} = parse(B, []),
File = "dict" ++ integer_to_list(N),
- {_, ok} = {Dict, diameter_codegen:from_dict("dict",
- Dict,
- [{name, File},
- {prefix, "base"},
- debug],
- Mode)},
- Mode == erl
- andalso ({ok, _} = compile:file(File ++ ".erl", [return_errors])).
+ {_, ok} = {Dict, diameter_make:codec(Dict,
+ [{name, File},
+ {prefix, "base"},
+ Mode])},
+ generate(Mode, File, Dict).
+
+generate(erl, File, _) ->
+ {ok, _} = compile:file(File ++ ".erl", [return_errors]);
+
+generate(forms, File, _) ->
+ {ok, [_]} = file:consult(File ++ ".F");
+
+generate(parse, File, Dict) ->
+ {ok, [Dict]} = file:consult(File ++ ".D"), %% assert
+ {ok, [F]} = diameter_make:codec(Dict, [forms, return]),
+ {ok, _, _, _} = compile:forms(F, [return]);
+
+generate(hrl, _, _) ->
+ ok.
+
+%% ===========================================================================
+%% flatten1/1
+
+flatten1(_Config) ->
+ [Vsn | BaseD] = diameter_gen_base_rfc6733:dict(),
+ {ok, I} = parse("@inherits diameter_gen_base_rfc6733\n", []),
+ [Vsn | FlatD] = diameter_make:flatten(I),
+ [] = ?util:run([{?MODULE, [flatten1, K, BaseD, FlatD]}
+ || K <- [avp_types, grouped, enum]]).
+
+flatten1(Key, BaseD, FlatD) ->
+ Vs = orddict:fetch(Key, BaseD),
+ Vs = orddict:fetch(Key, FlatD).
+
+%% ===========================================================================
+%% flatten2/1
+
+flatten2(_Config) ->
+ Dict1 =
+ "@name diameter_test1\n"
+ "@prefix diameter_test1\n"
+ "@vendor 666 test\n"
+ "@avp_vendor_id 111 A1 A3\n"
+ "@avp_vendor_id 222 A4 A6\n"
+ "@custom_types " ++ ?S(?MODULE) ++ " A1 A4\n"
+ "@codecs " ++ ?S(?MODULE) ++ " A3 A6\n"
+ "@avp_types\n"
+ "A1 1001 Unsigned32 V\n"
+ "A2 1002 Unsigned32 V\n"
+ "A3 1003 Unsigned32 V\n"
+ "A4 1004 Unsigned32 V\n"
+ "A5 1005 Unsigned32 V\n"
+ "A6 1006 Unsigned32 V\n"
+ "@end ignored\n",
+ Dict2 =
+ "@name diameter_test2\n"
+ "@prefix diameter_test2\n"
+ "@vendor 777 test\n"
+ "@inherits diameter_test1 A1 A2 A3\n"
+ "@inherits diameter_gen_base_rfc6733\n"
+ "@avp_vendor_id 333 A1\n",
+
+ {ok, [E1, F1]}
+ = diameter_make:codec(Dict1, [erl, forms, return]),
+ ct:pal("~s", [E1]),
+ diameter_test1 = M1 = load_forms(F1),
+
+ {ok, [D2, E2, F2]}
+ = diameter_make:codec(Dict2, [parse, erl, forms, return]),
+ ct:pal("~s", [E2]),
+ diameter_test2 = M2 = load_forms(F2),
+
+ Flat = lists:flatten(diameter_make:format(diameter_make:flatten(D2))),
+ ct:pal("~s", [Flat]),
+ {ok, [E3, F3]}
+ = diameter_make:codec(Flat, [erl, forms, return,
+ {name, "diameter_test3"}]),
+ ct:pal("~s", [E3]),
+ diameter_test3 = M3 = load_forms(F3),
+
+ [{1001, 111, M1, 'A1'}, %% @avp_vendor_id
+ {1002, 666, M1, 'A2'}, %% @vendor
+ {1003, 111, M1, 'A3'}, %% @avp_vendor_id
+ {1004, 222, M1, 'A4'}, %% @avp_vendor_id
+ {1005, 666, M1, 'A5'}, %% @vendor
+ {1006, 222, M1, 'A6'}, %% @avp_vendor_id
+ {1001, 333, M2, 'A1'}, %% M2 @avp_vendor_id
+ {1002, 666, M2, 'A2'}, %% M1 @vendor
+ {1003, 666, M2, 'A3'}, %% M1 @vendor
+ {1001, 333, M3, 'A1'}, %% (as for M2)
+ {1002, 666, M3, 'A2'}, %% "
+ {1003, 666, M3, 'A3'}] %% "
+ = [{Code, Vid, Mod, Name}
+ || Mod <- [M1, M2, M3],
+ Code <- lists:seq(1001, 1006),
+ Vid <- [666, 111, 222, 777, 333],
+ {Name, 'Unsigned32'} <- [Mod:avp_name(Code, Vid)]],
+
+ [] = [{A,T,M,RC} || A <- ['A1', 'A3'],
+ T <- [encode, decode],
+ M <- [M2, M3],
+ Ref <- [make_ref()],
+ RC <- [M:avp(T, Ref, A)],
+ RC /= {T, Ref}].
+
+'A1'(T, 'Unsigned32', Ref) ->
+ {T, Ref}.
+
+'Unsigned32'(T, 'A3', Ref) ->
+ {T, Ref}.
+
+load_forms(Forms) ->
+ {ok, Mod, Bin, _} = compile:forms(Forms, [return]),
+ {module, Mod} = code:load_binary(Mod, ?S(Mod), Bin),
+ Mod.
%% ===========================================================================
@@ -428,9 +547,6 @@ norm({E, RE, Repl}) ->
norm({_,_} = T) ->
T.
-nochar(Char, Str, Err) ->
- Err == parse orelse not lists:member(Char, Str) orelse Str.
-
here() ->
filename:dirname(code:which(?MODULE)).
diff --git a/lib/diameter/test/diameter_examples_SUITE.erl b/lib/diameter/test/diameter_examples_SUITE.erl
index 75b542b679..02c8d34361 100644
--- a/lib/diameter/test/diameter_examples_SUITE.erl
+++ b/lib/diameter/test/diameter_examples_SUITE.erl
@@ -133,7 +133,7 @@ make(Path, Dict0) ->
try
ok = to_erl(Path, [{name, Name},
{prefix, Pre},
- {inherits, "rfc3588_base/" ++ Mod0}
+ {inherits, "common/" ++ Mod0}
| [{inherits, D ++ "/" ++ M ++ Suf}
|| {D,M} <- dep(Dict)]]),
ok = to_beam(Name)
diff --git a/lib/diameter/vsn.mk b/lib/diameter/vsn.mk
index e003fe76b9..9fda067f2b 100644
--- a/lib/diameter/vsn.mk
+++ b/lib/diameter/vsn.mk
@@ -18,5 +18,5 @@
# %CopyrightEnd%
APPLICATION = diameter
-DIAMETER_VSN = 1.4.3
+DIAMETER_VSN = 1.5
APP_VSN = $(APPLICATION)-$(DIAMETER_VSN)$(PRE_VSN)
diff --git a/lib/eldap/doc/src/eldap.xml b/lib/eldap/doc/src/eldap.xml
index 30767abd7e..5b81716543 100644
--- a/lib/eldap/doc/src/eldap.xml
+++ b/lib/eldap/doc/src/eldap.xml
@@ -35,6 +35,7 @@
<p>References:</p>
<list type="bulleted">
<item> <p>RFC 4510 - RFC 4519</p> </item>
+ <item> <p>RFC 2830</p> </item>
</list>
<p>The above publications can be found at <url href="http://www.ietf.org">IETF</url>.
</p>
@@ -87,6 +88,38 @@ filter() See present/1, substrings/2,
</desc>
</func>
<func>
+ <name>start_tls(Handle, Options) -> ok | {error,Error}</name>
+ <fsummary>Upgrade a connection to TLS.</fsummary>
+ <desc>
+ <p>Same as start_tls(Handle, Options, infinity)</p>
+ </desc>
+ </func>
+ <func>
+ <name>start_tls(Handle, Options, Timeout) -> ok | {error,Error}</name>
+ <fsummary>Upgrade a connection to TLS.</fsummary>
+ <type>
+ <v>Handle = handle()</v>
+ <v>Options = ssl:ssl_options()</v>
+ <v>Timeout = infinity | positive_integer()</v>
+ </type>
+ <desc>
+ <p>Upgrade the connection associated with <c>Handle</c> to a tls connection if possible.</p>
+ <p>The upgrade is done in two phases: first the server is asked for permission to upgrade. Second, if the request is acknowledged, the upgrade is performed.</p>
+ <p>Error responses from phase one will not affect the current encryption state of the connection. Those responses are:</p>
+ <taglist>
+ <tag><c>tls_already_started</c></tag>
+ <item>The connection is already encrypted. The connection is not affected.</item>
+ <tag><c>{response,ResponseFromServer}</c></tag>
+ <item>The upgrade was refused by the LDAP server. The <c>ResponseFromServer</c> is an atom delivered by the LDAP server, as explained in section 2.3 of RFC 2830. The connection is not affected, so it is still unencrypted.</item>
+ </taglist>
+ <p>Errors in the second phase will, however, end the connection:</p>
+ <taglist>
+ <tag><c>Error</c></tag>
+ <item>Any error returned by ssl:connect/3</item>
+ </taglist>
+ </desc>
+ </func>
+ <func>
<name>simple_bind(Handle, Dn, Password) -> ok | {error, Reason}</name>
<fsummary>Authenticate the connection.</fsummary>
<type>
diff --git a/lib/eldap/src/eldap.erl b/lib/eldap/src/eldap.erl
index 8ebb88e35b..af5bf94c97 100644
--- a/lib/eldap/src/eldap.erl
+++ b/lib/eldap/src/eldap.erl
@@ -6,10 +6,12 @@
%%% draft-ietf-asid-ldap-c-api-00.txt
%%%
%%% Copyright (c) 2010 Torbjorn Tornkvist
+%%% Copyright Ericsson AB 2011-2013. All Rights Reserved.
%%% See MIT-LICENSE at the top dir for licensing information.
%%% --------------------------------------------------------------------
-vc('$Id$ ').
-export([open/1,open/2,simple_bind/3,controlling_process/2,
+ start_tls/2, start_tls/3,
baseObject/0,singleLevel/0,wholeSubtree/0,close/1,
equalityMatch/2,greaterOrEqual/2,lessOrEqual/2,
approxMatch/2,search/2,substrings/2,present/1,
@@ -36,14 +38,16 @@
host, % Host running LDAP server
port = ?LDAP_PORT, % The LDAP server port
fd, % Socket filedescriptor.
+ prev_fd, % Socket that was upgraded by start_tls
binddn = "", % Name of the entry to bind as
passwd, % Password for (above) entry
id = 0, % LDAP Request ID
log, % User provided log function
timeout = infinity, % Request timeout
anon_auth = false, % Allow anonymous authentication
- use_tls = false, % LDAP/LDAPS
- tls_opts = [] % ssl:ssloption()
+ ldaps = false, % LDAP/LDAPS
+ using_tls = false, % true if LDAPS or START_TLS executed
+ tls_opts = [] % ssl:ssloption()
}).
%%% For debug purposes
@@ -77,6 +81,16 @@ open(Hosts, Opts) when is_list(Hosts), is_list(Opts) ->
recv(Pid).
%%% --------------------------------------------------------------------
+%%% Upgrade an existing connection to tls
+%%% --------------------------------------------------------------------
+start_tls(Handle, TlsOptions) ->
+ start_tls(Handle, TlsOptions, infinity).
+
+start_tls(Handle, TlsOptions, Timeout) ->
+ send(Handle, {start_tls,TlsOptions,Timeout}),
+ recv(Handle).
+
+%%% --------------------------------------------------------------------
%%% Shutdown connection (and process) asynchronous.
%%% --------------------------------------------------------------------
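A short usage sketch of the new upgrade functions added above (host, credentials and option values are placeholders, not taken from this patch): open a plain connection, upgrade it with start_tls/3, then bind as usual.

    %% Upgrade an existing (tcp) connection to TLS before binding.
    upgrade_example() ->
        {ok, Handle} = eldap:open(["ldap.example.com"], [{port, 389}]),
        ok = eldap:start_tls(Handle,
                             [{cacertfile, "/path/to/cacerts.pem"}],
                             5000),
        ok = eldap:simple_bind(Handle, "cn=Manager,dc=example,dc=com", "secret"),
        eldap:close(Handle).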
@@ -351,11 +365,11 @@ parse_args([{anon_auth, true}|T], Cpid, Data) ->
parse_args([{anon_auth, _}|T], Cpid, Data) ->
parse_args(T, Cpid, Data);
parse_args([{ssl, true}|T], Cpid, Data) ->
- parse_args(T, Cpid, Data#eldap{use_tls = true});
+ parse_args(T, Cpid, Data#eldap{ldaps = true, using_tls=true});
parse_args([{ssl, _}|T], Cpid, Data) ->
parse_args(T, Cpid, Data);
parse_args([{sslopts, Opts}|T], Cpid, Data) when is_list(Opts) ->
- parse_args(T, Cpid, Data#eldap{use_tls = true, tls_opts = Opts ++ Data#eldap.tls_opts});
+ parse_args(T, Cpid, Data#eldap{ldaps = true, using_tls=true, tls_opts = Opts ++ Data#eldap.tls_opts});
parse_args([{sslopts, _}|T], Cpid, Data) ->
parse_args(T, Cpid, Data);
parse_args([{log, F}|T], Cpid, Data) when is_function(F) ->
@@ -386,10 +400,10 @@ try_connect([Host|Hosts], Data) ->
try_connect([],_) ->
{error,"connect failed"}.
-do_connect(Host, Data, Opts) when Data#eldap.use_tls == false ->
+do_connect(Host, Data, Opts) when Data#eldap.ldaps == false ->
gen_tcp:connect(Host, Data#eldap.port, Opts, Data#eldap.timeout);
-do_connect(Host, Data, Opts) when Data#eldap.use_tls == true ->
- ssl:connect(Host, Data#eldap.port, Opts ++ Data#eldap.tls_opts).
+do_connect(Host, Data, Opts) when Data#eldap.ldaps == true ->
+ ssl:connect(Host, Data#eldap.port, Opts++Data#eldap.tls_opts).
loop(Cpid, Data) ->
receive
@@ -430,6 +444,11 @@ loop(Cpid, Data) ->
?PRINT("New Cpid is: ~p~n",[NewCpid]),
?MODULE:loop(NewCpid, Data);
+ {From, {start_tls,TlsOptions,Timeout}} ->
+ {Res,NewData} = do_start_tls(Data, TlsOptions, Timeout),
+ send(From,Res),
+ ?MODULE:loop(Cpid, NewData);
+
{_From, close} ->
unlink(Cpid),
exit(closed);
@@ -444,6 +463,51 @@ loop(Cpid, Data) ->
end.
+
+%%% --------------------------------------------------------------------
+%%% startTLS Request
+%%% --------------------------------------------------------------------
+
+do_start_tls(Data=#eldap{using_tls=true}, _, _) ->
+ {{error,tls_already_started}, Data};
+do_start_tls(Data=#eldap{fd=FD} , TlsOptions, Timeout) ->
+ case catch exec_start_tls(Data) of
+ {ok,NewData} ->
+ case ssl:connect(FD,TlsOptions,Timeout) of
+ {ok, SslSocket} ->
+ {ok, NewData#eldap{prev_fd = FD,
+ fd = SslSocket,
+ using_tls = true
+ }};
+ {error,Error} ->
+ {{error,Error}, Data}
+ end;
+ {error,Error} -> {{error,Error},Data};
+ Else -> {{error,Else},Data}
+ end.
+
+-define(START_TLS_OID, "1.3.6.1.4.1.1466.20037").
+
+exec_start_tls(Data) ->
+ Req = #'ExtendedRequest'{requestName = ?START_TLS_OID},
+ Reply = request(Data#eldap.fd, Data, Data#eldap.id, {extendedReq, Req}),
+ exec_extended_req_reply(Data, Reply).
+
+exec_extended_req_reply(Data, {ok,Msg}) when
+ Msg#'LDAPMessage'.messageID == Data#eldap.id ->
+ case Msg#'LDAPMessage'.protocolOp of
+ {extendedResp, Result} ->
+ case Result#'ExtendedResponse'.resultCode of
+ success ->
+ {ok,Data};
+ Error ->
+ {error, {response,Error}}
+ end;
+ Other -> {error, Other}
+ end;
+exec_extended_req_reply(_, Error) ->
+ {error, Error}.
+
%%% --------------------------------------------------------------------
%%% bindRequest
%%% --------------------------------------------------------------------
@@ -685,14 +749,14 @@ send_request(S, Data, ID, Request) ->
Else -> Else
end.
-do_send(S, Data, Bytes) when Data#eldap.use_tls == false ->
+do_send(S, Data, Bytes) when Data#eldap.using_tls == false ->
gen_tcp:send(S, Bytes);
-do_send(S, Data, Bytes) when Data#eldap.use_tls == true ->
+do_send(S, Data, Bytes) when Data#eldap.using_tls == true ->
ssl:send(S, Bytes).
-do_recv(S, #eldap{use_tls=false, timeout=Timeout}, Len) ->
+do_recv(S, #eldap{using_tls=false, timeout=Timeout}, Len) ->
gen_tcp:recv(S, Len, Timeout);
-do_recv(S, #eldap{use_tls=true, timeout=Timeout}, Len) ->
+do_recv(S, #eldap{using_tls=true, timeout=Timeout}, Len) ->
ssl:recv(S, Len, Timeout).
recv_response(S, Data) ->
@@ -800,7 +864,7 @@ recv(From) ->
{error, {internal_error, Reason}}
end.
-ldap_closed_p(Data, Emsg) when Data#eldap.use_tls == true ->
+ldap_closed_p(Data, Emsg) when Data#eldap.using_tls == true ->
%% Check if the SSL socket seems to be alive or not
case catch ssl:sockname(Data#eldap.fd) of
{error, _} ->
diff --git a/lib/eldap/test/README b/lib/eldap/test/README
new file mode 100644
index 0000000000..8774db1504
--- /dev/null
+++ b/lib/eldap/test/README
@@ -0,0 +1,36 @@
+
+This works for me on Ubuntu.
+
+To run those tests you need
+ 1) some certificates
+ 2) a running LDAP server, for example OpenLDAP's slapd. See http://www.openldap.org/doc/admin24
+
+1)-------
+To generate certificates:
+erl
+> make_certs:all("/dev/null", "eldap_basic_SUITE_data/certs").
+
+2)-------
+To start slapd:
+ sudo slapd -f $ERL_TOP/lib/eldap/test/ldap_server/slapd.conf -F /tmp/slapd/slapd.d -h "ldap://localhost:9876 ldaps://localhost:9877"
+
+This will, however, not work out of the box, since slapd is guarded by AppArmor, which checks that slapd does not access files other than those allowed...
+
+To make a local extension of the allowed operations:
+ sudo emacs /etc/apparmor.d/local/usr.sbin.slapd
+
+and, after the change (yes, at least on Ubuntu it is correct to edit the file under ../local/.. but to run the command below with the other file):
+
+ sudo apparmor_parser -r /etc/apparmor.d/usr.sbin.slapd
+
+
+The local file looks like this for me:
+
+# Site-specific additions and overrides for usr.sbin.slapd.
+# For more details, please see /etc/apparmor.d/local/README.
+
+/etc/pkcs11/** r,
+/usr/lib/x86_64-linux-gnu/** rm,
+
+/ldisk/hans_otp/otp/lib/eldap/test/** rw,
+/tmp/slapd/** rwk,
diff --git a/lib/eldap/test/eldap.cfg b/lib/eldap/test/eldap.cfg
new file mode 100644
index 0000000000..3a24afa067
--- /dev/null
+++ b/lib/eldap/test/eldap.cfg
@@ -0,0 +1 @@
+{eldap_server,{"localhost",389}}.
diff --git a/lib/eldap/test/eldap_basic_SUITE.erl b/lib/eldap/test/eldap_basic_SUITE.erl
index c7e3052b29..bf5fa83c3c 100644
--- a/lib/eldap/test/eldap_basic_SUITE.erl
+++ b/lib/eldap/test/eldap_basic_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2012. All Rights Reserved.
+%% Copyright Ericsson AB 2012-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -27,39 +27,45 @@
-define(TIMEOUT, 120000). % 2 min
-init_per_suite(Config0) ->
- {{EldapHost,Port}, Config1} =
- case catch ct:get_config(eldap_server, undefined) of
- undefined -> %% Dev test only
- Server = {"localhost", 9876},
- {Server, [{eldap_server, {"localhost", 9876}}|Config0]};
- {'EXIT', _} -> %% Dev test only
- Server = {"localhost", 9876},
- {Server, [{eldap_server, {"localhost", 9876}}|Config0]};
- Server ->
- {Server, [{eldap_server, Server}|Config0]}
- end,
- %% Add path for this test run
+init_per_suite(Config) ->
+ StartSsl = try ssl:start()
+ catch
+ Error:Reason ->
+ {skip, lists:flatten(io_lib:format("eldap init_per_suite failed to start ssl Error=~p Reason=~p", [Error, Reason]))}
+ end,
+ case StartSsl of
+ ok ->
+ chk_config(ldap_server, {"localhost",9876},
+ chk_config(ldaps_server, {"localhost",9877},
+ Config));
+ _ ->
+ StartSsl
+ end.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config0) ->
+ {EldapHost,Port} = proplists:get_value(ldap_server,Config0),
try
- {ok, Handle} = eldap:open([EldapHost], [{port, Port}]),
+ {ok, Handle} = eldap:open([EldapHost], [{port,Port}]),
ok = eldap:simple_bind(Handle, "cn=Manager,dc=ericsson,dc=se", "hejsan"),
{ok, MyHost} = inet:gethostname(),
Path = "dc="++MyHost++",dc=ericsson,dc=se",
- Config = [{eldap_path,Path}|Config1],
eldap:add(Handle,"dc=ericsson,dc=se",
[{"objectclass", ["dcObject", "organization"]},
{"dc", ["ericsson"]}, {"o", ["Testing"]}]),
eldap:add(Handle,Path,
[{"objectclass", ["dcObject", "organization"]},
{"dc", [MyHost]}, {"o", ["Test machine"]}]),
- Config
+ [{eldap_path,Path}|Config0]
catch error:{badmatch,Error} ->
io:format("Eldap init error ~p~n ~p~n",[Error, erlang:get_stacktrace()]),
- {skip, lists:flatten(io_lib:format("Ldap init failed with host ~p", [EldapHost]))}
+ {skip, lists:flatten(io_lib:format("Ldap init failed with host ~p:~p. Error=~p", [EldapHost,Port,Error]))}
end.
-end_per_suite(Config) ->
- %% Cleanup everything
- {EHost, Port} = proplists:get_value(eldap_server, Config),
+
+end_per_testcase(_TestCase, Config) ->
+ {EHost, Port} = proplists:get_value(ldap_server, Config),
Path = proplists:get_value(eldap_path, Config),
{ok, H} = eldap:open([EHost], [{port, Port}]),
ok = eldap:simple_bind(H, "cn=Manager,dc=ericsson,dc=se", "hejsan"),
@@ -71,16 +77,20 @@ end_per_suite(Config) ->
[ok = eldap:delete(H, Entry) || {eldap_entry, Entry, _} <- Entries];
_ -> ignore
end,
- ok.
-init_per_testcase(_TestCase, Config) -> Config.
-end_per_testcase(_TestCase, _Config) -> ok.
+ ok.
%% suite() ->
all() ->
[app,
- api].
+ api,
+ ssl_api,
+ start_tls,
+ tls_operations,
+ start_tls_twice,
+ start_tls_on_ssl
+ ].
app(doc) -> "Test that the eldap app file is ok";
app(suite) -> [];
@@ -90,21 +100,89 @@ app(Config) when is_list(Config) ->
api(doc) -> "Basic test that all api functions works as expected";
api(suite) -> [];
api(Config) ->
- {Host,Port} = proplists:get_value(eldap_server, Config),
+ {Host,Port} = proplists:get_value(ldap_server, Config),
{ok, H} = eldap:open([Host], [{port,Port}]),
%% {ok, H} = eldap:open([Host], [{port,Port+1}, {ssl, true}]),
+ do_api_checks(H, Config),
+ eldap:close(H),
+ ok.
+
+
+ssl_api(doc) -> "Basic test that all api functions work as expected";
+ssl_api(suite) -> [];
+ssl_api(Config) ->
+ {Host,Port} = proplists:get_value(ldaps_server, Config),
+ {ok, H} = eldap:open([Host], [{port,Port}, {ssl,true}]),
+ do_api_checks(H, Config),
+ eldap:close(H),
+ ok.
+
+
+start_tls(doc) -> "Test that an existing (tcp) connection can be upgraded to tls";
+start_tls(suite) -> [];
+start_tls(Config) ->
+ {Host,Port} = proplists:get_value(ldap_server, Config),
+ {ok, H} = eldap:open([Host], [{port,Port}]),
+ ok = eldap:start_tls(H, [
+ {keyfile, filename:join([proplists:get_value(data_dir,Config),
+ "certs/client/key.pem"])}
+ ]),
+ eldap:close(H).
+
+
+tls_operations(doc) -> "Test that an upgraded connection is usable for ldap stuff";
+tls_operations(suite) -> [];
+tls_operations(Config) ->
+ {Host,Port} = proplists:get_value(ldap_server, Config),
+ {ok, H} = eldap:open([Host], [{port,Port}]),
+ ok = eldap:start_tls(H, [
+ {keyfile, filename:join([proplists:get_value(data_dir,Config),
+ "certs/client/key.pem"])}
+ ]),
+ do_api_checks(H, Config),
+ eldap:close(H).
+
+start_tls_twice(doc) -> "Test that start_tls on an already upgraded connection fails";
+start_tls_twice(suite) -> [];
+start_tls_twice(Config) ->
+ {Host,Port} = proplists:get_value(ldap_server, Config),
+ {ok, H} = eldap:open([Host], [{port,Port}]),
+ ok = eldap:start_tls(H, []),
+ {error,tls_already_started} = eldap:start_tls(H, []),
+ do_api_checks(H, Config),
+ eldap:close(H).
+
+
+start_tls_on_ssl(doc) -> "Test that start_tls on an ldaps connection fails";
+start_tls_on_ssl(suite) -> [];
+start_tls_on_ssl(Config) ->
+ {Host,Port} = proplists:get_value(ldaps_server, Config),
+ {ok, H} = eldap:open([Host], [{port,Port}, {ssl,true}]),
+ {error,tls_already_started} = eldap:start_tls(H, []),
+ do_api_checks(H, Config),
+ eldap:close(H).
+
+
+%%%--------------------------------------------------------------------------------
+chk_config(Key, Default, Config) ->
+ case catch ct:get_config(ldap_server, undefined) of
+ undefined -> [{Key,Default} | Config ];
+ {'EXIT',_} -> [{Key,Default} | Config ];
+ Value -> [{Key,Value} | Config]
+ end.
+
+
+
+do_api_checks(H, Config) ->
BasePath = proplists:get_value(eldap_path, Config),
+
All = fun(Where) ->
eldap:search(H, #eldap_search{base=Where,
filter=eldap:present("objectclass"),
scope= eldap:wholeSubtree()})
end,
- Search = fun(Filter) ->
- eldap:search(H, #eldap_search{base=BasePath,
- filter=Filter,
- scope=eldap:singleLevel()})
- end,
- {ok, #eldap_search_result{entries=[_]}} = All(BasePath),
+ {ok, #eldap_search_result{entries=[_XYZ]}} = All(BasePath),
+%% ct:log("XYZ=~p",[_XYZ]),
{error, noSuchObject} = All("cn=Bar,"++BasePath),
{error, _} = eldap:add(H, "cn=Jonas Jonsson," ++ BasePath,
@@ -112,52 +190,67 @@ api(Config) ->
{"cn", ["Jonas Jonsson"]}, {"sn", ["Jonsson"]}]),
eldap:simple_bind(H, "cn=Manager,dc=ericsson,dc=se", "hejsan"),
- %% Add
+ chk_add(H, BasePath),
+ {ok,FB} = chk_search(H, BasePath),
+ chk_modify(H, FB),
+ chk_delete(H, BasePath),
+ chk_modify_dn(H, FB).
+
+
+chk_add(H, BasePath) ->
ok = eldap:add(H, "cn=Jonas Jonsson," ++ BasePath,
[{"objectclass", ["person"]},
{"cn", ["Jonas Jonsson"]}, {"sn", ["Jonsson"]}]),
+ {error, entryAlreadyExists} = eldap:add(H, "cn=Jonas Jonsson," ++ BasePath,
+ [{"objectclass", ["person"]},
+ {"cn", ["Jonas Jonsson"]}, {"sn", ["Jonsson"]}]),
ok = eldap:add(H, "cn=Foo Bar," ++ BasePath,
[{"objectclass", ["person"]},
{"cn", ["Foo Bar"]}, {"sn", ["Bar"]}, {"telephoneNumber", ["555-1232", "555-5432"]}]),
ok = eldap:add(H, "ou=Team," ++ BasePath,
[{"objectclass", ["organizationalUnit"]},
- {"ou", ["Team"]}]),
+ {"ou", ["Team"]}]).
- %% Search
+chk_search(H, BasePath) ->
+ Search = fun(Filter) ->
+ eldap:search(H, #eldap_search{base=BasePath,
+ filter=Filter,
+ scope=eldap:singleLevel()})
+ end,
JJSR = {ok, #eldap_search_result{entries=[#eldap_entry{}]}} = Search(eldap:equalityMatch("sn", "Jonsson")),
JJSR = Search(eldap:substrings("sn", [{any, "ss"}])),
FBSR = {ok, #eldap_search_result{entries=[#eldap_entry{object_name=FB}]}} =
Search(eldap:substrings("sn", [{any, "a"}])),
FBSR = Search(eldap:substrings("sn", [{initial, "B"}])),
FBSR = Search(eldap:substrings("sn", [{final, "r"}])),
-
F_AND = eldap:'and'([eldap:present("objectclass"), eldap:present("ou")]),
{ok, #eldap_search_result{entries=[#eldap_entry{}]}} = Search(F_AND),
F_NOT = eldap:'and'([eldap:present("objectclass"), eldap:'not'(eldap:present("ou"))]),
{ok, #eldap_search_result{entries=[#eldap_entry{}, #eldap_entry{}]}} = Search(F_NOT),
+ {ok,FB}. %% FIXME
- %% MODIFY
+chk_modify(H, FB) ->
Mod = [eldap:mod_replace("telephoneNumber", ["555-12345"]),
eldap:mod_add("description", ["Nice guy"])],
%% io:format("MOD ~p ~p ~n",[FB, Mod]),
ok = eldap:modify(H, FB, Mod),
%% DELETE ATTR
- ok = eldap:modify(H, FB, [eldap:mod_delete("telephoneNumber", [])]),
+ ok = eldap:modify(H, FB, [eldap:mod_delete("telephoneNumber", [])]).
- %% DELETE
+
+chk_delete(H, BasePath) ->
{error, entryAlreadyExists} = eldap:add(H, "cn=Jonas Jonsson," ++ BasePath,
[{"objectclass", ["person"]},
{"cn", ["Jonas Jonsson"]}, {"sn", ["Jonsson"]}]),
ok = eldap:delete(H, "cn=Jonas Jonsson," ++ BasePath),
- {error, noSuchObject} = eldap:delete(H, "cn=Jonas Jonsson," ++ BasePath),
+ {error, noSuchObject} = eldap:delete(H, "cn=Jonas Jonsson," ++ BasePath).
- %% MODIFY_DN
- ok = eldap:modify_dn(H, FB, "cn=Niclas Andre", true, ""),
- %%io:format("Res ~p~n ~p~n",[R, All(BasePath)]),
+chk_modify_dn(H, FB) ->
+ ok = eldap:modify_dn(H, FB, "cn=Niclas Andre", true, "").
+ %%io:format("Res ~p~n ~p~n",[R, All(BasePath)]).
- eldap:close(H),
- ok.
+%%%----------------
add(H, Attr, Value, Path0, Attrs, Class) ->
Path = case Path0 of
[] -> Attr ++ "=" ++ Value;
diff --git a/lib/eldap/test/eldap_basic_SUITE_data/certs/README b/lib/eldap/test/eldap_basic_SUITE_data/certs/README
new file mode 100644
index 0000000000..a7c8e9dc2e
--- /dev/null
+++ b/lib/eldap/test/eldap_basic_SUITE_data/certs/README
@@ -0,0 +1 @@
+See ../../README
diff --git a/lib/eldap/test/ldap_server/slapd.conf b/lib/eldap/test/ldap_server/slapd.conf
index 87be676d9f..eca298c866 100644
--- a/lib/eldap/test/ldap_server/slapd.conf
+++ b/lib/eldap/test/ldap_server/slapd.conf
@@ -1,14 +1,32 @@
-include /etc/ldap/schema/core.schema
-pidfile /tmp/openldap-data/slapd.pid
-argsfile /tmp/openldap-data/slapd.args
+modulepath /usr/lib/ldap
+moduleload back_bdb.la
+
+# example config file - global configuration section
+include /etc/ldap/schema/core.schema
+referral ldap://root.openldap.org
+access to * by * read
+
+TLSCACertificateFile /ldisk/hans_otp/otp/lib/eldap/test/eldap_basic_SUITE_data/certs/server/cacerts.pem
+TLSCertificateFile /ldisk/hans_otp/otp/lib/eldap/test/eldap_basic_SUITE_data/certs/server/cert.pem
+TLSCertificateKeyFile /ldisk/hans_otp/otp/lib/eldap/test/eldap_basic_SUITE_data/certs/server/keycert.pem
+
database bdb
suffix "dc=ericsson,dc=se"
rootdn "cn=Manager,dc=ericsson,dc=se"
rootpw hejsan
+
# The database must exist before running slapd
-directory /tmp/openldap-data
+directory /tmp/slapd/openldap-data-ericsson.se
+
# Indices to maintain
index objectClass eq
-# URI "ldap://0.0.0.0:9876 ldaps://0.0.0.0:9870"
-# servers/slapd/slapd -d 255 -h "ldap://0.0.0.0:9876 ldaps://0.0.0.0:9870" -f /ldisk/dgud/src/otp/lib/eldap/test/ldap_server/slapd.conf
\ No newline at end of file
+access to attrs=userPassword
+ by self write
+ by anonymous auth
+ by dn.base="cn=Manager,dc=ericsson,dc=se" write
+ by * none
+access to *
+ by self write
+ by dn.base="cn=Manager,dc=ericsson,dc=se" write
+ by * read
diff --git a/lib/eldap/test/make_certs.erl b/lib/eldap/test/make_certs.erl
new file mode 100644
index 0000000000..f963af180d
--- /dev/null
+++ b/lib/eldap/test/make_certs.erl
@@ -0,0 +1,313 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2007-2013. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(make_certs).
+
+-export([all/2]).
+
+-record(dn, {commonName,
+ organizationalUnitName = "Erlang OTP",
+ organizationName = "Ericsson AB",
+ localityName = "Stockholm",
+ countryName = "SE",
+ emailAddress = "peter@erix.ericsson.se"}).
+
+all(DataDir, PrivDir) ->
+ OpenSSLCmd = "openssl",
+ create_rnd(DataDir, PrivDir), % For all requests
+ rootCA(PrivDir, OpenSSLCmd, "erlangCA"),
+ intermediateCA(PrivDir, OpenSSLCmd, "otpCA", "erlangCA"),
+ endusers(PrivDir, OpenSSLCmd, "otpCA", ["client", "server"]),
+ collect_certs(PrivDir, ["erlangCA", "otpCA"], ["client", "server"]),
+ %% Create keycert files
+ SDir = filename:join([PrivDir, "server"]),
+ SC = filename:join([SDir, "cert.pem"]),
+ SK = filename:join([SDir, "key.pem"]),
+ SKC = filename:join([SDir, "keycert.pem"]),
+ append_files([SK, SC], SKC),
+ CDir = filename:join([PrivDir, "client"]),
+ CC = filename:join([CDir, "cert.pem"]),
+ CK = filename:join([CDir, "key.pem"]),
+ CKC = filename:join([CDir, "keycert.pem"]),
+ append_files([CK, CC], CKC),
+ remove_rnd(PrivDir).
+
+append_files(FileNames, ResultFileName) ->
+ {ok, ResultFile} = file:open(ResultFileName, [write]),
+ do_append_files(FileNames, ResultFile).
+
+do_append_files([], RF) ->
+ ok = file:close(RF);
+do_append_files([F|Fs], RF) ->
+ {ok, Data} = file:read_file(F),
+ ok = file:write(RF, Data),
+ do_append_files(Fs, RF).
+
+rootCA(Root, OpenSSLCmd, Name) ->
+ create_ca_dir(Root, Name, ca_cnf(Name)),
+ DN = #dn{commonName = Name},
+ create_self_signed_cert(Root, OpenSSLCmd, Name, req_cnf(DN)),
+ ok.
+
+intermediateCA(Root, OpenSSLCmd, CA, ParentCA) ->
+ CA = "otpCA",
+ create_ca_dir(Root, CA, ca_cnf(CA)),
+ CARoot = filename:join([Root, CA]),
+ DN = #dn{commonName = CA},
+ CnfFile = filename:join([CARoot, "req.cnf"]),
+ file:write_file(CnfFile, req_cnf(DN)),
+ KeyFile = filename:join([CARoot, "private", "key.pem"]),
+ ReqFile = filename:join([CARoot, "req.pem"]),
+ create_req(Root, OpenSSLCmd, CnfFile, KeyFile, ReqFile),
+ CertFile = filename:join([CARoot, "cert.pem"]),
+ sign_req(Root, OpenSSLCmd, ParentCA, "ca_cert", ReqFile, CertFile).
+
+endusers(Root, OpenSSLCmd, CA, Users) ->
+ lists:foreach(fun(User) -> enduser(Root, OpenSSLCmd, CA, User) end, Users).
+
+enduser(Root, OpenSSLCmd, CA, User) ->
+ UsrRoot = filename:join([Root, User]),
+ file:make_dir(UsrRoot),
+ CnfFile = filename:join([UsrRoot, "req.cnf"]),
+ DN = #dn{commonName = User},
+ file:write_file(CnfFile, req_cnf(DN)),
+ KeyFile = filename:join([UsrRoot, "key.pem"]),
+ ReqFile = filename:join([UsrRoot, "req.pem"]),
+ create_req(Root, OpenSSLCmd, CnfFile, KeyFile, ReqFile),
+ CertFileAllUsage = filename:join([UsrRoot, "cert.pem"]),
+ sign_req(Root, OpenSSLCmd, CA, "user_cert", ReqFile, CertFileAllUsage),
+ CertFileDigitalSigOnly = filename:join([UsrRoot, "digital_signature_only_cert.pem"]),
+ sign_req(Root, OpenSSLCmd, CA, "user_cert_digital_signature_only", ReqFile, CertFileDigitalSigOnly).
+
+collect_certs(Root, CAs, Users) ->
+ Bins = lists:foldr(
+ fun(CA, Acc) ->
+ File = filename:join([Root, CA, "cert.pem"]),
+ {ok, Bin} = file:read_file(File),
+ [Bin, "\n" | Acc]
+ end, [], CAs),
+ lists:foreach(
+ fun(User) ->
+ File = filename:join([Root, User, "cacerts.pem"]),
+ file:write_file(File, Bins)
+ end, Users).
+
+create_self_signed_cert(Root, OpenSSLCmd, CAName, Cnf) ->
+ CARoot = filename:join([Root, CAName]),
+ CnfFile = filename:join([CARoot, "req.cnf"]),
+ file:write_file(CnfFile, Cnf),
+ KeyFile = filename:join([CARoot, "private", "key.pem"]),
+ CertFile = filename:join([CARoot, "cert.pem"]),
+ Cmd = [OpenSSLCmd, " req"
+ " -new"
+ " -x509"
+ " -config ", CnfFile,
+ " -keyout ", KeyFile,
+ " -out ", CertFile],
+ Env = [{"ROOTDIR", Root}],
+ cmd(Cmd, Env),
+ fix_key_file(OpenSSLCmd, KeyFile).
+
+% openssl 1.0 generates key files in pkcs8 format by default and we don't handle this format
+fix_key_file(OpenSSLCmd, KeyFile) ->
+ KeyFileTmp = KeyFile ++ ".tmp",
+ Cmd = [OpenSSLCmd, " rsa",
+ " -in ",
+ KeyFile,
+ " -out ",
+ KeyFileTmp],
+ cmd(Cmd, []),
+ ok = file:rename(KeyFileTmp, KeyFile).
+
+create_ca_dir(Root, CAName, Cnf) ->
+ CARoot = filename:join([Root, CAName]),
+ file:make_dir(CARoot),
+ create_dirs(CARoot, ["certs", "crl", "newcerts", "private"]),
+ create_rnd(Root, filename:join([CAName, "private"])),
+ create_files(CARoot, [{"serial", "01\n"},
+ {"index.txt", ""},
+ {"ca.cnf", Cnf}]).
+
+create_req(Root, OpenSSLCmd, CnfFile, KeyFile, ReqFile) ->
+ Cmd = [OpenSSLCmd, " req"
+ " -new"
+ " -config ", CnfFile,
+ " -keyout ", KeyFile,
+ " -out ", ReqFile],
+ Env = [{"ROOTDIR", Root}],
+ cmd(Cmd, Env),
+ fix_key_file(OpenSSLCmd, KeyFile).
+
+sign_req(Root, OpenSSLCmd, CA, CertType, ReqFile, CertFile) ->
+ CACnfFile = filename:join([Root, CA, "ca.cnf"]),
+ Cmd = [OpenSSLCmd, " ca"
+ " -batch"
+ " -notext"
+ " -config ", CACnfFile,
+ " -extensions ", CertType,
+ " -in ", ReqFile,
+ " -out ", CertFile],
+ Env = [{"ROOTDIR", Root}],
+ cmd(Cmd, Env).
+
+%%
+%% Misc
+%%
+
+create_dirs(Root, Dirs) ->
+ lists:foreach(fun(Dir) ->
+ file:make_dir(filename:join([Root, Dir])) end,
+ Dirs).
+
+create_files(Root, NameContents) ->
+ lists:foreach(
+ fun({Name, Contents}) ->
+ file:write_file(filename:join([Root, Name]), Contents) end,
+ NameContents).
+
+create_rnd(FromDir, ToDir) ->
+ From = filename:join([FromDir, "RAND"]),
+ To = filename:join([ToDir, "RAND"]),
+ file:copy(From, To).
+
+remove_rnd(Dir) ->
+ File = filename:join([Dir, "RAND"]),
+ file:delete(File).
+
+cmd(Cmd, Env) ->
+ FCmd = lists:flatten(Cmd),
+ Port = open_port({spawn, FCmd}, [stream, eof, exit_status, stderr_to_stdout,
+ {env, Env}]),
+ eval_cmd(Port).
+
+eval_cmd(Port) ->
+ receive
+ {Port, {data, _}} ->
+ eval_cmd(Port);
+ {Port, eof} ->
+ ok
+ end,
+ receive
+ {Port, {exit_status, Status}} when Status /= 0 ->
+ %% io:fwrite("exit status: ~w~n", [Status]),
+ exit({eval_cmd, Status})
+ after 0 ->
+ ok
+ end.
+
+%%
+%% Contents of configuration files
+%%
+
+req_cnf(DN) ->
+ ["# Purpose: Configuration for requests (end users and CAs)."
+ "\n"
+ "ROOTDIR = $ENV::ROOTDIR\n"
+ "\n"
+
+ "[req]\n"
+ "input_password = secret\n"
+ "output_password = secret\n"
+ "default_bits = 1024\n"
+ "RANDFILE = $ROOTDIR/RAND\n"
+ "encrypt_key = no\n"
+ "default_md = sha1\n"
+ "#string_mask = pkix\n"
+ "x509_extensions = ca_ext\n"
+ "prompt = no\n"
+ "distinguished_name= name\n"
+ "\n"
+
+ "[name]\n"
+ "commonName = ", DN#dn.commonName, "\n"
+ "organizationalUnitName = ", DN#dn.organizationalUnitName, "\n"
+ "organizationName = ", DN#dn.organizationName, "\n"
+ "localityName = ", DN#dn.localityName, "\n"
+ "countryName = ", DN#dn.countryName, "\n"
+ "emailAddress = ", DN#dn.emailAddress, "\n"
+ "\n"
+
+ "[ca_ext]\n"
+ "basicConstraints = critical, CA:true\n"
+ "keyUsage = cRLSign, keyCertSign\n"
+ "subjectKeyIdentifier = hash\n"
+ "subjectAltName = email:copy\n"].
+
+
+ca_cnf(CA) ->
+ ["# Purpose: Configuration for CAs.\n"
+ "\n"
+ "ROOTDIR = $ENV::ROOTDIR\n"
+ "default_ca = ca\n"
+ "\n"
+
+ "[ca]\n"
+ "dir = $ROOTDIR/", CA, "\n"
+ "certs = $dir/certs\n"
+ "crl_dir = $dir/crl\n"
+ "database = $dir/index.txt\n"
+ "new_certs_dir = $dir/newcerts\n"
+ "certificate = $dir/cert.pem\n"
+ "serial = $dir/serial\n"
+ "crl = $dir/crl.pem\n"
+ "private_key = $dir/private/key.pem\n"
+ "RANDFILE = $dir/private/RAND\n"
+ "\n"
+ "x509_extensions = user_cert\n"
+ "unique_subject = no\n"
+ "default_days = 3600\n"
+ "default_md = sha1\n"
+ "preserve = no\n"
+ "policy = policy_match\n"
+ "\n"
+
+ "[policy_match]\n"
+ "commonName = supplied\n"
+ "organizationalUnitName = optional\n"
+ "organizationName = match\n"
+ "countryName = match\n"
+ "localityName = match\n"
+ "emailAddress = supplied\n"
+ "\n"
+
+ "[user_cert]\n"
+ "basicConstraints = CA:false\n"
+ "keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n"
+ "subjectKeyIdentifier = hash\n"
+ "authorityKeyIdentifier = keyid,issuer:always\n"
+ "subjectAltName = email:copy\n"
+ "issuerAltName = issuer:copy\n"
+ "\n"
+
+ "[user_cert_digital_signature_only]\n"
+ "basicConstraints = CA:false\n"
+ "keyUsage = digitalSignature\n"
+ "subjectKeyIdentifier = hash\n"
+ "authorityKeyIdentifier = keyid,issuer:always\n"
+ "subjectAltName = email:copy\n"
+ "issuerAltName = issuer:copy\n"
+ "\n"
+
+ "[ca_cert]\n"
+ "basicConstraints = critical,CA:true\n"
+ "keyUsage = cRLSign, keyCertSign\n"
+ "subjectKeyIdentifier = hash\n"
+ "authorityKeyIdentifier = keyid:always,issuer:always\n"
+ "subjectAltName = email:copy\n"
+ "issuerAltName = issuer:copy\n"].
diff --git a/lib/erl_interface/src/legacy/erl_eterm.c b/lib/erl_interface/src/legacy/erl_eterm.c
index 7ca4f430de..636d26b24b 100644
--- a/lib/erl_interface/src/legacy/erl_eterm.c
+++ b/lib/erl_interface/src/legacy/erl_eterm.c
@@ -686,7 +686,7 @@ int erl_length(const ETERM *ep)
return n;
}
-
+
/***********************************************************************
* I o l i s t f u n c t i o n s
*
diff --git a/lib/erl_interface/test/all_SUITE_data/ei_runner.c b/lib/erl_interface/test/all_SUITE_data/ei_runner.c
index 205f911e38..cdf32b48c4 100644
--- a/lib/erl_interface/test/all_SUITE_data/ei_runner.c
+++ b/lib/erl_interface/test/all_SUITE_data/ei_runner.c
@@ -77,7 +77,7 @@ run_tests(char* argv0, TestCase test_cases[], unsigned number)
}
}
-
+
/***********************************************************************
*
* R e a d i n g p a c k e t s
@@ -182,7 +182,7 @@ char *read_packet(int *len)
return io_buf;
}
-
+
/***********************************************************************
* S e n d i n g r e p l i e s
*
diff --git a/lib/erl_interface/test/all_SUITE_data/runner.c b/lib/erl_interface/test/all_SUITE_data/runner.c
index a474c17722..038d651275 100644
--- a/lib/erl_interface/test/all_SUITE_data/runner.c
+++ b/lib/erl_interface/test/all_SUITE_data/runner.c
@@ -78,7 +78,7 @@ run_tests(char* argv0, TestCase test_cases[], unsigned number)
}
}
-
+
/***********************************************************************
*
* R e a d i n g p a c k e t s
@@ -188,7 +188,7 @@ char *read_packet(int *len)
return io_buf;
}
-
+
/***********************************************************************
* S e n d i n g r e p l i e s
*
diff --git a/lib/erl_interface/test/ei_accept_SUITE.erl b/lib/erl_interface/test/ei_accept_SUITE.erl
index 48469e68dc..642809ea7a 100644
--- a/lib/erl_interface/test/ei_accept_SUITE.erl
+++ b/lib/erl_interface/test/ei_accept_SUITE.erl
@@ -155,7 +155,7 @@ start_einode(Einode, N, Host, Port) ->
ok.
-
+
%%% Interface functions for ei (erl_interface) functions.
ei_connect_init(P, Num, Cookie, Creation) ->
diff --git a/lib/erl_interface/test/erl_connect_SUITE.erl b/lib/erl_interface/test/erl_connect_SUITE.erl
index bd54013402..c8becc760c 100644
--- a/lib/erl_interface/test/erl_connect_SUITE.erl
+++ b/lib/erl_interface/test/erl_connect_SUITE.erl
@@ -106,7 +106,7 @@ erl_reg_send(Config) when is_list(Config) ->
?line runner:recv_eot(P),
ok.
-
+
%%% Interface functions for erl_interface functions.
erl_connect_init(P, Num, Cookie, Creation) ->
diff --git a/lib/erl_interface/test/erl_eterm_SUITE.erl b/lib/erl_interface/test/erl_eterm_SUITE.erl
index 10a27e48e3..100e9b6f68 100644
--- a/lib/erl_interface/test/erl_eterm_SUITE.erl
+++ b/lib/erl_interface/test/erl_eterm_SUITE.erl
@@ -108,7 +108,7 @@ end_per_group(_GroupName, Config) ->
Config.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
%%% 1. B a s i c t e s t s
@@ -196,7 +196,7 @@ t_erl_free_compound(Config) when is_list(Config) ->
?line runner:test(?t_erl_free_compound),
ok.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
%%% 2. C o n s t r u c t i n g t e r m s
@@ -521,7 +521,7 @@ t_erl_cons(Config) when is_list(Config) ->
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
%%% 3. E x t r a c t i n g & i n f o f u n c t i o n s
@@ -669,7 +669,7 @@ t_erl_element(Config) when is_list(Config) ->
ok.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
%%% 4. I / O l i s t f u n c t i o n s
@@ -894,7 +894,7 @@ iolist_to_string(Port, Term) ->
'NULL' -> 'NULL'
end.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
%%% 5. M i s c e l l a n o u s T e s t s
diff --git a/lib/erl_interface/test/erl_eterm_SUITE_data/eterm_test.c b/lib/erl_interface/test/erl_eterm_SUITE_data/eterm_test.c
index 80d7f69520..94959187b9 100644
--- a/lib/erl_interface/test/erl_eterm_SUITE_data/eterm_test.c
+++ b/lib/erl_interface/test/erl_eterm_SUITE_data/eterm_test.c
@@ -269,7 +269,7 @@ TESTCASE(t_erl_free_compound)
report(1);
}
-
+
/***********************************************************************
*
* 2. C o n s t r u c t i n g t e r m s
@@ -1047,7 +1047,7 @@ TESTCASE(t_erl_cons)
-
+
/***********************************************************************
*
* 3. E x t r a c t i n g & i n f o f u n c t i o n s
@@ -1296,7 +1296,7 @@ TESTCASE(extractor_macros)
}
-
+
/***********************************************************************
*
* 4. I / O l i s t f u n c t i o n s
@@ -1393,7 +1393,7 @@ TESTCASE(t_erl_iolist_to_string)
}
}
-
+
/***********************************************************************
*
* 5. M i s c e l l a n o u s T e s t s
diff --git a/lib/eunit/src/eunit_surefire.erl b/lib/eunit/src/eunit_surefire.erl
index cc021625d5..a2463d32e8 100644
--- a/lib/eunit/src/eunit_surefire.erl
+++ b/lib/eunit/src/eunit_surefire.erl
@@ -174,7 +174,7 @@ handle_cancel(group, Data, St) ->
setup_failed -> "fixture setup ";
cleanup_failed -> "fixture cleanup "
end
- ++ io_lib:format("~p", [proplists:get_value(id, Data)]),
+ ++ io_lib:format("~w", [proplists:get_value(id, Data)]),
Desc = format_desc(proplists:get_value(desc, Data)),
TestCase = #testcase{
name = Name, description = Desc,
diff --git a/lib/hipe/cerl/erl_types.erl b/lib/hipe/cerl/erl_types.erl
index d1243b2325..d7d8a878c5 100644
--- a/lib/hipe/cerl/erl_types.erl
+++ b/lib/hipe/cerl/erl_types.erl
@@ -671,8 +671,9 @@ t_solve_remote(?function(Domain, Range), ET, R, C) ->
{RT2, RR2} = t_solve_remote(Range, ET, R, C),
{?function(RT1, RT2), RR1 ++ RR2};
t_solve_remote(?list(Types, Term, Size), ET, R, C) ->
- {RT, RR} = t_solve_remote(Types, ET, R, C),
- {?list(RT, Term, Size), RR};
+ {RT1, RR1} = t_solve_remote(Types, ET, R, C),
+ {RT2, RR2} = t_solve_remote(Term, ET, R, C),
+ {?list(RT1, RT2, Size), RR1 ++ RR2};
t_solve_remote(?product(Types), ET, R, C) ->
{RL, RR} = list_solve_remote(Types, ET, R, C),
{?product(RL), RR};
@@ -1349,8 +1350,8 @@ t_maybe_improper_list() ->
t_maybe_improper_list(_Content, ?unit) -> ?none;
t_maybe_improper_list(?unit, _Termination) -> ?none;
t_maybe_improper_list(Content, Termination) ->
- %% Safety check
- true = t_is_subtype(t_nil(), Termination),
+ %% Safety check: would be nice to have but does not work with remote types
+ %% true = t_is_subtype(t_nil(), Termination),
?list(Content, Termination, ?unknown_qual).
-spec t_is_maybe_improper_list(erl_type()) -> boolean().
@@ -1365,8 +1366,8 @@ t_is_maybe_improper_list(_) -> false.
%% t_improper_list(?unit, _Termination) -> ?none;
%% t_improper_list(_Content, ?unit) -> ?none;
%% t_improper_list(Content, Termination) ->
-%% %% Safety check
-%% false = t_is_subtype(t_nil(), Termination),
+%% %% Safety check: would be nice to have but does not work with remote types
+%% %% false = t_is_subtype(t_nil(), Termination),
%% ?list(Content, Termination, ?any).
-spec lift_list_to_pos_empty(erl_type()) -> erl_type().
diff --git a/lib/inets/doc/src/ftp.xml b/lib/inets/doc/src/ftp.xml
index f8f11ec705..4d559817c4 100644
--- a/lib/inets/doc/src/ftp.xml
+++ b/lib/inets/doc/src/ftp.xml
@@ -547,15 +547,14 @@
<v>Opts = options()</v>
<v>options() = [option()]</v>
<v>option() = start_option() | open_option()</v>
- <!-- <v>start_options() = [start_option()]</v> -->
<v>start_option() = {verbose, verbose()} | {debug, debug()}</v>
<v>verbose() = boolean() (defaults to false)</v>
<v>debug() = disable | debug | trace (defaults to disable)</v>
- <!-- <v>open_options() = [open_option()]</v> -->
- <v>open_option() = {ipfamily, ipfamily()} | {port, port()} | {mode, mode()} | {timeout, timeout()} | {dtimeout, dtimeout()} | {progress, progress()}</v>
+ <v>open_option() = {ipfamily, ipfamily()} | {port, port()} | {mode, mode()} | {tls, tls_options()} | {timeout, timeout()} | {dtimeout, dtimeout()} | {progress, progress()}</v>
<v>ipfamily() = inet | inet6 | inet6fb4 (defaults to inet)</v>
<v>port() = integer() > 0 (defaults to 21)</v>
<v>mode() = active | passive (defaults to passive)</v>
+ <v>tls_options() = [<seealso marker="ssl:ssl#type-ssloption">ssl:ssloption()</seealso>]</v>
<v>timeout() = integer() > 0 (defaults to 60000 milliseconds)</v>
<v>dtimeout() = integer() > 0 | infinity (defaults to infinity)</v>
      <v>progress() = ignore | {module(), function(), initial_data()} (defaults to ignore)</v>
@@ -570,6 +569,10 @@
(without the inets service framework) and
open a session with the FTP server at <c>Host</c>. </p>
+ <p>If the option <c>{tls, tls_options()}</c> is present, the FTP session will be transported over TLS (FTPS, see
+<url href="http://www.ietf.org/rfc/rfc4217.txt">RFC 4217</url>). The list <c>tls_options()</c> may be empty. The function <seealso marker="ssl:ssl#connect/3"><c>ssl:connect/3</c></seealso> is used for securing both the control connection and the data sessions.
+ </p>
+
<p>A session opened in this way, is closed using the
<seealso marker="#close">close</seealso> function. </p>
@@ -815,8 +818,7 @@
<p>Sets the file transfer type to <c>ascii</c> or <c>binary</c>. When
an ftp session is opened, the default transfer type of the
server is used, most often <c>ascii</c>, which is the default
- according to RFC 959.</p>
-
+ according to <url href="http://www.ietf.org/rfc/rfc959.txt">RFC 959</url>.</p>
<marker id="user3"></marker>
</desc>
</func>
@@ -943,7 +945,7 @@
<section>
<title>SEE ALSO</title>
<p>file, filename, J. Postel and J. Reynolds: File Transfer Protocol
- (RFC 959).
+ (<url href="http://www.ietf.org/rfc/rfc959.txt">RFC 959</url>).
</p>
</section>
diff --git a/lib/inets/doc/src/httpc.xml b/lib/inets/doc/src/httpc.xml
index d9a27e7d1e..db68cc3116 100644
--- a/lib/inets/doc/src/httpc.xml
+++ b/lib/inets/doc/src/httpc.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2004</year><year>2012</year>
+ <year>2004</year><year>2013</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -440,7 +440,10 @@ apply(Module, Function, [ReplyInfo | Args])
<v>Profile = profile() | pid() (when started <c>stand_alone</c>)</v>
</type>
<desc>
- <p>Cancels an asynchronous HTTP-request. </p>
+          <p>Cancels an asynchronous HTTP request. Note that this does not
+          guarantee that the response will not be delivered; because the
+          request is asynchronous, it may already have completed when the
+          cancellation arrives.
+          </p>
<marker id="set_options"></marker>
</desc>
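
Since cancellation no longer guarantees that a late answer is suppressed, a caller that needs the old behaviour can flush it explicitly. The sketch below is illustrative (the URL is an assumption) and mirrors the receive block that this change removes from httpc:cancel_request/2.

    %% Illustrative only: cancel an asynchronous request and drop an answer
    %% that may already have been delivered to the mailbox.
    {ok, RequestId} = httpc:request(get, {"http://www.example.com", []}, [],
                                    [{sync, false}]),
    ok = httpc:cancel_request(RequestId),
    receive
        {http, {RequestId, _Result}} -> ok   %% late answer, ignore it
    after 0 -> ok
    end.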
diff --git a/lib/inets/src/ftp/ftp.erl b/lib/inets/src/ftp/ftp.erl
index 5d9887a9a4..86ef9280ad 100644
--- a/lib/inets/src/ftp/ftp.erl
+++ b/lib/inets/src/ftp/ftp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -18,7 +18,7 @@
%%
%%
%% Description: This module implements an ftp client, RFC 959.
-%% It also supports ipv6 RFC 2428.
+%% It also supports IPv6 (RFC 2428) and TLS-secured sessions (RFC 4217).
-module(ftp).
@@ -39,7 +39,8 @@
send_chunk_start/2, send_chunk/2, send_chunk_end/1,
type/2, user/3, user/4, account/2,
append/3, append/2, append_bin/3,
- append_chunk/2, append_chunk_end/1, append_chunk_start/2, info/1]).
+ append_chunk/2, append_chunk_end/1, append_chunk_start/2,
+ info/1, latest_ctrl_response/1]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2,
@@ -54,7 +55,7 @@
-include("ftp_internal.hrl").
-%% Constante used in internal state definition
+%% Constants used in internal state definition
-define(CONNECTION_TIMEOUT, 60*1000).
-define(DATA_ACCEPT_TIMEOUT, infinity).
-define(DEFAULT_MODE, passive).
@@ -67,7 +68,8 @@
%% Internal state
-record(state, {
csock = undefined, % socket() - Control connection socket
- dsock = undefined, % socket() - Data connection socket
+ dsock = undefined, % socket() - Data connection socket
+ tls_options = undefined, % list()
verbose = false, % boolean()
ldir = undefined, % string() - Current local directory
type = ftp_server_default, % atom() - binary | ascii
@@ -83,6 +85,7 @@
%% and hence the end of the response is reached!
ctrl_data = {<<>>, [], start}, % {binary(), [bytes()], LineStatus}
%% pid() - Client pid (note not the same as "From")
+ latest_ctrl_response = "",
owner = undefined,
client = undefined, % "From" to be used in gen_server:reply/2
%% Function that activated a connection and maybe some
@@ -90,7 +93,8 @@
caller = undefined, % term()
ipfamily, % inet | inet6 | inet6fb4
progress = ignore, % ignore | pid()
- dtimeout = ?DATA_ACCEPT_TIMEOUT % non_neg_integer() | infinity
+ dtimeout = ?DATA_ACCEPT_TIMEOUT, % non_neg_integer() | infinity
+ tls_upgrading_data_connection = false
}).
@@ -99,6 +103,8 @@
-type common_reason() :: 'econn' | 'eclosed' | term().
-type file_write_error_reason() :: term(). % See file:write for more info
+-define(DBG(F,A), 'n/a').
+%%-define(DBG(F,A), io:format(F,A)).
%%%=========================================================================
%%% API - CLIENT FUNCTIONS
@@ -154,8 +160,7 @@ open(Host, Opts) when is_list(Opts) ->
?fcrt("open", [{open_options, OpenOptions}]),
case start_link(StartOptions, []) of
{ok, Pid} ->
- ?fcrt("open - ok", [{pid, Pid}]),
- call(Pid, {open, ip_comm, OpenOptions}, plain);
+ do_open(Pid, OpenOptions, tls_options(Opts));
Error1 ->
?fcrt("open - error", [{error1, Error1}]),
Error1
@@ -166,7 +171,13 @@ open(Host, Opts) when is_list(Opts) ->
Error2
end.
-
+do_open(Pid, OpenOptions, TLSOpts) ->
+ case call(Pid, {open, ip_comm, OpenOptions}, plain) of
+ {ok, Pid} ->
+ maybe_tls_upgrade(Pid, TLSOpts);
+ Error ->
+ Error
+ end.
%%--------------------------------------------------------------------------
%% user(Pid, User, Pass, <Acc>) -> ok | {error, euser} | {error, econn}
%% | {error, eacct}
@@ -744,6 +755,18 @@ info(Pid) ->
call(Pid, info, list).
+%%--------------------------------------------------------------------------
+%% latest_ctrl_response(Pid) -> string()
+%% Pid = pid()
+%%
+%% Description: The latest received response from the server
+%%--------------------------------------------------------------------------
+
+-spec latest_ctrl_response(Pid :: pid()) -> string().
+
+latest_ctrl_response(Pid) ->
+ call(Pid, latest_ctrl_response, string).
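
A hedged sketch of how the new call might be used when diagnosing a failed command. It is illustrative only: Pid is assumed to come from an earlier ftp:open/2, the credentials are examples, and the bound value is simply whatever the last control-channel reply was.

    %% Illustrative only: after a failure, ask the client process for the
    %% most recent reply received on the control connection.
    case ftp:user(Pid, "anonymous", "guest@") of
        ok ->
            ok;
        {error, _Reason} ->
            Last = ftp:latest_ctrl_response(Pid),
            error_logger:info_msg("last server reply: ~p~n", [Last])
    end.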
+
%%%========================================================================
%%% Behavior callbacks
%%%========================================================================
@@ -905,6 +928,10 @@ open_options(Options) ->
{progress, ValidateProgress, false, ?PROGRESS_DEFAULT}],
validate_options(Options, ValidOptions, []).
+tls_options(Options) ->
+ %% Options will be validated by ssl application
+ proplists:get_value(tls, Options, undefined).
+
validate_options([], [], Acc) ->
?fcrt("validate_options -> done", [{acc, Acc}]),
{ok, lists:reverse(Acc)};
@@ -1021,8 +1048,8 @@ handle_call({_, info}, _, #state{verbose = Verbose,
ipfamily = IpFamily,
csock = Socket,
progress = Progress} = State) ->
- {ok, {_, LocalPort}} = inet:sockname(Socket),
- {ok, {Address, Port}} = inet:peername(Socket),
+ {ok, {_, LocalPort}} = sockname(Socket),
+ {ok, {Address, Port}} = peername(Socket),
Options = [{verbose, Verbose},
{ipfamily, IpFamily},
{mode, Mode},
@@ -1033,6 +1060,9 @@ handle_call({_, info}, _, #state{verbose = Verbose,
{progress, Progress}],
{reply, {ok, Options}, State};
+handle_call({_,latest_ctrl_response}, _, #state{latest_ctrl_response=Resp} = State) ->
+ {reply, {ok,Resp}, State};
+
%% But everything else must come from the owner
handle_call({Pid, _}, _, #state{owner = Owner} = State) when Owner =/= Pid ->
{reply, {error, not_connection_owner}, State};
@@ -1091,6 +1121,11 @@ handle_call({_, {open, ip_comm, Host, Opts}}, From, State) ->
{stop, normal, State2#state{client = undefined}}
end;
+handle_call({_, {open, tls_upgrade, TLSOptions}}, From, State) ->
+ send_ctrl_message(State, mk_cmd("AUTH TLS", [])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{client = From, caller = open, tls_options = TLSOptions}};
+
handle_call({_, {user, User, Password}}, From,
#state{csock = CSock} = State) when (CSock =/= undefined) ->
handle_user(User, Password, "", State#state{client = From});
@@ -1196,8 +1231,8 @@ handle_call({_,{recv_chunk_start, RemoteFile}}, From, #state{chunk = false}
handle_call({_, recv_chunk}, _, #state{chunk = false} = State) ->
{reply, {error, "ftp:recv_chunk_start/2 not called"}, State};
-handle_call({_, recv_chunk}, From, #state{chunk = true} = State) ->
- activate_data_connection(State),
+handle_call({_, recv_chunk}, From, #state{chunk = true} = State0) ->
+ State = activate_data_connection(State0),
{noreply, State#state{client = From, caller = recv_chunk}};
handle_call({_, {send, LocalFile, RemoteFile}}, From,
@@ -1299,71 +1334,77 @@ handle_info(timeout, State) ->
{noreply, State};
%%% Data socket messages %%%
-handle_info({tcp, Socket, Data},
- #state{dsock = Socket,
- caller = {recv_file, Fd}} = State) ->
+handle_info({Trpt, Socket, Data},
+ #state{dsock = {Trpt,Socket},
+ caller = {recv_file, Fd}} = State0) when Trpt==tcp;Trpt==ssl ->
+ ?DBG('L~p --data ~p ----> ~s~p~n',[?LINE,Socket,Data,State0]),
file_write(binary_to_list(Data), Fd),
- progress_report({binary, Data}, State),
- activate_data_connection(State),
+ progress_report({binary, Data}, State0),
+ State = activate_data_connection(State0),
{noreply, State};
-handle_info({tcp, Socket, Data}, #state{dsock = Socket, client = From,
+handle_info({Trpt, Socket, Data}, #state{dsock = {Trpt,Socket}, client = From,
caller = recv_chunk}
- = State) ->
+ = State) when Trpt==tcp;Trpt==ssl ->
+ ?DBG('L~p --data ~p ----> ~s~p~n',[?LINE,Socket,Data,State]),
gen_server:reply(From, {ok, Data}),
{noreply, State#state{client = undefined, data = <<>>}};
-handle_info({tcp, Socket, Data}, #state{dsock = Socket} = State) ->
- activate_data_connection(State),
+handle_info({Trpt, Socket, Data}, #state{dsock = {Trpt,Socket}} = State0) when Trpt==tcp;Trpt==ssl ->
+ ?DBG('L~p --data ~p ----> ~s~p~n',[?LINE,Socket,Data,State0]),
+ State = activate_data_connection(State0),
{noreply, State#state{data = <<(State#state.data)/binary,
Data/binary>>}};
-handle_info({tcp_closed, Socket}, #state{dsock = Socket,
+handle_info({Cls, Socket}, #state{dsock = {Trpt,Socket},
caller = {recv_file, Fd}}
- = State) ->
+ = State) when {Cls,Trpt}=={tcp_closed,tcp} ; {Cls,Trpt}=={ssl_closed,ssl} ->
file_close(Fd),
progress_report({transfer_size, 0}, State),
activate_ctrl_connection(State),
{noreply, State#state{dsock = undefined, data = <<>>}};
-handle_info({tcp_closed, Socket}, #state{dsock = Socket, client = From,
+handle_info({Cls, Socket}, #state{dsock = {Trpt,Socket}, client = From,
caller = recv_chunk}
- = State) ->
+ = State) when {Cls,Trpt}=={tcp_closed,tcp} ; {Cls,Trpt}=={ssl_closed,ssl} ->
gen_server:reply(From, ok),
{noreply, State#state{dsock = undefined, client = undefined,
data = <<>>, caller = undefined,
chunk = false}};
-handle_info({tcp_closed, Socket}, #state{dsock = Socket, caller = recv_bin,
- data = Data} = State) ->
+handle_info({Cls, Socket}, #state{dsock = {Trpt,Socket}, caller = recv_bin,
+ data = Data} = State)
+ when {Cls,Trpt}=={tcp_closed,tcp} ; {Cls,Trpt}=={ssl_closed,ssl} ->
activate_ctrl_connection(State),
{noreply, State#state{dsock = undefined, data = <<>>,
caller = {recv_bin, Data}}};
-handle_info({tcp_closed, Socket}, #state{dsock = Socket, data = Data,
+handle_info({Cls, Socket}, #state{dsock = {Trpt,Socket}, data = Data,
caller = {handle_dir_result, Dir}}
- = State) ->
+ = State) when {Cls,Trpt}=={tcp_closed,tcp} ; {Cls,Trpt}=={ssl_closed,ssl} ->
activate_ctrl_connection(State),
{noreply, State#state{dsock = undefined,
caller = {handle_dir_result, Dir, Data},
% data = <<?CR,?LF>>}};
data = <<>>}};
-
-handle_info({tcp_error, Socket, Reason}, #state{dsock = Socket,
- client = From} = State) ->
+
+handle_info({Err, Socket, Reason}, #state{dsock = {Trpt,Socket},
+ client = From} = State)
+ when {Err,Trpt}=={tcp_error,tcp} ; {Err,Trpt}=={ssl_error,ssl} ->
gen_server:reply(From, {error, Reason}),
close_data_connection(State),
{noreply, State#state{dsock = undefined, client = undefined,
data = <<>>, caller = undefined, chunk = false}};
%%% Ctrl socket messages %%%
-handle_info({tcp, Socket, Data}, #state{csock = Socket,
- verbose = Verbose,
- caller = Caller,
- client = From,
- ctrl_data = {CtrlData, AccLines,
- LineStatus}}
+handle_info({Transport, Socket, Data}, #state{csock = {Transport, Socket},
+ verbose = Verbose,
+ caller = Caller,
+ client = From,
+ ctrl_data = {CtrlData, AccLines,
+ LineStatus}}
= State) ->
+ ?DBG('--ctrl ~p ----> ~s~p~n',[Socket,<<CtrlData/binary, Data/binary>>,State]),
case ftp_response:parse_lines(<<CtrlData/binary, Data/binary>>,
AccLines, LineStatus) of
{ok, Lines, NextMsgData} ->
@@ -1374,27 +1415,32 @@ handle_info({tcp, Socket, Data}, #state{csock = Socket,
gen_server:reply(From, string:tokens(Lines, [?CR, ?LF])),
{noreply, State#state{client = undefined,
caller = undefined,
+ latest_ctrl_response = Lines,
ctrl_data = {NextMsgData, [],
start}}};
_ ->
+ ?DBG(' ...handle_ctrl_result(~p,...) ctrl_data=~p~n',[CtrlResult,{NextMsgData, [], start}]),
handle_ctrl_result(CtrlResult,
- State#state{ctrl_data =
- {NextMsgData, [], start}})
+ State#state{latest_ctrl_response = Lines,
+ ctrl_data =
+ {NextMsgData, [], start}})
end;
{continue, NewCtrlData} ->
+ ?DBG(' ...Continue... ctrl_data=~p~n',[NewCtrlData]),
activate_ctrl_connection(State),
{noreply, State#state{ctrl_data = NewCtrlData}}
end;
-handle_info({tcp_closed, Socket}, #state{csock = Socket}) ->
- %% If the server closes the control channel it is
- %% the expected behavior that connection process terminates.
+%% If the server closes the control channel, the expected behavior is
+%% that the connection process terminates.
+handle_info({Cls, Socket}, #state{csock = {Trpt, Socket}})
+ when {Cls,Trpt}=={tcp_closed,tcp} ; {Cls,Trpt}=={ssl_closed,ssl} ->
exit(normal); %% User will get error message from terminate/2
-handle_info({tcp_error, Socket, Reason}, _) ->
+handle_info({Err, Socket, Reason}, _) when Err==tcp_error ; Err==ssl_error ->
Report =
- io_lib:format("tcp_error on socket: ~p for reason: ~p~n",
- [Socket, Reason]),
+ io_lib:format("~p on socket: ~p for reason: ~p~n",
+ [Err, Socket, Reason]),
error_logger:error_report(Report),
%% If tcp does not work the only option is to terminate,
%% this is the expected behavior under these circumstances.
@@ -1425,8 +1471,8 @@ handle_info({'EXIT', Pid, Reason}, #state{progress = Pid} = State) ->
%% so we do not want to crash, but we make a log entry as it is an
%% unwanted behaviour.)
handle_info(Info, State) ->
- Report = io_lib:format("ftp : ~p : Unexpected message: ~p\n",
- [self(), Info]),
+ Report = io_lib:format("ftp : ~p : Unexpected message: ~p~nState: ~p~n",
+ [self(), Info, State]),
error_logger:info_report(Report),
{noreply, State}.
@@ -1566,8 +1612,37 @@ handle_user_account(Acc, State) ->
%%--------------------------------------------------------------------------
%% handle_ctrl_result
%%--------------------------------------------------------------------------
-%%--------------------------------------------------------------------------
-%% Handling of control connection setup
+handle_ctrl_result({tls_upgrade, _}, #state{csock = {tcp, Socket},
+ tls_options = TLSOptions,
+ timeout = Timeout,
+ caller = open, client = From}
+ = State0) ->
+ ?DBG('<--ctrl ssl:connect(~p, ~p)~n~p~n',[Socket,TLSOptions,State0]),
+ case ssl:connect(Socket, TLSOptions, Timeout) of
+ {ok, TLSSocket} ->
+ State = State0#state{csock = {ssl,TLSSocket}},
+ send_ctrl_message(State, mk_cmd("PBSZ 0", [])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{tls_upgrading_data_connection = {true, pbsz}} };
+ {error, _} = Error ->
+ gen_server:reply(From, {Error, self()}),
+ {stop, normal, State0#state{client = undefined,
+ caller = undefined,
+ tls_upgrading_data_connection = false}}
+ end;
+
+handle_ctrl_result({pos_compl, _}, #state{tls_upgrading_data_connection = {true, pbsz}} = State) ->
+ send_ctrl_message(State, mk_cmd("PROT P", [])),
+ activate_ctrl_connection(State),
+ {noreply, State#state{tls_upgrading_data_connection = {true, prot}}};
+
+handle_ctrl_result({pos_compl, _}, #state{tls_upgrading_data_connection = {true, prot},
+ client = From} = State) ->
+ gen_server:reply(From, {ok, self()}),
+ {noreply, State#state{client = undefined,
+ caller = undefined,
+ tls_upgrading_data_connection = false}};
+
handle_ctrl_result({pos_compl, _}, #state{caller = open, client = From}
= State) ->
gen_server:reply(From, {ok, self()}),
@@ -1601,10 +1676,10 @@ handle_ctrl_result({pos_compl, Lines},
timeout = Timeout}
= State) ->
[_, PortStr | _] = lists:reverse(string:tokens(Lines, "|")),
- {ok, {IP, _}} = inet:peername(CSock),
+ {ok, {IP, _}} = peername(CSock),
case connect(IP, list_to_integer(PortStr), Timeout, State) of
{ok, _, Socket} ->
- handle_caller(State#state{caller = Caller, dsock = Socket});
+ handle_caller(State#state{caller = Caller, dsock = {tcp, Socket}});
{error, _Reason} = Error ->
gen_server:reply(From, Error),
{noreply, State#state{client = undefined, caller = undefined}}
@@ -1614,7 +1689,7 @@ handle_ctrl_result({pos_compl, Lines},
#state{mode = passive,
ipfamily = inet,
client = From,
- caller = {setup_data_connection, Caller},
+ caller = {setup_data_connection, Caller},
timeout = Timeout} = State) ->
{_, [?LEFT_PAREN | Rest]} =
@@ -1626,9 +1701,11 @@ handle_ctrl_result({pos_compl, Lines},
string:tokens(NewPortAddr, [$,])),
IP = {A1, A2, A3, A4},
Port = (P1 * 256) + P2,
+
+ ?DBG('<--data tcp connect to ~p:~p, Caller=~p~n',[IP,Port,Caller]),
case connect(IP, Port, Timeout, State) of
- {ok, _, Socket} ->
- handle_caller(State#state{caller = Caller, dsock = Socket});
+ {ok, _, Socket} ->
+ handle_caller(State#state{caller = Caller, dsock = {tcp,Socket}});
{error, _Reason} = Error ->
gen_server:reply(From, Error),
{noreply,State#state{client = undefined, caller = undefined}}
@@ -1669,18 +1746,18 @@ handle_ctrl_result({pos_compl, Lines},
%%--------------------------------------------------------------------------
%% Directory listing
-handle_ctrl_result({pos_prel, _}, #state{caller = {dir, Dir}} = State) ->
- case accept_data_connection(State) of
- {ok, NewState} ->
- activate_data_connection(NewState),
- {noreply, NewState#state{caller = {handle_dir_result, Dir}}};
+handle_ctrl_result({pos_prel, _}, #state{caller = {dir, Dir}} = State0) ->
+ case accept_data_connection(State0) of
+ {ok, State1} ->
+ State = activate_data_connection(State1),
+ {noreply, State#state{caller = {handle_dir_result, Dir}}};
{error, _Reason} = ERROR ->
- case State#state.client of
+ case State0#state.client of
undefined ->
- {stop, ERROR, State};
+ {stop, ERROR, State0};
From ->
gen_server:reply(From, ERROR),
- {stop, normal, State#state{client = undefined}}
+ {stop, normal, State0#state{client = undefined}}
end
end;
@@ -1778,18 +1855,18 @@ handle_ctrl_result({Status, _},
%%--------------------------------------------------------------------------
%% File handling - recv_bin
-handle_ctrl_result({pos_prel, _}, #state{caller = recv_bin} = State) ->
- case accept_data_connection(State) of
- {ok, NewState} ->
- activate_data_connection(NewState),
- {noreply, NewState};
+handle_ctrl_result({pos_prel, _}, #state{caller = recv_bin} = State0) ->
+ case accept_data_connection(State0) of
+ {ok, State1} ->
+ State = activate_data_connection(State1),
+ {noreply, State};
{error, _Reason} = ERROR ->
- case State#state.client of
+ case State0#state.client of
undefined ->
- {stop, ERROR, State};
+ {stop, ERROR, State0};
From ->
gen_server:reply(From, ERROR),
- {stop, normal, State#state{client = undefined}}
+ {stop, normal, State0#state{client = undefined}}
end
end;
@@ -1812,36 +1889,35 @@ handle_ctrl_result({Status, _}, #state{caller = {recv_bin, _}} = State) ->
%% File handling - start_chunk_transfer
handle_ctrl_result({pos_prel, _}, #state{client = From,
caller = start_chunk_transfer}
- = State) ->
- case accept_data_connection(State) of
- {ok, NewState} ->
- gen_server:reply(From, ok),
- {noreply, NewState#state{chunk = true, client = undefined,
- caller = undefined}};
+ = State0) ->
+ case accept_data_connection(State0) of
+ {ok, State1} ->
+ State = start_chunk(State1),
+ {noreply, State};
{error, _Reason} = ERROR ->
- case State#state.client of
+ case State0#state.client of
undefined ->
- {stop, ERROR, State};
+ {stop, ERROR, State0};
From ->
gen_server:reply(From, ERROR),
- {stop, normal, State#state{client = undefined}}
+ {stop, normal, State0#state{client = undefined}}
end
end;
%%--------------------------------------------------------------------------
%% File handling - recv_file
-handle_ctrl_result({pos_prel, _}, #state{caller = {recv_file, _}} = State) ->
- case accept_data_connection(State) of
- {ok, NewState} ->
- activate_data_connection(NewState),
- {noreply, NewState};
+handle_ctrl_result({pos_prel, _}, #state{caller = {recv_file, _}} = State0) ->
+ case accept_data_connection(State0) of
+ {ok, State1} ->
+ State = activate_data_connection(State1),
+ {noreply, State};
{error, _Reason} = ERROR ->
- case State#state.client of
+ case State0#state.client of
undefined ->
- {stop, ERROR, State};
+ {stop, ERROR, State0};
From ->
gen_server:reply(From, ERROR),
- {stop, normal, State#state{client = undefined}}
+ {stop, normal, State0#state{client = undefined}}
end
end;
@@ -1853,36 +1929,32 @@ handle_ctrl_result({Status, _}, #state{caller = {recv_file, Fd}} = State) ->
%%--------------------------------------------------------------------------
%% File handling - transfer_*
handle_ctrl_result({pos_prel, _}, #state{caller = {transfer_file, Fd}}
- = State) ->
- case accept_data_connection(State) of
- {ok, NewState} ->
- send_file(Fd, NewState);
+ = State0) ->
+ case accept_data_connection(State0) of
+ {ok, State1} ->
+ send_file(State1, Fd);
{error, _Reason} = ERROR ->
- case State#state.client of
+ case State0#state.client of
undefined ->
- {stop, ERROR, State};
+ {stop, ERROR, State0};
From ->
gen_server:reply(From, ERROR),
- {stop, normal, State#state{client = undefined}}
+ {stop, normal, State0#state{client = undefined}}
end
end;
handle_ctrl_result({pos_prel, _}, #state{caller = {transfer_data, Bin}}
- = State) ->
- case accept_data_connection(State) of
- {ok, NewState} ->
- send_data_message(NewState, Bin),
- close_data_connection(NewState),
- activate_ctrl_connection(NewState),
- {noreply, NewState#state{caller = transfer_data_second_phase,
- dsock = undefined}};
+ = State0) ->
+ case accept_data_connection(State0) of
+ {ok, State} ->
+ send_bin(State, Bin);
{error, _Reason} = ERROR ->
- case State#state.client of
+ case State0#state.client of
undefined ->
- {stop, ERROR, State};
+ {stop, ERROR, State0};
From ->
gen_server:reply(From, ERROR),
- {stop, normal, State#state{client = undefined}}
+ {stop, normal, State0#state{client = undefined}}
end
end;
@@ -1975,7 +2047,7 @@ setup_ctrl_connection(Host, Port, Timeout, State) ->
MsTime = millisec_time(),
case connect(Host, Port, Timeout, State) of
{ok, IpFam, CSock} ->
- NewState = State#state{csock = CSock, ipfamily = IpFam},
+ NewState = State#state{csock = {tcp, CSock}, ipfamily = IpFam},
activate_ctrl_connection(NewState),
case Timeout - (millisec_time() - MsTime) of
Timeout2 when (Timeout2 >= 0) ->
@@ -1991,12 +2063,12 @@ setup_ctrl_connection(Host, Port, Timeout, State) ->
setup_data_connection(#state{mode = active,
caller = Caller,
csock = CSock} = State) ->
- case (catch inet:sockname(CSock)) of
+ case (catch sockname(CSock)) of
{ok, {{_, _, _, _, _, _, _, _} = IP, _}} ->
{ok, LSock} =
gen_tcp:listen(0, [{ip, IP}, {active, false},
inet6, binary, {packet, 0}]),
- {ok, Port} = inet:port(LSock),
+ {ok, {_, Port}} = sockname(LSock),
IpAddress = inet_parse:ntoa(IP),
Cmd = mk_cmd("EPRT |2|~s|~p|", [IpAddress, Port]),
send_ctrl_message(State, Cmd),
@@ -2029,20 +2101,6 @@ setup_data_connection(#state{mode = passive, ipfamily = inet,
activate_ctrl_connection(State),
{noreply, State#state{caller = {setup_data_connection, Caller}}}.
-
-%% setup_data_connection(#state{mode = passive, ip_v6_disabled = false,
-%% caller = Caller} = State) ->
-%% send_ctrl_message(State, mk_cmd("EPSV", [])),
-%% activate_ctrl_connection(State),
-%% {noreply, State#state{caller = {setup_data_connection, Caller}}};
-
-%% setup_data_connection(#state{mode = passive, ip_v6_disabled = true,
-%% caller = Caller} = State) ->
-%% send_ctrl_message(State, mk_cmd("PASV", [])),
-%% activate_ctrl_connection(State),
-%% {noreply, State#state{caller = {setup_data_connection, Caller}}}.
-
-
connect(Host, Port, Timeout, #state{ipfamily = inet = IpFam}) ->
connect2(Host, Port, IpFam, Timeout);
@@ -2088,75 +2146,101 @@ connect2(Host, Port, IpFam, Timeout) ->
accept_data_connection(#state{mode = active,
dtimeout = DTimeout,
- dsock = {lsock, LSock}} = State) ->
+ tls_options = TLSOptions,
+ dsock = {lsock, LSock}} = State0) ->
case gen_tcp:accept(LSock, DTimeout) of
+ {ok, Socket} when is_list(TLSOptions) ->
+ gen_tcp:close(LSock),
+ ?DBG('<--data ssl:connect(~p, ~p)~n~p~n',[Socket,TLSOptions,State0]),
+ case ssl:connect(Socket, TLSOptions, DTimeout) of
+ {ok, TLSSocket} ->
+ {ok, State0#state{dsock={ssl,TLSSocket}}};
+ {error, Reason} ->
+ {error, {ssl_connect_failed, Reason}}
+ end;
{ok, Socket} ->
gen_tcp:close(LSock),
- {ok, State#state{dsock = Socket}};
+ {ok, State0#state{dsock={tcp,Socket}}};
{error, Reason} ->
{error, {data_connect_failed, Reason}}
end;
+accept_data_connection(#state{mode = passive,
+ dtimeout = DTimeout,
+ dsock = {tcp,Socket},
+ tls_options = TLSOptions} = State) when is_list(TLSOptions) ->
+ ?DBG('<--data ssl:connect(~p, ~p)~n~p~n',[Socket,TLSOptions,State]),
+ case ssl:connect(Socket, TLSOptions, DTimeout) of
+ {ok, TLSSocket} ->
+ {ok, State#state{dsock={ssl,TLSSocket}}};
+ {error, Reason} ->
+ {error, {ssl_connect_failed, Reason}}
+ end;
accept_data_connection(#state{mode = passive} = State) ->
- {ok, State}.
+ {ok,State}.
+
-send_ctrl_message(#state{csock = Socket, verbose = Verbose}, Message) ->
- %% io:format("send control message: ~n~p~n", [lists:flatten(Message)]),
+send_ctrl_message(_S=#state{csock = Socket, verbose = Verbose}, Message) ->
verbose(lists:flatten(Message),Verbose,send),
+ ?DBG('<--ctrl ~p ---- ~s~p~n',[Socket,Message,_S]),
send_message(Socket, Message).
-send_data_message(#state{dsock = Socket}, Message) ->
- send_message(Socket, Message).
-
-send_message(Socket, Message) ->
- case gen_tcp:send(Socket, Message) of
+send_data_message(_S=#state{dsock = Socket}, Message) ->
+ ?DBG('<==data ~p ==== ~s~n~p~n',[Socket,Message,_S]),
+ case send_message(Socket, Message) of
ok ->
ok;
{error, Reason} ->
- Report = io_lib:format("gen_tcp:send/2 failed for "
- "reason ~p~n", [Reason]),
+ Report = io_lib:format("send/2 for socket ~p failed with "
+ "reason ~p~n", [Socket, Reason]),
error_logger:error_report(Report),
- %% If tcp does not work the only option is to terminate,
+ %% If tcp/ssl does not work the only option is to terminate,
%% this is the expected behavior under these circumstances.
exit(normal) %% User will get error message from terminate/2
end.
+send_message({tcp, Socket}, Message) ->
+ gen_tcp:send(Socket, Message);
+send_message({ssl, Socket}, Message) ->
+ ssl:send(Socket, Message).
+
activate_ctrl_connection(#state{csock = Socket, ctrl_data = {<<>>, _, _}}) ->
activate_connection(Socket);
activate_ctrl_connection(#state{csock = Socket}) ->
%% We have already received at least part of the next control message,
%% that has been saved in ctrl_data, process this first.
- self() ! {tcp, Socket, <<>>}.
+ self() ! {tcp, unwrap_socket(Socket), <<>>}.
-activate_data_connection(#state{dsock = Socket}) ->
- activate_connection(Socket).
+unwrap_socket({tcp,Socket}) -> Socket;
+unwrap_socket({ssl,Socket}) -> Socket;
+unwrap_socket(Socket) -> Socket.
+
-activate_connection(Socket) ->
- inet:setopts(Socket, [{active, once}]).
+activate_data_connection(#state{dsock = Socket} = State) ->
+ activate_connection(Socket),
+ State.
-close_ctrl_connection(#state{csock = undefined}) ->
- ok;
-close_ctrl_connection(#state{csock = Socket}) ->
- close_connection(Socket).
+activate_connection({tcp, Socket}) -> inet:setopts(Socket, [{active, once}]);
+activate_connection({ssl, Socket}) -> ssl:setopts(Socket, [{active, once}]).
-close_data_connection(#state{dsock = undefined}) ->
- ok;
-close_data_connection(#state{dsock = {lsock, Socket}}) ->
- close_connection(Socket);
-close_data_connection(#state{dsock = Socket}) ->
- close_connection(Socket).
+close_ctrl_connection(#state{csock = undefined}) -> ok;
+close_ctrl_connection(#state{csock = Socket}) -> close_connection(Socket).
-close_connection(Socket) ->
- gen_tcp:close(Socket).
+close_data_connection(#state{dsock = undefined}) -> ok;
+close_data_connection(#state{dsock = Socket}) -> close_connection(Socket).
-%% ------------ FILE HANDELING ----------------------------------------
+close_connection({tcp, Socket}) -> gen_tcp:close(Socket);
+close_connection({ssl, Socket}) -> ssl:close(Socket).
-send_file(Fd, State) ->
+%% ------------ FILE HANDLING -----------------------------------------
+send_file(#state{tls_upgrading_data_connection = {true, CTRL, _}} = State, Fd) ->
+ {noreply, State#state{tls_upgrading_data_connection = {true, CTRL, ?MODULE, send_file, Fd}}};
+send_file(State, Fd) ->
case file_read(Fd) of
{ok, N, Bin} when N > 0->
send_data_message(State, Bin),
progress_report({binary, Bin}, State),
- send_file(Fd, State);
+ send_file(State, Fd);
{ok, _, _} ->
file_close(Fd),
close_data_connection(State),
@@ -2206,6 +2290,15 @@ call(GenServer, Msg, Format, Timeout) ->
cast(GenServer, Msg) ->
gen_server:cast(GenServer, {self(), Msg}).
+send_bin(#state{tls_upgrading_data_connection = {true, CTRL, _}} = State, Bin) ->
+ State#state{tls_upgrading_data_connection = {true, CTRL, ?MODULE, send_bin, Bin}};
+send_bin(State, Bin) ->
+ send_data_message(State, Bin),
+ close_data_connection(State),
+ activate_ctrl_connection(State),
+ {noreply, State#state{caller = transfer_data_second_phase,
+ dsock = undefined}}.
+
mk_cmd(Fmt, Args) ->
[io_lib:format(Fmt, Args)| [?CR, ?LF]]. % Deep list ok.
@@ -2216,20 +2309,6 @@ pwd_result(Lines) ->
lists:splitwith(fun(?DOUBLE_QUOTE) -> false; (_) -> true end, Rest),
Dir.
-%% is_verbose(Params) ->
-%% check_param(verbose, Params).
-
-%% is_debug(Flags) ->
-%% check_param(debug, Flags).
-
-%% is_trace(Flags) ->
-%% check_param(trace, Flags).
-
-%% is_ipv6_disabled(Flags) ->
-%% check_param(ip_v6_disabled, Flags).
-
-%% check_param(Param, Params) ->
-%% lists:member(Param, Params).
key_search(Key, List, Default) ->
case lists:keysearch(Key, 1, List) of
@@ -2239,14 +2318,6 @@ key_search(Key, List, Default) ->
Default
end.
-%% check_option(Pred, Value, Default) ->
-%% case Pred(Value) of
-%% true ->
-%% Value;
-%% false ->
-%% Default
-%% end.
-
verbose(Lines, true, Direction) ->
DirStr =
case Direction of
@@ -2276,3 +2347,23 @@ progress_report(Report, #state{progress = ProgressPid}) ->
millisec_time() ->
{A,B,C} = erlang:now(),
A*1000000000+B*1000+(C div 1000).
+
+peername({tcp, Socket}) -> inet:peername(Socket);
+peername({ssl, Socket}) -> ssl:peername(Socket).
+
+sockname({tcp, Socket}) -> inet:sockname(Socket);
+sockname({ssl, Socket}) -> ssl:sockname(Socket).
+
+maybe_tls_upgrade(Pid, undefined) ->
+ {ok, Pid};
+maybe_tls_upgrade(Pid, TLSOptions) ->
+ catch ssl:start(),
+ call(Pid, {open, tls_upgrade, TLSOptions}, plain).
+
+start_chunk(#state{tls_upgrading_data_connection = {true, CTRL, _}} = State) ->
+ State#state{tls_upgrading_data_connection = {true, CTRL, ?MODULE, start_chunk, undefined}};
+start_chunk(#state{client = From} = State) ->
+ gen_server:reply(From, ok),
+ State#state{chunk = true,
+ client = undefined,
+ caller = undefined}.
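
The patch threads the transport through the state as {tcp, Socket} or {ssl, Socket} pairs. Stripped of the FTP specifics, the dispatch idiom it relies on is just the following stand-alone sketch; the module and function names are illustrative and not part of the patch.

    -module(transport_dispatch).   %% illustrative module name
    -export([send/2, close/1, setopts/2]).

    %% Dispatch on the {Transport, Socket} pair, as ftp.erl now does in
    %% send_message/2, close_connection/1 and activate_connection/1.
    send({tcp, Sock}, Data) -> gen_tcp:send(Sock, Data);
    send({ssl, Sock}, Data) -> ssl:send(Sock, Data).

    close({tcp, Sock}) -> gen_tcp:close(Sock);
    close({ssl, Sock}) -> ssl:close(Sock).

    setopts({tcp, Sock}, Opts) -> inet:setopts(Sock, Opts);
    setopts({ssl, Sock}, Opts) -> ssl:setopts(Sock, Opts).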
diff --git a/lib/inets/src/ftp/ftp_response.erl b/lib/inets/src/ftp/ftp_response.erl
index 4bf788e946..dfe180ff18 100644
--- a/lib/inets/src/ftp/ftp_response.erl
+++ b/lib/inets/src/ftp/ftp_response.erl
@@ -175,6 +175,8 @@ error_string(Reason) ->
 %% Positive Preliminary Reply
interpret_status(?POS_PREL,_,_) -> pos_prel;
+%%FIXME ??? 3??? interpret_status(?POS_COMPL, ?AUTH_ACC, 3) -> tls_upgrade;
+interpret_status(?POS_COMPL, ?AUTH_ACC, 4) -> tls_upgrade;
%% Positive Completion Reply
interpret_status(?POS_COMPL,_,_) -> pos_compl;
 %% Positive Intermediate Reply need account
diff --git a/lib/inets/src/http_client/httpc.erl b/lib/inets/src/http_client/httpc.erl
index 4d7023a8e9..da9bbdd1ec 100644
--- a/lib/inets/src/http_client/httpc.erl
+++ b/lib/inets/src/http_client/httpc.erl
@@ -208,16 +208,8 @@ cancel_request(RequestId) ->
cancel_request(RequestId, Profile)
when is_atom(Profile) orelse is_pid(Profile) ->
?hcrt("cancel request", [{request_id, RequestId}, {profile, Profile}]),
- ok = httpc_manager:cancel_request(RequestId, profile_name(Profile)),
- receive
- %% If the request was already fulfilled throw away the
- %% answer as the request has been canceled.
- {http, {RequestId, _}} ->
- ok
- after 0 ->
- ok
- end.
-
+ httpc_manager:cancel_request(RequestId, profile_name(Profile)).
+
%%--------------------------------------------------------------------------
%% set_options(Options) -> ok | {error, Reason}
@@ -241,14 +233,7 @@ set_options(Options, Profile) when is_atom(Profile) orelse is_pid(Profile) ->
?hcrt("set options", [{options, Options}, {profile, Profile}]),
case validate_options(Options) of
{ok, Opts} ->
- try
- begin
- httpc_manager:set_options(Opts, profile_name(Profile))
- end
- catch
- exit:{noproc, _} ->
- {error, inets_not_started}
- end;
+ httpc_manager:set_options(Opts, profile_name(Profile));
{error, Reason} ->
{error, Reason}
end.
@@ -343,8 +328,6 @@ store_cookies(SetCookieHeaders, Url, Profile)
ok
end
catch
- exit:{noproc, _} ->
- {error, {not_started, Profile}};
error:{badmatch, Bad} ->
{error, {parse_failed, Bad}}
end.
diff --git a/lib/inets/src/http_client/httpc_handler.erl b/lib/inets/src/http_client/httpc_handler.erl
index 55794f57dc..80c8b2439e 100644
--- a/lib/inets/src/http_client/httpc_handler.erl
+++ b/lib/inets/src/http_client/httpc_handler.erl
@@ -32,7 +32,7 @@
start_link/4,
%% connect_and_send/2,
send/2,
- cancel/3,
+ cancel/2,
stream_next/1,
info/1
]).
@@ -117,8 +117,8 @@ send(Request, Pid) ->
%% Description: Cancels a request. Intended to be called by the httpc
%% manager process.
%%--------------------------------------------------------------------
-cancel(RequestId, Pid, From) ->
- cast({cancel, RequestId, From}, Pid).
+cancel(RequestId, Pid) ->
+ cast({cancel, RequestId}, Pid).
%%--------------------------------------------------------------------
@@ -400,19 +400,17 @@ handle_call(info, _, State) ->
%% handle_keep_alive_queue/2 on the other hand will just skip the
%% request as if it was never issued as in this case the request will
%% not have been sent.
-handle_cast({cancel, RequestId, From},
+handle_cast({cancel, RequestId},
#state{request = #request{id = RequestId} = Request,
profile_name = ProfileName,
canceled = Canceled} = State) ->
?hcrv("cancel current request", [{request_id, RequestId},
{profile, ProfileName},
{canceled, Canceled}]),
- httpc_manager:request_canceled(RequestId, ProfileName, From),
- ?hcrv("canceled", []),
{stop, normal,
State#state{canceled = [RequestId | Canceled],
request = Request#request{from = answer_sent}}};
-handle_cast({cancel, RequestId, From},
+handle_cast({cancel, RequestId},
#state{profile_name = ProfileName,
request = #request{id = CurrId},
canceled = Canceled} = State) ->
@@ -420,8 +418,6 @@ handle_cast({cancel, RequestId, From},
{curr_req_id, CurrId},
{profile, ProfileName},
{canceled, Canceled}]),
- httpc_manager:request_canceled(RequestId, ProfileName, From),
- ?hcrv("canceled", []),
{noreply, State#state{canceled = [RequestId | Canceled]}};
handle_cast(stream_next, #state{session = Session} = State) ->
@@ -521,19 +517,12 @@ handle_info({Proto, _Socket, Data},
activate_once(Session),
{noreply, State#state{mfa = NewMFA}}
catch
- exit:_Exit ->
- ?hcrd("data processing exit", [{exit, _Exit}]),
+ _:_Reason ->
+ ?hcrd("data processing exit", [{exit, _Reason}]),
ClientReason = {could_not_parse_as_http, Data},
ClientErrMsg = httpc_response:error(Request, ClientReason),
NewState = answer_request(Request, ClientErrMsg, State),
- {stop, normal, NewState};
- error:_Error ->
- ?hcrd("data processing error", [{error, _Error}]),
- ClientReason = {could_not_parse_as_http, Data},
- ClientErrMsg = httpc_response:error(Request, ClientReason),
- NewState = answer_request(Request, ClientErrMsg, State),
{stop, normal, NewState}
-
end,
?hcri("data processed", [{final_result, FinalResult}]),
FinalResult;
@@ -1165,7 +1154,7 @@ handle_http_body(Body, #state{headers = Headers,
handle_response(#state{status = new} = State) ->
?hcrd("handle response - status = new", []),
- handle_response(try_to_enable_pipeline_or_keep_alive(State));
+ handle_response(check_persistent(State));
handle_response(#state{request = Request,
status = Status,
@@ -1440,39 +1429,22 @@ is_keep_alive_enabled_server(_,_) ->
is_keep_alive_connection(Headers, #session{client_close = ClientClose}) ->
(not ((ClientClose) orelse httpc_response:is_server_closing(Headers))).
-try_to_enable_pipeline_or_keep_alive(
- #state{session = Session,
- request = #request{method = Method},
+check_persistent(
+ #state{session = #session{type = Type} = Session,
status_line = {Version, _, _},
headers = Headers,
- profile_name = ProfileName} = State) ->
- ?hcrd("try to enable pipeline or keep-alive",
- [{version, Version},
- {headers, Headers},
- {session, Session}]),
+ profile_name = ProfileName} = State) ->
case is_keep_alive_enabled_server(Version, Headers) andalso
- is_keep_alive_connection(Headers, Session) of
+ is_keep_alive_connection(Headers, Session) of
true ->
- case (is_pipeline_enabled_client(Session) andalso
- httpc_request:is_idempotent(Method)) of
- true ->
- insert_session(Session, ProfileName),
- State#state{status = pipeline};
- false ->
- insert_session(Session, ProfileName),
- %% Make sure type is keep_alive in session
- %% as it in this case might be pipeline
- NewSession = Session#session{type = keep_alive},
- State#state{status = keep_alive,
- session = NewSession}
- end;
+ mark_persistent(ProfileName, Session),
+ State#state{status = Type};
false ->
State#state{status = close}
end.
answer_request(#request{id = RequestId, from = From} = Request, Msg,
- #state{session = Session,
- timers = Timers,
+ #state{timers = Timers,
profile_name = ProfileName} = State) ->
?hcrt("answer request", [{request, Request}, {msg, Msg}]),
httpc_response:send(From, Msg),
@@ -1482,19 +1454,14 @@ answer_request(#request{id = RequestId, from = From} = Request, Msg,
Timer = {RequestId, TimerRef},
cancel_timer(TimerRef, {timeout, Request#request.id}),
httpc_manager:request_done(RequestId, ProfileName),
- NewSession = maybe_make_session_available(ProfileName, Session),
Timers2 = Timers#timers{request_timers = lists:delete(Timer,
RequestTimers)},
State#state{request = Request#request{from = answer_sent},
- session = NewSession,
timers = Timers2}.
-maybe_make_session_available(ProfileName,
- #session{available = false} = Session) ->
- update_session(ProfileName, Session, #session.available, true),
- Session#session{available = true};
-maybe_make_session_available(_ProfileName, Session) ->
- Session.
+mark_persistent(ProfileName, Session) ->
+ update_session(ProfileName, Session, #session.persistent, true),
+ Session#session{persistent = true}.
cancel_timers(#timers{request_timers = ReqTmrs, queue_timer = QTmr}) ->
cancel_timer(QTmr, timeout_queue),
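%% Illustrative sketch, not part of this patch: the single _:_Reason clause
%% above replaces the separate exit:_ and error:_ clauses; a Class:Reason
%% pattern in a catch traps throw, error and exit alike. The module below
%% is a hypothetical stand-alone illustration of that pattern.
-module(catch_all_example).
-export([classify/1]).

classify(Fun) when is_function(Fun, 0) ->
    try
        Fun()
    catch
        Class:Reason ->
            %% Class is one of throw | error | exit
            {caught, Class, Reason}
    end.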
diff --git a/lib/inets/src/http_client/httpc_internal.hrl b/lib/inets/src/http_client/httpc_internal.hrl
index 30e2742e9d..d5b3dd2a2a 100644
--- a/lib/inets/src/http_client/httpc_internal.hrl
+++ b/lib/inets/src/http_client/httpc_internal.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -143,8 +143,8 @@
%% true | false
%% This will be true, when a response has been received for
- %% the first request. See type above.
- available = false
+ %% the first request and the server has not closed the connection
+ persistent = false
}).
diff --git a/lib/inets/src/http_client/httpc_manager.erl b/lib/inets/src/http_client/httpc_manager.erl
index c45dcab802..a3ed371e61 100644
--- a/lib/inets/src/http_client/httpc_manager.erl
+++ b/lib/inets/src/http_client/httpc_manager.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2002-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2002-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -29,7 +29,6 @@
start_link/3,
request/2,
cancel_request/2,
- request_canceled/3,
request_done/2,
retry_request/2,
redirect_request/2,
@@ -144,22 +143,7 @@ redirect_request(Request, ProfileName) ->
%%--------------------------------------------------------------------
cancel_request(RequestId, ProfileName) ->
- call(ProfileName, {cancel_request, RequestId}).
-
-
-%%--------------------------------------------------------------------
-%% Function: request_canceled(RequestId, ProfileName) -> ok
-%% RequestId - ref()
-%% ProfileName = atom()
-%%
-%% Description: Confirms that a request has been canceld. Intended to
-%% be called by the httpc handler process.
-%%--------------------------------------------------------------------
-
-request_canceled(RequestId, ProfileName, From) ->
- gen_server:reply(From, ok),
- cast(ProfileName, {request_canceled, RequestId}).
-
+ cast(ProfileName, {cancel_request, RequestId}).
%%--------------------------------------------------------------------
%% Function: request_done(RequestId, ProfileName) -> ok
@@ -467,33 +451,13 @@ do_init(ProfileName, CookiesDir) ->
%%--------------------------------------------------------------------
handle_call({request, Request}, _, State) ->
?hcri("request", [{request, Request}]),
- case (catch handle_request(Request, State)) of
+ case (catch handle_request(Request, State, false)) of
{reply, Msg, NewState} ->
{reply, Msg, NewState};
Error ->
{stop, Error, httpc_response:error(Request, Error), State}
end;
-handle_call({cancel_request, RequestId}, From,
- #state{handler_db = HandlerDb} = State) ->
- ?hcri("cancel_request", [{request_id, RequestId}]),
- case ets:lookup(HandlerDb, RequestId) of
- [] ->
- %% The request has allready compleated make sure
- %% it is deliverd to the client process queue so
- %% it can be thrown away by httpc:cancel_request
- %% This delay is hopfully a temporary workaround.
- %% Note that it will not not delay the manager,
- %% only the client that called httpc:cancel_request
- timer:apply_after(?DELAY, gen_server, reply, [From, ok]),
- {noreply, State};
- [{_, Pid, _}] ->
- httpc_handler:cancel(RequestId, Pid, From),
- {noreply,
- State#state{cancel =
- [{RequestId, Pid, From} | State#state.cancel]}}
- end;
-
handle_call(reset_cookies, _, #state{cookie_db = CookieDb} = State) ->
?hcrv("reset cookies", []),
httpc_cookie:reset_db(CookieDb),
@@ -547,7 +511,7 @@ handle_cast({retry_or_redirect_request, {Time, Request}},
{noreply, State};
handle_cast({retry_or_redirect_request, Request}, State) ->
- case (catch handle_request(Request, State)) of
+ case (catch handle_request(Request, State, true)) of
{reply, {ok, _}, NewState} ->
{noreply, NewState};
Error ->
@@ -555,19 +519,19 @@ handle_cast({retry_or_redirect_request, Request}, State) ->
{stop, Error, State}
end;
-handle_cast({request_canceled, RequestId}, State) ->
- ?hcrv("request canceled", [{request_id, RequestId}]),
- ets:delete(State#state.handler_db, RequestId),
- case lists:keysearch(RequestId, 1, State#state.cancel) of
- {value, Entry = {RequestId, _, From}} ->
- ?hcrt("found in cancel", [{from, From}]),
- {noreply,
- State#state{cancel = lists:delete(Entry, State#state.cancel)}};
- Else ->
- ?hcrt("not found in cancel", [{else, Else}]),
- {noreply, State}
+handle_cast({cancel_request, RequestId},
+ #state{handler_db = HandlerDb} = State) ->
+ case ets:lookup(HandlerDb, RequestId) of
+ [] ->
+	    %% Request already completed; nothing to
+	    %% cancel
+ {noreply, State};
+ [{_, Pid, _}] ->
+ httpc_handler:cancel(RequestId, Pid),
+ ets:delete(State#state.handler_db, RequestId),
+ {noreply, State}
end;
-
+
handle_cast({request_done, RequestId}, State) ->
?hcrv("request done", [{request_id, RequestId}]),
ets:delete(State#state.handler_db, RequestId),
@@ -629,22 +593,8 @@ handle_info({'EXIT', _, _}, State) ->
%% Handled in DOWN
{noreply, State};
handle_info({'DOWN', _, _, Pid, _}, State) ->
- ets:match_delete(State#state.handler_db, {'_', Pid, '_'}),
-
- %% If there where any canceled request, handled by the
- %% the process that now has terminated, the
- %% cancelation can be viewed as sucessfull!
- NewCanceldList =
- lists:foldl(fun(Entry = {_, HandlerPid, From}, Acc) ->
- case HandlerPid of
- Pid ->
- gen_server:reply(From, ok),
- lists:delete(Entry, Acc);
- _ ->
- Acc
- end
- end, State#state.cancel, State#state.cancel),
- {noreply, State#state{cancel = NewCanceldList}};
+ ets:match_delete(State#state.handler_db, {'_', Pid, '_'}),
+ {noreply, State};
handle_info(Info, State) ->
Report = io_lib:format("Unknown message in "
"httpc_manager:handle_info ~p~n", [Info]),
@@ -774,7 +724,7 @@ get_handler_info(Tab) ->
handle_request(#request{settings =
#http_options{version = "HTTP/0.9"}} = Request,
- State) ->
+ State, _) ->
%% Act as an HTTP/0.9 client that does not know anything
%% about persistent connections
@@ -787,7 +737,7 @@ handle_request(#request{settings =
handle_request(#request{settings =
#http_options{version = "HTTP/1.0"}} = Request,
- State) ->
+ State, _) ->
%% Act as an HTTP/1.0 client that does not
%% use persistent connections
@@ -798,13 +748,13 @@ handle_request(#request{settings =
start_handler(NewRequest#request{headers = NewHeaders}, State),
{reply, {ok, NewRequest#request.id}, State};
-handle_request(Request, State = #state{options = Options}) ->
+handle_request(Request, State = #state{options = Options}, Retry) ->
NewRequest = handle_cookies(generate_request_id(Request), State),
SessionType = session_type(Options),
case select_session(Request#request.method,
Request#request.address,
- Request#request.scheme, SessionType, State) of
+ Request#request.scheme, SessionType, State, Retry) of
{ok, HandlerPid} ->
pipeline_or_keep_alive(NewRequest, HandlerPid, State);
no_connection ->
@@ -828,6 +778,7 @@ start_handler(#request{id = Id,
#state{profile_name = ProfileName,
handler_db = HandlerDb,
options = Options}) ->
+ ClientClose = httpc_request:is_client_closing(Request#request.headers),
{ok, Pid} =
case is_inets_manager() of
true ->
@@ -838,13 +789,18 @@ start_handler(#request{id = Id,
end,
HandlerInfo = {Id, Pid, From},
ets:insert(HandlerDb, HandlerInfo),
+ insert_session(#session{id = {Request#request.address, Pid},
+ scheme = Request#request.scheme,
+ client_close = ClientClose,
+ type = session_type(Options)
+ }, ProfileName),
erlang:monitor(process, Pid).
select_session(Method, HostPort, Scheme, SessionType,
#state{options = #options{max_pipeline_length = MaxPipe,
max_keep_alive_length = MaxKeepAlive},
- session_db = SessionDb}) ->
+ session_db = SessionDb}, Retry) ->
?hcrd("select session", [{session_type, SessionType},
{max_pipeline_length, MaxPipe},
{max_keep_alive_length, MaxKeepAlive}]),
@@ -857,19 +813,29 @@ select_session(Method, HostPort, Scheme, SessionType,
%% client_close, scheme and type specified.
%% The fields id (part of: HandlerPid) and queue_length
%% specified.
- Pattern = #session{id = {HostPort, '$1'},
- client_close = false,
- scheme = Scheme,
- queue_length = '$2',
- type = SessionType,
- available = true,
- _ = '_'},
+ Pattern = case (Retry andalso SessionType == pipeline) of
+ true ->
+ #session{id = {HostPort, '$1'},
+ client_close = false,
+ scheme = Scheme,
+ queue_length = '$2',
+ type = SessionType,
+ persistent = true,
+ _ = '_'};
+ false ->
+ #session{id = {HostPort, '$1'},
+ client_close = false,
+ scheme = Scheme,
+ queue_length = '$2',
+ type = SessionType,
+ _ = '_'}
+ end,
%% {'_', {HostPort, '$1'}, false, Scheme, '_', '$2', SessionTyp},
Candidates = ets:match(SessionDb, Pattern),
?hcrd("select session", [{host_port, HostPort},
{scheme, Scheme},
{type, SessionType},
- {candidates, Candidates}]),
+ {candidates, Candidates}]),
select_session(Candidates, MaxKeepAlive, MaxPipe, SessionType);
false ->
no_connection
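%% Illustrative sketch, not part of this patch: cancel_request/2 is now a
%% cast, so cancellation is asynchronous and no longer waits for the
%% handler. A client should therefore be prepared to drop a reply that was
%% already on its way; the module and function names are hypothetical.
-module(cancel_example).
-export([request_and_cancel/1]).

request_and_cancel(Url) ->
    {ok, RequestId} = httpc:request(get, {Url, []}, [], [{sync, false}]),
    ok = httpc:cancel_request(RequestId),
    receive
        {http, {RequestId, _Result}} ->
            %% The answer was sent before the cancel was processed.
            late_reply_dropped
    after 0 ->
        ok
    end.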
diff --git a/lib/inets/src/http_server/httpd_conf.erl b/lib/inets/src/http_server/httpd_conf.erl
index b3ca13e2fe..27446ca7fe 100644
--- a/lib/inets/src/http_server/httpd_conf.erl
+++ b/lib/inets/src/http_server/httpd_conf.erl
@@ -798,6 +798,8 @@ store({log_format, LogFormat}, _ConfigList)
store({server_tokens, ServerTokens} = Entry, _ConfigList) ->
Server = server(ServerTokens),
{ok, [Entry, {server, Server}]};
+store({keep_alive_timeout, KeepAliveTimeout}, _ConfigList) ->
+ {ok, {keep_alive_timeout, KeepAliveTimeout * 1000}};
store(ConfigListEntry, _ConfigList) ->
{ok, ConfigListEntry}.
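%% Illustrative sketch, not part of this patch: with the store/2 clause
%% added above, keep_alive_timeout is given in seconds in the server
%% property list and kept internally in milliseconds. The module name and
%% the directory paths below are hypothetical.
-module(httpd_keepalive_example).
-export([start/0]).

start() ->
    inets:start(httpd, [{port, 0},
                        {server_name, "localhost"},
                        {server_root, "/tmp"},
                        {document_root, "/tmp"},
                        {keep_alive, true},
                        %% 10 seconds, stored internally as 10000 ms
                        {keep_alive_timeout, 10}]).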
diff --git a/lib/inets/src/http_server/httpd_log.erl b/lib/inets/src/http_server/httpd_log.erl
index a34435e0e8..7ff73669f9 100644
--- a/lib/inets/src/http_server/httpd_log.erl
+++ b/lib/inets/src/http_server/httpd_log.erl
@@ -39,14 +39,21 @@
Size :: 0 | pos_integer() | string()) ->
{Log :: atom() | pid(), Entry :: string()} | term() .
-access_entry(Log, NoLog, Info, RFC931, AuthUser, Date, StatusCode, SizeStr)
- when is_list(SizeStr) ->
+%% Sometimes the size, in the form of the content_length, is put here,
+%% which is actually in the form of a string.
+%% So it can either be the size as an integer, the size as a string,
+%% or, worst case scenario, bytes.
+access_entry(Log, NoLog, Info, RFC931, AuthUser, Date, StatusCode,
+ SizeStrOrBytes)
+ when is_list(SizeStrOrBytes) ->
Size =
- case (catch list_to_integer(SizeStr)) of
+ case (catch list_to_integer(SizeStrOrBytes)) of
I when is_integer(I) ->
+ %% This is from using the content_length (which is a string)
I;
_ ->
- SizeStr % This is better then nothing
+ %% This is better than nothing
+ httpd_util:flatlength(SizeStrOrBytes)
end,
access_entry(Log, NoLog, Info, RFC931, AuthUser, Date, StatusCode, Size);
access_entry(Log, NoLog,
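%% Illustrative sketch, not part of this patch: the size argument above can
%% now be either a content-length string or the response data itself. A
%% hypothetical helper that approximates the same conversion with the
%% standard iolist_size/1:
-module(log_size_example).
-export([to_size/1]).

to_size(SizeStrOrBytes) when is_list(SizeStrOrBytes) ->
    case catch list_to_integer(SizeStrOrBytes) of
        I when is_integer(I) ->
            I;                                  %% "1234" -> 1234
        _ ->
            erlang:iolist_size(SizeStrOrBytes)  %% body data -> byte count
    end;
to_size(Size) when is_integer(Size) ->
    Size.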
diff --git a/lib/inets/src/http_server/httpd_request_handler.erl b/lib/inets/src/http_server/httpd_request_handler.erl
index cb20159794..ea7a17e40d 100644
--- a/lib/inets/src/http_server/httpd_request_handler.erl
+++ b/lib/inets/src/http_server/httpd_request_handler.erl
@@ -267,9 +267,9 @@ handle_info({ssl_error, _, _} = Reason, State) ->
{stop, Reason, State};
%% Timeouts
-handle_info(timeout, #state{mod = ModData, mfa = {_, parse, _}} = State) ->
- error_log("No request received on keep-alive connection "
- "before server side timeout", ModData),
+handle_info(timeout, #state{mfa = {_, parse, _}} = State) ->
+ %% error_log("No request received on keep-alive connection "
+ %% "before server side timeout", ModData),
%% No response should be sent!
{stop, normal, State#state{response_sent = true}};
handle_info(timeout, #state{mod = ModData} = State) ->
@@ -316,7 +316,10 @@ terminate(normal, State) ->
do_terminate(State);
terminate(Reason, #state{response_sent = false, mod = ModData} = State) ->
httpd_response:send_status(ModData, 500, none),
- error_log(httpd_util:reason_phrase(500), ModData),
+ ReasonStr =
+ lists:flatten(io_lib:format("~s - ~p",
+ [httpd_util:reason_phrase(500), Reason])),
+ error_log(ReasonStr, ModData),
terminate(Reason, State#state{response_sent = true, mod = ModData});
terminate(_Reason, State) ->
do_terminate(State).
diff --git a/lib/inets/src/http_server/httpd_response.erl b/lib/inets/src/http_server/httpd_response.erl
index a45b04f275..0895729d05 100644
--- a/lib/inets/src/http_server/httpd_response.erl
+++ b/lib/inets/src/http_server/httpd_response.erl
@@ -23,9 +23,10 @@
is_disable_chunked_send/1, cache_headers/2]).
-export([map_status_code/2]).
--include("httpd.hrl").
--include("http_internal.hrl").
--include("httpd_internal.hrl").
+-include_lib("inets/src/inets_app/inets_internal.hrl").
+-include_lib("inets/include/httpd.hrl").
+-include_lib("inets/src/http_lib/http_internal.hrl").
+-include_lib("inets/src/http_server/httpd_internal.hrl").
-define(VMODULE,"RESPONSE").
@@ -35,7 +36,7 @@ generate_and_send_response(#mod{init_data =
#init_data{peername = {_,"unknown"}}}) ->
ok;
generate_and_send_response(#mod{config_db = ConfigDB} = ModData) ->
- Modules = httpd_util:lookup(ConfigDB,modules, ?DEFAULT_MODS),
+ Modules = httpd_util:lookup(ConfigDB, modules, ?DEFAULT_MODS),
case traverse_modules(ModData, Modules) of
done ->
ok;
@@ -68,16 +69,7 @@ traverse_modules(ModData,[]) ->
{proceed,ModData#mod.data};
traverse_modules(ModData,[Module|Rest]) ->
?hdrd("traverse modules", [{callback_module, Module}]),
- case (catch apply(Module, do, [ModData])) of
- {'EXIT', Reason} ->
- String =
- lists:flatten(
- io_lib:format("traverse exit from apply: ~p:do => ~n~p",
- [Module, Reason])),
- report_error(mod_log, ModData#mod.config_db, String),
- report_error(mod_disk_log, ModData#mod.config_db, String),
- send_status(ModData, 500, none),
- done;
+ try apply(Module, do, [ModData]) of
done ->
?hdrt("traverse modules - done", []),
done;
@@ -87,6 +79,19 @@ traverse_modules(ModData,[Module|Rest]) ->
{proceed, NewData} ->
?hdrt("traverse modules - proceed", [{new_data, NewData}]),
traverse_modules(ModData#mod{data = NewData}, Rest)
+ catch
+ T:E ->
+ String =
+ lists:flatten(
+ io_lib:format("module traverse failed: ~p:do => "
+ "~n Error Type: ~p"
+ "~n Error: ~p"
+ "~n Stack trace: ~p",
+ [Module, T, E, ?STACK()])),
+ report_error(mod_log, ModData#mod.config_db, String),
+ report_error(mod_disk_log, ModData#mod.config_db, String),
+ send_status(ModData, 500, none),
+ done
end.
%% send_status %%
diff --git a/lib/inets/src/http_server/mod_cgi.erl b/lib/inets/src/http_server/mod_cgi.erl
index f1b73810e6..d933b0a4ba 100644
--- a/lib/inets/src/http_server/mod_cgi.erl
+++ b/lib/inets/src/http_server/mod_cgi.erl
@@ -131,9 +131,9 @@ store({script_nocache, Value} = Conf, _)
{ok, Conf};
store({script_nocache, Value}, _) ->
{error, {wrong_type, {script_nocache, Value}}};
-store({script_timeout, Value} = Conf, _)
+store({script_timeout, Value}, _)
when is_integer(Value), Value >= 0 ->
- {ok, Conf};
+ {ok, {script_timeout, Value * 1000}};
store({script_timeout, Value}, _) ->
{error, {wrong_type, {script_timeout, Value}}}.
@@ -238,7 +238,7 @@ send_request_body_to_script(ModData, Port) ->
end.
deliver_webpage(#mod{config_db = Db} = ModData, Port) ->
- Timeout = cgi_timeout(Db),
+ Timeout = script_timeout(Db),
case receive_headers(Port, httpd_cgi, parse_headers,
[<<>>, [], []], Timeout) of
{Headers, Body} ->
@@ -341,8 +341,8 @@ script_elements(#mod{method = "PUT", entity_body = Body}, _) ->
script_elements(_, _) ->
[].
-cgi_timeout(Db) ->
- httpd_util:lookup(Db, cgi_timeout, ?DEFAULT_CGI_TIMEOUT).
+script_timeout(Db) ->
+ httpd_util:lookup(Db, script_timeout, ?DEFAULT_CGI_TIMEOUT).
%% Convert error to printable string
%%
diff --git a/lib/inets/src/http_server/mod_head.erl b/lib/inets/src/http_server/mod_head.erl
index c346fd4d23..02b8485b25 100644
--- a/lib/inets/src/http_server/mod_head.erl
+++ b/lib/inets/src/http_server/mod_head.erl
@@ -42,6 +42,10 @@ do(Info) ->
%% A response has been sent! Nothing to do about it!
{already_sent, _StatusCode, _Size} ->
{proceed,Info#mod.data};
+ {response, Header, _Body} -> %% New way
+ {proceed,
+ lists:keyreplace(response, 1, Info#mod.data,
+ {response, Header, nobody})};
%% A response has been generated!
{_StatusCode, _Response} ->
{proceed,Info#mod.data}
diff --git a/lib/inets/src/inets_app/inets_internal.hrl b/lib/inets/src/inets_app/inets_internal.hrl
index e56af3b59d..06843f2275 100644
--- a/lib/inets/src/inets_app/inets_internal.hrl
+++ b/lib/inets/src/inets_app/inets_internal.hrl
@@ -21,6 +21,8 @@
-ifndef(inets_internal_hrl).
-define(inets_internal_hrl, true).
+-define(STACK(), erlang:get_stacktrace()).
+
%% Various trace macros
-define(report(Severity, Label, Service, Content),
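%% Illustrative sketch, not part of this patch: the ?STACK() macro defined
%% above expands to erlang:get_stacktrace(), which in this OTP generation
%% is only meaningful inside the catch clause that trapped the exception.
%% The module below is a hypothetical usage example.
-module(stack_example).
-export([run/1]).

run(Fun) when is_function(Fun, 0) ->
    try
        Fun()
    catch
        Type:Error ->
            {failed, Type, Error, erlang:get_stacktrace()}
    end.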
diff --git a/lib/inets/test/Makefile b/lib/inets/test/Makefile
index 2f2f6ec16e..73070ac57e 100644
--- a/lib/inets/test/Makefile
+++ b/lib/inets/test/Makefile
@@ -152,21 +152,6 @@ MODULES = \
erl_make_certs \
ftp_SUITE \
ftp_format_SUITE \
- ftp_solaris8_sparc_test \
- ftp_solaris9_sparc_test \
- ftp_solaris10_sparc_test \
- ftp_solaris10_x86_test \
- ftp_linux_x86_test \
- ftp_linux_ppc_test \
- ftp_macosx_x86_test \
- ftp_macosx_ppc_test \
- ftp_openbsd_x86_test \
- ftp_freebsd_x86_test \
- ftp_netbsd_x86_test \
- ftp_windows_xp_test \
- ftp_windows_2003_server_test \
- ftp_suite_lib \
- ftp_ticket_test \
http_format_SUITE \
httpc_SUITE \
httpc_cookie_SUITE \
diff --git a/lib/inets/test/ftp_SUITE.erl b/lib/inets/test/ftp_SUITE.erl
index 17e5f6777e..e39f9f1eb6 100644
--- a/lib/inets/test/ftp_SUITE.erl
+++ b/lib/inets/test/ftp_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2004-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -18,110 +18,803 @@
%%
%%
+%%
+%% ct:run("../inets_test", ftp_SUITE).
+%%
+
-module(ftp_SUITE).
+-include_lib("kernel/include/file.hrl").
-include_lib("common_test/include/ct.hrl").
--include("test_server_line.hrl").
+-include("inets_test_lib.hrl").
-%% Test server specific exports
--export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2]).
-% -export([init_per_testcase/2, end_per_testcase/2]).
--export([init_per_suite/1, end_per_suite/1]).
+%% Note: This directive should only be used in test suites.
+-compile(export_all).
-define(FTP_USER, "anonymous").
--define(FTP_PASS, passwd()).
--define(FTP_PORT, 21).
+-define(FTP_PASS(Cmnt), (fun({ok,__H}) -> "ftp_SUITE_"++Cmnt++"@" ++ __H;
+ (_) -> "ftp_SUITE_"++Cmnt++"@localhost"
+ end)(inet:gethostname())
+ ).
-define(BAD_HOST, "badhostname").
-define(BAD_USER, "baduser").
-define(BAD_DIR, "baddirectory").
--ifdef(ftp_debug_client).
--define(ftp_open(Host, Flags), do_ftp_open(Host, [debug] ++ Flags)).
--else.
--ifdef(ftp_trace_client).
--define(ftp_open(Host, Flags), do_ftp_open(Host, [trace] ++ Flags)).
--else.
--define(ftp_open(Host, Flags), do_ftp_open(Host, [verbose] ++ Flags)).
--endif.
--endif.
-
+go() -> ct:run_test([{suite,"ftp_SUITE"}, {logdir,"LOG"}]).
+gos() -> ct:run_test([{suite,"ftp_SUITE"}, {group,ftps_passive}, {logdir,"LOG"}]).
%%--------------------------------------------------------------------
-%% all(Arg) -> [Doc] | [Case] | {skip, Comment}
-%% Arg - doc | suite
-%% Doc - string()
-%% Case - atom()
-%% Name of a test case function.
-%% Comment - string()
-%% Description: Returns documentation/test cases in this test suite
-%% or a skip tuple if the platform is not supported.
+%% Common Test interface functions -----------------------------------
%%--------------------------------------------------------------------
-suite() -> [{ct_hooks, [ts_install_cth]}].
+all() ->
+ [
+ {group, ftp_passive},
+ {group, ftp_active},
+ {group, ftps_passive},
+ {group, ftps_active}
+ ].
-all() ->
+groups() ->
[
- {group, solaris8_test},
- {group, solaris9_test},
- {group, solaris10_test},
- {group, linux_x86_test},
- {group, linux_ppc_test},
- {group, macosx_x86_test},
- {group, macosx_ppc_test},
- {group, openbsd_test},
- {group, freebsd_test},
- {group, netbsd_test},
- {group, windows_xp_test},
- {group, windows_2003_server_test},
- {group, ticket_tests}
+ {ftp_passive, [], ftp_tests()},
+ {ftp_active, [], ftp_tests()},
+ {ftps_passive, [], ftp_tests()},
+ {ftps_active, [], ftp_tests()}
].
-groups() ->
+ftp_tests()->
[
- {solaris8_test, [], [{ftp_solaris8_sparc_test, all}]},
- {solaris9_test, [], [{ftp_solaris9_sparc_test, all}]},
- {solaris10_test, [], [{ftp_solaris10_sparc_test, all},
- {ftp_solaris10_x86_test, all}]},
- {linux_x86_test, [], [{ftp_linux_x86_test, all}]},
- {linux_ppc_test, [], [{ftp_linux_ppc_test, all}]},
- {macosx_x86_test, [], [{ftp_macosx_x86_test, all}]},
- {macosx_ppc_test, [], [{ftp_macosx_ppc_test, all}]},
- {openbsd_test, [], [{ftp_openbsd_x86_test, all}]},
- {freebsd_test, [], [{ftp_freebsd_x86_test, all}]},
- {netbsd_test, [], [{ftp_netbsd_x86_test, all}]},
- {windows_xp_test, [], [{ftp_windows_xp_test, all}]},
- {windows_2003_server_test, [], [{ftp_windows_2003_server_test, all}]},
- {ticket_tests, [], [{ftp_ticket_test, all}]}
+ user,
+ bad_user,
+ pwd,
+ cd,
+ lcd,
+ ls,
+ nlist,
+ rename,
+ delete,
+ mkdir,
+ rmdir,
+ send,
+ send_3,
+ send_bin,
+ send_chunk,
+ append,
+ append_bin,
+ append_chunk,
+ recv,
+ recv_3,
+ recv_bin,
+ recv_chunk,
+ type,
+ quote,
+ ip_v6_disabled
].
-init_per_group(_GroupName, Config) ->
- Config.
+%%--------------------------------------------------------------------
+
+%%% Config
+%%% key meaning
+%%% ................................................................
+%%% ftpservers list of servers to check if they are available
+%%% The element is:
+%%% {Name, % string(). The os command name
+%%% StartCommand, % fun()->{ok,start_result()} | {error,string()}.
+%%% % The command to start the daemon with.
+%%% ChkUp, % fun(start_result()) -> string(). Os command to check
+%%% % if the server is running. [] if not running.
+%%% % The string in string() is suitable for logging.
+%%% StopCommand, % fun(start_result()) -> void(). The command to stop the daemon with.
+%%% AugmentFun, % fun(config()) -> config() Adds two funs for transforming names of files
+%%% % and directories to the form they are returned from this server
+%%% ServerHost, % string(). Mostly "localhost"
+%%% ServerPort % pos_integer()
+%%% }
+%%%
+
+-define(default_ftp_servers,
+ [{"vsftpd",
+ fun(__CONF__) ->
+ DataDir = ?config(data_dir,__CONF__),
+ ConfFile = filename:join(DataDir, "vsftpd.conf"),
+ PrivDir = ?config(priv_dir,__CONF__),
+ AnonRoot = PrivDir,
+ Cmd = ["vsftpd "++filename:join(DataDir,"vsftpd.conf"),
+ " -oftpd_banner=erlang_otp_testing",
+ " -oanon_root=\"",AnonRoot,"\"",
+ " -orsa_cert_file=\"",filename:join(DataDir,"server-cert.pem"),"\"",
+ " -orsa_private_key_file=\"",filename:join(DataDir,"server-key.pem"),"\""
+ ],
+ Result = os:cmd(Cmd),
+ ct:log("Config file:~n~s~n~nServer start command:~n ~s~nResult:~n ~p",
+ [case file:read_file(ConfFile) of
+ {ok,X} -> X;
+ _ -> ""
+ end,
+ Cmd, Result
+ ]),
+ case Result of
+ [] -> {ok,'dont care'};
+ [Msg] -> {error,Msg}
+ end
+ end,
+ fun(_StartResult) -> os:cmd("ps ax | grep erlang_otp_testing | grep -v grep")
+ end,
+ fun(_StartResult) -> os:cmd("kill `ps ax | grep erlang_otp_testing | awk '/vsftpd/{print $1}'`")
+ end,
+ fun(__CONF__) ->
+ AnonRoot = ?config(priv_dir,__CONF__),
+ [{id2ftp, fun(Id) -> filename:join(AnonRoot,Id) end},
+ {id2ftp_result,fun(Id) -> filename:join(AnonRoot,Id) end} | __CONF__]
+ end,
+ "localhost",
+ 9999
+ }
+ ]
+ ).
-end_per_group(_GroupName, Config) ->
- Config.
+init_per_suite(Config) ->
+ case find_executable(Config) of
+ false ->
+ {skip, "No ftp server found"};
+ {ok,Data} ->
+ TstDir = filename:join(?config(priv_dir,Config), "test"),
+ file:make_dir(TstDir),
+ make_cert_files(dsa, rsa, "server-", ?config(data_dir,Config)),
+ start_ftpd([{test_dir,TstDir},
+ {ftpd_data,Data}
+ | Config])
+ end.
+
+end_per_suite(Config) ->
+ ps_ftpd(Config),
+ stop_ftpd(Config),
+ ps_ftpd(Config),
+ ok.
+
+%%--------------------------------------------------------------------
+init_per_group(_Group, Config) -> Config.
+
+end_per_group(_Group, Config) -> Config.
+%%--------------------------------------------------------------------
+init_per_testcase(Case, Config0) ->
+ Group = proplists:get_value(name,?config(tc_group_properties,Config0)),
+ try ?MODULE:Case(doc) of
+ Msg -> ct:comment(Msg)
+ catch
+ _:_-> ok
+ end,
+ TLS = [{tls,[{reuse_sessions,true}]}],
+ ACTIVE = [{mode,active}],
+ PASSIVE = [{mode,passive}],
+ ExtraOpts = [verbose],
+ Config =
+ case Group of
+ ftp_active -> ftp__open(Config0, ACTIVE ++ExtraOpts);
+ ftps_active -> ftp__open(Config0, TLS++ ACTIVE ++ExtraOpts);
+ ftp_passive -> ftp__open(Config0, PASSIVE ++ExtraOpts);
+ ftps_passive -> ftp__open(Config0, TLS++PASSIVE ++ExtraOpts)
+ end,
+ case Case of
+ user -> Config;
+ bad_user -> Config;
+ _ ->
+ Pid = ?config(ftp,Config),
+ ok = ftp:user(Pid, ?FTP_USER, ?FTP_PASS(atom_to_list(Group)++"-"++atom_to_list(Case)) ),
+ ok = ftp:cd(Pid, ?config(priv_dir,Config)),
+ Config
+ end.
+
+end_per_testcase(user, _Config) -> ok;
+end_per_testcase(bad_user, _Config) -> ok;
+end_per_testcase(_Case, Config) ->
+ case ?config(tc_status,Config) of
+ ok -> ok;
+ _ ->
+ try ftp:latest_ctrl_response(?config(ftp,Config))
+ of
+ {ok,S} -> ct:log("***~n*** Latest ctrl channel response:~n*** ~p~n***",[S])
+ catch
+ _:_ -> ok
+ end
+ end,
+ ftp__close(Config).
%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
+%% Test Cases --------------------------------------------------------
+%%--------------------------------------------------------------------
+user(doc) -> ["Open an ftp connection to a host, and logon as anonymous ftp, then logoff"];
+user(Config) ->
+ Pid = ?config(ftp, Config),
+ ok = ftp:user(Pid, ?FTP_USER, ?FTP_PASS("")),% logon
+ ok = ftp:close(Pid), % logoff
+ {error,eclosed} = ftp:pwd(Pid), % check logoff result
+ ok.
+
+%%-------------------------------------------------------------------------
+bad_user(doc) -> ["Open an ftp connection to a host, and logon with bad user."];
+bad_user(Config) ->
+ Pid = ?config(ftp, Config),
+ {error, euser} = ftp:user(Pid, ?BAD_USER, ?FTP_PASS("")),
+ ok.
+
+%%-------------------------------------------------------------------------
+pwd(doc) -> ["Test ftp:pwd/1 & ftp:lpwd/1"];
+pwd(Config0) ->
+ Config = set_state([reset], Config0),
+ Pid = ?config(ftp, Config),
+ {ok, PWD} = ftp:pwd(Pid),
+ {ok, PathLpwd} = ftp:lpwd(Pid),
+ PWD = id2ftp_result("", Config),
+ PathLpwd = id2ftp_result("", Config).
+
+%%-------------------------------------------------------------------------
+cd(doc) -> ["Open an ftp connection, log on as anonymous ftp, and cd to a "
+ "directory and to a non-existent directory."];
+cd(Config0) ->
+ Dir = "test",
+ Config = set_state([reset,{mkdir,Dir}], Config0),
+ Pid = ?config(ftp, Config),
+ ok = ftp:cd(Pid, id2ftp(Dir,Config)),
+ {ok, PWD} = ftp:pwd(Pid),
+ ExpectedPWD = id2ftp_result(Dir, Config),
+ PWD = ExpectedPWD,
+ {error, epath} = ftp:cd(Pid, ?BAD_DIR).
+
+%%-------------------------------------------------------------------------
+lcd(doc) ->
+ ["Test api function ftp:lcd/2"];
+lcd(Config0) ->
+ Dir = "test",
+ Config = set_state([reset,{mkdir,Dir}], Config0),
+ Pid = ?config(ftp, Config),
+ ok = ftp:lcd(Pid, id2ftp(Dir,Config)),
+ {ok, PWD} = ftp:lpwd(Pid),
+ ExpectedPWD = id2ftp_result(Dir, Config),
+ PWD = ExpectedPWD,
+ {error, epath} = ftp:lcd(Pid, ?BAD_DIR).
+
+%%-------------------------------------------------------------------------
+ls(doc) -> ["Open an ftp connection; ls the current directory, and the "
+ "\"test\" directory. We assume that ls never fails, since "
+ "it's output is meant to be read by humans. "];
+ls(Config0) ->
+ Config = set_state([reset,{mkdir,"test"}], Config0),
+ Pid = ?config(ftp, Config),
+ {ok, _R1} = ftp:ls(Pid),
+ {ok, _R2} = ftp:ls(Pid, id2ftp("test",Config)),
+ %% neither nlist nor ls operates on a directory
+ %% they operate on a pathname, which *can* be a
+ %% directory, but can also be a filename or a group
+ %% of files (including wildcards).
+ case ?config(wildcard_support, Config) of
+ true ->
+ {ok, _R3} = ftp:ls(Pid, id2ftp("te*",Config));
+ _ ->
+ ok
+ end.
+
+%%-------------------------------------------------------------------------
+nlist(doc) -> ["Open an ftp connection; nlist the current directory, and the "
+ "\"test\" directory. Nlist does not behave consistenly over "
+ "operating systems. On some it is an error to have an empty "
+ "directory."];
+nlist(Config0) ->
+ Config = set_state([reset,{mkdir,"test"}], Config0),
+ Pid = ?config(ftp, Config),
+ {ok, _R1} = ftp:nlist(Pid),
+ {ok, _R2} = ftp:nlist(Pid, id2ftp("test",Config)),
+ %% neither nlist nor ls operates on a directory
+ %% they operate on a pathname, which *can* be a
+ %% directory, but can also be a filename or a group
+ %% of files (including wildcards).
+ case ?config(wildcard_support, Config) of
+ true ->
+ {ok, _R3} = ftp:nlist(Pid, id2ftp("te*",Config));
+ _ ->
+ ok
+ end.
+
+%%-------------------------------------------------------------------------
+rename(doc) -> ["Rename a file."];
+rename(Config0) ->
+ Contents = <<"ftp_SUITE test ...">>,
+ OldFile = "old.txt",
+ NewFile = "new.txt",
+ Config = set_state([reset,{mkfile,OldFile,Contents}], Config0),
+ Pid = ?config(ftp, Config),
+
+ ok = ftp:rename(Pid,
+ id2ftp(OldFile,Config),
+ id2ftp(NewFile,Config)),
+
+ true = (chk_file(NewFile,Contents,Config)
+ and chk_no_file([OldFile],Config)).
+
+
+%%-------------------------------------------------------------------------
+send(doc) -> ["Transfer a file with ftp using send/2."];
+send(Config0) ->
+ Contents = <<"ftp_SUITE test ...">>,
+ SrcDir = "data",
+ File = "file.txt",
+ Config = set_state([reset,{mkfile,[SrcDir,File],Contents}], Config0),
+ Pid = ?config(ftp, Config),
+
+    chk_no_file([File],Config),
+    chk_file([SrcDir,File],Contents,Config),
+
+ ok = ftp:lcd(Pid, id2ftp(SrcDir,Config)),
+ ok = ftp:cd(Pid, id2ftp("",Config)),
+ ok = ftp:send(Pid, File),
+
+ chk_file(File, Contents, Config).
+
+%%-------------------------------------------------------------------------
+send_3(doc) -> ["Transfer a file with ftp using send/3."];
+send_3(Config0) ->
+ Contents = <<"ftp_SUITE test ...">>,
+ Dir = "incoming",
+ File = "file.txt",
+ RemoteFile = "remfile.txt",
+ Config = set_state([reset,{mkfile,File,Contents},{mkdir,Dir}], Config0),
+ Pid = ?config(ftp, Config),
+
+ ok = ftp:cd(Pid, id2ftp(Dir,Config)),
+ ok = ftp:lcd(Pid, id2ftp("",Config)),
+ ok = ftp:send(Pid, File, RemoteFile),
+
+ chk_file([Dir,RemoteFile], Contents, Config).
+
+%%-------------------------------------------------------------------------
+send_bin(doc) -> ["Send a binary."];
+send_bin(Config0) ->
+ BinContents = <<"ftp_SUITE test ...">>,
+ File = "file.txt",
+ Config = set_state([reset], Config0),
+ Pid = ?config(ftp, Config),
+ {error, enotbinary} = ftp:send_bin(Pid, "some string", id2ftp(File,Config)),
+ ok = ftp:send_bin(Pid, BinContents, id2ftp(File,Config)),
+ chk_file(File, BinContents, Config).
+
+%%-------------------------------------------------------------------------
+send_chunk(doc) -> ["Send a binary using chunks."];
+send_chunk(Config0) ->
+ Contents = <<"ftp_SUITE test ...">>,
+ File = "file.txt",
+ Config = set_state([reset,{mkdir,"incoming"}], Config0),
+ Pid = ?config(ftp, Config),
+
+ ok = ftp:send_chunk_start(Pid, id2ftp(File,Config)),
+ {error, echunk} = ftp:cd(Pid, "incoming"),
+ {error, enotbinary} = ftp:send_chunk(Pid, "some string"),
+ ok = ftp:send_chunk(Pid, Contents),
+ ok = ftp:send_chunk(Pid, Contents),
+ ok = ftp:send_chunk_end(Pid),
+ chk_file(File, <<Contents/binary,Contents/binary>>, Config).
+
+%%-------------------------------------------------------------------------
+delete(doc) -> ["Delete a file."];
+delete(Config0) ->
+ Contents = <<"ftp_SUITE test ...">>,
+ File = "file.txt",
+ Config = set_state([reset,{mkfile,File,Contents}], Config0),
+ Pid = ?config(ftp, Config),
+ ok = ftp:delete(Pid, id2ftp(File,Config)),
+ chk_no_file([File], Config).
+
+%%-------------------------------------------------------------------------
+mkdir(doc) -> ["Make a remote directory."];
+mkdir(Config0) ->
+ NewDir = "new_dir",
+ Config = set_state([reset], Config0),
+ Pid = ?config(ftp, Config),
+ ok = ftp:mkdir(Pid, id2ftp(NewDir,Config)),
+ chk_dir([NewDir], Config).
+
+%%-------------------------------------------------------------------------
+rmdir(doc) -> ["Remove a directory."];
+rmdir(Config0) ->
+ Dir = "dir",
+ Config = set_state([reset,{mkdir,Dir}], Config0),
+ Pid = ?config(ftp, Config),
+ ok = ftp:rmdir(Pid, id2ftp(Dir,Config)),
+ chk_no_dir([Dir], Config).
+
+%%-------------------------------------------------------------------------
+append(doc) -> ["Append a local file twice to a remote file"];
+append(Config0) ->
+ SrcFile = "f_src.txt",
+ DstFile = "f_dst.txt",
+ Contents = <<"ftp_SUITE test ...">>,
+ Config = set_state([reset,{mkfile,SrcFile,Contents}], Config0),
+ Pid = ?config(ftp, Config),
+ ok = ftp:append(Pid, id2ftp(SrcFile,Config), id2ftp(DstFile,Config)),
+ ok = ftp:append(Pid, id2ftp(SrcFile,Config), id2ftp(DstFile,Config)),
+ chk_file(DstFile, <<Contents/binary,Contents/binary>>, Config).
+
+%%-------------------------------------------------------------------------
+append_bin(doc) -> ["Append a local file twice to a remote file using append_bin"];
+append_bin(Config0) ->
+ DstFile = "f_dst.txt",
+ Contents = <<"ftp_SUITE test ...">>,
+ Config = set_state([reset], Config0),
+ Pid = ?config(ftp, Config),
+ ok = ftp:append_bin(Pid, Contents, id2ftp(DstFile,Config)),
+ ok = ftp:append_bin(Pid, Contents, id2ftp(DstFile,Config)),
+ chk_file(DstFile, <<Contents/binary,Contents/binary>>, Config).
+
+%%-------------------------------------------------------------------------
+append_chunk(doc) -> ["Append chunks."];
+append_chunk(Config0) ->
+ File = "f_dst.txt",
+ Contents = [<<"ER">>,<<"LE">>,<<"RL">>],
+ Config = set_state([reset], Config0),
+ Pid = ?config(ftp, Config),
+ ok = ftp:append_chunk_start(Pid, id2ftp(File,Config)),
+ {error, enotbinary} = ftp:append_chunk(Pid, binary_to_list(lists:nth(1,Contents))),
+ ok = ftp:append_chunk(Pid,lists:nth(1,Contents)),
+ ok = ftp:append_chunk(Pid,lists:nth(2,Contents)),
+ ok = ftp:append_chunk(Pid,lists:nth(3,Contents)),
+ ok = ftp:append_chunk_end(Pid),
+ chk_file(File, <<"ERLERL">>, Config).
+
+%%-------------------------------------------------------------------------
+recv(doc) -> ["Receive a file using recv/2"];
+recv(Config0) ->
+ File = "f_dst.txt",
+ SrcDir = "a_dir",
+ Contents = <<"ftp_SUITE test ...">>,
+ Config = set_state([reset, {mkfile,[SrcDir,File],Contents}], Config0),
+ Pid = ?config(ftp, Config),
+ ok = ftp:cd(Pid, id2ftp(SrcDir,Config)),
+ ok = ftp:lcd(Pid, id2ftp("",Config)),
+ ok = ftp:recv(Pid, File),
+ chk_file(File, Contents, Config).
+
+%%-------------------------------------------------------------------------
+recv_3(doc) -> ["Receive a file using recv/3"];
+recv_3(Config0) ->
+ DstFile = "f_src.txt",
+ SrcFile = "f_dst.txt",
+ Contents = <<"ftp_SUITE test ...">>,
+ Config = set_state([reset, {mkfile,SrcFile,Contents}], Config0),
+ Pid = ?config(ftp, Config),
+ ok = ftp:cd(Pid, id2ftp("",Config)),
+ ok = ftp:recv(Pid, SrcFile, id2abs(DstFile,Config)),
+ chk_file(DstFile, Contents, Config).
+
+%%-------------------------------------------------------------------------
+recv_bin(doc) -> ["Receive a file as a binary."];
+recv_bin(Config0) ->
+ File = "f_dst.txt",
+ Contents = <<"ftp_SUITE test ...">>,
+ Config = set_state([reset, {mkfile,File,Contents}], Config0),
+ Pid = ?config(ftp, Config),
+ {ok,Received} = ftp:recv_bin(Pid, id2ftp(File,Config)),
+ find_diff(Received, Contents).
+
+%%-------------------------------------------------------------------------
+recv_chunk(doc) -> ["Receive a file using chunk-wise."];
+recv_chunk(Config0) ->
+ File = "big_file.txt",
+ Contents = list_to_binary( lists:duplicate(1000, lists:seq(0,255)) ),
+ Config = set_state([reset, {mkfile,File,Contents}], Config0),
+ Pid = ?config(ftp, Config),
+ {{error, "ftp:recv_chunk_start/2 not called"},_} = recv_chunk(Pid, <<>>),
+ ok = ftp:recv_chunk_start(Pid, id2ftp(File,Config)),
+    {ok, ReceivedContents, _Nchunks} = recv_chunk(Pid, <<>>),
+ find_diff(ReceivedContents, Contents).
+
+recv_chunk(Pid, Acc) -> recv_chunk(Pid, Acc, 0).
+
+recv_chunk(Pid, Acc, N) ->
+ case ftp:recv_chunk(Pid) of
+ ok -> {ok, Acc, N};
+ {ok, Bin} -> recv_chunk(Pid, <<Acc/binary, Bin/binary>>, N+1);
+ Error -> {Error, N}
+ end.
+
+%%-------------------------------------------------------------------------
+type(doc) -> ["Test that we can change btween ASCCI and binary transfer mode"];
+type(Config) ->
+ Pid = ?config(ftp, Config),
+ ok = ftp:type(Pid, ascii),
+ ok = ftp:type(Pid, binary),
+ ok = ftp:type(Pid, ascii),
+ {error, etype} = ftp:type(Pid, foobar).
+
+%%-------------------------------------------------------------------------
+quote(doc) -> [""];
+quote(Config) ->
+ Pid = ?config(ftp, Config),
+ ["257 \""++_Rest] = ftp:quote(Pid, "pwd"), %% 257
+ [_| _] = ftp:quote(Pid, "help"),
+    %% This negative test causes some ftp servers to hang. This test
+ %% is not important for the client, so we skip it for now.
+ %%["425 Can't build data connection: Connection refused."]
+ %% = ftp:quote(Pid, "list"),
+ ok.
+
+
+%%-------------------------------------------------------------------------
+ip_v6_disabled(doc) -> ["Test ipv4 command PORT"];
+ip_v6_disabled(_Config) ->
+ %%% FIXME!!!! What is this???
+ ok.%% send(Config).
+
+%%-------------------------------------------------------------------------
+%% big_one(doc) ->
+%% ["Create a local file and transfer it to the remote host into the "
+%% "the \"incoming\" directory, remove "
+%% "the local file. Then open a new connection; cd to \"incoming\", "
+%% "lcd to the private directory; receive the file; delete the "
+%% "remote file; close connection; check that received file is in "
+%% "the correct directory; cleanup." ];
+%% big_one(Config) ->
+%% Pid = ?config(ftp, Config),
+%% do_recv(Pid, Config).
+
+%% do_recv(Pid, Config) ->
+%% PrivDir = ?config(priv_dir, Config),
+%% File = ?config(file, Config),
+%% Newfile = ?config(new_file, Config),
+%% AbsFile = filename:absname(File, PrivDir),
+%% Contents = "ftp_SUITE:recv test ...",
+%% ok = file:write_file(AbsFile, list_to_binary(Contents)),
+%% ok = ftp:cd(Pid, "incoming"),
+%% ftp:delete(Pid, File), % reset
+%% ftp:lcd(Pid, PrivDir),
+%% ok = ftp:send(Pid, File),
+%% ok = file:delete(AbsFile), % cleanup
+%% test_server:sleep(100),
+%% ok = ftp:lcd(Pid, PrivDir),
+%% ok = ftp:recv(Pid, File),
+%% {ok, Files} = file:list_dir(PrivDir),
+%% true = lists:member(File, Files),
+%% ok = file:delete(AbsFile), % cleanup
+%% ok = ftp:recv(Pid, File, Newfile),
+%% ok = ftp:delete(Pid, File), % cleanup
+%% ok.
+
+
+%%--------------------------------------------------------------------
+%% Internal functions -----------------------------------------------
+%%--------------------------------------------------------------------
+
+make_cert_files(Alg1, Alg2, Prefix, Dir) ->
+ CaInfo = {CaCert,_} = erl_make_certs:make_cert([{key,Alg1}]),
+ {Cert,CertKey} = erl_make_certs:make_cert([{key,Alg2},{issuer,CaInfo}]),
+ CaCertFile = filename:join(Dir, Prefix++"cacerts.pem"),
+ CertFile = filename:join(Dir, Prefix++"cert.pem"),
+ KeyFile = filename:join(Dir, Prefix++"key.pem"),
+ der_to_pem(CaCertFile, [{'Certificate', CaCert, not_encrypted}]),
+ der_to_pem(CertFile, [{'Certificate', Cert, not_encrypted}]),
+ der_to_pem(KeyFile, [CertKey]),
+ ok.
+
+der_to_pem(File, Entries) ->
+ PemBin = public_key:pem_encode(Entries),
+ file:write_file(File, PemBin).
+
+%%--------------------------------------------------------------------
+chk_file(Path=[C|_], ExpectedContents, Config) when 0<C,C=<255 ->
+ chk_file([Path], ExpectedContents, Config);
+
+chk_file(PathList, ExpectedContents, Config) ->
+ Path = filename:join(PathList),
+ AbsPath = id2abs(Path,Config),
+ case file:read_file(AbsPath) of
+ {ok,ExpectedContents} ->
+ true;
+ {ok,ReadContents} ->
+ {error,{diff,Pos,RC,LC}} = find_diff(ReadContents, ExpectedContents, 1),
+ ct:log("Bad contents of ~p.~nGot:~n~p~nExpected:~n~p~nDiff at pos ~p ~nRead: ~p~nExp : ~p",
+ [AbsPath,ReadContents,ExpectedContents,Pos,RC,LC]),
+ ct:fail("Bad contents of ~p", [Path]);
+ {error,Error} ->
+ try begin
+ {ok,CWD} = file:get_cwd(),
+ ct:log("file:get_cwd()=~p~nfiles:~n~p",[CWD,file:list_dir(CWD)])
+ end
+ of _ -> ok
+ catch _:_ ->ok
+ end,
+ ct:fail("Error reading ~p: ~p",[Path,Error])
+ end.
+
+
+chk_no_file(Path=[C|_], Config) when 0<C,C=<255 ->
+ chk_no_file([Path], Config);
+
+chk_no_file(PathList, Config) ->
+ Path = filename:join(PathList),
+ AbsPath = id2abs(Path,Config),
+ case file:read_file(AbsPath) of
+ {error,enoent} ->
+ true;
+ {ok,Contents} ->
+ ct:log("File ~p exists although it shouldn't. Contents:~n~p",
+ [AbsPath,Contents]),
+ ct:fail("File exists: ~p", [Path]);
+ {error,Error} ->
+ ct:fail("Unexpected error reading ~p: ~p",[Path,Error])
+ end.
+
+
+chk_dir(Path=[C|_], Config) when 0<C,C=<255 ->
+ chk_dir([Path], Config);
+
+chk_dir(PathList, Config) ->
+ Path = filename:join(PathList),
+ AbsPath = id2abs(Path,Config),
+ case file:read_file_info(AbsPath) of
+ {ok, #file_info{type=directory}} ->
+ true;
+ {ok, #file_info{type=Type}} ->
+ ct:fail("Expected dir ~p is a ~p",[Path,Type]);
+ {error,Error} ->
+ ct:fail("Expected dir ~p: ~p",[Path,Error])
+ end.
+
+chk_no_dir(PathList, Config) ->
+ Path = filename:join(PathList),
+ AbsPath = id2abs(Path,Config),
+ case file:read_file_info(AbsPath) of
+ {error,enoent} ->
+ true;
+ {ok, #file_info{type=directory}} ->
+ ct:fail("Dir ~p erroneously exists",[Path]);
+ {ok, #file_info{type=Type}} ->
+ ct:fail("~p ~p erroneously exists",[Type,Path]);
+ {error,Error} ->
+ ct:fail("Unexpected error for ~p: ~p",[Path,Error])
+ end.
+
+
+%%--------------------------------------------------------------------
+%%--------------------------------------------------------------------
+%% find a suitable ftpd
%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
+find_executable(Config) ->
+ FTPservers = case ?config(ftpservers,Config) of
+ undefined -> ?default_ftp_servers;
+ L -> L
+ end,
+ case lists:dropwhile(fun not_available/1, FTPservers) of
+ [] -> false;
+ [FTPD_data|_] -> {ok, FTPD_data}
+ end.
+
+not_available({Name,_StartCmd,_ChkUp,_StopCommand,_ConfigUpd,_Host,_Port}) ->
+ os:find_executable(Name) == false.
+
%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- inets:start(),
+%% start/stop of ftpd
+%%
+start_ftpd(Config) ->
+ {Name,StartCmd,_ChkUp,_StopCommand,ConfigRewrite,Host,Port} = ?config(ftpd_data, Config),
+ case StartCmd(Config) of
+ {ok,StartResult} ->
+ [{ftpd_host,Host},
+ {ftpd_port,Port},
+ {ftpd_start_result,StartResult} | ConfigRewrite(Config)];
+ {error,Msg} ->
+ {skip, [Name," not started: ",Msg]}
+ end.
+
+stop_ftpd(Config) ->
+ {_Name,_StartCmd,_ChkUp,StopCommand,_ConfigUpd,_Host,_Port} = ?config(ftpd_data, Config),
+ StopCommand(?config(ftpd_start_result,Config)).
+
+ps_ftpd(Config) ->
+ {_Name,_StartCmd,ChkUp,_StopCommand,_ConfigUpd,_Host,_Port} = ?config(ftpd_data, Config),
+ ct:log( ChkUp(?config(ftpd_start_result,Config)) ).
+
+
+ftpd_running(Config) ->
+ {_Name,_StartCmd,ChkUp,_StopCommand,_ConfigUpd,_Host,_Port} = ?config(ftpd_data, Config),
+ ChkUp(?config(ftpd_start_result,Config)).
+
+%%--------------------------------------------------------------------
+%% start/stop of ftpc
+%%
+ftp__open(Config, Options) ->
+ Host = ?config(ftpd_host,Config),
+ Port = ?config(ftpd_port,Config),
+ ct:log("Host=~p, Port=~p",[Host,Port]),
+ {ok,Pid} = ftp:open(Host, [{port,Port} | Options]),
+ [{ftp,Pid}|Config].
+
+ftp__close(Config) ->
+ ok = ftp:close(?config(ftp,Config)),
Config.
%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
+%%
+split(Cs) -> string:tokens(Cs, "\r\n").
+
+%%--------------------------------------------------------------------
+%%
+find_diff(Bin1, Bin2) ->
+ case find_diff(Bin1, Bin2, 1) of
+ {error, {diff,Pos,RC,LC}} ->
+ ct:log("Contents differ at position ~p.~nOp1: ~p~nOp2: ~p",[Pos,RC,LC]),
+ ct:fail("Contents differ at pos ~p",[Pos]);
+ Other ->
+ Other
+ end.
+
+find_diff(A, A, _) -> true;
+find_diff(<<H,T1/binary>>, <<H,T2/binary>>, Pos) -> find_diff(T1, T2, Pos+1);
+find_diff(RC, LC, Pos) -> {error, {diff, Pos, RC, LC}}.
%%--------------------------------------------------------------------
-end_per_suite(_Config) ->
- inets:stop(),
- ok.
+%%
+set_state(Ops, Config) when is_list(Ops) -> lists:foldl(fun set_state/2, Config, Ops);
+
+set_state(reset, Config) ->
+ rm('*', id2abs("",Config)),
+ PrivDir = ?config(priv_dir,Config),
+ file:set_cwd(PrivDir),
+ ftp:lcd(?config(ftp,Config),PrivDir),
+ set_state({mkdir,""},Config);
+set_state({mkdir,Id}, Config) ->
+ Abs = id2abs(Id, Config),
+ mk_path(Abs),
+ file:make_dir(Abs),
+ Config;
+set_state({mkfile,Id,Contents}, Config) ->
+ Abs = id2abs(Id, Config),
+ mk_path(Abs),
+ ok = file:write_file(Abs, Contents),
+ Config.
+
+mk_path(Abs) -> lists:foldl(fun mk_path/2, [], filename:split(filename:dirname(Abs))).
+
+mk_path(F, Pfx) ->
+ case file:read_file_info(AbsName=filename:join(Pfx,F)) of
+ {ok,#file_info{type=directory}} ->
+ AbsName;
+ {error,eexist} ->
+ AbsName;
+ {error,enoent} ->
+ ok = file:make_dir(AbsName),
+ AbsName
+ end.
+
+
+rm('*', Pfx) ->
+ {ok,Fs} = file:list_dir(Pfx),
+ lists:foreach(fun(F) -> rm(F, Pfx) end, Fs);
+rm(F, Pfx) ->
+ case file:read_file_info(AbsName=filename:join(Pfx,F)) of
+ {ok,#file_info{type=directory}} ->
+ {ok,Fs} = file:list_dir(AbsName),
+ lists:foreach(fun(F1) -> rm(F1,AbsName) end, Fs),
+ ok = file:del_dir(AbsName);
+
+ {ok,#file_info{type=regular}} ->
+ ok = file:delete(AbsName);
+
+ {error,enoent} ->
+ ok
+ end.
+
+%%--------------------------------------------------------------------
+%%
+
+id2abs(Id, Conf) -> filename:join(?config(priv_dir,Conf),ids(Id)).
+id2ftp(Id, Conf) -> (?config(id2ftp,Conf))(ids(Id)).
+id2ftp_result(Id, Conf) -> (?config(id2ftp_result,Conf))(ids(Id)).
+
+ids([[_|_]|_]=Ids) -> filename:join(Ids);
+ids(Id) -> Id.
+
+
+is_expected_absName(Id, File, Conf) -> File = (?config(id2abs,Conf))(Id).
+is_expected_ftpInName(Id, File, Conf) -> File = (?config(id2ftp,Conf))(Id).
+is_expected_ftpOutName(Id, File, Conf) -> File = (?config(id2ftp_result,Conf))(Id).
diff --git a/lib/inets/test/ftp_SUITE_data/ftpd_hosts.skel b/lib/inets/test/ftp_SUITE_data/ftpd_hosts.skel
deleted file mode 100644
index 75096ce687..0000000000
--- a/lib/inets/test/ftp_SUITE_data/ftpd_hosts.skel
+++ /dev/null
@@ -1,18 +0,0 @@
-%% Add a host name in the appropriate list
-%% Each "platform" contains a list of hostnames (a string) that can
-%% be used for testing the ftp client.
-%% The definition below are an example!!
-[{solaris8_sparc, ["solaris8_sparc_dummy1", "solaris8_sparc_dummy2"]},
- {solaris9_sparc, ["solaris9_sparc_dummy1"]},
- {solaris10_sparc, ["solaris10_sparc_dummy1"]},
- {solaris10_x86, ["solaris10_x86_dummy1", "solaris10_x86_dummy2"]},
- {linux_x86, ["linux_x86_dummy1", "linux_x86_dummy2"]},
- {linux_ppc, ["linux_ppc_dummy1"]},
- {macosx_ppc, ["macosx_ppc_dummy1"]},
- {macosx_x86, ["macosx_x86_dummy1", "macosx_x86_dummy2"]},
- {openbsd_x86, []},
- {freebsd_x86, ["freebsd_x86_dummy1"]},
- {netbsd_x86, []},
- {windows_xp, []},
- {windows_2003_server, ["win2003_dummy1"]},
- {ticket_test, ["solaris8_x86_dummy1", "linux_x86_dummy1"]}].
diff --git a/lib/inets/test/ftp_SUITE_data/vsftpd.conf b/lib/inets/test/ftp_SUITE_data/vsftpd.conf
new file mode 100644
index 0000000000..a5584f5916
--- /dev/null
+++ b/lib/inets/test/ftp_SUITE_data/vsftpd.conf
@@ -0,0 +1,26 @@
+
+###
+### Some parameters are given in the vsftpd start command.
+###
+### Typical command-line paramters are such that has a file path
+### component like cert files.
+###
+
+
+listen=YES
+listen_port=9999
+run_as_launching_user=YES
+ssl_enable=YES
+allow_anon_ssl=YES
+
+background=YES
+
+write_enable=YES
+anonymous_enable=YES
+anon_upload_enable=YES
+anon_mkdir_write_enable=YES
+anon_other_write_enable=YES
+anon_world_readable_only=NO
+
+### Shouldn't be necessary....
+require_ssl_reuse=NO
diff --git a/lib/inets/test/ftp_freebsd_x86_test.erl b/lib/inets/test/ftp_freebsd_x86_test.erl
deleted file mode 100644
index 1d66779882..0000000000
--- a/lib/inets/test/ftp_freebsd_x86_test.erl
+++ /dev/null
@@ -1,160 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_freebsd_x86_test).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Freebsd x86 ").
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(freebsd_x86, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initiation before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%% Description: Initiation before each test case
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-%% Test cases starts here.
-%%--------------------------------------------------------------------
-
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
-
-fin(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
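
Like every platform-specific module removed below, the module deleted above is a thin wrapper around ftp_suite_lib: it defines the platform string once and delegates every callback and test case to the shared library. A minimal sketch of that shared pattern follows; the module name, platform string and platform atom are made up for illustration, while test_filenames/0, ftpd_init/2, ftpd_fin/1 and wrapper/3 are the ftp_suite_lib functions already used in the deleted code:

    -module(ftp_someos_somearch_test).        %% hypothetical module name
    -compile(export_all).

    -include_lib("common_test/include/ct.hrl").

    -define(LIB_MOD, ftp_suite_lib).
    -define(CASE_WRAPPER(P, C, F), ?LIB_MOD:wrapper(P, C, F)).
    -define(PLATFORM, "Some OS somearch ").   %% hypothetical platform string

    init_per_suite(Config) ->
        {File, NewFile} = ?LIB_MOD:test_filenames(),
        %% add new entries in front; existing Config entries stay untouched
        ?LIB_MOD:ftpd_init(someos_somearch, [{file, File}, {new_file, NewFile} | Config]).

    end_per_suite(Config) ->
        ?LIB_MOD:ftpd_fin(Config).

    %% each test case is a one-line delegation; the wrapper only adds the
    %% platform string to the reported result
    open(Config)         -> ?CASE_WRAPPER(?PLATFORM, Config, fun ?LIB_MOD:open/1).
    passive_user(Config) -> ?LIB_MOD:passive_user(Config).

The design keeps all real test logic in ftp_suite_lib; the per-platform module contributes only the platform tag and the atom passed to ftpd_init/2.
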
diff --git a/lib/inets/test/ftp_linux_ppc_test.erl b/lib/inets/test/ftp_linux_ppc_test.erl
deleted file mode 100644
index bba97237f1..0000000000
--- a/lib/inets/test/ftp_linux_ppc_test.erl
+++ /dev/null
@@ -1,158 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_linux_ppc_test).
-
-%% Note: This directive should only be used in test suites.
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Linux ppc ").
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(linux_ppc, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initiation before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-%% Test cases start here.
-%%--------------------------------------------------------------------
-
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
diff --git a/lib/inets/test/ftp_linux_x86_test.erl b/lib/inets/test/ftp_linux_x86_test.erl
deleted file mode 100644
index bbefd8231e..0000000000
--- a/lib/inets/test/ftp_linux_x86_test.erl
+++ /dev/null
@@ -1,160 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_linux_x86_test).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Linux x86 ").
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(linux_x86, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initiation before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-%% Test cases start here.
-%%--------------------------------------------------------------------
-
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
-
-fin(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
diff --git a/lib/inets/test/ftp_macosx_ppc_test.erl b/lib/inets/test/ftp_macosx_ppc_test.erl
deleted file mode 100644
index c9f33b8beb..0000000000
--- a/lib/inets/test/ftp_macosx_ppc_test.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_macosx_ppc_test).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Macosx ppc ").
-
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(macosx_ppc, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initiation before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(_X) -> {skipped,"unknown error"}.%?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(_X) -> {skipped,"unknown error"}.%%?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
-
-fin(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
diff --git a/lib/inets/test/ftp_macosx_x86_test.erl b/lib/inets/test/ftp_macosx_x86_test.erl
deleted file mode 100644
index 17b7160b95..0000000000
--- a/lib/inets/test/ftp_macosx_x86_test.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_macosx_x86_test).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Macosx x86 ").
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(macosx_x86, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initiation before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-%% Test cases start here.
-%%--------------------------------------------------------------------
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist([{wildcard_support, false} | X]).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist([{wildcard_support, false} | X]).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
-
-fin(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
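
The two macosx modules above are the only ones that deviate from pure delegation: ftp_macosx_ppc_test skips the IPv6-disabled cases outright, and ftp_macosx_x86_test passes an extra option to the nlist cases. A short sketch of both idioms, with the reason string and option taken from the deleted code above:

    %% skip a case entirely on this platform
    passive_ip_v6_disabled(_Config) ->
        {skipped, "unknown error"}.

    %% pass a platform-specific option on to the shared implementation
    passive_nlist(Config) ->
        ftp_suite_lib:passive_nlist([{wildcard_support, false} | Config]).
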
diff --git a/lib/inets/test/ftp_netbsd_x86_test.erl b/lib/inets/test/ftp_netbsd_x86_test.erl
deleted file mode 100644
index bb474852c5..0000000000
--- a/lib/inets/test/ftp_netbsd_x86_test.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_netbsd_x86_test).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Netbsd x86 ").
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(netbsd_x86, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initiation before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-%% Test cases start here.
-%%--------------------------------------------------------------------
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
-
-fin(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
diff --git a/lib/inets/test/ftp_openbsd_x86_test.erl b/lib/inets/test/ftp_openbsd_x86_test.erl
deleted file mode 100644
index 54fce702a0..0000000000
--- a/lib/inets/test/ftp_openbsd_x86_test.erl
+++ /dev/null
@@ -1,158 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_openbsd_x86_test).
-
-%% Note: This directive should only be used in test suites.
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Openbsd x86 ").
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(openbsd_x86, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initiation before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-%% Test cases start here.
-%%--------------------------------------------------------------------
-
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
diff --git a/lib/inets/test/ftp_solaris10_sparc_test.erl b/lib/inets/test/ftp_solaris10_sparc_test.erl
deleted file mode 100644
index 0da50dc91b..0000000000
--- a/lib/inets/test/ftp_solaris10_sparc_test.erl
+++ /dev/null
@@ -1,161 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_solaris10_sparc_test).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Solaris 10 sparc ").
-
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(solaris10_sparc, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initiation before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-%% Test cases start here.
-%%--------------------------------------------------------------------
-
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
-
-fin(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
diff --git a/lib/inets/test/ftp_solaris10_x86_test.erl b/lib/inets/test/ftp_solaris10_x86_test.erl
deleted file mode 100644
index 3e7045bb4d..0000000000
--- a/lib/inets/test/ftp_solaris10_x86_test.erl
+++ /dev/null
@@ -1,162 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_solaris10_x86_test).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD, ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_), ?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM, "Solaris 10 x86 ").
-
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(solaris10_x86, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initiation before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-%% Test cases start here.
-%%--------------------------------------------------------------------
-
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
-
-fin(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
diff --git a/lib/inets/test/ftp_solaris8_sparc_test.erl b/lib/inets/test/ftp_solaris8_sparc_test.erl
deleted file mode 100644
index 23dbfc8fe3..0000000000
--- a/lib/inets/test/ftp_solaris8_sparc_test.erl
+++ /dev/null
@@ -1,159 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_solaris8_sparc_test).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Solaris 8 sparc ").
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initialization before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(solaris8_sparc, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initialization before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-%% Test cases start here.
-
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
-
-fin(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
diff --git a/lib/inets/test/ftp_solaris9_sparc_test.erl b/lib/inets/test/ftp_solaris9_sparc_test.erl
deleted file mode 100644
index 896e2f497f..0000000000
--- a/lib/inets/test/ftp_solaris9_sparc_test.erl
+++ /dev/null
@@ -1,158 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_solaris9_sparc_test).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Solaris 9 sparc ").
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initialization before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(solaris9_sparc, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initialization before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-%% Test cases start here.
-
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
-
-fin(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
diff --git a/lib/inets/test/ftp_suite_lib.erl b/lib/inets/test/ftp_suite_lib.erl
deleted file mode 100644
index 35f21cc74d..0000000000
--- a/lib/inets/test/ftp_suite_lib.erl
+++ /dev/null
@@ -1,1675 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2013. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_suite_lib).
-
-
--include_lib("test_server/include/test_server.hrl").
--include_lib("test_server/include/test_server_line.hrl").
--include("inets_test_lib.hrl").
-
-%% Test server specific exports
-% -export([init_per_testcase/2, end_per_testcase/2]).
-
--compile(export_all).
-
-
--record(progress, {
- current = 0,
- total
- }).
-
-
-
--define(FTP_USER, "anonymous").
--define(FTP_PASS, passwd()).
--define(FTP_PORT, 21).
-
--define(BAD_HOST, "badhostname").
--define(BAD_USER, "baduser").
--define(BAD_DIR, "baddirectory").
-
--ifdef(ftp_debug_client).
--define(ftp_open(Host, Flags),
- do_ftp_open(Host, [{debug, debug},
- {timeout, timer:seconds(15)} | Flags])).
--else.
--ifdef(ftp_trace_client).
--define(ftp_open(Host, Flags),
- do_ftp_open(Host, [{debug, trace},
- {timeout, timer:seconds(15)} | Flags])).
--else.
--define(ftp_open(Host, Flags),
- do_ftp_open(Host, [{verbose, true},
- {timeout, timer:seconds(15)} | Flags])).
--endif.
--endif.
-
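The three ?ftp_open variants above are selected at compile time: the debug and
trace versions are only used when the ftp_debug_client or ftp_trace_client
macro is defined while this module is compiled. A minimal sketch of enabling
one of them (only the macro names come from the -ifdef tests above; the exact
build invocation is an assumption):

    %% from a shell:
    %%   erlc -Dftp_debug_client ftp_suite_lib.erl
    %% or from the Erlang shell:
    c(ftp_suite_lib, [{d, ftp_debug_client}]).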
-%% -- Tickets --
-
-tickets(doc) ->
- "Test cases for reported bugs";
-tickets(suite) ->
- [ticket_6035].
-
-%% --
-
-ftpd_init(FtpdTag, Config) ->
- %% Get the host name(s) of FTP server
- Hosts =
- case ct:get_config(ftpd_hosts) of
- undefined ->
- ftpd_hosts(data_dir(Config));
- H ->
- H
- end,
- p("ftpd_init -> "
- "~n Hosts: ~p"
- "~n Config: ~p"
- "~n FtpdTag: ~p", [Hosts, Config, FtpdTag]),
- %% Get the first host that actually have a running FTP server
- case lists:keysearch(FtpdTag, 1, Hosts) of
- {value, {_, TagHosts}} when is_list(TagHosts) ->
- inets:start(),
- case (catch get_ftpd_host(TagHosts)) of
- {ok, Host} ->
- inets:stop(),
- [{ftp_remote_host, Host}|Config];
- _ ->
- inets:stop(),
- Reason = lists:flatten(
- io_lib:format("Could not find a valid "
- "FTP server for ~p (~p)",
- [FtpdTag, TagHosts])),
- {skip, Reason}
- end;
- _ ->
- Reason = lists:flatten(
- io_lib:format("No host(s) running FTPD server "
- "for ~p", [FtpdTag])),
- {skip, Reason}
- end.
-
-ftpd_fin(Config) ->
- lists:keydelete(ftp_remote_host, 1, Config).
-
-get_ftpd_host([]) ->
- {error, no_host};
-get_ftpd_host([Host|Hosts]) ->
- p("get_ftpd_host -> entry with"
- "~n Host: ~p"
- "~n", [Host]),
- case (catch ftp:open(Host, [{port, ?FTP_PORT}, {timeout, 20000}])) of
- {ok, Pid} ->
- (catch ftp:close(Pid)),
- {ok, Host};
- _ ->
- get_ftpd_host(Hosts)
- end.
-
-
-%%--------------------------------------------------------------------
-
-dirty_select_ftpd_host(Config) ->
- Hosts =
- case ct:get_config(ftpd_hosts) of
- undefined ->
- ftpd_hosts(data_dir(Config));
- H ->
- H
- end,
- dirty_select_ftpd_host2(Hosts).
-
-dirty_select_ftpd_host2([]) ->
- throw({error, not_found});
-dirty_select_ftpd_host2([{PlatformTag, Hosts} | PlatformHosts]) ->
- case dirty_select_ftpd_host3(Hosts) of
- none ->
- dirty_select_ftpd_host2(PlatformHosts);
- {ok, Host} ->
- {PlatformTag, Host}
- end.
-
-dirty_select_ftpd_host3([]) ->
- none;
-dirty_select_ftpd_host3([Host|Hosts]) when is_list(Host) ->
- case dirty_select_ftpd_host4(Host) of
- true ->
- {ok, Host};
- false ->
- dirty_select_ftpd_host3(Hosts)
- end;
-dirty_select_ftpd_host3([_|Hosts]) ->
- dirty_select_ftpd_host3(Hosts).
-
-%% This is a very simple and dirty test that there is a
-%% (FTP) daemon on the other end.
-dirty_select_ftpd_host4(Host) ->
- Port = 21,
- IpFam = inet,
- Opts = [IpFam, binary, {packet, 0}, {active, false}],
- Timeout = ?SECS(5),
- case gen_tcp:connect(Host, Port, Opts, Timeout) of
- {ok, Sock} ->
- gen_tcp:close(Sock),
- true;
- _Error ->
- false
- end.
-
-
-%%--------------------------------------------------------------------
-
-test_filenames() ->
- {ok, Host} = inet:gethostname(),
- File = Host ++ "_ftp_test.txt",
- NewFile = "new_" ++ File,
- {File, NewFile}.
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(Case, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initialization before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config)
- when (Case =:= open) orelse
- (Case =:= open_port) ->
- put(ftp_testcase, Case),
- io:format(user, "~n~n*** INIT ~w:~w ***~n~n", [?MODULE, Case]),
- inets:start(),
- NewConfig = data_dir(Config),
- watch_dog(NewConfig);
-
-init_per_testcase(Case, Config) ->
- put(ftp_testcase, Case),
- do_init_per_testcase(Case, Config).
-
-do_init_per_testcase(Case, Config)
- when (Case =:= passive_user) ->
- io:format(user, "~n~n*** INIT ~w:~w ***~n~n", [?MODULE,Case]),
- inets:start(),
- NewConfig = close_connection(watch_dog(Config)),
- Host = ftp_host(Config),
- case (catch ?ftp_open(Host, [{mode, passive}])) of
- {ok, Pid} ->
- [{ftp, Pid} | data_dir(NewConfig)];
- {skip, _} = SKIP ->
- SKIP
- end;
-
-do_init_per_testcase(Case, Config)
- when (Case =:= active_user) ->
- io:format(user, "~n~n*** INIT ~w:~w ***~n~n", [?MODULE, Case]),
- inets:start(),
- NewConfig = close_connection(watch_dog(Config)),
- Host = ftp_host(Config),
- case (catch ?ftp_open(Host, [{mode, active}])) of
- {ok, Pid} ->
- [{ftp, Pid} | data_dir(NewConfig)];
- {skip, _} = SKIP ->
- SKIP
- end;
-
-do_init_per_testcase(Case, Config)
- when (Case =:= progress_report_send) orelse
- (Case =:= progress_report_recv) ->
- inets:start(),
- io:format(user, "~n~n*** INIT ~w:~w ***~n~n", [?MODULE, Case]),
- NewConfig = close_connection(watch_dog(Config)),
- Host = ftp_host(Config),
- Opts = [{port, ?FTP_PORT},
- {verbose, true},
- {progress, {?MODULE, progress, #progress{}}}],
- case ftp:open(Host, Opts) of
- {ok, Pid} ->
- ok = ftp:user(Pid, ?FTP_USER, ?FTP_PASS),
- [{ftp, Pid} | data_dir(NewConfig)];
- {skip, _} = SKIP ->
- SKIP
- end;
-
-do_init_per_testcase(Case, Config) ->
- io:format(user,"~n~n*** INIT ~w:~w ***~n~n", [?MODULE, Case]),
- inets:start(),
- NewConfig = close_connection(watch_dog(Config)),
- Host = ftp_host(Config),
- Opts1 =
- if
- ((Case =:= passive_ip_v6_disabled) orelse
- (Case =:= active_ip_v6_disabled)) ->
- [{ipfamily, inet}];
- true ->
- []
- end,
- Opts2 =
- case string:tokens(atom_to_list(Case), [$_]) of
- ["active" | _] ->
- [{mode, active} | Opts1];
- _ ->
- [{mode, passive} | Opts1]
- end,
- case (catch ?ftp_open(Host, Opts2)) of
- {ok, Pid} ->
- ok = ftp:user(Pid, ?FTP_USER, ?FTP_PASS),
- [{ftp, Pid} | data_dir(NewConfig)];
- {skip, _} = SKIP ->
- SKIP
- end.
-
-
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(Case, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(_, Config) ->
- NewConfig = close_connection(Config),
- Dog = ?config(watchdog, NewConfig),
- inets:stop(),
- test_server:timetrap_cancel(Dog),
- ok.
-
-
-%%-------------------------------------------------------------------------
-%% Suites similar for all hosts.
-%%-------------------------------------------------------------------------
-
-passive(suite) ->
- [
- passive_user,
- passive_pwd,
- passive_cd,
- passive_lcd,
- passive_ls,
- passive_nlist,
- passive_rename,
- passive_delete,
- passive_mkdir,
- passive_send,
- passive_send_bin,
- passive_send_chunk,
- passive_append,
- passive_append_bin,
- passive_append_chunk,
- passive_recv,
- passive_recv_bin,
- passive_recv_chunk,
- passive_type,
- passive_quote,
- passive_ip_v6_disabled
- ].
-
-active(suite) ->
- [
- active_user,
- active_pwd,
- active_cd,
- active_lcd,
- active_ls,
- active_nlist,
- active_rename,
- active_delete,
- active_mkdir,
- active_send,
- active_send_bin,
- active_send_chunk,
- active_append,
- active_append_bin,
- active_append_chunk,
- active_recv,
- active_recv_bin,
- active_recv_chunk,
- active_type,
- active_quote,
- active_ip_v6_disabled
- ].
-
-
-
-%%-------------------------------------------------------------------------
-%% Test cases start here.
-%%-------------------------------------------------------------------------
-
-open(doc) ->
- ["Open an ftp connection to a host and close the connection."
- "Also check that !-messages does not disturbe the connection"];
-open(suite) ->
- [];
-open(Config) when is_list(Config) ->
- Host = ftp_host(Config),
- (catch tc_open(Host)).
-
-
-tc_open(Host) ->
- p("tc_open -> entry with"
- "~n Host: ~p", [Host]),
- {ok, Pid} = ?ftp_open(Host, []),
- ok = ftp:close(Pid),
- p("tc_open -> try (ok) open 1"),
- {ok, Pid1} =
- ftp:open({option_list, [{host,Host},
- {port, ?FTP_PORT},
- {flags, [verbose]},
- {timeout, 30000}]}),
- ok = ftp:close(Pid1),
-
- p("tc_open -> try (fail) open 2"),
- {error, ehost} =
- ftp:open({option_list, [{port, ?FTP_PORT}, {flags, [verbose]}]}),
- {ok, Pid2} = ftp:open(Host),
- ok = ftp:close(Pid2),
-
- p("tc_open -> try (ok) open 3"),
- {ok, NewHost} = inet:getaddr(Host, inet),
- {ok, Pid3} = ftp:open(NewHost),
- ftp:user(Pid3, ?FTP_USER, ?FTP_PASS),
- Pid3 ! foobar,
- test_server:sleep(5000),
- {message_queue_len, 0} = process_info(self(), message_queue_len),
- ["200" ++ _] = ftp:quote(Pid3, "NOOP"),
- ok = ftp:close(Pid3),
-
-    %% Bad input for options that have default values is ignored and the
-    %% default is used.
- p("tc_open -> try (ok) open 4"),
- {ok, Pid4} =
- ftp:open({option_list, [{host, Host},
- {port, badarg},
- {flags, [verbose]},
- {timeout, 30000}]}),
- test_server:sleep(100),
- ok = ftp:close(Pid4),
-
- p("tc_open -> try (ok) open 5"),
- {ok, Pid5} =
- ftp:open({option_list, [{host, Host},
- {port, ?FTP_PORT},
- {flags, [verbose]},
- {timeout, -42}]}),
- test_server:sleep(100),
- ok = ftp:close(Pid5),
-
- p("tc_open -> try (ok) open 6"),
- {ok, Pid6} =
- ftp:open({option_list, [{host, Host},
- {port, ?FTP_PORT},
- {flags, [verbose]},
- {mode, cool}]}),
- test_server:sleep(100),
- ok = ftp:close(Pid6),
-
- p("tc_open -> try (ok) open 7"),
- {ok, Pid7} =
- ftp:open(Host, [{port, ?FTP_PORT}, {verbose, true}, {timeout, 30000}]),
- ok = ftp:close(Pid7),
-
- p("tc_open -> try (ok) open 8"),
- {ok, Pid8} =
- ftp:open(Host, ?FTP_PORT),
- ok = ftp:close(Pid8),
-
- p("tc_open -> try (ok) open 9"),
- {ok, Pid9} =
- ftp:open(Host, [{port, ?FTP_PORT},
- {verbose, true},
- {timeout, 30000},
- {dtimeout, -99}]),
- ok = ftp:close(Pid9),
-
- p("tc_open -> try (ok) open 10"),
- {ok, Pid10} =
- ftp:open(Host, [{port, ?FTP_PORT},
- {verbose, true},
- {timeout, 30000},
- {dtimeout, "foobar"}]),
- ok = ftp:close(Pid10),
-
- p("tc_open -> try (ok) open 11"),
- {ok, Pid11} =
- ftp:open(Host, [{port, ?FTP_PORT},
- {verbose, true},
- {timeout, 30000},
- {dtimeout, 1}]),
- ok = ftp:close(Pid11),
-
- p("tc_open -> done"),
- ok.
-
-
-%%-------------------------------------------------------------------------
-
-open_port(doc) ->
- ["Open an ftp connection to a host with given port number "
- "and close the connection."]; % See also OTP-3892
-open_port(suite) ->
- [];
-open_port(Config) when is_list(Config) ->
- Host = ftp_host(Config),
- {ok, Pid} = ftp:open(Host, [{port, ?FTP_PORT}]),
- ok = ftp:close(Pid),
- {error, ehost} = ftp:open(?BAD_HOST, []),
- ok.
-
-
-%%-------------------------------------------------------------------------
-
-passive_user(doc) ->
- ["Open an ftp connection to a host, and logon as anonymous ftp."];
-passive_user(suite) ->
- [];
-passive_user(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- p("Pid: ~p",[Pid]),
- do_user(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-passive_pwd(doc) ->
- ["Test ftp:pwd/1 & ftp:lpwd/1"];
-passive_pwd(suite) ->
- [];
-passive_pwd(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_pwd(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-passive_cd(doc) ->
- ["Open an ftp connection, log on as anonymous ftp, and cd to the"
- "directory \"/pub\" and the to the non-existent directory."];
-passive_cd(suite) ->
- [];
-passive_cd(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_cd(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-passive_lcd(doc) ->
- ["Test api function ftp:lcd/2"];
-passive_lcd(suite) ->
- [];
-passive_lcd(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- PrivDir = ?config(priv_dir, Config),
- do_lcd(Pid, PrivDir).
-
-
-%%-------------------------------------------------------------------------
-
-passive_ls(doc) ->
- ["Open an ftp connection; ls the current directory, and the "
- "\"incoming\" directory. We assume that ls never fails, since "
- "it's output is meant to be read by humans. "];
-passive_ls(suite) ->
- [];
-passive_ls(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_ls(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-passive_nlist(doc) ->
- ["Open an ftp connection; nlist the current directory, and the "
- "\"incoming\" directory. Nlist does not behave consistenly over "
- "operating systems. On some it is an error to have an empty "
- "directory."];
-passive_nlist(suite) ->
- [];
-passive_nlist(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- WildcardSupport = ?config(wildcard_support, Config),
- do_nlist(Pid, WildcardSupport).
-
-
-%%-------------------------------------------------------------------------
-
-passive_rename(doc) ->
- ["Transfer a file to the server, and rename it; then remove it."];
-passive_rename(suite) ->
- [];
-passive_rename(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_rename(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-passive_delete(doc) ->
- ["Transfer a file to the server, and then delete it"];
-passive_delete(suite) ->
- [];
-passive_delete(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_delete(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-passive_mkdir(doc) ->
- ["Make a remote directory, cd to it, go to parent directory, and "
- "remove the directory."];
-passive_mkdir(suite) ->
- [];
-passive_mkdir(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_mkdir(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-passive_send(doc) ->
- ["Create a local file in priv_dir; open an ftp connection to a host; "
- "logon as anonymous ftp; cd to the directory \"incoming\"; lcd to "
- "priv_dir; send the file; get a directory listing and check that "
- "the file is on the list;, delete the remote file; get another listing "
- "and check that the file is not on the list; close the session; "
- "delete the local file."];
-passive_send(suite) ->
- [];
-passive_send(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_send(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-passive_append(doc) ->
- ["Create a local file in priv_dir; open an ftp connection to a host; "
- "logon as anonymous ftp; cd to the directory \"incoming\"; lcd to "
- "priv_dir; append the file to a file at the remote side that not exits"
- "this will create the file at the remote side. Then it append the file "
- "again. When this is done it recive the remote file and control that"
- "the content is doubled in it.After that it will remove the files"];
-passive_append(suite) ->
- [];
-passive_append(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_append(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-passive_send_bin(doc) ->
- ["Open a connection to a host; cd to the directory \"incoming\"; "
- "send a binary; remove file; close the connection."];
-passive_send_bin(suite) ->
- [];
-passive_send_bin(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_send_bin(Pid, Config).
-
-%%-------------------------------------------------------------------------
-
-passive_append_bin(doc) ->
- ["Open a connection to a host; cd to the directory \"incoming\"; "
- "append a binary twice; get the file and compare the content"
- "remove file; close the connection."];
-passive_append_bin(suite) ->
- [];
-passive_append_bin(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_append_bin(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-passive_send_chunk(doc) ->
- ["Open a connection to a host; cd to the directory \"incoming\"; "
- "send chunks; remove file; close the connection."];
-passive_send_chunk(suite) ->
- [];
-passive_send_chunk(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_send_chunk(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-passive_append_chunk(doc) ->
- ["Open a connection to a host; cd to the directory \"incoming\"; "
- "append chunks;control content remove file; close the connection."];
-passive_append_chunk(suite) ->
- [];
-passive_append_chunk(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_append_chunk(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-passive_recv(doc) ->
- ["Create a local file and transfer it to the remote host into the "
- "the \"incoming\" directory, remove "
- "the local file. Then open a new connection; cd to \"incoming\", "
- "lcd to the private directory; receive the file; delete the "
- "remote file; close connection; check that received file is in "
- "the correct directory; cleanup." ];
-passive_recv(suite) ->
- [];
-passive_recv(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_recv(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-passive_recv_bin(doc) ->
- ["Send a binary to the remote host; and retreive "
- "the file; then remove the file."];
-passive_recv_bin(suite) ->
- [];
-passive_recv_bin(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_recv_bin(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-passive_recv_chunk(doc) ->
- ["Send a binary to the remote host; Connect again, and retreive "
- "the file; then remove the file."];
-passive_recv_chunk(suite) ->
- [];
-passive_recv_chunk(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_recv_chunk(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-passive_type(doc) ->
- ["Test that we can change btween ASCCI and binary transfer mode"];
-passive_type(suite) ->
- [];
-passive_type(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_type(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-passive_quote(doc) ->
- [""];
-passive_quote(suite) ->
- [];
-passive_quote(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_quote(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-passive_ip_v6_disabled(doc) ->
- ["Test ipv4 command PASV"];
-passive_ip_v6_disabled(suite) ->
- [];
-passive_ip_v6_disabled(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_send(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_user(doc) ->
- ["Open an ftp connection to a host, and logon as anonymous ftp."];
-active_user(suite) ->
- [];
-active_user(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_user(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-active_pwd(doc) ->
- ["Test ftp:pwd/1 & ftp:lpwd/1"];
-active_pwd(suite) ->
- [];
-active_pwd(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_pwd(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-active_cd(doc) ->
- ["Open an ftp connection, log on as anonymous ftp, and cd to the"
- "directory \"/pub\" and to a non-existent directory."];
-active_cd(suite) ->
- [];
-active_cd(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_cd(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-active_lcd(doc) ->
- ["Test api function ftp:lcd/2"];
-active_lcd(suite) ->
- [];
-active_lcd(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- PrivDir = ?config(priv_dir, Config),
- do_lcd(Pid, PrivDir).
-
-
-%%-------------------------------------------------------------------------
-
-active_ls(doc) ->
- ["Open an ftp connection; ls the current directory, and the "
- "\"incoming\" directory. We assume that ls never fails, since "
- "it's output is meant to be read by humans. "];
-active_ls(suite) ->
- [];
-active_ls(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_ls(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-active_nlist(doc) ->
- ["Open an ftp connection; nlist the current directory, and the "
- "\"incoming\" directory. Nlist does not behave consistenly over "
- "operating systems. On some it is an error to have an empty "
- "directory."];
-active_nlist(suite) ->
- [];
-active_nlist(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- WildcardSupport = ?config(wildcard_support, Config),
- do_nlist(Pid, WildcardSupport).
-
-
-%%-------------------------------------------------------------------------
-
-active_rename(doc) ->
- ["Transfer a file to the server, and rename it; then remove it."];
-active_rename(suite) ->
- [];
-active_rename(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_rename(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_delete(doc) ->
- ["Transfer a file to the server, and then delete it"];
-active_delete(suite) ->
- [];
-active_delete(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_delete(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_mkdir(doc) ->
- ["Make a remote directory, cd to it, go to parent directory, and "
- "remove the directory."];
-active_mkdir(suite) ->
- [];
-active_mkdir(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_mkdir(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-active_send(doc) ->
- ["Create a local file in priv_dir; open an ftp connection to a host; "
- "logon as anonymous ftp; cd to the directory \"incoming\"; lcd to "
- "priv_dir; send the file; get a directory listing and check that "
- "the file is on the list;, delete the remote file; get another listing "
- "and check that the file is not on the list; close the session; "
- "delete the local file."];
-active_send(suite) ->
- [];
-active_send(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_send(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_append(doc) ->
- ["Create a local file in priv_dir; open an ftp connection to a host; "
- "logon as anonymous ftp; cd to the directory \"incoming\"; lcd to "
- "priv_dir; append the file to a file at the remote side that not exits"
- "this will create the file at the remote side. Then it append the file "
- "again. When this is done it recive the remote file and control that"
- "the content is doubled in it.After that it will remove the files"];
-active_append(suite) ->
- [];
-active_append(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_append(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_send_bin(doc) ->
- ["Open a connection to a host; cd to the directory \"incoming\"; "
- "send a binary; remove file; close the connection."];
-active_send_bin(suite) ->
- [];
-active_send_bin(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_send_bin(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_append_bin(doc) ->
- ["Open a connection to a host; cd to the directory \"incoming\"; "
- "append a binary twice; get the file and compare the content"
- "remove file; close the connection."];
-active_append_bin(suite) ->
- [];
-active_append_bin(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_append_bin(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_send_chunk(doc) ->
- ["Open a connection to a host; cd to the directory \"incoming\"; "
- "send chunks; remove file; close the connection."];
-active_send_chunk(suite) ->
- [];
-active_send_chunk(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_send_chunk(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_append_chunk(doc) ->
- ["Open a connection to a host; cd to the directory \"incoming\"; "
- "append chunks;control content remove file; close the connection."];
-active_append_chunk(suite) ->
- [];
-active_append_chunk(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_append_chunk(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_recv(doc) ->
- ["Create a local file and transfer it to the remote host into the "
- "the \"incoming\" directory, remove "
- "the local file. Then open a new connection; cd to \"incoming\", "
- "lcd to the private directory; receive the file; delete the "
- "remote file; close connection; check that received file is in "
- "the correct directory; cleanup." ];
-active_recv(suite) ->
- [];
-active_recv(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_recv(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_recv_bin(doc) ->
- ["Send a binary to the remote host; and retreive "
- "the file; then remove the file."];
-active_recv_bin(suite) ->
- [];
-active_recv_bin(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_recv_bin(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_recv_chunk(doc) ->
- ["Send a binary to the remote host; Connect again, and retreive "
- "the file; then remove the file."];
-active_recv_chunk(suite) ->
- [];
-active_recv_chunk(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_recv_chunk(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-active_type(doc) ->
- ["Test that we can change btween ASCCI and binary transfer mode"];
-active_type(suite) ->
- [];
-active_type(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_type(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-active_quote(doc) ->
- [""];
-active_quote(suite) ->
- [];
-active_quote(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_quote(Pid).
-
-
-%%-------------------------------------------------------------------------
-
-active_ip_v6_disabled(doc) ->
- ["Test ipv4 command PORT"];
-active_ip_v6_disabled(suite) ->
- [];
-active_ip_v6_disabled(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- do_send(Pid, Config).
-
-
-%%-------------------------------------------------------------------------
-
-api_missuse(doc) ->
-    ["Test the behaviour of the ftp process if the API is abused"];
-api_missuse(suite) -> [];
-api_missuse(Config) when is_list(Config) ->
- p("api_missuse -> entry"),
- Flag = process_flag(trap_exit, true),
- Pid = ?config(ftp, Config),
- Host = ftp_host(Config),
-
-    %% Serious programming fault, the connection will be shut down
- p("api_missuse -> verify bad call termination (~p)", [Pid]),
- case (catch gen_server:call(Pid, {self(), foobar, 10}, infinity)) of
- {error, {connection_terminated, 'API_violation'}} ->
- ok;
- Unexpected1 ->
- exit({unexpected_result, Unexpected1})
- end,
- test_server:sleep(500),
- undefined = process_info(Pid, status),
-
- p("api_missuse -> start new client"),
- {ok, Pid2} = ?ftp_open(Host, []),
-    %% Serious programming fault, the connection will be shut down
- p("api_missuse -> verify bad cast termination"),
- gen_server:cast(Pid2, {self(), foobar, 10}),
- test_server:sleep(500),
- undefined = process_info(Pid2, status),
-
- p("api_missuse -> start new client"),
- {ok, Pid3} = ?ftp_open(Host, []),
-    %% Could be an innocent mistake; the connection lives.
- p("api_missuse -> verify bad bang"),
- Pid3 ! foobar,
- test_server:sleep(500),
- {status, _} = process_info(Pid3, status),
- process_flag(trap_exit, Flag),
- p("api_missuse -> done"),
- ok.
-
-
-%%-------------------------------------------------------------------------
-
-not_owner(doc) ->
- ["Test what happens if a process that not owns the connection tries "
- "to use it"];
-not_owner(suite) ->
- [];
-not_owner(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- OtherPid = spawn_link(?MODULE, not_owner, [Pid, self()]),
-
- receive
- {OtherPid, ok} ->
- {ok, _} = ftp:pwd(Pid)
- end,
- ok.
-
-not_owner(FtpPid, Pid) ->
- {error, not_connection_owner} = ftp:pwd(FtpPid),
- ftp:close(FtpPid),
- test_server:sleep(100),
- Pid ! {self(), ok}.
-
-
-%%-------------------------------------------------------------------------
-
-
-progress_report(doc) ->
- ["Solaris 8 sparc test the option progress."];
-progress_report(suite) ->
- [progress_report_send, progress_report_recv].
-
-
-%% --
-
-progress_report_send(doc) ->
- ["Test the option progress for ftp:send/[2,3]"];
-progress_report_send(suite) ->
- [];
-progress_report_send(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- ReportPid =
- spawn_link(?MODULE, progress_report_receiver_init, [self(), 1]),
- do_send(Pid, Config),
- receive
- {ReportPid, ok} ->
- ok
- end.
-
-
-%% --
-
-progress_report_recv(doc) ->
- ["Test the option progress for ftp:recv/[2,3]"];
-progress_report_recv(suite) ->
- [];
-progress_report_recv(Config) when is_list(Config) ->
- Pid = ?config(ftp, Config),
- ReportPid =
- spawn_link(?MODULE, progress_report_receiver_init, [self(), 3]),
- do_recv(Pid, Config),
- receive
- {ReportPid, ok} ->
- ok
- end,
- ok.
-
-progress(#progress{} = Progress , _File, {file_size, Total}) ->
- progress_report_receiver ! start,
- Progress#progress{total = Total};
-progress(#progress{total = Total, current = Current}
- = Progress, _File, {transfer_size, 0}) ->
- progress_report_receiver ! finish,
- case Total of
- unknown ->
- ok;
- Current ->
- ok;
- _ ->
- test_server:fail({error, {progress, {total, Total},
- {current, Current}}})
- end,
- Progress;
-progress(#progress{current = Current} = Progress, _File,
- {transfer_size, Size}) ->
- progress_report_receiver ! update,
- Progress#progress{current = Current + Size}.
-
-progress_report_receiver_init(Pid, N) ->
- register(progress_report_receiver, self()),
- receive
- start ->
- ok
- end,
- progress_report_receiver_loop(Pid, N-1).
-
-progress_report_receiver_loop(Pid, N) ->
- receive
- update ->
- progress_report_receiver_loop(Pid, N);
- finish when N =:= 0 ->
- Pid ! {self(), ok};
- finish ->
- Pid ! {self(), ok},
- receive
- start ->
- ok
- end,
- progress_report_receiver_loop(Pid, N-1)
- end.
-
-
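The progress/3 clauses above show the callback contract behind the
{progress, {Module, Function, InitialState}} option used in
do_init_per_testcase/2: the ftp client calls the function with the current
state, the file name, and either {file_size, Total} or {transfer_size, Bytes},
where a transfer size of 0 marks the end of a transfer, and the returned term
is used as the state for the next call (just as the #progress{} updates above
rely on). A minimal callback of the same shape, for illustration only (the
function name and the io-based reporting are assumptions):

    log_progress(State, File, {file_size, Total}) ->
        io:format("~s: ~w bytes to transfer~n", [File, Total]),
        State;
    log_progress(State, File, {transfer_size, 0}) ->
        io:format("~s: transfer done~n", [File]),
        State;
    log_progress(State, File, {transfer_size, Bytes}) ->
        io:format("~s: ~w bytes transferred~n", [File, Bytes]),
        State.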
-%%-------------------------------------------------------------------------
-%% Ticket test cases
-%%-------------------------------------------------------------------------
-
-ticket_6035(doc) -> ["Test that owning process that exits with reason "
- "'shutdown' does not cause an error message."];
-ticket_6035(suite) -> [];
-ticket_6035(Config) ->
- p("ticket_6035 -> entry with"
- "~n Config: ~p", [Config]),
- PrivDir = ?config(priv_dir, Config),
- LogFile = filename:join([PrivDir,"ticket_6035.log"]),
- try
- begin
- p("ticket_6035 -> select ftpd host"),
- Host = dirty_select_ftpd_host(Config),
- p("ticket_6035 -> ftpd host selected (~p) => now spawn ftp owner", [Host]),
- Pid = spawn(?MODULE, open_wait_6035, [Host, self()]),
- p("ticket_6035 -> waiter spawned: ~p => now open error logfile (~p)",
- [Pid, LogFile]),
- error_logger:logfile({open, LogFile}),
- p("ticket_6035 -> error logfile open => now kill waiter process"),
- true = kill_ftp_proc_6035(Pid, LogFile),
- p("ticket_6035 -> waiter process killed => now close error logfile"),
- error_logger:logfile(close),
- p("ticket_6035 -> done", []),
- ok
- end
- catch
- throw:{error, not_found} ->
- {skip, "No available FTP servers"}
- end.
-
-kill_ftp_proc_6035(Pid, LogFile) ->
- p("kill_ftp_proc_6035 -> entry"),
- receive
- open ->
- p("kill_ftp_proc_6035 -> received open => now issue shutdown"),
- exit(Pid, shutdown),
- kill_ftp_proc_6035(Pid, LogFile);
- {open_failed, Reason} ->
- p("kill_ftp_proc_6035 -> received open_failed"
- "~n Reason: ~p", [Reason]),
-            exit({skip, {failed_opening_server_connection, Reason}})
- after
- 5000 ->
- p("kill_ftp_proc_6035 -> timeout"),
- is_error_report_6035(LogFile)
- end.
-
-open_wait_6035({Tag, FtpServer}, From) ->
- p("open_wait_6035 -> try connect to [~p] ~s for ~p", [Tag, FtpServer, From]),
- case ftp:open(FtpServer, [{timeout, timer:seconds(15)}]) of
- {ok, Pid} ->
- p("open_wait_6035 -> connected (~p), now login", [Pid]),
- LoginResult = ftp:user(Pid,"anonymous","kldjf"),
- p("open_wait_6035 -> login result: ~p", [LoginResult]),
- From ! open,
- receive
- dummy ->
- p("open_wait_6035 -> received dummy"),
- ok
- after
- 10000 ->
- p("open_wait_6035 -> timeout"),
- ok
- end,
- p("open_wait_6035 -> done(ok)"),
- ok;
- {error, Reason} ->
- p("open_wait_6035 -> open failed"
- "~n Reason: ~p", [Reason]),
- From ! {open_failed, {Reason, FtpServer}},
- p("open_wait_6035 -> done(error)"),
- ok
- end.
-
-is_error_report_6035(LogFile) ->
- p("is_error_report_6035 -> entry"),
- Res =
- case file:read_file(LogFile) of
- {ok, Bin} ->
- Txt = binary_to_list(Bin),
- p("is_error_report_6035 -> logfile read: ~n~p", [Txt]),
- read_log_6035(Txt);
- _ ->
- false
- end,
- p("is_error_report_6035 -> logfile read result: "
- "~n ~p", [Res]),
- %% file:delete(LogFile),
- Res.
-
-read_log_6035("=ERROR REPORT===="++_Rest) ->
- p("read_log_6035 -> ERROR REPORT detected"),
- true;
-read_log_6035([H|T]) ->
- p("read_log_6035 -> OTHER: "
- "~p", [H]),
- read_log_6035(T);
-read_log_6035([]) ->
- p("read_log_6035 -> done"),
- false.
-
-
-%%--------------------------------------------------------------------
-%% Internal functions
-%%--------------------------------------------------------------------
-do_user(Pid) ->
- {error, euser} = ftp:user(Pid, ?BAD_USER, ?FTP_PASS),
- ok = ftp:user(Pid, ?FTP_USER, ?FTP_PASS),
- ok.
-
-do_pwd(Pid) ->
- {ok, "/"} = ftp:pwd(Pid),
- {ok, Path} = ftp:lpwd(Pid),
- {ok, Path} = file:get_cwd(),
- ok.
-
-do_cd(Pid) ->
- ok = ftp:cd(Pid, "/pub"),
- {error, epath} = ftp:cd(Pid, ?BAD_DIR),
- ok.
-
-do_lcd(Pid, Dir) ->
- ok = ftp:lcd(Pid, Dir),
- {error, epath} = ftp:lcd(Pid, ?BAD_DIR),
- ok.
-
-
-do_ls(Pid) ->
- {ok, _} = ftp:ls(Pid),
- {ok, _} = ftp:ls(Pid, "incoming"),
- %% neither nlist nor ls operates on a directory
- %% they operate on a pathname, which *can* be a
- %% directory, but can also be a filename or a group
- %% of files (including wildcards).
- {ok, _} = ftp:ls(Pid, "incom*"),
- ok.
-
-do_nlist(Pid, WildcardSupport) ->
- {ok, _} = ftp:nlist(Pid),
- {ok, _} = ftp:nlist(Pid, "incoming"),
- %% neither nlist nor ls operates on a directory
- %% they operate on a pathname, which *can* be a
- %% directory, but can also be a filename or a group
- %% of files (including wildcards).
- case WildcardSupport of
- true ->
- {ok, _} = ftp:nlist(Pid, "incom*"),
- ok;
- _ ->
- ok
- end.
-
-do_rename(Pid, Config) ->
- PrivDir = ?config(priv_dir, Config),
- LFile = ?config(file, Config),
- NewLFile = ?config(new_file, Config),
- AbsLFile = filename:absname(LFile, PrivDir),
- Contents = "ftp_SUITE test ...",
- ok = file:write_file(AbsLFile, list_to_binary(Contents)),
- ok = ftp:cd(Pid, "incoming"),
- ok = ftp:lcd(Pid, PrivDir),
- ftp:delete(Pid, LFile), % reset
- ftp:delete(Pid, NewLFile), % reset
- ok = ftp:send(Pid, LFile),
- {error, epath} = ftp:rename(Pid, NewLFile, LFile),
- ok = ftp:rename(Pid, LFile, NewLFile),
- ftp:delete(Pid, LFile), % cleanup
- ftp:delete(Pid, NewLFile), % cleanup
- ok.
-
-do_delete(Pid, Config) ->
- PrivDir = ?config(priv_dir, Config),
- LFile = ?config(file, Config),
- AbsLFile = filename:absname(LFile, PrivDir),
- Contents = "ftp_SUITE test ...",
- ok = file:write_file(AbsLFile, list_to_binary(Contents)),
- ok = ftp:cd(Pid, "incoming"),
- ok = ftp:lcd(Pid, PrivDir),
- ftp:delete(Pid,LFile), % reset
- ok = ftp:send(Pid, LFile),
- ok = ftp:delete(Pid,LFile),
- ok.
-
-do_mkdir(Pid) ->
- {A, B, C} = erlang:now(),
- NewDir = "nisse_" ++ integer_to_list(A) ++ "_" ++
- integer_to_list(B) ++ "_" ++ integer_to_list(C),
- ok = ftp:cd(Pid, "incoming"),
- {ok, CurrDir} = ftp:pwd(Pid),
- ok = ftp:mkdir(Pid, NewDir),
- ok = ftp:cd(Pid, NewDir),
- ok = ftp:cd(Pid, CurrDir),
- ok = ftp:rmdir(Pid, NewDir),
- ok.
-
-do_send(Pid, Config) ->
- PrivDir = ?config(priv_dir, Config),
- LFile = ?config(file, Config),
- RFile = LFile ++ ".remote",
- AbsLFile = filename:absname(LFile, PrivDir),
- Contents = "ftp_SUITE test ...",
- ok = file:write_file(AbsLFile, list_to_binary(Contents)),
- ok = ftp:cd(Pid, "incoming"),
- ok = ftp:lcd(Pid, PrivDir),
- ok = ftp:send(Pid, LFile, RFile),
- {ok, RFilesString} = ftp:nlist(Pid),
- RFiles = split(RFilesString),
- true = lists:member(RFile, RFiles),
- ok = ftp:delete(Pid, RFile),
- case ftp:nlist(Pid) of
- {error, epath} ->
- ok; % No files
- {ok, RFilesString1} ->
- RFiles1 = split(RFilesString1),
- false = lists:member(RFile, RFiles1)
- end,
- ok = file:delete(AbsLFile).
-
-do_append(Pid, Config) ->
- PrivDir = ?config(priv_dir, Config),
- LFile = ?config(file, Config),
- RFile = ?config(new_file, Config),
- AbsLFile = filename:absname(LFile, PrivDir),
- Contents = "ftp_SUITE test:appending\r\n",
-
- ok = file:write_file(AbsLFile, list_to_binary(Contents)),
- ok = ftp:cd(Pid, "incoming"),
- ok = ftp:lcd(Pid, PrivDir),
-
- %% remove files from earlier failed test case
- ftp:delete(Pid, RFile),
- ftp:delete(Pid, LFile),
-
- ok = ftp:append(Pid, LFile, RFile),
- ok = ftp:append(Pid, LFile, RFile),
- ok = ftp:append(Pid, LFile),
-
- %% Control the contents of the file
- {ok, Bin1} = ftp:recv_bin(Pid, RFile),
- ok = ftp:delete(Pid, RFile),
- ok = file:delete(AbsLFile),
- ok = check_content(binary_to_list(Bin1), Contents, double),
-
- {ok, Bin2} = ftp:recv_bin(Pid, LFile),
- ok = ftp:delete(Pid, LFile),
- ok = check_content(binary_to_list(Bin2), Contents, singel),
- ok.
-
-do_send_bin(Pid, Config) ->
- File = ?config(file, Config),
- Contents = "ftp_SUITE test ...",
- Bin = list_to_binary(Contents),
- ok = ftp:cd(Pid, "incoming"),
- {error, enotbinary} = ftp:send_bin(Pid, Contents, File),
- ok = ftp:send_bin(Pid, Bin, File),
- {ok, RFilesString} = ftp:nlist(Pid),
- RFiles = split(RFilesString),
- true = lists:member(File, RFiles),
- ok = ftp:delete(Pid, File),
- ok.
-
-do_append_bin(Pid, Config) ->
- File = ?config(file, Config),
- Contents = "ftp_SUITE test ...",
- Bin = list_to_binary(Contents),
- ok = ftp:cd(Pid, "incoming"),
- {error, enotbinary} = ftp:append_bin(Pid, Contents, File),
- ok = ftp:append_bin(Pid, Bin, File),
- ok = ftp:append_bin(Pid, Bin, File),
- %% Control the contents of the file
- {ok, Bin2} = ftp:recv_bin(Pid, File),
- ok = ftp:delete(Pid,File),
- ok = check_content(binary_to_list(Bin2),binary_to_list(Bin), double).
-
-do_send_chunk(Pid, Config) ->
- File = ?config(file, Config),
- Contents = "ftp_SUITE test ...",
- Bin = list_to_binary(Contents),
- ok = ftp:cd(Pid, "incoming"),
- ok = ftp:send_chunk_start(Pid, File),
- {error, echunk} = ftp:cd(Pid, "incoming"),
- {error, enotbinary} = ftp:send_chunk(Pid, Contents),
- ok = ftp:send_chunk(Pid, Bin),
- ok = ftp:send_chunk(Pid, Bin),
- ok = ftp:send_chunk_end(Pid),
- {ok, RFilesString} = ftp:nlist(Pid),
- RFiles = split(RFilesString),
- true = lists:member(File, RFiles),
- ok = ftp:delete(Pid, File),
- ok.
-
-do_append_chunk(Pid, Config) ->
- File = ?config(file, Config),
- Contents = ["ER","LE","RL"],
- ok = ftp:cd(Pid, "incoming"),
- ok = ftp:append_chunk_start(Pid, File),
- {error, enotbinary} = ftp:append_chunk(Pid, lists:nth(1,Contents)),
- ok = ftp:append_chunk(Pid,list_to_binary(lists:nth(1,Contents))),
- ok = ftp:append_chunk(Pid,list_to_binary(lists:nth(2,Contents))),
- ok = ftp:append_chunk(Pid,list_to_binary(lists:nth(3,Contents))),
- ok = ftp:append_chunk_end(Pid),
-    %% Control the contents of the file
- {ok, Bin2} = ftp:recv_bin(Pid, File),
- ok = check_content(binary_to_list(Bin2),"ERL", double),
- ok = ftp:delete(Pid, File),
- ok.
-
-do_recv(Pid, Config) ->
- PrivDir = ?config(priv_dir, Config),
- File = ?config(file, Config),
- Newfile = ?config(new_file, Config),
- AbsFile = filename:absname(File, PrivDir),
- Contents = "ftp_SUITE:recv test ...",
- ok = file:write_file(AbsFile, list_to_binary(Contents)),
- ok = ftp:cd(Pid, "incoming"),
- ftp:delete(Pid, File), % reset
- ftp:lcd(Pid, PrivDir),
- ok = ftp:send(Pid, File),
- ok = file:delete(AbsFile), % cleanup
- test_server:sleep(100),
- ok = ftp:lcd(Pid, PrivDir),
- ok = ftp:recv(Pid, File),
- {ok, Files} = file:list_dir(PrivDir),
- true = lists:member(File, Files),
- ok = file:delete(AbsFile), % cleanup
- ok = ftp:recv(Pid, File, Newfile),
- ok = ftp:delete(Pid, File), % cleanup
- ok.
-
-do_recv_bin(Pid, Config) ->
- File = ?config(file, Config),
- Contents1 = "ftp_SUITE test ...",
- Bin1 = list_to_binary(Contents1),
- ok = ftp:cd(Pid, "incoming"),
- ok = ftp:send_bin(Pid, Bin1, File),
- test_server:sleep(100),
- {ok, Bin2} = ftp:recv_bin(Pid, File),
- ok = ftp:delete(Pid, File), % cleanup
- Contents2 = binary_to_list(Bin2),
- Contents1 = Contents2,
- ok.
-
-do_recv_chunk(Pid, Config) ->
- File = ?config(file, Config),
- Data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
- "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
- "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
- "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
- "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
- "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
- "GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"
- "HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH"
- "IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII",
-
- Contents1 = lists:flatten(lists:duplicate(10, Data)),
- Bin1 = list_to_binary(Contents1),
- ok = ftp:cd(Pid, "incoming"),
- ok = ftp:type(Pid, binary),
- ok = ftp:send_bin(Pid, Bin1, File),
- test_server:sleep(100),
- {error, "ftp:recv_chunk_start/2 not called"} = recv_chunk(Pid, <<>>),
- ok = ftp:recv_chunk_start(Pid, File),
- {ok, Contents2} = recv_chunk(Pid, <<>>),
- ok = ftp:delete(Pid, File), % cleanup
- ok = find_diff(Contents2, Contents1, 1),
- ok.
-
-do_type(Pid) ->
- ok = ftp:type(Pid, ascii),
- ok = ftp:type(Pid, binary),
- ok = ftp:type(Pid, ascii),
- {error, etype} = ftp:type(Pid, foobar),
- ok.
-
-do_quote(Pid) ->
- ["257 \"/\""++_Rest] = ftp:quote(Pid, "pwd"), %% 257
- [_| _] = ftp:quote(Pid, "help"),
-    %% This negative test causes some ftp servers to hang. This test
- %% is not important for the client, so we skip it for now.
- %%["425 Can't build data connection: Connection refused."]
- %% = ftp:quote(Pid, "list"),
- ok.
-
-watch_dog(Config) ->
- Dog = test_server:timetrap(inets_test_lib:minutes(1)),
- NewConfig = lists:keydelete(watchdog, 1, Config),
- [{watchdog, Dog} | NewConfig].
-
- close_connection(Config) ->
- case ?config(ftp, Config) of
- Pid when is_pid(Pid) ->
- ok = ftp:close(Pid),
- lists:delete({ftp, Pid}, Config);
- _ ->
- Config
- end.
-
-ftp_host(Config) ->
- case ?config(ftp_remote_host, Config) of
- undefined ->
- exit({skip, "No host specified"});
- Host ->
- Host
- end.
-
-check_content(RContent, LContent, Amount) ->
- LContent2 = case Amount of
- double ->
- LContent ++ LContent;
- singel ->
- LContent
- end,
- case string:equal(RContent, LContent2) of
- true ->
- ok;
- false ->
- %% Find where the diff is
- Where = find_diff(RContent, LContent2, 1),
- Where
- end.
-
-find_diff(A, A, _) ->
- ok;
-find_diff([H|T1], [H|T2], Pos) ->
- find_diff(T1, T2, Pos+1);
-find_diff(RC, LC, Pos) ->
- {error, {diff, Pos, RC, LC}}.
-
-recv_chunk(Pid, Acc) ->
- case ftp:recv_chunk(Pid) of
- ok ->
- {ok, binary_to_list(Acc)};
- {ok, Bin} ->
- recv_chunk(Pid, <<Acc/binary, Bin/binary>>);
- Error ->
- Error
- end.
-
-split(Cs) ->
- split(Cs, [], []).
-
-split([$\r, $\n| Cs], I, Is) ->
- split(Cs, [], [lists:reverse(I)| Is]);
-split([C| Cs], I, Is) ->
- split(Cs, [C| I], Is);
-split([], I, Is) ->
- lists:reverse([lists:reverse(I)| Is]).
-
-do_ftp_open(Host, Opts) ->
- p("do_ftp_open -> entry with"
- "~n Host: ~p"
- "~n Opts: ~p", [Host, Opts]),
- case ftp:open(Host, Opts) of
- {ok, _} = OK ->
- OK;
- {error, Reason} ->
- Str =
- lists:flatten(
- io_lib:format("Unable to reach test FTP server ~p (~p)",
- [Host, Reason])),
- throw({skip, Str})
- end.
-
-
-passwd() ->
- Host =
- case inet:gethostname() of
- {ok, H} ->
- H;
- _ ->
- "localhost"
- end,
- "ftp_SUITE@" ++ Host.
-
-ftpd_hosts(Config) ->
- DataDir = ?config(data_dir, Config),
- FileName = filename:join([DataDir, "../ftp_SUITE_data/", ftpd_hosts]),
- p("FileName: ~p", [FileName]),
- case file:consult(FileName) of
- {ok, [Hosts]} when is_list(Hosts) ->
- Hosts;
- _ ->
- []
- end.
-
-wrapper(Prefix,doc,Func) ->
- Prefix++Func(doc);
-wrapper(_,X,Func) ->
- Func(X).
-
-data_dir(Config) ->
- case ?config(data_dir, Config) of
- List when (length(List) > 0) ->
- PathList = filename:split(List),
- {NewPathList,_} = lists:split((length(PathList)-1), PathList),
- DataDir = filename:join(NewPathList ++ [ftp_SUITE_data]),
- NewConfig =
- lists:keyreplace(data_dir,1,Config, {data_dir,DataDir}),
- NewConfig;
- _ -> Config
- end.
-
-
-
-p(F) ->
- p(F, []).
-
-p(F, A) ->
- case get(ftp_testcase) of
- undefined ->
- io:format("~w [~w] " ++ F ++ "~n", [?MODULE, self() | A]);
- TC when is_atom(TC) ->
- io:format("~w [~w] ~w:" ++ F ++ "~n", [?MODULE, self(), TC | A])
- end.
diff --git a/lib/inets/test/ftp_ticket_test.erl b/lib/inets/test/ftp_ticket_test.erl
deleted file mode 100644
index fe4ab35728..0000000000
--- a/lib/inets/test/ftp_ticket_test.erl
+++ /dev/null
@@ -1,61 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2006-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_ticket_test).
-
--compile(export_all).
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Solaris 8 sparc ").
-
-
-%% Test server callbacks
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-
-all() ->
- tickets().
-
-groups() ->
- [].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-init_per_suite(Config) ->
- ?LIB_MOD:ftpd_init(ticket_test, Config).
-
-tickets() ->
- [ticket_6035].
-
-
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-ticket_6035(X) -> ?LIB_MOD:ticket_6035(X).
diff --git a/lib/inets/test/ftp_windows_2003_server_test.erl b/lib/inets/test/ftp_windows_2003_server_test.erl
deleted file mode 100644
index 32f25713f8..0000000000
--- a/lib/inets/test/ftp_windows_2003_server_test.erl
+++ /dev/null
@@ -1,167 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2011. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_windows_2003_server_test).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Windows 2003 server ").
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(windows_2003_server, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initiation before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%% Description: Initiation before each test case
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [
- open,
- open_port,
- {group, passive},
- {group, active},
- api_missuse,
- not_owner,
- {group, progress_report}
- ].
-
-groups() ->
- [
- {passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [], ftp_suite_lib:progress_report(suite)}
- ].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-%% Test cases starts here.
-
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
-
-fin(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
diff --git a/lib/inets/test/ftp_windows_xp_test.erl b/lib/inets/test/ftp_windows_xp_test.erl
deleted file mode 100644
index 06d919ba00..0000000000
--- a/lib/inets/test/ftp_windows_xp_test.erl
+++ /dev/null
@@ -1,157 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-
--module(ftp_windows_xp_test).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
-
--define(LIB_MOD,ftp_suite_lib).
--define(CASE_WRAPPER(_A_,_B_,_C_),?LIB_MOD:wrapper(_A_,_B_,_C_)).
--define(PLATFORM,"Windows xp ").
-
-%% Test server callback functions
-%%--------------------------------------------------------------------
-%% Function: init_per_suite(Config) -> Config
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Initiation before the whole suite
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%%--------------------------------------------------------------------
-init_per_suite(Config) ->
- {File, NewFile} = ?LIB_MOD:test_filenames(),
- NewConfig = [{file, File}, {new_file, NewFile} | Config],
- ?LIB_MOD:ftpd_init(windows_xp, NewConfig).
-
-%%--------------------------------------------------------------------
-%% Function: end_per_suite(Config) -> _
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after the whole suite
-%%--------------------------------------------------------------------
-end_per_suite(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
-
-%%--------------------------------------------------------------------
-%% Function: init_per_testcase(TestCase, Config) -> Config
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%%
-%% Description: Initiation before each test case
-%%
-%% Note: This function is free to add any key/value pairs to the Config
-%% variable, but should NOT alter/remove any existing entries.
-%% Description: Initiation before each test case
-%%--------------------------------------------------------------------
-init_per_testcase(Case, Config) ->
- ftp_suite_lib:init_per_testcase(Case, Config).
-%%--------------------------------------------------------------------
-%% Function: end_per_testcase(TestCase, Config) -> _
-%% Case - atom()
-%% Name of the test case that is about to be run.
-%% Config - [tuple()]
-%% A list of key/value pairs, holding the test case configuration.
-%% Description: Cleanup after each test case
-%%--------------------------------------------------------------------
-end_per_testcase(Case, Config) ->
- ftp_suite_lib:end_per_testcase(Case, Config).
-
-%%--------------------------------------------------------------------
-%% Function: all(Clause) -> TestCases
-%% Clause - atom() - suite | doc
-%% TestCases - [Case]
-%% Case - atom()
-%% Name of a test case.
-%% Description: Returns a list of all test cases in this test suite
-%%--------------------------------------------------------------------
-all() ->
- [open, open_port, {group, passive}, {group, active},
- api_missuse, not_owner, {group, progress_report}].
-
-groups() ->
- [{passive, [], ftp_suite_lib:passive(suite)},
- {active, [], ftp_suite_lib:active(suite)},
- {progress_report, [],
- ftp_suite_lib:progress_report(suite)}].
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
-open(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open/1).
-open_port(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:open_port/1).
-api_missuse(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:api_missuse/1).
-not_owner(X) -> ?CASE_WRAPPER(?PLATFORM,X,fun ?LIB_MOD:not_owner/1).
-
-passive_user(X) -> ?LIB_MOD:passive_user(X).
-passive_pwd(X) -> ?LIB_MOD:passive_pwd(X).
-passive_cd(X) -> ?LIB_MOD:passive_cd(X).
-passive_lcd(X) -> ?LIB_MOD:passive_lcd(X).
-passive_ls(X) -> ?LIB_MOD:passive_ls(X).
-passive_nlist(X) -> ?LIB_MOD:passive_nlist(X).
-passive_rename(X) -> ?LIB_MOD:passive_rename(X).
-passive_delete(X) -> ?LIB_MOD:passive_delete(X).
-passive_mkdir(X) -> ?LIB_MOD:passive_mkdir(X).
-passive_send(X) -> ?LIB_MOD:passive_send(X).
-passive_send_bin(X) -> ?LIB_MOD:passive_send_bin(X).
-passive_send_chunk(X) -> ?LIB_MOD:passive_send_chunk(X).
-passive_append(X) -> ?LIB_MOD:passive_append(X).
-passive_append_bin(X) -> ?LIB_MOD:passive_append_bin(X).
-passive_append_chunk(X) -> ?LIB_MOD:passive_append_chunk(X).
-passive_recv(X) -> ?LIB_MOD:passive_recv(X).
-passive_recv_bin(X) -> ?LIB_MOD:passive_recv_bin(X).
-passive_recv_chunk(X) -> ?LIB_MOD:passive_recv_chunk(X).
-passive_type(X) -> ?LIB_MOD:passive_type(X).
-passive_quote(X) -> ?LIB_MOD:passive_quote(X).
-passive_ip_v6_disabled(X) -> ?LIB_MOD:passive_ip_v6_disabled(X).
-active_user(X) -> ?LIB_MOD:active_user(X).
-active_pwd(X) -> ?LIB_MOD:active_pwd(X).
-active_cd(X) -> ?LIB_MOD:active_cd(X).
-active_lcd(X) -> ?LIB_MOD:active_lcd(X).
-active_ls(X) -> ?LIB_MOD:active_ls(X).
-active_nlist(X) -> ?LIB_MOD:active_nlist(X).
-active_rename(X) -> ?LIB_MOD:active_rename(X).
-active_delete(X) -> ?LIB_MOD:active_delete(X).
-active_mkdir(X) -> ?LIB_MOD:active_mkdir(X).
-active_send(X) -> ?LIB_MOD:active_send(X).
-active_send_bin(X) -> ?LIB_MOD:active_send_bin(X).
-active_send_chunk(X) -> ?LIB_MOD:active_send_chunk(X).
-active_append(X) -> ?LIB_MOD:active_append(X).
-active_append_bin(X) -> ?LIB_MOD:active_append_bin(X).
-active_append_chunk(X) -> ?LIB_MOD:active_append_chunk(X).
-active_recv(X) -> ?LIB_MOD:active_recv(X).
-active_recv_bin(X) -> ?LIB_MOD:active_recv_bin(X).
-active_recv_chunk(X) -> ?LIB_MOD:active_recv_chunk(X).
-active_type(X) -> ?LIB_MOD:active_type(X).
-active_quote(X) -> ?LIB_MOD:active_quote(X).
-active_ip_v6_disabled(X) -> ?LIB_MOD:active_ip_v6_disabled(X).
-progress_report_send(X) -> ?LIB_MOD:progress_report_send(X).
-progress_report_recv(X) -> ?LIB_MOD:progress_report_recv(X).
-
-fin(Config) ->
- ?LIB_MOD:ftpd_fin(Config).
diff --git a/lib/inets/test/httpc_SUITE.erl b/lib/inets/test/httpc_SUITE.erl
index 0c35f284f7..fe6edd504e 100644
--- a/lib/inets/test/httpc_SUITE.erl
+++ b/lib/inets/test/httpc_SUITE.erl
@@ -145,6 +145,22 @@ init_per_group(misc = Group, Config) ->
ok = httpc:set_options([{ipfamily, Inet}]),
Config;
+init_per_group(Group, Config0) when Group =:= sim_https; Group =:= https ->
+ start_apps(Group),
+ StartSsl = try ssl:start()
+ catch
+ Error:Reason ->
+ {skip, lists:flatten(io_lib:format("Failed to start apps for https Error=~p Reason=~p", [Error, Reason]))}
+ end,
+ case StartSsl of
+ {error, {already_started, _}} ->
+ do_init_per_group(Group, Config0);
+ ok ->
+ do_init_per_group(Group, Config0);
+ _ ->
+ StartSsl
+ end;
+
init_per_group(Group, Config0) ->
start_apps(Group),
Config = proplists:delete(port, Config0),
@@ -153,7 +169,10 @@ init_per_group(Group, Config0) ->
end_per_group(_, _Config) ->
ok.
-
+do_init_per_group(Group, Config0) ->
+ Config = proplists:delete(port, Config0),
+ Port = server_start(Group, server_config(Group, Config)),
+ [{port, Port} | Config].
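The guard added above boils down to: treat both ok and {error, {already_started, ssl}} from ssl:start() as "SSL is usable", and turn anything else into a Common Test skip so the whole https/sim_https group is skipped instead of failing. A condensed sketch of the same idea (not the patch's exact code; the skip-reason tuple is illustrative):

    init_per_group(Group, Config) when Group =:= https; Group =:= sim_https ->
        case catch ssl:start() of
            ok                              -> do_init_per_group(Group, Config);
            {error, {already_started, ssl}} -> do_init_per_group(Group, Config);
            Other                           -> {skip, {ssl_not_started, Other}}
        end;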
%%--------------------------------------------------------------------
init_per_testcase(pipeline, Config) ->
inets:start(httpc, [{profile, pipeline}]),
@@ -277,9 +296,6 @@ trace(Config) when is_list(Config) ->
pipeline(Config) when is_list(Config) ->
Request = {url(group_name(Config), "/dummy.html", Config), []},
{ok, _} = httpc:request(get, Request, [], [], pipeline),
-
- %% Make sure pipeline session is registerd
- test_server:sleep(4000),
keep_alive_requests(Request, pipeline).
%%--------------------------------------------------------------------
@@ -287,9 +303,6 @@ pipeline(Config) when is_list(Config) ->
persistent_connection(Config) when is_list(Config) ->
Request = {url(group_name(Config), "/dummy.html", Config), []},
{ok, _} = httpc:request(get, Request, [], [], persistent),
-
- %% Make sure pipeline session is registerd
- test_server:sleep(4000),
keep_alive_requests(Request, persistent).
%%-------------------------------------------------------------------------
@@ -311,13 +324,8 @@ async(Config) when is_list(Config) ->
{ok, NewRequestId} =
httpc:request(get, Request, [], [{sync, false}]),
- ok = httpc:cancel_request(NewRequestId),
- receive
- {http, {NewRequestId, _}} ->
- ct:fail(http_cancel_request_failed)
- after 3000 ->
- ok
- end.
+ ok = httpc:cancel_request(NewRequestId).
+
%%-------------------------------------------------------------------------
save_to_file() ->
[{doc, "Test to save the http body to a file"}].
@@ -1080,6 +1088,8 @@ server_config(_, _) ->
start_apps(https) ->
inets_test_lib:start_apps([crypto, public_key, ssl]);
+start_apps(sim_https) ->
+ inets_test_lib:start_apps([crypto, public_key, ssl]);
start_apps(_) ->
ok.
@@ -1149,7 +1159,7 @@ receive_replys([ID|IDs]) ->
{http, {ID, {{_, 200, _}, [_|_], _}}} ->
receive_replys(IDs);
{http, {Other, {{_, 200, _}, [_|_], _}}} ->
- ct:fail({recived_canceld_id, Other})
+ ct:pal({recived_canceld_id, Other})
end.
%% Perform a synchronous stop
diff --git a/lib/inets/test/httpd_basic_SUITE.erl b/lib/inets/test/httpd_basic_SUITE.erl
index b1fe373cff..2d06f3e70c 100644
--- a/lib/inets/test/httpd_basic_SUITE.erl
+++ b/lib/inets/test/httpd_basic_SUITE.erl
@@ -38,7 +38,9 @@ all() ->
erl_script_nocache_opt,
script_nocache,
escaped_url_in_error_body,
- slowdose
+ script_timeout,
+ slowdose,
+ keep_alive_timeout
].
groups() ->
@@ -80,16 +82,19 @@ DUMMY
DummyFile = filename:join([PrivDir,"dummy.html"]),
CgiDir = filename:join(PrivDir, "cgi-bin"),
ok = file:make_dir(CgiDir),
- Cgi = case test_server:os_type() of
- {win32, _} ->
- "printenv.bat";
- _ ->
- "printenv.sh"
- end,
- inets_test_lib:copy_file(Cgi, DataDir, CgiDir),
- AbsCgi = filename:join([CgiDir, Cgi]),
- {ok, FileInfo} = file:read_file_info(AbsCgi),
- ok = file:write_file_info(AbsCgi, FileInfo#file_info{mode = 8#00755}),
+ {CgiPrintEnv, CgiSleep} = case test_server:os_type() of
+ {win32, _} ->
+ {"printenv.bat", "cgi_sleep.exe"};
+ _ ->
+ {"printenv.sh", "cgi_sleep"}
+ end,
+ lists:foreach(
+ fun(Cgi) ->
+ inets_test_lib:copy_file(Cgi, DataDir, CgiDir),
+ AbsCgi = filename:join([CgiDir, Cgi]),
+ {ok, FileInfo} = file:read_file_info(AbsCgi),
+ ok = file:write_file_info(AbsCgi, FileInfo#file_info{mode = 8#00755})
+ end, [CgiPrintEnv, CgiSleep]),
{ok, Fd} = file:open(DummyFile, [write]),
ok = file:write(Fd, Dummy),
ok = file:close(Fd),
@@ -100,7 +105,8 @@ DUMMY
{document_root, PrivDir},
{bind_address, "localhost"}],
- [{httpd_conf, HttpdConf}, {cgi_dir, CgiDir}, {cgi_script, Cgi} | Config].
+ [{httpd_conf, HttpdConf}, {cgi_dir, CgiDir},
+ {cgi_printenv, CgiPrintEnv}, {cgi_sleep, CgiSleep} | Config].
%%--------------------------------------------------------------------
%% Function: end_per_suite(Config) -> _
@@ -235,7 +241,7 @@ script_nocache(Config) when is_list(Config) ->
verify_script_nocache(Config, CgiNoCache, EsiNoCache, CgiOption, EsiOption) ->
HttpdConf = ?config(httpd_conf, Config),
- CgiScript = ?config(cgi_script, Config),
+ CgiScript = ?config(cgi_printenv, Config),
CgiDir = ?config(cgi_dir, Config),
{ok, Pid} = inets:start(httpd, [{port, 0},
{script_alias,
@@ -363,6 +369,63 @@ escaped_url_in_error_body(Config) when is_list(Config) ->
inets:stop(httpd, Pid),
tsp("escaped_url_in_error_body -> done"),
ok.
+
+
+%%-------------------------------------------------------------------------
+%%-------------------------------------------------------------------------
+
+keep_alive_timeout(doc) ->
+ ["Test the keep_alive_timeout option"];
+keep_alive_timeout(suite) ->
+ [];
+keep_alive_timeout(Config) when is_list(Config) ->
+ HttpdConf = ?config(httpd_conf, Config),
+ {ok, Pid} = inets:start(httpd, [{port, 0}, {keep_alive, true}, {keep_alive_timeout, 2} | HttpdConf]),
+ Info = httpd:info(Pid),
+ Port = proplists:get_value(port, Info),
+ _Address = proplists:get_value(bind_address, Info),
+ {ok, S} = gen_tcp:connect("localhost", Port, []),
+ receive
+ after 3000 ->
+ {error, closed} = gen_tcp:send(S, "hey")
+ end,
+ inets:stop(httpd, Pid).
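The keep_alive_timeout value is given in seconds, so with a limit of 2 the idle connection is guaranteed to be gone once the 3000 ms wait elapses and the following gen_tcp:send/2 reports {error, closed}. A configuration sketch for the option outside the test (paths and values are placeholders, not from the patch):

    %% Sketch only: drop persistent connections that stay idle for 30 seconds.
    {ok, Httpd} = inets:start(httpd, [{port, 8080},
                                      {server_name, "example"},
                                      {server_root, "/tmp"},
                                      {document_root, "/tmp/htdocs"},
                                      {keep_alive, true},
                                      {keep_alive_timeout, 30}]),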
+
+%%-------------------------------------------------------------------------
+%%-------------------------------------------------------------------------
+
+script_timeout(doc) ->
+ ["Test the httpd script_timeout option"];
+script_timeout(suite) ->
+ [];
+script_timeout(Config) when is_list(Config) ->
+ verify_script_timeout(Config, 20, 200),
+ verify_script_timeout(Config, 5, 403),
+ ok.
+
+verify_script_timeout(Config, ScriptTimeout, StatusCode) ->
+ HttpdConf = ?config(httpd_conf, Config),
+ CgiScript = ?config(cgi_sleep, Config),
+ CgiDir = ?config(cgi_dir, Config),
+ {ok, Pid} = inets:start(httpd, [{port, 0},
+ {script_alias,
+ {"/cgi-bin/", CgiDir ++ "/"}},
+ {script_timeout, ScriptTimeout}
+ | HttpdConf]),
+ Info = httpd:info(Pid),
+ Port = proplists:get_value(port, Info),
+ Address = proplists:get_value(bind_address, Info),
+ ok = httpd_test_lib:verify_request(ip_comm, Address, Port, node(),
+ "GET /cgi-bin/" ++ CgiScript ++
+ " HTTP/1.0\r\n\r\n",
+ [{statuscode, StatusCode},
+ {version, "HTTP/1.0"}]),
+ inets:stop(httpd, Pid).
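The script_timeout value is also in seconds and is exercised here against the new cgi_sleep program added below, which sleeps for 10 seconds: a 20-second limit lets the script finish (status 200), while a 5-second limit makes httpd abort the request (status 403). A minimal configuration sketch (paths are placeholders, not from the patch):

    %% Sketch only: give up on CGI scripts that are silent for 15 seconds.
    {ok, Httpd} = inets:start(httpd, [{port, 0},
                                      {server_name, "example"},
                                      {server_root, "/tmp"},
                                      {document_root, "/tmp/htdocs"},
                                      {script_alias, {"/cgi-bin/", "/tmp/cgi-bin/"}},
                                      {script_timeout, 15}]),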
+
+
+%%-------------------------------------------------------------------------
+%%-------------------------------------------------------------------------
+
slowdose(doc) ->
["Testing minimum bytes per second option"];
slowdose(Config) when is_list(Config) ->
diff --git a/lib/inets/test/httpd_basic_SUITE_data/Makefile.src b/lib/inets/test/httpd_basic_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..9da2ed583f
--- /dev/null
+++ b/lib/inets/test/httpd_basic_SUITE_data/Makefile.src
@@ -0,0 +1,14 @@
+CC = @CC@
+LD = @LD@
+CFLAGS = @CFLAGS@ -I@erl_include@ @DEFS@
+CROSSLDFLAGS = @CROSSLDFLAGS@
+
+PROGS = cgi_sleep@exe@
+
+all: $(PROGS)
+
+cgi_sleep@exe@: cgi_sleep@obj@
+ $(LD) $(CROSSLDFLAGS) -o cgi_sleep cgi_sleep@obj@ @LIBS@
+
+cgi_sleep@obj@: cgi_sleep.c
+ $(CC) -c -o cgi_sleep@obj@ $(CFLAGS) cgi_sleep.c
diff --git a/lib/inets/test/httpd_basic_SUITE_data/cgi_sleep.c b/lib/inets/test/httpd_basic_SUITE_data/cgi_sleep.c
new file mode 100644
index 0000000000..126bb23987
--- /dev/null
+++ b/lib/inets/test/httpd_basic_SUITE_data/cgi_sleep.c
@@ -0,0 +1,26 @@
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifdef __WIN32__
+#include <windows.h>
+#include <fcntl.h>
+#include <io.h>
+#else
+#include <unistd.h>
+#endif
+
+int main(void)
+{
+ unsigned int seconds = 10;
+
+#ifdef __WIN32__
+ Sleep(seconds * 1000);
+ _setmode(_fileno(stdout), _O_BINARY);
+#else
+ sleep(seconds);
+#endif
+
+ printf("Content-type: text/plain\r\n\r\n");
+ printf("Slept for %u seconds.\r\n", seconds);
+ exit(EXIT_SUCCESS);
+}
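Run by hand, cgi_sleep pauses for 10 seconds and then emits a minimal CGI response; the _setmode call keeps the Windows C runtime from rewriting the explicit \r\n line endings. Expected output, as produced by the printf calls above:

    Content-type: text/plain

    Slept for 10 seconds.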
diff --git a/lib/kernel/doc/src/gen_sctp.xml b/lib/kernel/doc/src/gen_sctp.xml
index 7ea58fffff..33f1c20608 100644
--- a/lib/kernel/doc/src/gen_sctp.xml
+++ b/lib/kernel/doc/src/gen_sctp.xml
@@ -322,7 +322,7 @@
<p>
Branch off an existing association <anno>Assoc</anno>
in a socket <anno>Socket</anno> of type <c>seqpacket</c>
- (one-to-may style) into
+ (one-to-many style) into
a new socket <anno>NewSocket</anno> of type <c>stream</c>
(one-to-one style).
</p>
diff --git a/lib/kernel/doc/src/inet.xml b/lib/kernel/doc/src/inet.xml
index fd62f778a2..bc4c68230e 100644
--- a/lib/kernel/doc/src/inet.xml
+++ b/lib/kernel/doc/src/inet.xml
@@ -430,8 +430,56 @@ fe80::204:acff:fe17:bf38
<name name="peername" arity="1"/>
<fsummary>Return the address and port for the other end of a connection</fsummary>
<desc>
- <p>Returns the address and port for the other end of a
- connection.</p>
+ <p>
+ Returns the address and port for the other end of a
+ connection.
+ </p>
+ <p>
+ Note that for SCTP sockets this function only returns
+ one of the socket's peer addresses. The function
+ <seealso marker="#peernames/1">peernames/1,2</seealso>
+ returns all.
+ </p>
+ </desc>
+ </func>
+ <func>
+ <name name="peernames" arity="1"/>
+ <fsummary>
+ Return all address/port numbers for the other end of a connection
+ </fsummary>
+ <desc>
+ <p>
+ Equivalent to
+ <seealso marker="#peernames/2"><c>peernames(<anno>Socket</anno>, 0)</c></seealso>.
+ Note that this function's behaviour for an SCTP
+ one-to-many style socket is not defined by the
+ <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">SCTP Sockets API Extensions</url>.
+ </p>
+ </desc>
+ </func>
+ <func>
+ <name name="peernames" arity="2"/>
+ <fsummary>
+ Return all address/port numbers for the other end of a connection
+ </fsummary>
+ <desc>
+ <p>
+ Returns a list of all address/port number pairs for the other end
+ of a socket's association <c><anno>Assoc</anno></c>.
+ </p>
+ <p>
+ This function can return multiple addresses for multihomed
+ sockets such as SCTP sockets. For other sockets it
+ returns a one element list.
+ </p>
+ <p>
+	    Note that the <c><anno>Assoc</anno></c> parameter is defined by the
+	    <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">SCTP Sockets API Extensions</url>
+	    to be ignored for one-to-one style sockets. What the special
+	    value <c>0</c> means, and hence the behaviour for one-to-many
+	    style sockets, is unfortunately not defined.
+	  </p>
</desc>
</func>
<func>
@@ -446,6 +494,46 @@ fe80::204:acff:fe17:bf38
<fsummary>Return the local address and port number for a socket</fsummary>
<desc>
<p>Returns the local address and port number for a socket.</p>
+ <p>
+ Note that for SCTP sockets this function only returns
+ one of the socket addresses. The function
+ <seealso marker="#socknames/1">socknames/1,2</seealso>
+ returns all.
+ </p>
+ </desc>
+ </func>
+ <func>
+ <name name="socknames" arity="1"/>
+ <fsummary>Return all local address/port numbers for a socket</fsummary>
+ <desc>
+ <p>
+ Equivalent to
+ <seealso marker="#socknames/2"><c>socknames(<anno>Socket</anno>, 0)</c></seealso>.
+ </p>
+ </desc>
+ </func>
+ <func>
+ <name name="socknames" arity="2"/>
+ <fsummary>Return all local address/port numbers for a socket</fsummary>
+ <desc>
+ <p>
+ Returns a list of all local address/port number pairs for a socket
+ for the given association <c><anno>Assoc</anno></c>.
+ </p>
+ <p>
+ This function can return multiple addresses for multihomed
+ sockets such as SCTP sockets. For other sockets it
+ returns a one element list.
+ </p>
+ <p>
+	    Note that the <c><anno>Assoc</anno></c> parameter is defined by the
+	    <url href="http://tools.ietf.org/html/draft-ietf-tsvwg-sctpsocket-13">SCTP Sockets API Extensions</url>
+	    to be ignored for one-to-one style sockets.
+	    For one-to-many style sockets the special value <c>0</c>
+	    is defined to mean that the returned addresses shall be
+	    without regard to any particular association.
+	    How different SCTP implementations interpret this varies somewhat.
+ </p>
</desc>
</func>
<func>
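To make the new documentation concrete, here is a rough usage sketch (not from the patch) for an SCTP association; the loopback addresses and variable names are illustrative:

    {ok, Server} = gen_sctp:open([{ip, {127,0,0,1}}]),
    ok = gen_sctp:listen(Server, true),
    {ok, Port} = inet:port(Server),
    {ok, Client} = gen_sctp:open(),
    {ok, Assoc} = gen_sctp:connect(Client, {127,0,0,1}, Port, []),
    {ok, LocalAddrs} = inet:socknames(Client, Assoc),  %% all local address/port pairs
    {ok, PeerAddrs}  = inet:peernames(Client, Assoc).  %% all peer address/port pairs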
diff --git a/lib/kernel/doc/src/os.xml b/lib/kernel/doc/src/os.xml
index 5e182de41d..9122267c40 100644
--- a/lib/kernel/doc/src/os.xml
+++ b/lib/kernel/doc/src/os.xml
@@ -177,6 +177,17 @@ format_utc_timestamp() ->
</desc>
</func>
<func>
+ <name name="unsetenv" arity="1"/>
+ <fsummary>Delete an environment variable</fsummary>
+ <desc>
+ <p>Deletes the environment variable <c><anno>VarName</anno></c>.</p>
+ <p>If Unicode filename encoding is in effect (see the <seealso
+ marker="erts:erl#file_name_encoding">erl manual
+ page</seealso>), the string (<c><anno>VarName</anno></c>) may
+ contain characters with codepoints > 255.</p>
+ </desc>
+ </func>
+ <func>
<name name="version" arity="0"/>
<fsummary>Return the Operating System version</fsummary>
<desc>
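A quick usage sketch of the new BIF next to its existing relatives (the variable name is illustrative):

    true  = os:putenv("MY_VAR", "1"),
    "1"   = os:getenv("MY_VAR"),
    true  = os:unsetenv("MY_VAR"),
    false = os:getenv("MY_VAR").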
diff --git a/lib/kernel/doc/src/rpc.xml b/lib/kernel/doc/src/rpc.xml
index b01ff16c85..67fdccb734 100644
--- a/lib/kernel/doc/src/rpc.xml
+++ b/lib/kernel/doc/src/rpc.xml
@@ -185,7 +185,7 @@
{Mod, Bin, File} = code:get_object_code(Mod),
%% and load it on all nodes including this one
-{ResL, _} = rpc:multicall(code, load_binary, [Mod, Bin, File,]),
+{ResL, _} = rpc:multicall(code, load_binary, [Mod, File, Bin]),
%% and then maybe check the ResL list.</code>
</desc>
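The documentation fix matters because code:load_binary/3 takes its arguments in the order Module, Filename, Binary, while code:get_object_code/1 returns {Module, Binary, Filename}. A short reminder of the relationship (sketch):

    {Mod, Bin, File} = code:get_object_code(Mod),
    {module, Mod} = code:load_binary(Mod, File, Bin).   %% Module, Filename, Binary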
diff --git a/lib/kernel/src/gen_sctp.erl b/lib/kernel/src/gen_sctp.erl
index 58d84ae924..067e07304d 100644
--- a/lib/kernel/src/gen_sctp.erl
+++ b/lib/kernel/src/gen_sctp.erl
@@ -423,7 +423,11 @@ error_string(9) ->
error_string(10) ->
"Cookie Received While Shutting Down";
error_string(11) ->
+ "Restart of an Association with New Addresses";
+error_string(12) ->
"User Initiated Abort";
+error_string(13) ->
+ "Protocol Violation";
%% For more info on principal SCTP error codes: phone +44 7981131933
error_string(N) when is_integer(N) ->
unknown_error;
diff --git a/lib/kernel/src/inet.erl b/lib/kernel/src/inet.erl
index 27f085c3aa..b1c9d56c2d 100644
--- a/lib/kernel/src/inet.erl
+++ b/lib/kernel/src/inet.erl
@@ -24,6 +24,7 @@
%% socket
-export([peername/1, sockname/1, port/1, send/2,
+ peernames/1, peernames/2, socknames/1, socknames/2,
setopts/2, getopts/2,
getifaddrs/0, getifaddrs/1,
getif/1, getif/0, getiflist/0, getiflist/1,
@@ -120,6 +121,17 @@
'addr' | 'broadaddr' | 'dstaddr' |
'mtu' | 'netmask' | 'flags' |'hwaddr'.
+-type if_getopt_result() ::
+ {'addr', ip_address()} |
+ {'broadaddr', ip_address()} |
+ {'dstaddr', ip_address()} |
+ {'mtu', non_neg_integer()} |
+ {'netmask', ip_address()} |
+ {'flags', ['up' | 'down' | 'broadcast' | 'no_broadcast' |
+ 'pointtopoint' | 'no_pointtopoint' |
+ 'running' | 'multicast' | 'loopback']} |
+ {'hwaddr', ether_address()}.
+
-type address_family() :: 'inet' | 'inet6'.
-type socket_protocol() :: 'tcp' | 'udp' | 'sctp'.
-type socket_type() :: 'stream' | 'dgram' | 'seqpacket'.
@@ -146,6 +158,7 @@ close(Socket) ->
ok
end.
+
-spec peername(Socket) -> {ok, {Address, Port}} | {error, posix()} when
Socket :: socket(),
Address :: ip_address(),
@@ -162,6 +175,24 @@ setpeername(Socket, {IP,Port}) ->
setpeername(Socket, undefined) ->
prim_inet:setpeername(Socket, undefined).
+-spec peernames(Socket) -> {ok, [{Address, Port}]} | {error, posix()} when
+ Socket :: socket(),
+ Address :: ip_address(),
+ Port :: non_neg_integer().
+
+peernames(Socket) ->
+ prim_inet:peernames(Socket).
+
+-spec peernames(Socket, Assoc) ->
+ {ok, [{Address, Port}]} | {error, posix()} when
+ Socket :: socket(),
+ Assoc :: #sctp_assoc_change{} | gen_sctp:assoc_id(),
+ Address :: ip_address(),
+ Port :: non_neg_integer().
+
+peernames(Socket, Assoc) ->
+ prim_inet:peernames(Socket, Assoc).
+
-spec sockname(Socket) -> {ok, {Address, Port}} | {error, posix()} when
Socket :: socket(),
@@ -179,6 +210,25 @@ setsockname(Socket, {IP,Port}) ->
setsockname(Socket, undefined) ->
prim_inet:setsockname(Socket, undefined).
+-spec socknames(Socket) -> {ok, [{Address, Port}]} | {error, posix()} when
+ Socket :: socket(),
+ Address :: ip_address(),
+ Port :: non_neg_integer().
+
+socknames(Socket) ->
+ prim_inet:socknames(Socket).
+
+-spec socknames(Socket, Assoc) ->
+ {ok, [{Address, Port}]} | {error, posix()} when
+ Socket :: socket(),
+ Assoc :: #sctp_assoc_change{} | gen_sctp:assoc_id(),
+ Address :: ip_address(),
+ Port :: non_neg_integer().
+
+socknames(Socket, Assoc) ->
+ prim_inet:socknames(Socket, Assoc).
+
+
-spec port(Socket) -> {'ok', Port} | {'error', any()} when
Socket :: socket(),
Port :: port_number().
@@ -266,13 +316,13 @@ getiflist() ->
-spec ifget(Socket :: socket(),
Name :: string() | atom(),
Opts :: [if_getopt()]) ->
- {'ok', [if_setopt()]} | {'error', posix()}.
+ {'ok', [if_getopt_result()]} | {'error', posix()}.
ifget(Socket, Name, Opts) ->
prim_inet:ifget(Socket, Name, Opts).
-spec ifget(Name :: string() | atom(), Opts :: [if_getopt()]) ->
- {'ok', [if_setopt()]} | {'error', posix()}.
+ {'ok', [if_getopt_result()]} | {'error', posix()}.
ifget(Name, Opts) ->
withsocket(fun(S) -> prim_inet:ifget(S, Name, Opts) end).
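For non-SCTP sockets the new plural functions are documented (in the inet.xml change above) to return one-element lists, so they line up with sockname/1 and peername/1; a small sketch under that assumption:

    {ok, L} = gen_tcp:listen(0, []),
    {ok, {Addr, Port}} = inet:sockname(L),
    {ok, [{Addr, Port}]} = inet:socknames(L).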
diff --git a/lib/kernel/src/inet_int.hrl b/lib/kernel/src/inet_int.hrl
index 18a4a61b2f..641a8dc0ca 100644
--- a/lib/kernel/src/inet_int.hrl
+++ b/lib/kernel/src/inet_int.hrl
@@ -86,6 +86,8 @@
-define(INET_REQ_ACCEPT, 26).
-define(INET_REQ_LISTEN, 27).
-define(INET_REQ_IGNOREFD, 28).
+-define(INET_REQ_GETLADDRS, 29).
+-define(INET_REQ_GETPADDRS, 30).
%% TCP requests
%%-define(TCP_REQ_ACCEPT, 40). MOVED
diff --git a/lib/kernel/src/os.erl b/lib/kernel/src/os.erl
index ded03361ee..9415593485 100644
--- a/lib/kernel/src/os.erl
+++ b/lib/kernel/src/os.erl
@@ -26,7 +26,7 @@
%%% BIFs
--export([getenv/0, getenv/1, getpid/0, putenv/2, timestamp/0]).
+-export([getenv/0, getenv/1, getpid/0, putenv/2, timestamp/0, unsetenv/1]).
-spec getenv() -> [string()].
@@ -58,6 +58,12 @@ putenv(_, _) ->
timestamp() ->
erlang:nif_error(undef).
+-spec unsetenv(VarName) -> true when
+ VarName :: string().
+
+unsetenv(_) ->
+ erlang:nif_error(undef).
+
%%% End of BIFs
-spec type() -> {Osfamily, Osname} when
diff --git a/lib/kernel/test/erl_prim_loader_SUITE.erl b/lib/kernel/test/erl_prim_loader_SUITE.erl
index 35502a1d27..b2ca3bdbc2 100644
--- a/lib/kernel/test/erl_prim_loader_SUITE.erl
+++ b/lib/kernel/test/erl_prim_loader_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -24,7 +24,7 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2]).
--export([get_path/1, set_path/1, get_file/1,
+-export([get_path/1, set_path/1, get_file/1, normalize_and_backslash/1,
inet_existing/1, inet_coming_up/1, inet_disconnects/1,
multiple_slaves/1, file_requests/1,
local_archive/1, remote_archive/1,
@@ -39,7 +39,8 @@
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
- [get_path, set_path, get_file, inet_existing,
+ [get_path, set_path, get_file,
+ normalize_and_backslash, inet_existing,
inet_coming_up, inet_disconnects, multiple_slaves,
file_requests, local_archive, remote_archive,
primary_archive, virtual_dir_in_archive].
@@ -107,6 +108,26 @@ get_file(Config) when is_list(Config) ->
?line error = erl_prim_loader:get_file({dummy}),
ok.
+normalize_and_backslash(Config) ->
+ %% Test OTP-11170
+ case os:type() of
+ {win32,_} ->
+ {skip, "not on windows"};
+ _ ->
+ test_normalize_and_backslash(Config)
+ end.
+test_normalize_and_backslash(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Dir = filename:join(PrivDir,"\\"),
+ File = filename:join(Dir,"file-OTP-11170"),
+ ok = file:make_dir(Dir),
+ ok = file:write_file(File,"a file to test OTP-11170"),
+ {ok,["file-OTP-11170"]} = file:list_dir(Dir),
+ {ok,["file-OTP-11170"]} = erl_prim_loader:list_dir(Dir),
+ ok = file:delete(File),
+ ok = file:del_dir(Dir),
+ ok.
+
inet_existing(doc) -> ["Start a node using the 'inet' loading method, ",
"from an already started boot server."];
inet_existing(Config) when is_list(Config) ->
diff --git a/lib/kernel/test/gen_sctp_SUITE.erl b/lib/kernel/test/gen_sctp_SUITE.erl
index e89cb44797..d2de96b269 100644
--- a/lib/kernel/test/gen_sctp_SUITE.erl
+++ b/lib/kernel/test/gen_sctp_SUITE.erl
@@ -1,4 +1,4 @@
-%%
+%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2007-2013. All Rights Reserved.
@@ -36,7 +36,9 @@
open_multihoming_ipv6_socket/1,
open_multihoming_ipv4_and_ipv6_socket/1,
basic_stream/1, xfer_stream_min/1, peeloff_active_once/1,
- peeloff_active_true/1, buffers/1]).
+ peeloff_active_true/1, buffers/1,
+ names_unihoming_ipv4/1, names_unihoming_ipv6/1,
+ names_multihoming_ipv4/1, names_multihoming_ipv6/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -48,7 +50,9 @@ all() ->
open_multihoming_ipv6_socket,
open_multihoming_ipv4_and_ipv6_socket,
basic_stream, xfer_stream_min, peeloff_active_once,
- peeloff_active_true, buffers].
+ peeloff_active_true, buffers,
+ names_unihoming_ipv4, names_unihoming_ipv6,
+ names_multihoming_ipv4, names_multihoming_ipv6].
groups() ->
[].
@@ -107,7 +111,7 @@ xfer_min(Config) when is_list(Config) ->
?line {ok,Sb} = gen_sctp:open([{type,seqpacket}]),
?line {ok,Pb} = inet:port(Sb),
?line ok = gen_sctp:listen(Sb, true),
-
+
?line {ok,Sa} = gen_sctp:open(),
?line {ok,Pa} = inet:port(Sa),
?line {ok,#sctp_assoc_change{state=comm_up,
@@ -118,18 +122,18 @@ xfer_min(Config) when is_list(Config) ->
gen_sctp:connect(Sa, Loopback, Pb, []),
?line {SbAssocId,SaOutboundStreams,SaInboundStreams} =
case recv_event(log_ok(gen_sctp:recv(Sb, infinity))) of
- {Loopback,Pa,
- #sctp_assoc_change{state=comm_up,
- error=0,
- outbound_streams=SbOutboundStreams,
- inbound_streams=SbInboundStreams,
- assoc_id=AssocId}} ->
- {AssocId,SbInboundStreams,SbOutboundStreams};
- {Loopback,Pa,
- #sctp_paddr_change{state=addr_confirmed,
- addr={Loopback,Pa},
- error=0,
- assoc_id=AssocId}} ->
+ {Loopback,Pa,
+ #sctp_assoc_change{state=comm_up,
+ error=0,
+ outbound_streams=SbOutboundStreams,
+ inbound_streams=SbInboundStreams,
+ assoc_id=AssocId}} ->
+ {AssocId,SbInboundStreams,SbOutboundStreams};
+ {Loopback,Pa,
+ #sctp_paddr_change{state=addr_confirmed,
+ addr={Loopback,Pa},
+ error=0,
+ assoc_id=AssocId}} ->
{Loopback,Pa,
#sctp_assoc_change{state=comm_up,
error=0,
@@ -148,17 +152,20 @@ xfer_min(Config) when is_list(Config) ->
assoc_id=SbAssocId}],
Data} -> ok;
Event1 ->
- {Loopback,Pa,
- #sctp_paddr_change{addr = {Loopback,_},
- state = addr_available,
- error = 0,
- assoc_id = SbAssocId}} =
- recv_event(Event1),
- {ok,{Loopback,
- Pa,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SbAssocId}],
- Data}} = gen_sctp:recv(Sb, infinity)
+ case recv_event(Event1) of
+ {Loopback,Pa,
+ #sctp_paddr_change{addr = {Loopback,_},
+ state = State,
+ error = 0,
+ assoc_id = SbAssocId}}
+ when State =:= addr_available;
+ State =:= addr_confirmed ->
+ {Loopback,
+ Pa,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SbAssocId}],
+ Data} = log_ok(gen_sctp:recv(Sb, infinity))
+ end
end,
?line ok = gen_sctp:send(Sb, SbAssocId, 0, Data),
?line case log_ok(gen_sctp:recv(Sa, infinity)) of
@@ -197,7 +204,7 @@ xfer_min(Config) when is_list(Config) ->
recv_event(log_ok(gen_sctp:recv(Sb, infinity))),
?line ok = gen_sctp:close(Sa),
?line ok = gen_sctp:close(Sb),
-
+
?line receive
Msg -> test_server:fail({received,Msg})
after 17 -> ok
@@ -216,7 +223,7 @@ xfer_active(Config) when is_list(Config) ->
?line {ok,Sb} = gen_sctp:open([{active,true}]),
?line {ok,Pb} = inet:port(Sb),
?line ok = gen_sctp:listen(Sb, true),
-
+
?line {ok,Sa} = gen_sctp:open([{active,true}]),
?line {ok,Pa} = inet:port(Sa),
?line ok = gen_sctp:connect_init(Sa, Loopback, Pb, []),
@@ -348,7 +355,7 @@ def_sndrcvinfo(Config) when is_list(Config) ->
%%
?line S1 =
log_ok(gen_sctp:open(
- 0, [{sctp_default_send_param,#sctp_sndrcvinfo{ppid=17}}])),
+ 0, [{sctp_default_send_param,#sctp_sndrcvinfo{ppid=17}}])),
?LOGVAR(S1),
?line P1 =
log_ok(inet:port(S1)),
@@ -455,18 +462,22 @@ def_sndrcvinfo(Config) when is_list(Config) ->
stream=1, ppid=0, context=0, assoc_id=S1AssocId}],
<<"3: ",Data/binary>>} -> ok;
Event2 ->
- {Loopback,P2,
- #sctp_paddr_change{
- addr={Loopback,_}, state=addr_available,
- error=0, assoc_id=S1AssocId}} =
- recv_event(Event2),
- ?line case log_ok(gen_sctp:recv(S1)) of
- {Loopback,P2,
- [#sctp_sndrcvinfo{
- stream=1, ppid=0, context=0,
- assoc_id=S1AssocId}],
- <<"3: ",Data/binary>>} -> ok
- end
+ case recv_event(Event2) of
+ {Loopback,P2,
+ #sctp_paddr_change{
+ addr={Loopback,_},
+ state=State,
+ error=0, assoc_id=S1AssocId}}
+ when State =:= addr_available;
+ State =:= addr_confirmed ->
+ ?line case log_ok(gen_sctp:recv(S1)) of
+ {Loopback,P2,
+ [#sctp_sndrcvinfo{
+ stream=1, ppid=0, context=0,
+ assoc_id=S1AssocId}],
+ <<"3: ",Data/binary>>} -> ok
+ end
+ end
end,
?line ok =
do_from_other_process(
@@ -509,6 +520,13 @@ log_ok(X) -> log(ok(X)).
ok({ok,X}) -> X.
+err([], Result) ->
+ erlang:error(Result);
+err([Reason|_], {error,Reason}) ->
+ ok;
+err([_|Reasons], Result) ->
+ err(Reasons, Result).
+
log(X) ->
io:format("LOG[~w]: ~p~n", [self(),X]),
X.
@@ -529,57 +547,57 @@ api_open_close(Config) when is_list(Config) ->
?line {ok,S1} = gen_sctp:open(0),
?line {ok,P} = inet:port(S1),
?line ok = gen_sctp:close(S1),
-
+
?line {ok,S2} = gen_sctp:open(P),
?line {ok,P} = inet:port(S2),
?line ok = gen_sctp:close(S2),
-
+
?line {ok,S3} = gen_sctp:open([{port,P}]),
?line {ok,P} = inet:port(S3),
?line ok = gen_sctp:close(S3),
-
+
?line {ok,S4} = gen_sctp:open(P, []),
?line {ok,P} = inet:port(S4),
?line ok = gen_sctp:close(S4),
-
+
?line {ok,S5} = gen_sctp:open(P, [{ifaddr,any}]),
?line {ok,P} = inet:port(S5),
?line ok = gen_sctp:close(S5),
?line ok = gen_sctp:close(S5),
-
+
?line try gen_sctp:close(0)
catch error:badarg -> ok
end,
-
+
?line try gen_sctp:open({})
catch error:badarg -> ok
end,
-
+
?line try gen_sctp:open(-1)
catch error:badarg -> ok
end,
-
+
?line try gen_sctp:open(65536)
catch error:badarg -> ok
end,
-
+
?line try gen_sctp:open(make_ref(), [])
catch error:badarg -> ok
end,
-
+
?line try gen_sctp:open(0, {})
catch error:badarg -> ok
end,
-
+
?line try gen_sctp:open(0, [make_ref()])
catch error:badarg -> ok
end,
-
+
?line try gen_sctp:open([{invalid_option,0}])
catch error:badarg -> ok
end,
-
+
?line try gen_sctp:open(0, [{mode,invalid_mode}])
catch error:badarg -> ok
end,
@@ -591,11 +609,11 @@ api_listen(suite) ->
[];
api_listen(Config) when is_list(Config) ->
?line Localhost = {127,0,0,1},
-
+
?line try gen_sctp:listen(0, true)
catch error:badarg -> ok
end,
-
+
?line {ok,S} = gen_sctp:open(),
?line {ok,Pb} = inet:port(S),
?line try gen_sctp:listen(S, not_allowed_for_listen)
@@ -603,7 +621,7 @@ api_listen(Config) when is_list(Config) ->
end,
?line ok = gen_sctp:close(S),
?line {error,closed} = gen_sctp:listen(S, true),
-
+
?line {ok,Sb} = gen_sctp:open(Pb),
?line {ok,Sa} = gen_sctp:open(),
?line case gen_sctp:connect(Sa, localhost, Pb, []) of
@@ -615,8 +633,8 @@ api_listen(Config) when is_list(Config) ->
gen_sctp:recv(Sa, infinity);
{error,#sctp_assoc_change{state=cant_assoc}} ->
ok%;
- %% {error,{Localhost,Pb,_,#sctp_assoc_change{state=cant_assoc}}} ->
- %% ok
+ %% {error,{Localhost,Pb,_,#sctp_assoc_change{state=cant_assoc}}} ->
+ %% ok
end,
?line ok = gen_sctp:listen(Sb, true),
?line {ok,#sctp_assoc_change{state=comm_up,
@@ -840,23 +858,36 @@ xfer_stream_min(Config) when is_list(Config) ->
?line SbOutboundStreams = SaInboundStreams,
?line ?LOGVAR(SbOutboundStreams),
?line ok = gen_sctp:send(Sa, SaAssocId, 0, Data),
- ?line case gen_sctp:recv(Sb, infinity) of
- {ok,{Loopback,
+ ?line case log_ok(gen_sctp:recv(Sb, infinity)) of
+ {Loopback,
+ Pa,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SbAssocId}],
+ Data} -> ok;
+ {Loopback,
+ Pa,[],
+ #sctp_paddr_change{addr = {Loopback,_},
+ state = addr_available,
+ error = 0,
+ assoc_id = SbAssocId}} ->
+ {Loopback,
+ Pa,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SbAssocId}],
+ Data} = log_ok(gen_sctp:recv(Sb, infinity));
+ {Loopback,
+ Pa,
+ [#sctp_sndrcvinfo{stream=Stream,
+ assoc_id=SbAssocId}],
+ #sctp_paddr_change{addr = {Loopback,_},
+ state = addr_confirmed,
+ error = 0,
+ assoc_id = SbAssocId}} ->
+ {Loopback,
Pa,
[#sctp_sndrcvinfo{stream=Stream,
assoc_id=SbAssocId}],
- Data}} -> ok;
- {ok,{Loopback,
- Pa,[],
- #sctp_paddr_change{addr = {Loopback,_},
- state = addr_available,
- error = 0,
- assoc_id = SbAssocId}}} ->
- {ok,{Loopback,
- Pa,
- [#sctp_sndrcvinfo{stream=Stream,
- assoc_id=SbAssocId}],
- Data}} = gen_sctp:recv(Sb, infinity)
+ Data} = log_ok(gen_sctp:recv(Sb, infinity))
end,
?line ok =
do_from_other_process(
@@ -972,10 +1003,10 @@ peeloff(Config, SockOpts) when is_list(Config) ->
?line ?LOGVAR(S2Ai),
?line S1Ai =
receive
- {S1,{Addr,P2,
- #sctp_assoc_change{
- state=comm_up,
- assoc_id=AssocId1}}} -> AssocId1
+ {S1,{Addr,P2,
+ #sctp_assoc_change{
+ state=comm_up,
+ assoc_id=AssocId1}}} -> AssocId1
after Timeout ->
socket_bailout([S1,S2])
end,
@@ -1003,8 +1034,8 @@ peeloff(Config, SockOpts) when is_list(Config) ->
?line P3 = case P3_X of 0 -> P1; _ -> P3_X end,
?line [{_,#sctp_paddrinfo{assoc_id=S3Ai,state=active}}] =
socket_call(S3,
- {getopts,[{sctp_get_peer_addr_info,
- #sctp_paddrinfo{address={Addr,P2}}}]}),
+ {getopts,[{sctp_get_peer_addr_info,
+ #sctp_paddrinfo{address={Addr,P2}}}]}),
%%?line S3Ai = S1Ai,
?line ?LOGVAR(S3Ai),
%%
@@ -1087,9 +1118,9 @@ buffers(Config) when is_list(Config) ->
%%
?line socket_call(S1, {setopts,[{recbuf,Limit}]}),
?line Recbuf =
- case socket_call(S1, {getopts,[recbuf]}) of
- [{recbuf,RB1}] when RB1 >= Limit -> RB1
- end,
+ case socket_call(S1, {getopts,[recbuf]}) of
+ [{recbuf,RB1}] when RB1 >= Limit -> RB1
+ end,
?line Data = mk_data(Recbuf+Limit),
?line socket_call(S2, {setopts,[{sndbuf,Recbuf+Limit}]}),
?line socket_call(S2, {send,S2Ai,Stream,Data}),
@@ -1190,6 +1221,93 @@ open_multihoming_ipv4_and_ipv6_socket(Config) when is_list(Config) ->
{skip, Reason}
end.
+names_unihoming_ipv4(doc) ->
+ "Test inet:socknames/peernames on unihoming IPv4 sockets";
+names_unihoming_ipv4(suite) ->
+ [];
+names_unihoming_ipv4(Config) when is_list(Config) ->
+ ?line do_names(Config, inet, 1).
+
+names_unihoming_ipv6(doc) ->
+ "Test inet:socknames/peernames on unihoming IPv6 sockets";
+names_unihoming_ipv6(suite) ->
+ [];
+names_unihoming_ipv6(Config) when is_list(Config) ->
+ ?line do_names(Config, inet6, 1).
+
+names_multihoming_ipv4(doc) ->
+ "Test inet:socknames/peernames on multihoming IPv4 sockets";
+names_multihoming_ipv4(suite) ->
+ [];
+names_multihoming_ipv4(Config) when is_list(Config) ->
+ ?line do_names(Config, inet, 2).
+
+names_multihoming_ipv6(doc) ->
+ "Test inet:socknames/peernames on multihoming IPv6 sockets";
+names_multihoming_ipv6(suite) ->
+ [];
+names_multihoming_ipv6(Config) when is_list(Config) ->
+ ?line do_names(Config, inet6, 2).
+
+
+
+do_names(_, FamilySpec, AddressCount) ->
+ Fun =
+ fun (ServerSocket, _, ServerAssoc, ClientSocket, _, ClientAssoc) ->
+ ?line ServerSocknamesNoassoc =
+ lists:sort(ok(inet:socknames(ServerSocket))),
+ ?line ?LOGVAR(ServerSocknamesNoassoc),
+ ?line ServerSocknames =
+ lists:sort(ok(inet:socknames(ServerSocket, ServerAssoc))),
+ ?line ?LOGVAR(ServerSocknames),
+ ?line [_|_] =
+ ordsets:intersection
+ (ServerSocknamesNoassoc, ServerSocknames),
+ ?line ClientSocknamesNoassoc =
+ lists:sort(ok(inet:socknames(ClientSocket))),
+ ?line ?LOGVAR(ClientSocknamesNoassoc),
+ ?line ClientSocknames =
+ lists:sort(ok(inet:socknames(ClientSocket, ClientAssoc))),
+ ?line ?LOGVAR(ClientSocknames),
+ ?line [_|_] =
+ ordsets:intersection
+ (ClientSocknamesNoassoc, ClientSocknames),
+ ?line err([einval,enotconn], inet:peernames(ServerSocket)),
+ ?line ServerPeernames =
+ lists:sort(ok(inet:peernames(ServerSocket, ServerAssoc))),
+ ?line ?LOGVAR(ServerPeernames),
+ ?line err([einval,enotconn], inet:peernames(ClientSocket)),
+ ?line ClientPeernames =
+ lists:sort(ok(inet:peernames(ClientSocket, ClientAssoc))),
+ ?line ?LOGVAR(ClientPeernames),
+ ?line ServerSocknames = ClientPeernames,
+ ?line ClientSocknames = ServerPeernames,
+ ?line {ok,Socket} =
+ gen_sctp:peeloff(ServerSocket, ServerAssoc),
+ ?line SocknamesNoassoc =
+ lists:sort(ok(inet:socknames(Socket))),
+ ?line ?LOGVAR(SocknamesNoassoc),
+ ?line Socknames =
+ lists:sort(ok(inet:socknames(Socket, ServerAssoc))),
+ ?line ?LOGVAR(Socknames),
+ ?line true =
+ ordsets:is_subset(SocknamesNoassoc, Socknames),
+ ?line Peernames =
+ lists:sort(ok(inet:peernames(Socket, ServerAssoc))),
+ ?line ?LOGVAR(Peernames),
+ ?line ok = gen_sctp:close(Socket),
+ ?line Socknames = ClientPeernames,
+ ?line ClientSocknames = Peernames,
+ ok
+ end,
+ ?line case get_addrs_by_family(FamilySpec, AddressCount) of
+ {ok, Addresses} when length(Addresses) =:= AddressCount ->
+ ?line do_open_and_connect(Addresses, hd(Addresses), Fun);
+ {error, Reason} ->
+ {skip, Reason}
+ end.
+
+
get_addrs_by_family(Family, NumAddrs) ->
case os:type() of
@@ -1274,6 +1392,10 @@ f(F, A) ->
lists:flatten(io_lib:format(F, A)).
do_open_and_connect(ServerAddresses, AddressToConnectTo) ->
+ ?line Fun = fun (_, _, _, _, _, _) -> ok end,
+ ?line do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun).
+%%
+do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun) ->
?line ServerFamily = get_family_by_addrs(ServerAddresses),
?line io:format("Serving ~p addresses: ~p~n",
[ServerFamily, ServerAddresses]),
@@ -1284,14 +1406,26 @@ do_open_and_connect(ServerAddresses, AddressToConnectTo) ->
?line ClientFamily = get_family_by_addr(AddressToConnectTo),
?line io:format("Connecting to ~p ~p~n",
[ClientFamily, AddressToConnectTo]),
- ?line S2 = ok(gen_sctp:open(0, [ClientFamily])),
+ ?line ClientOpts =
+ [ClientFamily |
+ case ClientFamily of
+ inet6 ->
+ [{ipv6_v6only,true}];
+ _ ->
+ []
+ end],
+ ?line S2 = ok(gen_sctp:open(0, ClientOpts)),
+ log(open),
%% Verify client can connect
- ?line #sctp_assoc_change{state=comm_up} =
+ ?line #sctp_assoc_change{state=comm_up} = S2Assoc =
ok(gen_sctp:connect(S2, AddressToConnectTo, P1, [])),
+ log(comm_up),
%% verify server side also receives comm_up from client
- ?line recv_comm_up_eventually(S1),
+ ?line S1Assoc = recv_comm_up_eventually(S1),
+ ?line Result = Fun(S1, ServerFamily, S1Assoc, S2, ClientFamily, S2Assoc),
?line ok = gen_sctp:close(S2),
- ?line ok = gen_sctp:close(S1).
+ ?line ok = gen_sctp:close(S1),
+ Result.
%% If at least one of the addresses is an ipv6 address, return inet6, else inet.
get_family_by_addrs(Addresses) ->
@@ -1306,9 +1440,11 @@ get_family_by_addr(Addr) when tuple_size(Addr) =:= 8 -> inet6.
recv_comm_up_eventually(S) ->
?line case ok(gen_sctp:recv(S)) of
- {_Addr, _Port, _Info, #sctp_assoc_change{state=comm_up}} ->
- ok;
- {_Addr, _Port, _Info, _OtherSctpMsg} ->
+ {_Addr, _Port, _Info,
+ #sctp_assoc_change{state=comm_up} = Assoc} ->
+ Assoc;
+ {_Addr, _Port, _Info, _OtherSctpMsg} = Msg ->
+ ?line log({unexpected,Msg}),
?line recv_comm_up_eventually(S)
end.
@@ -1367,10 +1503,10 @@ socket_bailout([]) ->
socket_history({State,Flush}) ->
{lists:keysort(
- 2,
- lists:flatten(
- [[{Key,Val} || Val <- Vals]
- || {Key,Vals} <- gb_trees:to_list(State)])),
+ 2,
+ lists:flatten(
+ [[{Key,Val} || Val <- Vals]
+ || {Key,Vals} <- gb_trees:to_list(State)])),
Flush}.
s_handler(Socket) ->
@@ -1453,7 +1589,7 @@ s_loop(Socket, Timeout, Parent, Handler, State) ->
#sctp_assoc_change{
state=comm_up,
inbound_streams=Is}}}|_]
- when 0 =< Stream, Stream < Is-> ok;
+ when 0 =< Stream, Stream < Is-> ok;
[] -> ok
end,
Key = {msg,AssocId,Stream},
@@ -1473,7 +1609,7 @@ s_loop(Socket, Timeout, Parent, Handler, State) ->
case {gb_get(Key, State),St} of
{[],_} -> ok;
{[{_,{Addr,Port,#sctp_assoc_change{state=comm_up}}}|_],_}
- when St =:= comm_lost; St =:= shutdown_comp -> ok
+ when St =:= comm_lost; St =:= shutdown_comp -> ok
end,
NewState = gb_push(Key, Val, State),
Parent ! {self(),{Addr,Port,SAC}},
@@ -1489,8 +1625,9 @@ s_loop(Socket, Timeout, Parent, Handler, State) ->
[] -> ok
end,
case {gb_get({assoc_change,AssocId}, State),St} of
- {[{_,{Addr,Port,#sctp_assoc_change{state=comm_up}}}|_],
- addr_available} -> ok;
+ {[{_,{Addr,Port,#sctp_assoc_change{state=comm_up}}}|_],_}
+ when St =:= addr_available;
+ St =:= addr_confirmed -> ok;
{[],addr_confirmed} -> ok
end,
Key = {paddr_change,AssocId},
diff --git a/lib/kernel/test/zlib_SUITE.erl b/lib/kernel/test/zlib_SUITE.erl
index bd237cb513..e91f6f18d4 100644
--- a/lib/kernel/test/zlib_SUITE.erl
+++ b/lib/kernel/test/zlib_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -565,8 +565,8 @@ intro(Config) when is_list(Config) ->
large_deflate(doc) -> "Test deflate large file, which had a bug reported on erlang-bugs";
large_deflate(suite) -> [];
large_deflate(Config) when is_list(Config) ->
- large_deflate().
-large_deflate() ->
+ large_deflate_do().
+large_deflate_do() ->
?line Z = zlib:open(),
?line Plain = rand_bytes(zlib:getBufSize(Z)*5),
?line ok = zlib:deflateInit(Z),
@@ -899,7 +899,7 @@ worker(Seed, FnATpl, Parent) ->
Parent ! self().
worker_loop(0, _FnATpl) ->
- large_deflate(), % the time consuming one as finale
+ large_deflate_do(), % the time consuming one as finale
ok;
worker_loop(N, FnATpl) ->
{F,A} = element(random:uniform(size(FnATpl)),FnATpl),
diff --git a/lib/mnesia/src/mnesia.appup.src b/lib/mnesia/src/mnesia.appup.src
index 355aafb215..c245299740 100644
--- a/lib/mnesia/src/mnesia.appup.src
+++ b/lib/mnesia/src/mnesia.appup.src
@@ -1,22 +1,16 @@
%% -*- erlang -*-
{"%VSN%",
[
- {"4.7.1", [{restart_application, mnesia}]},
- {"4.7", [{restart_application, mnesia}]},
- {"4.6", [{restart_application, mnesia}]},
- {"4.5.1", [{restart_application, mnesia}]},
- {"4.5", [{restart_application, mnesia}]},
+ {<<"4\\.1[0-9].*">>, [{restart_application, mnesia}]},
+ {<<"4\\.[5-9].*">>, [{restart_application, mnesia}]},
{"4.4.19", [{restart_application, mnesia}]},
{"4.4.18", [{restart_application, mnesia}]},
{"4.4.17", [{restart_application, mnesia}]},
{"4.4.16", [{restart_application, mnesia}]}
],
[
- {"4.7.1", [{restart_application, mnesia}]},
- {"4.7", [{restart_application, mnesia}]},
- {"4.6", [{restart_application, mnesia}]},
- {"4.5.1", [{restart_application, mnesia}]},
- {"4.5", [{restart_application, mnesia}]},
+ {<<"4\\.1[0-9].*">>, [{restart_application, mnesia}]},
+ {<<"4\\.[5-9].*">>, [{restart_application, mnesia}]},
{"4.4.19", [{restart_application, mnesia}]},
{"4.4.18", [{restart_application, mnesia}]},
{"4.4.17", [{restart_application, mnesia}]},
diff --git a/lib/mnesia/src/mnesia_locker.erl b/lib/mnesia/src/mnesia_locker.erl
index 14011003d3..c4fe370ec1 100644
--- a/lib/mnesia/src/mnesia_locker.erl
+++ b/lib/mnesia/src/mnesia_locker.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -26,8 +26,8 @@
global_lock/5,
ixrlock/5,
init/1,
- mnesia_down/2,
release_tid/1,
+ mnesia_down/2,
async_release_tid/2,
send_release_tid/2,
receive_release_tid_acc/2,
@@ -137,6 +137,17 @@ receive_release_tid_acc([Node | Nodes], Tid) ->
receive_release_tid_acc([], _Tid) ->
ok.
+mnesia_down(Node, Pending) ->
+ case whereis(?MODULE) of
+ undefined -> {error, node_not_running};
+ Pid ->
+ Ref = make_ref(),
+ Pid ! {{self(), Ref}, {release_remote_non_pending, Node, Pending}},
+ receive %% No need to wait for anything else; if the process dies, we die soon too
+ {Ref,ok} -> ok
+ end
+ end.
+
loop(State) ->
receive
{From, {write, Tid, Oid}} ->
@@ -213,9 +224,9 @@ loop(State) ->
reply(From, {tid_released, Tid}),
loop(State);
- {release_remote_non_pending, Node, Pending} ->
+ {{From, Ref},{release_remote_non_pending, Node, Pending}} ->
release_remote_non_pending(Node, Pending),
- mnesia_monitor:mnesia_down(?MODULE, Node),
+ From ! {Ref, ok},
loop(State);
{'EXIT', Pid, _} when Pid == State#state.supervisor ->
@@ -653,19 +664,6 @@ ix_read_res(Tab,IxKey,Pos) ->
%% ********************* end server code ********************
%% The following code executes at the client side of a transactions
-mnesia_down(N, Pending) ->
- case whereis(?MODULE) of
- undefined ->
- %% Takes care of mnesia_down's in early startup
- mnesia_monitor:mnesia_down(?MODULE, N);
- Pid ->
- %% Syncronously call needed in order to avoid
- %% race with mnesia_tm's coordinator processes
- %% that may restart and acquire new locks.
- %% mnesia_monitor ensures the sync.
- Pid ! {release_remote_non_pending, N, Pending}
- end.
-
%% Acquire a write lock, but do a read, used by
%% mnesia:wread/1
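The rewritten mnesia_down/2 above turns an asynchronous notification into a reference-tagged synchronous request, so the caller cannot race ahead of the lock release. A minimal hedged sketch of that request/reply pattern in isolation (module and function names are invented for illustration; this is not the mnesia_locker API):

    -module(sync_server).
    -export([start/0, call/1]).

    %% Start the server under a registered name.
    start() ->
        register(?MODULE, spawn(fun loop/0)).

    %% Synchronous call: tag the request with a unique reference and
    %% block until the server echoes the same reference back.
    call(Request) ->
        case whereis(?MODULE) of
            undefined ->
                {error, not_running};
            Pid ->
                Ref = make_ref(),
                Pid ! {{self(), Ref}, Request},
                receive
                    {Ref, Reply} -> Reply
                end
        end.

    %% Server loop: do the work, then reply to the tagged sender.
    loop() ->
        receive
            {{From, Ref}, Request} ->
                From ! {Ref, handle(Request)},
                loop()
        end.

    handle(_Request) ->
        ok.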
diff --git a/lib/mnesia/src/mnesia_monitor.erl b/lib/mnesia/src/mnesia_monitor.erl
index 7a788238fc..438da65158 100644
--- a/lib/mnesia/src/mnesia_monitor.erl
+++ b/lib/mnesia/src/mnesia_monitor.erl
@@ -482,11 +482,7 @@ handle_cast({mnesia_down, mnesia_controller, Node}, State) ->
mnesia_tm:mnesia_down(Node),
{noreply, State};
-handle_cast({mnesia_down, mnesia_tm, {Node, Pending}}, State) ->
- mnesia_locker:mnesia_down(Node, Pending),
- {noreply, State};
-
-handle_cast({mnesia_down, mnesia_locker, Node}, State) ->
+handle_cast({mnesia_down, mnesia_tm, Node}, State) ->
Down = {mnesia_down, Node},
mnesia_lib:report_system_event(Down),
GoingDown = lists:delete(Node, State#state.going_down),
diff --git a/lib/mnesia/src/mnesia_subscr.erl b/lib/mnesia/src/mnesia_subscr.erl
index 8f78dc55e8..9272211ad2 100644
--- a/lib/mnesia/src/mnesia_subscr.erl
+++ b/lib/mnesia/src/mnesia_subscr.erl
@@ -204,7 +204,9 @@ what(Tab, Tid, Obj, write, undefined) ->
{mnesia_table_event, {write, Tab, Obj, Old, Tid}};
{'EXIT', _} ->
ignore
- end.
+ end;
+what(Tab, Tid, Obj, write, Old) ->
+ {mnesia_table_event, {write, Tab, Obj, Old, Tid}}.
deliver(_, ignore) ->
ok;
diff --git a/lib/mnesia/src/mnesia_tm.erl b/lib/mnesia/src/mnesia_tm.erl
index e54e5c4e88..17af0cad44 100644
--- a/lib/mnesia/src/mnesia_tm.erl
+++ b/lib/mnesia/src/mnesia_tm.erl
@@ -181,7 +181,7 @@ mnesia_down(Node) ->
%% mnesia_monitor takes care of the sync
case whereis(?MODULE) of
undefined ->
- mnesia_monitor:mnesia_down(?MODULE, {Node, []});
+ mnesia_monitor:mnesia_down(?MODULE, Node);
Pid ->
Pid ! {mnesia_down, Node}
end.
@@ -403,7 +403,9 @@ doit_loop(#state{coordinators=Coordinators,participants=Participants,supervisor=
Tids = gb_trees:keys(Participants),
reconfigure_participants(N, gb_trees:values(Participants)),
NewState = clear_fixtable(N, State),
- mnesia_monitor:mnesia_down(?MODULE, {N, Tids}),
+
+ mnesia_locker:mnesia_down(N, Tids),
+ mnesia_monitor:mnesia_down(?MODULE, N),
doit_loop(NewState);
{From, {unblock_me, Tab}} ->
diff --git a/lib/mnesia/test/mnesia_dirty_access_test.erl b/lib/mnesia/test/mnesia_dirty_access_test.erl
index 6017092095..519b4bb052 100644
--- a/lib/mnesia/test/mnesia_dirty_access_test.erl
+++ b/lib/mnesia/test/mnesia_dirty_access_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -198,6 +198,11 @@ dirty_update_counter(Config, Storage) ->
?match(1, mnesia:dirty_update_counter({Tab, foo}, 1)),
?match([{Tab, foo,1}], mnesia:dirty_read({Tab,foo})),
+ ?match({ok,_}, mnesia:subscribe({table, Tab, detailed})),
+
+ ?match(2, mnesia:dirty_update_counter({Tab, foo}, 1)),
+ ?match([{Tab, foo,2}], mnesia:dirty_read({Tab,foo})),
+
?verify_mnesia(Nodes, []).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
diff --git a/lib/mnesia/test/mnesia_evil_coverage_test.erl b/lib/mnesia/test/mnesia_evil_coverage_test.erl
index db23a39943..91820238e5 100644
--- a/lib/mnesia/test/mnesia_evil_coverage_test.erl
+++ b/lib/mnesia/test/mnesia_evil_coverage_test.erl
@@ -1869,14 +1869,19 @@ subscribe_extended(Config) when is_list(Config) ->
{attributes, record_info(fields, tab)}],
?match({atomic, ok}, mnesia:create_table(Tab2, Def2)),
+ Tab3 = ctab,
+ Def3 = [{Storage, [N1, N2]}],
+ ?match({atomic, ok}, mnesia:create_table(Tab3, Def3)),
+
?match({ok, N1}, mnesia:subscribe({table, Tab1, detailed})),
?match({ok, N1}, mnesia:subscribe({table, Tab2, detailed})),
+ ?match({ok, N1}, mnesia:subscribe({table, Tab3, detailed})),
?match({error, {already_exists, _}}, mnesia:subscribe({table, Tab1, simple})),
?match({error, {badarg, {table, Tab1, bad}}}, mnesia:subscribe({table, Tab1, bad})),
?match({ok, N1}, mnesia:subscribe(activity)),
- test_ext_sub(Tab1, Tab2),
+ test_ext_sub(Tab1, Tab2, Tab3),
?match({ok, N1}, mnesia:unsubscribe(activity)),
?match({ok, N1}, mnesia:subscribe({table, Tab1, detailed})),
@@ -1895,11 +1900,11 @@ subscribe_extended(Config) when is_list(Config) ->
{max, [Tab1, Tab2]}]),
?match({ok, N1}, mnesia:subscribe({table, Tab2, detailed})),
?match({ok, N1}, mnesia:subscribe(activity)),
- test_ext_sub(Tab1, Tab2),
+ test_ext_sub(Tab1, Tab2, Tab3),
?verify_mnesia(Nodes, []).
-test_ext_sub(Tab1, Tab2) ->
+test_ext_sub(Tab1, Tab2, Tab3) ->
%% The basics
Rec1 = {Tab1, 1, 0, 0},
Rec2 = {Tab1, 1, 1, 0},
@@ -1940,7 +1945,6 @@ test_ext_sub(Tab1, Tab2) ->
?match({atomic, ok}, Delete(Tab1, 1)),
?match({mnesia_table_event, {delete, Tab1, {Tab1, 1}, [], {tid,_,S}}}, recv_event()),
?match({mnesia_activity_event, {complete, {tid,_,S}}}, recv_event()),
-
?match({ok, _N1}, mnesia:unsubscribe({table, Tab1, detailed})),
%% BAG
@@ -1969,6 +1973,17 @@ test_ext_sub(Tab1, Tab2) ->
?match({atomic, ok}, Delete(Tab2, 2)),
?match({mnesia_table_event, {delete, Tab2, {Tab2, 2}, [Rec4, Rec3], {tid,_,S}}}, recv_event()),
?match({mnesia_activity_event, {complete, {tid,_,S}}}, recv_event()),
+
+ %% COUNTERS
+
+ Rec5 = {Tab3, counter, 0},
+ ?match(ok, mnesia:dirty_write(Rec5)),
+ ?match({mnesia_table_event, {write, Tab3, Rec5, [], D}}, recv_event()),
+ ?match(1, mnesia:dirty_update_counter({Tab3, counter}, 1)),
+ ?match({mnesia_table_event, {write, Tab3, {Tab3,counter,1}, [Rec5], D}}, recv_event()),
+ ?match(ok, mnesia:dirty_delete({Tab3, counter})),
+ ?match({mnesia_table_event, {delete, Tab3, {Tab3,counter},
+ [{Tab3,counter,1}], D}}, recv_event()),
ok.
@@ -1987,7 +2002,7 @@ subscribe_standard(Config) when is_list(Config)->
%% Check system events
?match({error, {badarg, foo}}, mnesia:unsubscribe(foo)),
?match({error, badarg}, mnesia:unsubscribe({table, foo})),
- ?match(_, mnesia:unsubscribe(activity)),
+ mnesia:unsubscribe(activity),
?match({ok, N1}, mnesia:subscribe(system)),
?match({ok, N1}, mnesia:subscribe(activity)),
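The extra ?match lines above rely on dirty_update_counter/2 now emitting detailed table events that carry the old record. A hedged, standalone sketch of the same observation (module name, table name ctab, and the timeouts are assumptions for illustration; mnesia must be started and the table must exist on this node):

    -module(counter_events).
    -export([run/0]).

    run() ->
        {ok, _Node} = mnesia:subscribe({table, ctab, detailed}),
        ok = mnesia:dirty_write({ctab, counter, 0}),
        receive
            {mnesia_table_event, {write, ctab, {ctab,counter,0}, [], _}} -> ok
        after 1000 -> exit(no_initial_event)
        end,
        1 = mnesia:dirty_update_counter({ctab, counter}, 1),
        receive
            {mnesia_table_event, {write, ctab, New, Old, _Tid}} ->
                %% With the patch the previous record is included,
                %% e.g. New = {ctab,counter,1}, Old = [{ctab,counter,0}].
                {New, Old}
        after 1000 -> exit(no_counter_event)
        end.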
diff --git a/lib/observer/doc/src/ttb.xml b/lib/observer/doc/src/ttb.xml
index 4e63aecbf2..1453bbdf10 100644
--- a/lib/observer/doc/src/ttb.xml
+++ b/lib/observer/doc/src/ttb.xml
@@ -105,8 +105,9 @@ ttb:p(all, call)</code>
<v>Nodes = atom() | [atom()] | all | existing | new</v>
<v>Opts = Opt | [Opt]</v>
<v>Opt = {file,Client} | {handler, FormatHandler} | {process_info,PI} |
- shell | {shell, ShellSpec} | {timer, TimerSpec} | {overload, {MSec, Module, Function}}
- | {flush, MSec} | resume | {resume, FetchTimeout}</v>
+ shell | {shell, ShellSpec} | {timer, TimerSpec} |
+ {overload_check, {MSec, Module, Function}} |
+ {flush, MSec} | resume | {resume, FetchTimeout}</v>
<v>TimerSpec = MSec | {MSec, StopOpts}</v>
<v>MSec = FetchTimeout = integer()</v>
<v>Module = Function = atom() </v>
@@ -158,7 +159,7 @@ ttb:p(all, call)</code>
network communication are always present. The timer starts after
<c>ttb:p/2</c> is issued, so you can set up your trace patterns before.
</p>
- <p>The <c>overload</c> option allows to enable overload
+ <p>The <c>overload_check</c> option makes it possible to enable overload
checking on the nodes under trace. <c>Module:Function(check)</c>
is performed each <c>MSec</c> milliseconds. If the check returns
<c>true</c>, the tracing is disabled on a given node.<br/>
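The renamed overload_check option takes {MSec, Module, Function}, and Module:Function(check) is polled every MSec milliseconds; returning true disables tracing on that node. A hedged sketch of such a callback module (module name, threshold, and the catch-all clause are assumptions; only the check call is documented in this hunk):

    -module(my_overload).
    -export([check/1]).

    %% Polled by ttb every MSec milliseconds; returning true stops tracing
    %% on this node. The process-count threshold is an arbitrary example.
    check(check) ->
        erlang:system_info(process_count) > 10000;
    check(_Other) ->
        %% ttb may call the function with other atoms around start/stop;
        %% treat anything unknown as "not overloaded".
        false.

Assuming the module is loaded on the traced nodes, it could be enabled with, for example, ttb:tracer(all, [{overload_check, {5000, my_overload, check}}]).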
diff --git a/lib/observer/doc/src/ttb_ug.xml b/lib/observer/doc/src/ttb_ug.xml
index 08093a9451..402d079c2c 100644
--- a/lib/observer/doc/src/ttb_ug.xml
+++ b/lib/observer/doc/src/ttb_ug.xml
@@ -697,7 +697,7 @@ f3() ->
(tiger@durin)6>ttb:stop({format, {handler, ttb:get_et_handler()}}).
</code>
- <p>This shoud render a result similar to the
+ <p>This should render a result similar to the
following:
</p>
<p></p>
diff --git a/lib/observer/test/observer_SUITE.erl b/lib/observer/test/observer_SUITE.erl
index 8dea0d8ea8..b6665cb70b 100644
--- a/lib/observer/test/observer_SUITE.erl
+++ b/lib/observer/test/observer_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2006-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2006-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -66,5 +66,5 @@ app_file(suite) ->
app_file(doc) ->
["Testing .app file"];
app_file(Config) when is_list(Config) ->
- ?line ok = ?t:app_test(os_mon),
+ ?line ok = ?t:app_test(observer),
ok.
diff --git a/lib/odbc/configure.in b/lib/odbc/configure.in
index 83f7a47434..531ad84fb9 100644
--- a/lib/odbc/configure.in
+++ b/lib/odbc/configure.in
@@ -105,6 +105,7 @@ AC_CHECK_FUNC(gethostbyname, , AC_CHECK_LIB(nsl, main, [LIBS="$LIBS -lnsl"]))
dnl Checks for header files.
AC_HEADER_STDC
AC_CHECK_HEADERS([fcntl.h netdb.h stdlib.h string.h sys/socket.h winsock2.h])
+AC_CHECK_HEADERS([sql.h sqlext.h], [odbc_required_headers=yes], [odbc_required_headers=no])
dnl Checks for typedefs, structures, and compiler characteristics.
AC_C_CONST
@@ -203,6 +204,10 @@ AC_SUBST(TARGET_FLAGS)
;;
esac
+if test $odbc_required_headers = no; then
+ AC_MSG_WARN(["ODBC library - header check failed"])
+ echo "ODBC library - header check failed" > $ERL_TOP/lib/odbc/SKIP
+fi
if test $odbc_lib_link_success = no; then
AC_MSG_WARN(["ODBC library - link check failed"])
echo "ODBC library - link check failed" > $ERL_TOP/lib/odbc/SKIP
diff --git a/lib/orber/test/multi_ORB_SUITE.erl b/lib/orber/test/multi_ORB_SUITE.erl
index 3c1ffd59d3..41a309ff16 100644
--- a/lib/orber/test/multi_ORB_SUITE.erl
+++ b/lib/orber/test/multi_ORB_SUITE.erl
@@ -75,8 +75,6 @@
close_connections_local_interface_ctx_override_api/1,
ssl_1_multi_orber_generation_3_api/1, ssl_2_multi_orber_generation_3_api/1,
ssl_reconfigure_generation_3_api/1,
- ssl_1_multi_orber_generation_3_api_old/1, ssl_2_multi_orber_generation_3_api_old/1,
- ssl_reconfigure_generation_3_api_old/1,
close_connections_alt_iiop_addr_api/1, close_connections_multiple_profiles_api/1]).
@@ -137,13 +135,10 @@ cases() ->
setup_multi_connection_timeout_attempts_api,
setup_multi_connection_timeout_random_api,
ssl_1_multi_orber_api,
- ssl_1_multi_orber_generation_3_api_old,
ssl_1_multi_orber_generation_3_api,
ssl_2_multi_orber_api,
- ssl_2_multi_orber_generation_3_api_old,
ssl_2_multi_orber_generation_3_api,
ssl_reconfigure_api,
- ssl_reconfigure_generation_3_api_old,
ssl_reconfigure_generation_3_api].
%%-----------------------------------------------------------------
@@ -155,10 +150,7 @@ init_per_testcase(TC,Config)
TC =:= ssl_reconfigure_api ->
init_ssl(Config);
init_per_testcase(TC,Config)
- when TC =:= ssl_1_multi_orber_generation_3_api_old;
- TC =:= ssl_2_multi_orber_generation_3_api_old;
- TC =:= ssl_reconfigure_generation_3_api_old;
- TC =:= ssl_1_multi_orber_generation_3_api;
+ when TC =:= ssl_1_multi_orber_generation_3_api;
TC =:= ssl_2_multi_orber_generation_3_api;
TC =:= ssl_reconfigure_generation_3_api ->
init_ssl_3(Config);
@@ -1632,22 +1624,6 @@ ssl_1_multi_orber_api(_Config) ->
ssl_suite(ServerOptions, ClientOptions).
-ssl_1_multi_orber_generation_3_api_old(doc) -> ["SECURE MULTI ORB API tests (SSL depth 1)",
- "This case set up two secure orbs and test if they can",
- "communicate. The case also test to access one of the",
- "secure orbs which must raise a NO_PERMISSION exception."];
-ssl_1_multi_orber_generation_3_api_old(suite) -> [];
-ssl_1_multi_orber_generation_3_api_old(_Config) ->
-
- ServerOptions = orber_test_lib:get_options_old(iiop_ssl, server,
- 1, [{ssl_generation, 3},
- {iiop_ssl_port, 0}]),
- ClientOptions = orber_test_lib:get_options_old(iiop_ssl, client,
- 1, [{ssl_generation, 3},
- {iiop_ssl_port, 0}]),
- ssl_suite(ServerOptions, ClientOptions).
-
-
ssl_1_multi_orber_generation_3_api(doc) -> ["SECURE MULTI ORB API tests (SSL depth 1)",
"This case set up two secure orbs and test if they can",
"communicate. The case also test to access one of the",
@@ -1681,22 +1657,6 @@ ssl_2_multi_orber_api(_Config) ->
ssl_suite(ServerOptions, ClientOptions).
-ssl_2_multi_orber_generation_3_api_old(doc) -> ["SECURE MULTI ORB API tests (SSL depth 2)",
- "This case set up two secure orbs and test if they can",
- "communicate. The case also test to access one of the",
- "secure orbs which must raise a NO_PERMISSION exception."];
-ssl_2_multi_orber_generation_3_api_old(suite) -> [];
-ssl_2_multi_orber_generation_3_api_old(_Config) ->
-
- ServerOptions = orber_test_lib:get_options_old(iiop_ssl, server,
- 2, [{ssl_generation, 3},
- {iiop_ssl_port, 0}]),
- ClientOptions = orber_test_lib:get_options_old(iiop_ssl, client,
- 2, [{ssl_generation, 3},
- {iiop_ssl_port, 0}]),
- ssl_suite(ServerOptions, ClientOptions).
-
-
ssl_2_multi_orber_generation_3_api(doc) -> ["SECURE MULTI ORB API tests (SSL depth 2)",
"This case set up two secure orbs and test if they can",
"communicate. The case also test to access one of the",
@@ -1724,11 +1684,6 @@ ssl_reconfigure_api(_Config) ->
ssl_reconfigure_old([]).
-ssl_reconfigure_generation_3_api_old(doc) -> ["SECURE MULTI ORB API tests (SSL depth 2)",
- "This case set up two secure orbs and test if they can",
- "communicate. The case also test to access one of the",
- "secure orbs which must raise a NO_PERMISSION exception."];
-ssl_reconfigure_generation_3_api_old(suite) -> [];
ssl_reconfigure_generation_3_api_old(_Config) ->
ssl_reconfigure_old([{ssl_generation, 3}]).
diff --git a/lib/orber/test/orber_nat_SUITE.erl b/lib/orber/test/orber_nat_SUITE.erl
index ee31b162c2..a21bd4d499 100644
--- a/lib/orber/test/orber_nat_SUITE.erl
+++ b/lib/orber/test/orber_nat_SUITE.erl
@@ -57,7 +57,6 @@
nat_ip_address_local/1, nat_ip_address_local_local/1,
nat_iiop_port/1, nat_iiop_port_local/1,
nat_iiop_port_local_local/1,
- nat_iiop_ssl_port_old/1, nat_iiop_ssl_port_local_old/1,
nat_iiop_ssl_port/1, nat_iiop_ssl_port_local/1]).
@@ -93,8 +92,6 @@ cases() ->
nat_iiop_port_local,
nat_ip_address_local_local,
nat_iiop_port_local_local,
- nat_iiop_ssl_port_old,
- nat_iiop_ssl_port_local_old,
nat_iiop_ssl_port,
nat_iiop_ssl_port_local].
@@ -103,9 +100,7 @@ cases() ->
%%-----------------------------------------------------------------
init_per_testcase(TC, Config)
when TC =:= nat_iiop_ssl_port;
- TC =:= nat_iiop_ssl_port_local;
- TC =:= nat_iiop_ssl_port_old;
- TC =:= nat_iiop_ssl_port_local_old ->
+ TC =:= nat_iiop_ssl_port_local ->
case ?config(crypto_started, Config) of
true ->
case orber_test_lib:ssl_version() of
@@ -291,106 +286,6 @@ nat_iiop_port_local_local(_Config) ->
%% API tests for ORB to ORB, ssl security depth 1
%%-----------------------------------------------------------------
-nat_iiop_ssl_port_old(doc) -> ["SECURE MULTI ORB API tests (SSL depth 1)",
- "Make sure NAT works for SSL"];
-nat_iiop_ssl_port_old(suite) -> [];
-nat_iiop_ssl_port_old(_Config) ->
-
- IP = orber_test_lib:get_host(),
- ServerOptions = orber_test_lib:get_options_old(iiop_ssl, server,
- 1, [{iiop_ssl_port, 0},
- {flags, ?ORB_ENV_ENABLE_NAT},
- {ip_address, IP}]),
- ClientOptions = orber_test_lib:get_options_old(iiop_ssl, client,
- 1, [{iiop_ssl_port, 0}]),
- {ok, ServerNode, _ServerHost} =
- ?match({ok,_,_}, orber_test_lib:js_node(ServerOptions)),
- ServerPort = orber_test_lib:remote_apply(ServerNode, orber, iiop_port, []),
- SSLServerPort = orber_test_lib:remote_apply(ServerNode, orber, iiop_ssl_port, []),
- NATSSLServerPort = SSLServerPort+1,
- {ok, Ref} = ?match({ok, _},
- orber_test_lib:remote_apply(ServerNode, orber,
- add_listen_interface,
- [IP, ssl, NATSSLServerPort])),
- orber_test_lib:remote_apply(ServerNode, orber_env, configure_override,
- [nat_iiop_ssl_port,
- {local, NATSSLServerPort, [{4001, 43}]}]),
-
- {ok, ClientNode, _ClientHost} =
- ?match({ok,_,_}, orber_test_lib:js_node(ClientOptions)),
- ?match(ok, orber_test_lib:remote_apply(ServerNode, orber_test_lib,
- install_test_data,
- [ssl])),
-
- IOR1 = ?match(#'IOP_IOR'{},
- orber_test_lib:remote_apply(ClientNode, corba,
- string_to_object,
- ["corbaname::1.2@"++IP++":"++
- integer_to_list(ServerPort)++"/NameService#mamba"])),
-
- ?match({'external', {_IP, _Port, _ObjectKey, _Counter, _TP,
- #host_data{protocol = ssl,
- ssl_data = #'SSLIOP_SSL'{port = NATSSLServerPort}}}},
- iop_ior:get_key(IOR1)),
- ?match(ok, orber_test_lib:remote_apply(ServerNode, orber_test_lib,
- uninstall_test_data,
- [ssl])),
- ?match(ok,
- orber_test_lib:remote_apply(ServerNode, orber,
- remove_listen_interface, [Ref])),
- ok.
-
-nat_iiop_ssl_port_local_old(doc) -> ["SECURE MULTI ORB API tests (SSL depth 1)",
- "Make sure NAT works for SSL"];
-nat_iiop_ssl_port_local_old(suite) -> [];
-nat_iiop_ssl_port_local_old(_Config) ->
-
- IP = orber_test_lib:get_host(),
- ServerOptions = orber_test_lib:get_options_old(iiop_ssl, server,
- 1, [{iiop_ssl_port, 0},
- {flags,
- (?ORB_ENV_LOCAL_INTERFACE bor
- ?ORB_ENV_ENABLE_NAT)},
- {ip_address, IP}]),
- ClientOptions = orber_test_lib:get_options_old(iiop_ssl, client,
- 1, [{iiop_ssl_port, 0}]),
- {ok, ServerNode, _ServerHost} =
- ?match({ok,_,_}, orber_test_lib:js_node(ServerOptions)),
- ServerPort = orber_test_lib:remote_apply(ServerNode, orber, iiop_port, []),
- SSLServerPort = orber_test_lib:remote_apply(ServerNode, orber, iiop_ssl_port, []),
- NATSSLServerPort = SSLServerPort+1,
- {ok, Ref} = ?match({ok, _},
- orber_test_lib:remote_apply(ServerNode, orber,
- add_listen_interface,
- [IP, ssl, NATSSLServerPort])),
- orber_test_lib:remote_apply(ServerNode, orber_env, configure_override,
- [nat_iiop_ssl_port,
- {local, NATSSLServerPort, [{NATSSLServerPort, NATSSLServerPort}]}]),
-
- {ok, ClientNode, _ClientHost} =
- ?match({ok,_,_}, orber_test_lib:js_node(ClientOptions)),
- ?match(ok, orber_test_lib:remote_apply(ServerNode, orber_test_lib,
- install_test_data,
- [ssl])),
-
- IOR1 = ?match(#'IOP_IOR'{},
- orber_test_lib:remote_apply(ClientNode, corba,
- string_to_object,
- ["corbaname::1.2@"++IP++":"++
- integer_to_list(ServerPort)++"/NameService#mamba"])),
-
- ?match({'external', {_IP, _Port, _ObjectKey, _Counter, _TP,
- #host_data{protocol = ssl,
- ssl_data = #'SSLIOP_SSL'{port = NATSSLServerPort}}}},
- iop_ior:get_key(IOR1)),
- ?match(ok, orber_test_lib:remote_apply(ServerNode, orber_test_lib,
- uninstall_test_data,
- [ssl])),
- ?match(ok,
- orber_test_lib:remote_apply(ServerNode, orber,
- remove_listen_interface, [Ref])),
- ok.
-
nat_iiop_ssl_port(doc) -> ["SECURE MULTI ORB API tests (SSL depth 1)",
"Make sure NAT works for SSL"];
diff --git a/lib/os_mon/src/memsup.erl b/lib/os_mon/src/memsup.erl
index a1b8591c8c..b178732fae 100644
--- a/lib/os_mon/src/memsup.erl
+++ b/lib/os_mon/src/memsup.erl
@@ -721,20 +721,19 @@ reply(Pending, MemUsage, SysMemUsage) ->
%% get_memory_usage(OS) -> {Alloc, Total}
%% Darwin:
-%% Uses vm_stat command. This appears to lie about the page size in
-%% Mac OS X 10.2.2 - the pages given are based on 4000 bytes, but
-%% the vm_stat command tells us that it is 4096...
+%% Uses vm_stat command.
get_memory_usage({unix,darwin}) ->
Str1 = os:cmd("/usr/bin/vm_stat"),
-
- {[Free], Str2} = fread_value("Pages free:~d.", Str1),
- {[Active], Str3} = fread_value("Pages active:~d.", Str2),
- {[Inactive], Str4} = fread_value("Pages inactive:~d.", Str3),
- {[_], Str5} = fread_value("Pages speculative:~d.", Str4),
+ PageSize = 4096,
+
+ {[Free], Str2} = fread_value("Pages free:~d.", Str1),
+ {[Active], Str3} = fread_value("Pages active:~d.", Str2),
+ {[Inactive], Str4} = fread_value("Pages inactive:~d.", Str3),
+ {[Speculative], Str5} = fread_value("Pages speculative:~d.", Str4),
{[Wired], _} = fread_value("Pages wired down:~d.", Str5),
- NMemUsed = (Wired + Active + Inactive) * 4000,
- NMemTotal = NMemUsed + Free * 4000,
+ NMemUsed = (Wired + Active + Inactive) * PageSize,
+ NMemTotal = NMemUsed + (Free + Speculative) * PageSize,
{NMemUsed,NMemTotal};
%% FreeBSD: Look in /usr/include/sys/vmmeter.h for the format of struct
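The patch above fixes the Darwin page size (4096 bytes instead of 4000) and counts speculative pages as free memory. A hedged, self-contained sketch of the same vm_stat arithmetic (the regexp helper stands in for the fread_value/2 helper, which is not shown in this hunk; it only works where /usr/bin/vm_stat exists):

    -module(darwin_mem).
    -export([usage/0]).

    %% Returns {UsedBytes, TotalBytes} derived from vm_stat output.
    usage() ->
        Out = os:cmd("/usr/bin/vm_stat"),
        PageSize = 4096,
        Free        = value("Pages free", Out),
        Active      = value("Pages active", Out),
        Inactive    = value("Pages inactive", Out),
        Speculative = value("Pages speculative", Out),
        Wired       = value("Pages wired down", Out),
        Used  = (Wired + Active + Inactive) * PageSize,
        Total = Used + (Free + Speculative) * PageSize,
        {Used, Total}.

    %% Pick the integer that follows "Label:" in the vm_stat output.
    value(Label, Out) ->
        {match, [Digits]} =
            re:run(Out, Label ++ ":\\s*(\\d+)", [{capture, [1], list}]),
        list_to_integer(Digits).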
diff --git a/lib/public_key/asn1/OTP-PKIX.asn1 b/lib/public_key/asn1/OTP-PKIX.asn1
index 911a156d6c..8d3c76adf5 100644
--- a/lib/public_key/asn1/OTP-PKIX.asn1
+++ b/lib/public_key/asn1/OTP-PKIX.asn1
@@ -252,7 +252,17 @@ domainComponent ATTRIBUTE-TYPE-AND-VALUE-CLASS ::= {
emailAddress ATTRIBUTE-TYPE-AND-VALUE-CLASS ::= {
ID id-emailAddress
- TYPE EmailAddress }
+ TYPE EmailAddress } -- this is currently not used when decoding
+ -- The decoding and the mapping between ID and Type are done in the
+ -- module pubkey_cert_records via the function attribute_type.
+ -- To be more forgiving, and compatible with other SSL implementations
+ -- that sometimes accept incorrect certificates, we define and use
+ -- the type below instead of EmailAddress when decoding.
+
+ OTP-emailAddress ::= CHOICE {
+ ia5String IA5String (SIZE (1..255)),
+ utf8String UTF8String (SIZE (1..255))
+}
--
-- Signature and Public Key Algorithms
diff --git a/lib/public_key/doc/src/using_public_key.xml b/lib/public_key/doc/src/using_public_key.xml
index 5d9f1536d9..450bd7e35f 100644
--- a/lib/public_key/doc/src/using_public_key.xml
+++ b/lib/public_key/doc/src/using_public_key.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="iso-8859-1" ?>
+<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE chapter SYSTEM "chapter.dtd">
<chapter>
@@ -90,7 +90,7 @@
[{'RSAPrivateKey',&lt;&lt;224,108,117,203,152,40,15,77,128,126,
221,195,154,249,85,208,202,251,109,
119,120,57,29,89,19,9,...&gt;&gt;,
- {"DES-EDE3-CBC",&lt;&lt;"k�e��p�L"&gt;&gt;}}]
+ {"DES-EDE3-CBC",&lt;&lt;"kÙeø¼pµL"&gt;&gt;}}]
</code>
@@ -350,7 +350,7 @@ ok</code>
<p> or </p>
- <code>1> PemBin = public_key:pem_entry_encode('SubjectPublicKeyInfo', RSAPubKey).
+ <code>1> PemEntry = public_key:pem_entry_encode('SubjectPublicKeyInfo', RSAPubKey).
{'SubjectPublicKeyInfo', &lt;&lt;48,92...&gt;&gt;, not_encrypted}
2> PemBin = public_key:pem_encode([PemEntry]).
diff --git a/lib/public_key/src/pubkey_cert_records.erl b/lib/public_key/src/pubkey_cert_records.erl
index 0449129809..fdd89aa70d 100644
--- a/lib/public_key/src/pubkey_cert_records.erl
+++ b/lib/public_key/src/pubkey_cert_records.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -67,6 +67,15 @@ transform(#'AttributeTypeAndValue'{type=Id,value=Value0} = ATAV, Func) ->
{ok, {printableString, ASCCI}} ->
{ok, ASCCI}
end;
+ 'EmailAddress' when Func == decode ->
+ %% Workaround for certificates that break the ASN.1 spec
+ %% by encoding emailAddress as utf8
+ case 'OTP-PUB-KEY':Func('OTP-emailAddress', Value0) of
+ {ok, {utf8String, Utf8Value}} ->
+ {ok, unicode:characters_to_list(Utf8Value)};
+ {ok, {ia5String, Ia5Value}} ->
+ {ok, Ia5Value}
+ end;
Type when is_atom(Type) -> 'OTP-PUB-KEY':Func(Type, Value0);
_UnknownType -> {ok, Value0}
end,
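With the new 'EmailAddress' decode clause, certificates that wrongly encode emailAddress as UTF8String now decode to a plain string instead of failing. A hedged sketch of inspecting such an attribute through the public API (module and function names are invented; the records and OID macro come from public_key.hrl):

    -module(cert_email).
    -export([subject_emails/1]).

    -include_lib("public_key/include/public_key.hrl").

    %% List every emailAddress attribute value found in the subject of a
    %% DER-encoded certificate.
    subject_emails(DerCert) when is_binary(DerCert) ->
        OTPCert = public_key:pkix_decode_cert(DerCert, otp),
        TBS = OTPCert#'OTPCertificate'.tbsCertificate,
        {rdnSequence, RDNs} = TBS#'OTPTBSCertificate'.subject,
        [Value || #'AttributeTypeAndValue'{type = ?'id-emailAddress',
                                           value = Value}
                      <- lists:flatten(RDNs)].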
diff --git a/lib/public_key/test/public_key_SUITE.erl b/lib/public_key/test/public_key_SUITE.erl
index f8d167e770..d3e9bf7cf6 100644
--- a/lib/public_key/test/public_key_SUITE.erl
+++ b/lib/public_key/test/public_key_SUITE.erl
@@ -41,7 +41,7 @@ all() ->
{group, ssh_public_key_decode_encode},
encrypt_decrypt,
{group, sign_verify},
- pkix, pkix_countryname, pkix_path_validation,
+ pkix, pkix_countryname, pkix_emailaddress, pkix_path_validation,
pkix_iso_rsa_oid, pkix_iso_dsa_oid].
groups() ->
@@ -586,9 +586,9 @@ pkix(Config) when is_list(Config) ->
%%--------------------------------------------------------------------
pkix_countryname() ->
- [{doc, "Test workaround for certs that code x509countryname as utf8"}].
+ [{doc, "Test workaround for certs that code x509countryname as utf8"}].
pkix_countryname(Config) when is_list(Config) ->
- Cert = incorrect_pkix_cert(),
+ Cert = incorrect_countryname_pkix_cert(),
OTPCert = public_key:pkix_decode_cert(Cert, otp),
TBSCert = OTPCert#'OTPCertificate'.tbsCertificate,
Issuer = TBSCert#'OTPTBSCertificate'.issuer,
@@ -597,6 +597,18 @@ pkix_countryname(Config) when is_list(Config) ->
check_countryname(Subj).
%%--------------------------------------------------------------------
+pkix_emailaddress() ->
+ [{doc, "Test workaround for certs that code emailAddress as utf8"}].
+pkix_emailaddress(Config) when is_list(Config) ->
+ Cert = incorrect_emailaddress_pkix_cert(),
+ OTPCert = public_key:pkix_decode_cert(Cert, otp),
+ TBSCert = OTPCert#'OTPCertificate'.tbsCertificate,
+ Issuer = TBSCert#'OTPTBSCertificate'.issuer,
+ Subj = TBSCert#'OTPTBSCertificate'.subject,
+ check_emailaddress(Issuer),
+ check_emailaddress(Subj).
+
+%%--------------------------------------------------------------------
pkix_path_validation() ->
[{doc, "Test PKIX path validation"}].
pkix_path_validation(Config) when is_list(Config) ->
@@ -710,10 +722,23 @@ do_check_countryname([#'AttributeTypeAndValue'{type = ?'id-at-countryName',
ok;
do_check_countryname([#'AttributeTypeAndValue'{type = ?'id-at-countryName',
value = Value}|_]) ->
- ct:fail({incorrect_cuntry_name, Value});
+ ct:fail({incorrect_country_name, Value});
do_check_countryname([_| Rest]) ->
do_check_countryname(Rest).
+check_emailaddress({rdnSequence,DirName}) ->
+ do_check_emailaddress(DirName).
+do_check_emailaddress([]) ->
+ ok;
+do_check_emailaddress([#'AttributeTypeAndValue'{type = ?'id-emailAddress',
+ value = "invalid@email.com"}|_]) ->
+ ok;
+do_check_emailaddress([#'AttributeTypeAndValue'{type = ?'id-emailAddress',
+ value = Value}|_]) ->
+ ct:fail({incorrect_email_address, Value});
+do_check_emailaddress([_| Rest]) ->
+ do_check_emailaddress(Rest).
+
check_entry_type(#'DSAPrivateKey'{}, 'DSAPrivateKey') ->
true;
check_entry_type(#'RSAPrivateKey'{}, 'RSAPrivateKey') ->
@@ -732,5 +757,8 @@ check_entry_type(_,_) ->
strip_ending_newlines(Bin) ->
string:strip(binary_to_list(Bin), right, 10).
-incorrect_pkix_cert() ->
+incorrect_countryname_pkix_cert() ->
<<48,130,5,186,48,130,4,162,160,3,2,1,2,2,7,7,250,61,63,6,140,137,48,13,6,9,42, 134,72,134,247,13,1,1,5,5,0,48,129,220,49,11,48,9,6,3,85,4,6,19,2,85,83,49, 16,48,14,6,3,85,4,8,19,7,65,114,105,122,111,110,97,49,19,48,17,6,3,85,4,7,19, 10,83,99,111,116,116,115,100,97,108,101,49,37,48,35,6,3,85,4,10,19,28,83,116, 97,114,102,105,101,108,100,32,84,101,99,104,110,111,108,111,103,105,101,115, 44,32,73,110,99,46,49,57,48,55,6,3,85,4,11,19,48,104,116,116,112,58,47,47,99, 101,114,116,105,102,105,99,97,116,101,115,46,115,116,97,114,102,105,101,108, 100,116,101,99,104,46,99,111,109,47,114,101,112,111,115,105,116,111,114,121, 49,49,48,47,6,3,85,4,3,19,40,83,116,97,114,102,105,101,108,100,32,83,101,99, 117,114,101,32,67,101,114,116,105,102,105,99,97,116,105,111,110,32,65,117, 116,104,111,114,105,116,121,49,17,48,15,6,3,85,4,5,19,8,49,48,54,56,56,52,51, 53,48,30,23,13,49,48,49,48,50,51,48,49,51,50,48,53,90,23,13,49,50,49,48,50, 51,48,49,51,50,48,53,90,48,122,49,11,48,9,6,3,85,4,6,12,2,85,83,49,11,48,9,6, 3,85,4,8,12,2,65,90,49,19,48,17,6,3,85,4,7,12,10,83,99,111,116,116,115,100, 97,108,101,49,38,48,36,6,3,85,4,10,12,29,83,112,101,99,105,97,108,32,68,111, 109,97,105,110,32,83,101,114,118,105,99,101,115,44,32,73,110,99,46,49,33,48, 31,6,3,85,4,3,12,24,42,46,108,111,103,105,110,46,115,101,99,117,114,101,115, 101,114,118,101,114,46,110,101,116,48,130,1,34,48,13,6,9,42,134,72,134,247, 13,1,1,1,5,0,3,130,1,15,0,48,130,1,10,2,130,1,1,0,185,136,240,80,141,36,124, 245,182,130,73,19,188,74,166,117,72,228,185,209,43,129,244,40,44,193,231,11, 209,12,234,88,43,142,1,162,48,122,17,95,230,105,171,131,12,147,46,204,36,80, 250,171,33,253,35,62,83,22,71,212,186,141,14,198,89,89,121,204,224,122,246, 127,110,188,229,162,67,95,6,74,231,127,99,131,7,240,85,102,203,251,50,58,58, 104,245,103,181,183,134,32,203,121,232,54,32,188,139,136,112,166,126,14,91, 223,153,172,164,14,61,38,163,208,215,186,210,136,213,143,70,147,173,109,217, 250,169,108,31,211,104,238,103,93,182,59,165,43,196,189,218,241,30,148,240, 109,90,69,176,194,52,116,173,151,135,239,10,209,179,129,192,102,75,11,25,168, 223,32,174,84,223,134,70,167,55,172,143,27,130,123,226,226,7,34,142,166,39, 48,246,96,231,150,84,220,106,133,193,55,95,159,227,24,249,64,36,1,142,171,16, 202,55,126,7,156,15,194,22,116,53,113,174,104,239,203,120,45,131,57,87,84, 163,184,27,83,57,199,91,200,34,43,98,61,180,144,76,65,170,177,2,3,1,0,1,163, 130,1,224,48,130,1,220,48,15,6,3,85,29,19,1,1,255,4,5,48,3,1,1,0,48,29,6,3, 85,29,37,4,22,48,20,6,8,43,6,1,5,5,7,3,1,6,8,43,6,1,5,5,7,3,2,48,14,6,3,85, 29,15,1,1,255,4,4,3,2,5,160,48,56,6,3,85,29,31,4,49,48,47,48,45,160,43,160, 41,134,39,104,116,116,112,58,47,47,99,114,108,46,115,116,97,114,102,105,101, 108,100,116,101,99,104,46,99,111,109,47,115,102,115,50,45,48,46,99,114,108, 48,83,6,3,85,29,32,4,76,48,74,48,72,6,11,96,134,72,1,134,253,110,1,7,23,2,48, 57,48,55,6,8,43,6,1,5,5,7,2,1,22,43,104,116,116,112,115,58,47,47,99,101,114, 116,115,46,115,116,97,114,102,105,101,108,100,116,101,99,104,46,99,111,109, 47,114,101,112,111,115,105,116,111,114,121,47,48,129,141,6,8,43,6,1,5,5,7,1, 1,4,129,128,48,126,48,42,6,8,43,6,1,5,5,7,48,1,134,30,104,116,116,112,58,47, 47,111,99,115,112,46,115,116,97,114,102,105,101,108,100,116,101,99,104,46,99, 111,109,47,48,80,6,8,43,6,1,5,5,7,48,2,134,68,104,116,116,112,58,47,47,99, 101,114,116,105,102,105,99,97,116,101,115,46,115,116,97,114,102,105,101,108, 100,116,101,99,104,46,99,111,109,47,114,101,112,111,115,105,116,111,114,121, 47,115,102,95,105,110,116,101,114,109,101,100,105,97,116,101,46,99,114,116, 
48,31,6,3,85,29,35,4,24,48,22,128,20,73,75,82,39,209,27,188,242,161,33,106, 98,123,81,66,122,138,215,213,86,48,59,6,3,85,29,17,4,52,48,50,130,24,42,46, 108,111,103,105,110,46,115,101,99,117,114,101,115,101,114,118,101,114,46,110, 101,116,130,22,108,111,103,105,110,46,115,101,99,117,114,101,115,101,114,118, 101,114,46,110,101,116,48,29,6,3,85,29,14,4,22,4,20,138,233,191,208,157,203, 249,85,242,239,20,195,48,10,148,49,144,101,255,116,48,13,6,9,42,134,72,134, 247,13,1,1,5,5,0,3,130,1,1,0,82,31,121,162,49,50,143,26,167,202,143,61,71, 189,201,199,57,81,122,116,90,192,88,24,102,194,174,48,157,74,27,87,210,223, 253,93,3,91,150,109,120,1,110,27,11,200,198,141,222,246,14,200,71,105,41,138, 13,114,122,106,63,17,197,181,234,121,61,89,74,65,41,231,248,219,129,83,176, 219,55,107,55,211,112,98,38,49,69,77,96,221,108,123,152,12,210,159,157,141, 43,226,55,187,129,3,82,49,136,66,81,196,91,234,196,10,82,48,6,80,163,83,71, 127,102,177,93,209,129,26,104,2,84,24,255,248,161,3,244,169,234,92,122,110, 43,4,17,113,185,235,108,219,210,236,132,216,177,227,17,169,58,162,159,182, 162,93,160,229,200,9,163,229,110,121,240,168,232,14,91,214,188,196,109,210, 164,222,0,109,139,132,113,91,16,118,173,178,176,80,132,34,41,199,51,206,250, 224,132,60,115,192,94,107,163,219,212,226,225,65,169,148,108,213,46,174,173, 103,110,189,229,166,149,254,31,51,44,144,108,187,182,11,251,201,206,86,138, 208,59,51,86,132,235,81,225,88,34,190,8,184>>.
+
+incorrect_emailaddress_pkix_cert() ->
+ <<48,130,3,74,48,130,2,50,2,9,0,133,49,203,25,198,156,252,230,48,13,6,9,42,134, 72,134,247,13,1,1,5,5,0,48,103,49,11,48,9,6,3,85,4,6,19,2,65,85,49,19,48,17, 6,3,85,4,8,12,10,83,111,109,101,45,83,116,97,116,101,49,33,48,31,6,3,85,4,10, 12,24,73,110,116,101,114,110,101,116,32,87,105,100,103,105,116,115,32,80,116, 121,32,76,116,100,49,32,48,30,6,9,42,134,72,134,247,13,1,9,1,12,17,105,110, 118,97,108,105,100,64,101,109,97,105,108,46,99,111,109,48,30,23,13,49,51,49, 49,48,55,50,48,53,54,49,56,90,23,13,49,52,49,49,48,55,50,48,53,54,49,56,90, 48,103,49,11,48,9,6,3,85,4,6,19,2,65,85,49,19,48,17,6,3,85,4,8,12,10,83,111, 109,101,45,83,116,97,116,101,49,33,48,31,6,3,85,4,10,12,24,73,110,116,101, 114,110,101,116,32,87,105,100,103,105,116,115,32,80,116,121,32,76,116,100,49, 32,48,30,6,9,42,134,72,134,247,13,1,9,1,12,17,105,110,118,97,108,105,100,64, 101,109,97,105,108,46,99,111,109,48,130,1,34,48,13,6,9,42,134,72,134,247,13, 1,1,1,5,0,3,130,1,15,0,48,130,1,10,2,130,1,1,0,190,243,49,213,219,60,232,105, 1,127,126,9,130,15,60,190,78,100,148,235,246,223,21,91,238,200,251,84,55,212, 78,32,120,61,85,172,0,144,248,5,165,29,143,79,64,178,51,153,203,76,115,238, 192,49,173,37,121,203,89,62,157,13,181,166,30,112,154,40,202,140,104,211,157, 73,244,9,78,236,70,153,195,158,233,141,42,238,2,143,160,225,249,27,30,140, 151,176,43,211,87,114,164,108,69,47,39,195,123,185,179,219,28,218,122,53,83, 77,48,81,184,14,91,243,12,62,146,86,210,248,228,171,146,225,87,51,146,155, 116,112,238,212,36,111,58,41,67,27,6,61,61,3,84,150,126,214,121,57,38,12,87, 121,67,244,37,45,145,234,131,115,134,58,194,5,36,166,52,59,229,32,47,152,80, 237,190,58,182,248,98,7,165,198,211,5,31,231,152,116,31,108,71,218,64,188, 178,143,27,167,79,15,112,196,103,116,212,65,197,94,37,4,132,103,91,217,73, 223,207,185,7,153,221,240,232,31,44,102,108,82,83,56,242,210,214,74,71,246, 177,217,148,227,220,230,4,176,226,74,194,37,2,3,1,0,1,48,13,6,9,42,134,72, 134,247,13,1,1,5,5,0,3,130,1,1,0,89,247,141,154,173,123,123,203,143,85,28,79, 73,37,164,6,17,89,171,224,149,22,134,17,198,146,158,192,241,41,253,58,230, 133,71,189,43,66,123,88,15,242,119,227,249,99,137,61,200,54,161,0,177,167, 169,114,80,148,90,22,97,78,162,181,75,93,209,116,245,46,81,232,64,157,93,136, 52,57,229,113,197,218,113,93,42,161,213,104,205,137,30,144,183,58,10,98,47, 227,177,96,40,233,98,150,209,217,68,22,221,133,27,161,152,237,46,36,179,59, 172,97,134,194,205,101,137,71,192,57,153,20,114,27,173,233,166,45,56,0,61, 205,45,202,139,7,132,103,248,193,157,184,123,43,62,172,236,110,49,62,209,78, 249,83,219,133,1,213,143,73,174,16,113,143,189,41,84,60,128,222,30,177,104, 134,220,52,239,171,76,59,176,36,113,176,214,118,16,44,235,21,167,199,216,200, 76,219,142,248,13,70,145,205,216,230,226,148,97,223,216,179,68,209,222,63, 140,137,24,164,192,149,194,79,119,247,75,159,49,116,70,241,70,116,11,40,119, 176,157,36,160,102,140,255,34,248,25,231,136,59>>.
diff --git a/lib/runtime_tools/src/observer_backend.erl b/lib/runtime_tools/src/observer_backend.erl
index 670e216d97..202129c61a 100644
--- a/lib/runtime_tools/src/observer_backend.erl
+++ b/lib/runtime_tools/src/observer_backend.erl
@@ -77,8 +77,8 @@ sys_info() ->
| MemInfo].
alloc_info() ->
- {_,_,AllocTypes,_} = erlang:system_info(allocator),
- try erlang:system_info({allocator_sizes,AllocTypes}) of
+ AlcuAllocs = erlang:system_info(alloc_util_allocators),
+ try erlang:system_info({allocator_sizes, AlcuAllocs}) of
Allocators -> Allocators
catch _:_ -> []
end.
diff --git a/lib/sasl/doc/src/error_logging.xml b/lib/sasl/doc/src/error_logging.xml
index 5707bc4d69..f624fed1c7 100644
--- a/lib/sasl/doc/src/error_logging.xml
+++ b/lib/sasl/doc/src/error_logging.xml
@@ -173,8 +173,9 @@
<section>
<title>Report Browser</title>
<p>The report browser is used to browse and format error reports
- written by the error logger handler <c>error_logger_mf_h</c>.</p>
- <p>The <c>error_logger_mf_h</c> handler writes all reports to a
+ written by the error logger handler <c>log_mf_h</c> defined in
+ <c>stdlib</c>.</p>
+ <p>The <c>log_mf_h</c> handler writes all reports to a
report logging directory. This directory is specified when
configuring the SASL application.</p>
<p>If the report browser is
diff --git a/lib/sasl/doc/src/sasl_app.xml b/lib/sasl/doc/src/sasl_app.xml
index 446baccb08..af9e73027f 100644
--- a/lib/sasl/doc/src/sasl_app.xml
+++ b/lib/sasl/doc/src/sasl_app.xml
@@ -51,7 +51,7 @@
<section>
<title>Error Logger Event Handlers</title>
- <p>The following error logger event handlers are defined in
+ <p>The following error logger event handlers are used by
the SASL application.</p>
<taglist>
<tag><c>sasl_report_tty_h</c></tag>
@@ -62,11 +62,10 @@
<item>
<p>Formats and writes <em>supervisor reports</em>, <em>crash report</em> and <em>progress report</em> to a single file.</p>
</item>
- <tag><c>error_logger_mf_h</c></tag>
+ <tag><c>log_mf_h</c></tag>
<item>
<p>This error logger writes <em>all</em> events sent to
- the error logger to disk. It installs the <c>log_mf_h</c>
- event handler in the <c>error_logger</c> process.</p>
+ the error logger to disk.</p>
<p>To activate this event handler, the following three sasl
configuration parameters must be set:
<c>error_logger_mf_dir</c>, <c>error_logger_mf_maxbytes</c>
@@ -109,18 +108,18 @@
<item>
<p>Specifies in which directory the files are stored. If this
parameter is undefined or <c>false</c>,
- the <c>error_logger_mf_h</c> is not installed.</p>
+ the <c>log_mf_h</c> handler is not installed.</p>
</item>
<tag><c><![CDATA[error_logger_mf_maxbytes = integer() <optional>]]></c></tag>
<item>
<p>Specifies how large each individual file can be. If this
- parameter is undefined, the <c>error_logger_mf_h</c> is not
+ parameter is undefined, the <c>log_mf_h</c> handler is not
installed.</p>
</item>
<tag><c><![CDATA[error_logger_mf_maxfiles = 0<integer()<256 <optional>]]></c></tag>
<item>
<p>Specifies how many files are used. If this parameter is
- undefined, the <c>error_logger_mf_h</c> is not installed.</p>
+ undefined, the <c>log_mf_h</c> handler is not installed.</p>
</item>
<tag><c><![CDATA[overload_max_intensity = float() > 0 <optional>]]></c></tag>
<item>
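As the parameters above state, the log_mf_h handler is installed only when all three error_logger_mf_* parameters are set. A hedged sys.config fragment (directory, file size, and file count are placeholder values):

    %% Hedged example sys.config entry enabling the log_mf_h handler.
    [{sasl,
      [{error_logger_mf_dir, "/var/log/myapp"},    % where the report files go
       {error_logger_mf_maxbytes, 10485760},       % 10 MB per file
       {error_logger_mf_maxfiles, 10}]}].          % keep at most 10 files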
diff --git a/lib/sasl/doc/src/systools.xml b/lib/sasl/doc/src/systools.xml
index 84fed0a25f..284047163e 100644
--- a/lib/sasl/doc/src/systools.xml
+++ b/lib/sasl/doc/src/systools.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>1996</year>
- <year>2011</year>
+ <year>2013</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -139,7 +139,7 @@
<type>
<v>Name = string()</v>
<v>Opt = src_tests | {path,[Dir]} | local | {variables,[Var]} | exref |
- {exref,[App]}] | silent | {outdir,Dir} | no_warn_sasl |
+ {exref,[App]}] | silent | {outdir,Dir} | no_dot_erlang | no_warn_sasl |
warnings_as_errors</v>
<v>&nbsp;Dir = string()</v>
<v>&nbsp;Var = {VarName,Prefix}</v>
@@ -252,6 +252,8 @@
<c>Module:format_error(Error)</c>.</p>
<p>If the option <c>warnings_as_errors</c> is provided, warnings
are treated as errors.</p>
+ <p>If the option <c>no_dot_erlang</c> is provided, the instruction to
+ load the <c>.erlang</c> file during boot is <em>NOT</em> included.</p>
</desc>
</func>
<func>
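A hedged shell sketch of the new option described above ("myrel" stands for a release resource file myrel.rel in the current directory); with no_dot_erlang the generated boot script omits the {apply,{c,erlangrc,[]}} instruction, as the systools_SUITE test added later in this diff checks:

    %% Generate a boot script that skips loading $HOME/.erlang at boot.
    ok = systools:make_script("myrel", [no_warn_sasl, no_dot_erlang]).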
diff --git a/lib/sasl/src/sasl.erl b/lib/sasl/src/sasl.erl
index 989f99dc82..fdea6da13e 100644
--- a/lib/sasl/src/sasl.erl
+++ b/lib/sasl/src/sasl.erl
@@ -82,8 +82,8 @@ get_mf() ->
MaxB = get_mf_maxb(),
MaxF = get_mf_maxf(),
case {Dir, MaxB, MaxF} of
- {undefined,undefined,undefined} = R ->
- R;
+ {undefined,undefined,undefined} ->
+ undefined;
{undefined,_,_} ->
exit({missing_config, {sasl, error_logger_mf_dir}});
{_,undefined,_} ->
diff --git a/lib/sasl/src/systools_make.erl b/lib/sasl/src/systools_make.erl
index b2e95fdbee..bab88552f9 100644
--- a/lib/sasl/src/systools_make.erl
+++ b/lib/sasl/src/systools_make.erl
@@ -1135,10 +1135,10 @@ generate_script(Output, Release, Appls, Flags) ->
load_appl_mods(Appls, Mandatory ++ Preloaded,
PathFlag, Variables) ++
[{path, create_path(Appls, PathFlag, Variables)}] ++
- create_kernel_procs(Appls) ++
- create_load_appls(Appls) ++
- create_start_appls(Appls) ++
- script_end()
+ create_kernel_procs(Appls) ++
+ create_load_appls(Appls) ++
+ create_start_appls(Appls) ++
+ script_end(lists:member(no_dot_erlang, Flags))
},
ScriptFile = Output ++ ".script",
@@ -1229,9 +1229,12 @@ create_load_appls([]) ->
%%______________________________________________________________________
%% The final part of the script.
-script_end() ->
+script_end(false) -> %% Do not skip loading of $HOME/.erlang
[{apply, {c, erlangrc, []}},
- {progress, started}].
+ {progress, started}];
+script_end(true) -> %% Ignore loading of $HOME/.erlang
+ [{progress, started}].
+
%%-----------------------------------------------------------------
%% Function: sort_appls(Appls) -> {ok, Appls'} | throw({error, Error})
@@ -2055,6 +2058,9 @@ cas([no_warn_sasl | Args], X) ->
%%% no_module_tests (kept for backwards compatibility, but ignored) ----
cas([no_module_tests | Args], X) ->
cas(Args, X);
+cas([no_dot_erlang | Args], X) ->
+ cas(Args, X);
+
%%% ERROR --------------------------------------------------------------
cas([Y | Args], X) ->
cas(Args, X++[Y]).
diff --git a/lib/sasl/test/release_handler_SUITE.erl b/lib/sasl/test/release_handler_SUITE.erl
index a56924d5ca..d7369b0ecf 100644
--- a/lib/sasl/test/release_handler_SUITE.erl
+++ b/lib/sasl/test/release_handler_SUITE.erl
@@ -64,7 +64,7 @@ cases() ->
supervisor_which_children_timeout,
release_handler_which_releases, install_release_syntax_check,
upgrade_supervisor, upgrade_supervisor_fail, otp_9864,
- otp_10463_upgrade_script_regexp].
+ otp_10463_upgrade_script_regexp, no_dot_erlang].
groups() ->
[{release,[],
@@ -1709,6 +1709,37 @@ otp_10463_upgrade_script_regexp(_Config) ->
release_handler:upgrade_script(kernel,code:lib_dir(kernel)),
ok.
+no_dot_erlang(Conf) ->
+ PrivDir = priv_dir(Conf),
+ {ok, OrigWd} = file:get_cwd(),
+ try
+ ok = file:set_cwd(PrivDir),
+
+ Erl = filename:join([code:root_dir(),"bin","erl"]),
+ Args = " -noinput -run io put_chars \"TESTOK\" -run erlang halt",
+ ok = file:write_file(".erlang", <<"io:put_chars(\"DOT_ERLANG_READ\\n\").\n">>),
+
+ case os:cmd(Erl ++ Args) of
+ "DOT_ERLANG_READ" ++ _ -> ok;
+ Other1 ->
+ io:format("Failed: ~s~n",[Erl ++ Args]),
+ io:format("Expected: ~s ++ _~n",["DOT_ERLANG_READ "]),
+ io:format("Got: ~s~n",[Other1]),
+ exit({failed_to_start, test_error})
+ end,
+ NO_DOT_ERL = " -boot no_dot_erlang",
+ case os:cmd(Erl ++ NO_DOT_ERL ++ Args) of
+ "TESTOK" ++ _ -> ok;
+ Other2 ->
+ io:format("Failed: ~s~n",[Erl ++ Args]),
+ io:format("Expected: ~s~n",["TESTOK"]),
+ io:format("Got: ~s~n",[Other2]),
+ exit({failed_to_start, no_dot_erlang})
+ end
+ after
+ _ = file:delete(".erlang"),
+ ok = file:set_cwd(OrigWd)
+ end.
%%%=================================================================
%%% Miscellaneous functions
diff --git a/lib/sasl/test/systools_SUITE.erl b/lib/sasl/test/systools_SUITE.erl
index 3921b2d3bb..9efc8f8392 100644
--- a/lib/sasl/test/systools_SUITE.erl
+++ b/lib/sasl/test/systools_SUITE.erl
@@ -43,6 +43,7 @@
-export([script_options/1, normal_script/1, unicode_script/1,
unicode_script/2, no_mod_vsn_script/1,
wildcard_script/1, variable_script/1, no_sasl_script/1,
+ no_dot_erlang_script/1,
abnormal_script/1, src_tests_script/1, crazy_script/1,
included_script/1, included_override_script/1,
included_fail_script/1, included_bug_script/1, exref_script/1,
@@ -79,7 +80,8 @@ groups() ->
[{script, [],
[script_options, normal_script, unicode_script, no_mod_vsn_script,
wildcard_script, variable_script, abnormal_script,
- no_sasl_script, src_tests_script, crazy_script,
+ no_sasl_script, no_dot_erlang_script,
+ src_tests_script, crazy_script,
included_script, included_override_script,
included_fail_script, included_bug_script, exref_script,
otp_3065_circular_dependenies, included_and_used_sort_script]},
@@ -457,6 +459,34 @@ no_sasl_script(Config) when is_list(Config) ->
ok = file:set_cwd(OldDir),
ok.
+%% make_script: Create script with no_dot_erlang. Check script contents.
+no_dot_erlang_script(Config) when is_list(Config) ->
+ {ok, OldDir} = file:get_cwd(),
+
+ {LatestDir, LatestName} = create_script(latest1_no_sasl,Config),
+
+ DataDir = filename:absname(?copydir),
+ LibDir = [fname([DataDir, d_normal, lib])],
+ P = [fname([LibDir, '*', ebin]),
+ fname([DataDir, lib, kernel, ebin]),
+ fname([DataDir, lib, stdlib, ebin]),
+ fname([DataDir, lib, sasl, ebin])],
+
+ ok = file:set_cwd(LatestDir),
+
+ {ok, _ , []} =
+ systools:make_script(LatestName,[{path, P},silent, no_warn_sasl]),
+ {ok, [{_, _, LoadDotErlang}]} = read_script_file(LatestName),
+ [erlangrc] = [E || {apply, {c, E, []}} <- LoadDotErlang],
+
+ {ok, _ , []} =
+ systools:make_script(LatestName,[{path, P},silent, no_warn_sasl, no_dot_erlang]),
+ {ok, [{_, _, DoNotLoadDotErlang}]} = read_script_file(LatestName),
+ [] = [E || {apply, {c, E, []}} <- DoNotLoadDotErlang],
+
+ ok = file:set_cwd(OldDir),
+ ok.
+
%% make_script: Do not check date of object file or that source code
%% can be found.
diff --git a/lib/snmp/doc/src/notes.xml b/lib/snmp/doc/src/notes.xml
index 7514c52dda..d213b67052 100644
--- a/lib/snmp/doc/src/notes.xml
+++ b/lib/snmp/doc/src/notes.xml
@@ -34,6 +34,125 @@
<section>
+ <title>SNMP Development Toolkit 4.25</title>
+ <p>Version 4.25 supports code replacement in runtime from/to
+ version 4.24.2, 4.24.1, 4.24, 4.23.1 and 4.23. </p>
+
+ <section>
+ <title>Improvements and new features</title>
+<!--
+ <p>-</p>
+-->
+
+ <list type="bulleted">
+ <item>
+ <p>[agent] Enable SNMP to create missing database directories. </p>
+ <p>Add
+ <seealso marker="snmp_app#db_init_error">
+ {db_init_error, create_db_and_dir}</seealso> option to SNMP
+ <seealso marker="snmp_app#manager_opts_and_types">manager</seealso>
+ and
+ <seealso marker="snmp_app#agent_opts_and_types">agent</seealso>.
+ This allows them to create any missing parent directories for
+ <c>db_dir</c>, rather than treating any missing directories
+ as a fatal error.
+ The default for <c>db_init_error</c>, which is <c>terminate</c>,
+ is unchanged. </p>
+ <p>Steve Vinoski</p>
+ <p>Own Id: OTP-11352</p>
+ </item>
+
+ <item>
+ <p>[manager] Improved handling of unexpected return values from
+ <seealso marker="snmpm_user">snmpm_user</seealso>
+ callback functions. </p>
+ <p>Violations of the documented API (crashes or invalid return
+ values) will now result in an error message. </p>
+ <p>Own Id: OTP-11307</p>
+ </item>
+
+ <item>
+ <p>Add an Audit Trail Log (ATL) conversion block option. </p>
+ <p>It is now possible to request that the Audit Trail Log should
+ be blocked during conversion (<c>log_to_txt</c> or <c>log_to_io</c>).
+ This could be useful when converting an entire large log (when
+ there is a chance it may otherwise wrap during conversion). </p>
+ <p>See
+ agent
+ <seealso marker="snmpa#log_to_txt">log_to_txt</seealso> and
+ <seealso marker="snmpa#log_to_io">log_to_io</seealso> and also
+ manager
+ <seealso marker="snmpm#log_to_txt">log_to_txt</seealso> and
+ <seealso marker="snmpm#log_to_io">log_to_io</seealso>
+ for details. </p>
+ <p>Own Id: OTP-11396</p>
+ <p>Own Id: seq12433</p>
+ </item>
+
+ <item>
+ <p>When converting an Audit Trail Log to text, a corrupt
+ log entry could cause the entire conversion to fail. </p>
+ <p>Also, for a log with sequence numbers, failing to
+ decode a log entry would cause the conversion to fail
+ (not because of the failed decode, but because of the
+ failure to write the error message). </p>
+ <p>Own Id: OTP-11453</p>
+ <p>Aux Id: Seq 12459</p>
+ </item>
+
+ </list>
+
+ </section>
+
+ <section>
+ <title>Fixed Bugs and Malfunctions</title>
+<!--
+ <p>-</p>
+-->
+
+ <list type="bulleted">
+ <item>
+ <p>The wrong block cipher type was used for AES ('aes_cbf128'
+ instead of 'aes_cfb128') when performing AES block
+ encrypt/decrypt, which broke SNMP usmAesCfb128Protocol
+ in both agent and manager. </p>
+ <p>Own Id: OTP-11412</p>
+ </item>
+
+ <item>
+ <p>[manager] When performing the AES encryption, invalid values for
+ the EngineBoots and EngineTime were used. </p>
+ <p>The values of the local agent were used, which would have produced
+ "some" values if an agent was actually running.
+ If not, it would have caused a crash. </p>
+ <p>Own Id: OTP-11413</p>
+ </item>
+
+ </list>
+
+ </section>
+
+ <section>
+ <title>Incompatibilities</title>
+ <p>-</p>
+
+<!--
+ <list type="bulleted">
+ <item>
+ <p>[manager] The old Addr-and-Port based API functions, previously
+ long deprecated and marked for deletion in R16B, has now been
+ removed. </p>
+ <p>Own Id: OTP-10027</p>
+ </item>
+
+ </list>
+-->
+ </section>
+
+ </section> <!-- 4.25 -->
+
+
+ <section>
<title>SNMP Development Toolkit 4.24.2</title>
<p>Version 4.24.2 supports code replacement in runtime from/to
version 4.24.1, 4.24, 4.23.1 and 4.23. </p>
@@ -96,7 +215,7 @@
</list>
-->
</section>
-
+
</section> <!-- 4.24.2 -->
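The changelog entries above introduce a Block argument to the Audit Trail Log conversion functions. A hedged usage sketch with blocking enabled (directory and file names are placeholders based on the defaults mentioned in the snmpa documentation further down):

    %% Convert the agent Audit Trail Log to text while keeping the log
    %% blocked, so it cannot wrap in the middle of the conversion.
    %% Arguments: LogDir, Mibs, OutFile, LogName, LogFile, Block.
    snmp:log_to_txt("./log", [], "./snmpa_log.txt",
                    "snmpa_log", "snmpa.log", true).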
diff --git a/lib/snmp/doc/src/snmp.xml b/lib/snmp/doc/src/snmp.xml
index 3e6610891f..97b479385c 100644
--- a/lib/snmp/doc/src/snmp.xml
+++ b/lib/snmp/doc/src/snmp.xml
@@ -341,8 +341,9 @@
<func>
<name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile) -> ok | {error, Reason}</name>
- <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start) -> ok | {error, Reason}</name>
- <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block | Start) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Block | Stop) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop, Block) -> ok | {error, Reason}</name>
<fsummary>Convert an Audit Trail Log to text format</fsummary>
<type>
<v>LogDir = string()</v>
@@ -352,53 +353,56 @@
<v>LogName = string()</v>
<v>LogFile = string()</v>
<v>Start = Stop = null | datetime() | {local_time,datetime()} | {universal_time,datetime()} </v>
+ <v>Block = boolean()</v>
<v>Reason = term()</v>
</type>
<desc>
- <p>Converts an Audit Trail Log to a readable text file, where
- each item has a trailing TAB character, and any TAB
- character in the body of an item has been replaced by ESC
- TAB.
- </p>
+ <p>Converts an Audit Trail Log to a readable text file, where
+ each item has a trailing TAB character, and any TAB
+ character in the body of an item has been replaced by ESC
+ TAB. </p>
<p>The function can be used on a running system, or by copying
- the entire log directory and calling this function. SNMP
- must be running in order to provide MIB information.
- </p>
+ the entire log directory and calling this function. SNMP
+ must be running in order to provide MIB information. </p>
<p><c>LogDir</c> is the name of the directory where the audit
- trail log is stored.
- <c>Mibs</c> is a list of Mibs to be used. The function uses
- the information in the Mibs to convert for example object
- identifiers to their symbolic name.
- <c>OutFile</c> is the name of the generated text-file.
- <c>LogName</c> is the name of the log,
- <c>LogFile</c> is the name of the log file.
- <c>Start</c> is the start (first) date and time from which
- log events will be converted and
- <c>Stop</c> is the stop (last) date and time to which log
- events will be converted.
- </p>
- <p>The format of an audit trail log text item is as follows:
- </p>
- <p><c>Tag Addr - Community [TimeStamp] Vsn</c><br></br>
- <c>PDU</c></p>
- <p>where <c>Tag</c> is <c>request</c>, <c>response</c>,
- <c>report</c>, <c>trap</c> or <c>inform</c>; Addr is
- <c>IP:Port</c> (or comma space separated list of such);
- <c>Community</c> is the community parameter (SNMP version
- v1 and v2), or <c>SecLevel:"AuthEngineID":"UserName"</c>
- (SNMP v3); <c>TimeStamp</c> is a date and time stamp,
- and <c>Vsn</c> is the SNMP version. <c>PDU</c> is a textual
- version of the protocol data unit. There is a new line
- between <c>Vsn</c> and <c>PDU</c>.</p>
-
+ trail log is stored.
+ <c>Mibs</c> is a list of Mibs to be used. The function uses
+ the information in the Mibs to convert for example object
+ identifiers to their symbolic name.
+ <c>OutFile</c> is the name of the generated text-file.
+ <c>LogName</c> is the name of the log,
+ <c>LogFile</c> is the name of the log file.
+ <c>Start</c> is the start (first) date and time from which
+ log events will be converted and
+ <c>Stop</c> is the stop (last) date and time to which log
+ events will be converted.
+ The <c>Block</c> argument indicates if the log should be blocked
+ during conversion. This could be useful when converting large
+ logs (when otherwise the log could wrap during conversion).
+ Defaults to <c>true</c>.
+ </p>
+ <p>The format of an audit trail log text item is as follows: </p>
+ <p><c>Tag Addr - Community [TimeStamp] Vsn</c><br></br>
+ <c>PDU</c></p>
+ <p>where <c>Tag</c> is <c>request</c>, <c>response</c>,
+ <c>report</c>, <c>trap</c> or <c>inform</c>; Addr is
+ <c>IP:Port</c> (or comma space separated list of such);
+ <c>Community</c> is the community parameter (SNMP version
+ v1 and v2), or <c>SecLevel:"AuthEngineID":"UserName"</c>
+ (SNMP v3); <c>TimeStamp</c> is a date and time stamp,
+ and <c>Vsn</c> is the SNMP version. <c>PDU</c> is a textual
+ version of the protocol data unit. There is a new line
+ between <c>Vsn</c> and <c>PDU</c>.</p>
+
<marker id="log_to_io"></marker>
</desc>
</func>
<func>
<name>log_to_io(LogDir, Mibs, LogName, LogFile) -> ok | {error, Reason}</name>
- <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start) -> ok | {error, Reason}</name>
- <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, LogName, LogFile, Block | Start) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start, Block | Stop) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop, Block) -> ok | {error, Reason}</name>
<fsummary>Convert an Audit Trail Log to text format</fsummary>
<type>
<v>LogDir = string()</v>
diff --git a/lib/snmp/doc/src/snmp_app.xml b/lib/snmp/doc/src/snmp_app.xml
index e5a05342c1..9ede75b943 100644
--- a/lib/snmp/doc/src/snmp_app.xml
+++ b/lib/snmp/doc/src/snmp_app.xml
@@ -763,12 +763,15 @@
</item>
<marker id="db_init_error"></marker>
- <tag><c>db_init_error() = terminate | create</c></tag>
+ <tag><c>db_init_error() = terminate | create | create_db_and_dir</c></tag>
<item>
<p>Defines what to do if the agent or manager is unable to open an
existing database file. <c>terminate</c> means that the
agent/manager will terminate and <c>create</c> means that the
- agent/manager will remove the faulty file(s) and create new ones.</p>
+ agent/manager will remove the faulty file(s) and create new ones,
+ and <c>create_db_and_dir</c> means that the agent/manager will
+ create the database file along with any missing parent directories
+ for the database file.</p>
<p>Default is <c>terminate</c>.</p>
</item>
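A hedged configuration fragment using the new create_db_and_dir value for the agent (the db_dir path is a placeholder; the same option applies to the manager part of the configuration):

    %% In sys.config: let the agent create db_dir and any missing parent
    %% directories instead of terminating when the database cannot be opened.
    [{snmp,
      [{agent,
        [{db_dir, "/var/lib/myapp/snmp/agent/db"},
         {db_init_error, create_db_and_dir}]}]}].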
diff --git a/lib/snmp/doc/src/snmp_config.xml b/lib/snmp/doc/src/snmp_config.xml
index 61ee7f00ee..30b46e6aa8 100644
--- a/lib/snmp/doc/src/snmp_config.xml
+++ b/lib/snmp/doc/src/snmp_config.xml
@@ -792,12 +792,15 @@ in so far as it will be converted to the new format if found.
</item>
<marker id="db_init_error"></marker>
- <tag><c>db_init_error() = terminate | create</c></tag>
+ <tag><c>db_init_error() = terminate | create | create_db_and_dir</c></tag>
<item>
<p>Defines what to do if the agent is unable to open an
existing database file. <c>terminate</c> means that the
- agent/manager will terminate and <c>create</c> means that the
- agent/manager will remove the faulty file(s) and create new ones.</p>
+ agent/manager will terminate, <c>create</c> means that the
+ agent/manager will remove the faulty file(s) and create new ones,
+ and <c>create_db_and_dir</c> means that the agent/manager will
+	  create the database file along with any missing parent 
+	  directories for it.</p>
<p>Default is <c>terminate</c>.</p>
</item>
diff --git a/lib/snmp/doc/src/snmpa.xml b/lib/snmp/doc/src/snmpa.xml
index 77146f3a89..cc8681e5c8 100644
--- a/lib/snmp/doc/src/snmpa.xml
+++ b/lib/snmp/doc/src/snmpa.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2004</year><year>2012</year>
+ <year>2004</year><year>2013</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -557,32 +557,39 @@ notification_delivery_info() = #snmpa_notification_delivery_info{}
<func>
<name>log_to_txt(LogDir)</name>
- <name>log_to_txt(LogDir, Mibs)</name>
- <name>log_to_txt(LogDir, Mibs, OutFile) -> ok | {error, Reason}</name>
- <name>log_to_txt(LogDir, Mibs, OutFile, LogName) -> ok | {error, Reason}</name>
- <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile) -> ok | {error, Reason}</name>
- <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Block | Mibs)</name>
+ <name>log_to_txt(LogDir, Mibs, Block | OutFile) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, Block | LogName) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, LogName, Block | LogFile) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block | Start) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start) -> ok | {error, Reason}</name>
<name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop) -> ok | {error, Reason}</name>
<fsummary>Convert an Audit Trail Log to text format</fsummary>
<type>
<v>LogDir = string()</v>
<v>Mibs = [MibName]</v>
<v>MibName = string()</v>
+ <v>Block = boolean()</v>
<v>OutFile = string()</v>
<v>LogName = string()</v>
<v>LogFile = string()</v>
- <v>Start = Stop = null | datetime() | {local_time,datetime()} | {universal_time,datetime()} </v>
+ <v>Start = Stop = null | calendar:datetime() | {local_time, calendar:datetime()} | {universal_time, calendar:datetime()} </v>
<v>Reason = disk_log_open_error() | file_open_error() | term()</v>
<v>disk_log_open_error() = {LogName, term()}</v>
<v>file_open_error() = {OutFile, term()}</v>
</type>
<desc>
<p>Converts an Audit Trail Log to a readable text file.
- <c>OutFile</c> defaults to "./snmpa_log.txt".
- <c>LogName</c> defaults to "snmpa_log".
- <c>LogFile</c> defaults to "snmpa.log".
- See <seealso marker="snmp#log_to_txt">snmp:log_to_txt</seealso>
- for more info.</p>
+ <c>OutFile</c> defaults to "./snmpa_log.txt".
+ <c>LogName</c> defaults to "snmpa_log".
+ <c>LogFile</c> defaults to "snmpa.log". </p>
+	  <p>The <c>Block</c> option indicates whether the log should be blocked 
+	  during conversion. This can be useful when converting large 
+	  logs (otherwise the log could wrap during the conversion). 
+	  Defaults to <c>true</c>. </p>
+ <p>See <seealso marker="snmp#log_to_txt">snmp:log_to_txt</seealso>
+ for more info.</p>
<marker id="log_to_io"></marker>
</desc>
@@ -590,19 +597,22 @@ notification_delivery_info() = #snmpa_notification_delivery_info{}
<func>
<name>log_to_io(LogDir) -> ok | {error, Reason}</name>
- <name>log_to_io(LogDir, Mibs) -> ok | {error, Reason}</name>
- <name>log_to_io(LogDir, Mibs, LogName) -> ok | {error, Reason}</name>
- <name>log_to_io(LogDir, Mibs, LogName, LogFile) -> ok | {error, Reason}</name>
- <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Block | Mibs) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, Block | LogName) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, LogName, Block | LogFile) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, LogName, LogFile, Block | Start) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start) -> ok | {error, Reason}</name>
<name>log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop) -> ok | {error, Reason}</name>
<fsummary>Convert an Audit Trail Log to text format</fsummary>
<type>
<v>LogDir = string()</v>
<v>Mibs = [MibName]</v>
<v>MibName = string()</v>
+ <v>Block = boolean()</v>
<v>LogName = string()</v>
<v>LogFile = string()</v>
- <v>Start = Stop = null | datetime() | {local_time,datetime()} | {universal_time,datetime()} </v>
+ <v>Start = Stop = null | calendar:datetime() | {local_time, calendar:datetime()} | {universal_time, calendar:datetime()} </v>
<v>Reason = disk_log_open_error() | file_open_error() | term()</v>
<v>disk_log_open_error() = {LogName, term()}</v>
<v>file_open_error() = {OutFile, term()}</v>
@@ -612,6 +622,10 @@ notification_delivery_info() = #snmpa_notification_delivery_info{}
prints it on stdio.
<c>LogName</c> defaults to "snmpa_log".
<c>LogFile</c> defaults to "snmpa.log".
+	  <p>The <c>Block</c> option indicates whether the log should be blocked 
+	  during conversion. This can be useful when converting large 
+	  logs (otherwise the log could wrap during the conversion). 
+	  Defaults to <c>true</c>. </p>
See <seealso marker="snmp#log_to_io">snmp:log_to_io</seealso>
for more info.</p>
diff --git a/lib/snmp/doc/src/snmpm.xml b/lib/snmp/doc/src/snmpm.xml
index 07fdd208ff..a0a1b5716d 100644
--- a/lib/snmp/doc/src/snmpm.xml
+++ b/lib/snmp/doc/src/snmpm.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2004</year><year>2012</year>
+ <year>2004</year><year>2013</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -1209,32 +1209,40 @@ priv_key = [integer()] (length is 16 if priv = usmDESPrivProtocol | usmAesCfb1
</func>
<func>
- <name>log_to_txt(LogDir, Mibs)</name>
- <name>log_to_txt(LogDir, Mibs, OutFile) -> ok | {error, Reason}</name>
- <name>log_to_txt(LogDir, Mibs, OutFile, LogName) -> ok | {error, Reason}</name>
- <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile) -> ok | {error, Reason}</name>
- <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir)</name>
+ <name>log_to_txt(LogDir, Block | Mibs)</name>
+ <name>log_to_txt(LogDir, Mibs, Block | OutFile) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, Block | LogName) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, LogName, Block | LogFile) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block | Start) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start) -> ok | {error, Reason}</name>
<name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop) -> ok | {error, Reason}</name>
+ <name>log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop) -> ok | {error, Reason}</name>
<fsummary>Convert an Audit Trail Log to text format</fsummary>
<type>
<v>LogDir = string()</v>
<v>Mibs = [MibName]</v>
<v>MibName = string()</v>
+ <v>Block = boolean()</v>
<v>OutFile = string()</v>
<v>LogName = string()</v>
<v>LogFile = string()</v>
- <v>Start = Stop = null | datetime() | {local_time,datetime()} | {universal_time,datetime()} </v>
+ <v>Start = Stop = null | calendar:datetime() | {local_time, calendar:datetime()} | {universal_time, calendar:datetime()} </v>
<v>Reason = disk_log_open_error() | file_open_error() | term()</v>
<v>disk_log_open_error() = {LogName, term()}</v>
<v>file_open_error() = {OutFile, term()}</v>
</type>
<desc>
<p>Converts an Audit Trail Log to a readable text file.
- <c>OutFile</c> defaults to "./snmpm_log.txt".
- <c>LogName</c> defaults to "snmpm_log".
- <c>LogFile</c> defaults to "snmpm.log".
- See <seealso marker="snmp#log_to_txt">snmp:log_to_txt</seealso>
- for more info.</p>
+	  <c>OutFile</c> defaults to "./snmpm_log.txt". 
+	  <c>LogName</c> defaults to "snmpm_log". 
+	  <c>LogFile</c> defaults to "snmpm.log". </p>
+	  <p>The <c>Block</c> argument indicates whether the log should be blocked 
+	  during conversion. This can be useful when converting large 
+	  logs (otherwise the log could wrap during the conversion). 
+	  Defaults to <c>true</c>. </p>
+	  <p>See <seealso marker="snmp#log_to_txt">snmp:log_to_txt</seealso> 
+	  for more info.</p>
<marker id="log_to_io"></marker>
</desc>
@@ -1242,20 +1250,23 @@ priv_key = [integer()] (length is 16 if priv = usmDESPrivProtocol | usmAesCfb1
<func>
<name>log_to_io(LogDir) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Block | Mibs) -> ok | {error, Reason}</name>
<name>log_to_io(LogDir, Mibs) -> ok | {error, Reason}</name>
- <name>log_to_io(LogDir, Mibs) -> ok | {error, Reason}</name>
- <name>log_to_io(LogDir, Mibs, LogName) -> ok | {error, Reason}</name>
- <name>log_to_io(LogDir, Mibs, LogName, LogFile) -> ok | {error, Reason}</name>
- <name>log_to_io(LogDir, Mibs, LogName, LogFile, Start) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, Block | LogName) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, LogName, Block | LogFile) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, LogName, LogFile, Block | Start) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start) -> ok | {error, Reason}</name>
<name>log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) -> ok | {error, Reason}</name>
+ <name>log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop) -> ok | {error, Reason}</name>
<fsummary>Convert an Audit Trail Log to text format</fsummary>
<type>
<v>LogDir = string()</v>
<v>Mibs = [MibName]</v>
<v>MibName = string()</v>
+ <v>Block = boolean()</v>
<v>LogName = string()</v>
<v>LogFile = string()</v>
- <v>Start = Stop = null | datetime() | {local_time,datetime()} | {universal_time,datetime()} </v>
+ <v>Start = Stop = null | calendar:datetime() | {local_time, calendar:datetime()} | {universal_time, calendar:datetime()} </v>
<v>Reason = disk_log_open_error() | file_open_error() | term()</v>
<v>disk_log_open_error() = {LogName, term()}</v>
<v>file_open_error() = {OutFile, term()}</v>
@@ -1265,6 +1276,10 @@ priv_key = [integer()] (length is 16 if priv = usmDESPrivProtocol | usmAesCfb1
prints it on stdio.
<c>LogName</c> defaults to "snmpm_log".
<c>LogFile</c> defaults to "snmpm.log".
+	  <p>The <c>Block</c> argument indicates whether the log should be blocked 
+	  during conversion. This can be useful when converting large 
+	  logs (otherwise the log could wrap during the conversion). 
+	  Defaults to <c>true</c>. </p>
See <seealso marker="snmp#log_to_io">snmp:log_to_io</seealso>
for more info.</p>
diff --git a/lib/snmp/doc/src/snmpm_user.xml b/lib/snmp/doc/src/snmpm_user.xml
index 1823e0c815..cb2deab976 100644
--- a/lib/snmp/doc/src/snmpm_user.xml
+++ b/lib/snmp/doc/src/snmpm_user.xml
@@ -1,10 +1,10 @@
-<?xml version="1.0" encoding="latin1" ?>
+<?xml version="1.0" encoding="iso-8859-1" ?>
<!DOCTYPE erlref SYSTEM "erlref.dtd">
<erlref>
<header>
<copyright>
- <year>2004</year><year>2009</year>
+ <year>2004</year><year>2013</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -56,40 +56,59 @@
<item>
<p>handle_report/3</p>
</item>
+ <item>
+ <p>handle_invalid_result/2</p>
+ </item>
</list>
<p>The semantics of them and their exact signatures are explained
- below. </p>
- <p>Note that if an agent is registered using the old, no longer
- documented, functions (using Addr and Port), the old variant of the
- callback functions, handle_pdu, handle_trap, handle_inform and
- handle_report, will be called. </p>
+ below. </p>
+    <p>Some of the functions have no defined return value (<c>void()</c>); 
+      they can of course return anything. But the functions that do have 
+      a specified return value <em>must</em> adhere to it. None of the 
+      functions may use exit or throw to return. </p>
- <marker id="handle_error"></marker>
+ <marker id="types"></marker>
</description>
+
+ <section>
+ <title>DATA TYPES</title>
+ <code type="none"><![CDATA[
+snmp_gen_info() = {ErrorStatus :: atom(),
+ ErrorIndex :: pos_integer(),
+ Varbinds :: [snmp:varbind()]}
+snmp_v1_trap_info() = {Enterprise :: snmp:oid(), 
+ Generic :: integer(),
+ Spec :: integer(),
+ Timestamp :: integer(),
+ Varbinds :: [snmp:varbind()]}
+ ]]></code>
+ <marker id="handle_error"></marker>
+ </section>
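To make these shapes concrete, a minimal sketch of a callback clause that matches <c>snmp_gen_info()</c> might look like this (module context and logging choice are just examples):

    %% Sketch only: a clause from a module implementing the snmpm_user behaviour.
    handle_pdu(_TargetName, _ReqId, {ErrorStatus, ErrorIndex, Varbinds}, _UserData) ->
        io:format("reply: status=~p index=~p varbinds=~p~n",
                  [ErrorStatus, ErrorIndex, Varbinds]),
        ok.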
+
<funcs>
<func>
- <name>handle_error(ReqId, Reason, UserData) -> Reply</name>
+ <name>handle_error(ReqId, Reason, UserData) -> void()</name>
<fsummary>Handle error</fsummary>
<type>
<v>ReqId = integer()</v>
<v>Reason = {unexpected_pdu, SnmpInfo} | {invalid_sec_info, SecInfo, SnmpInfo} | {empty_message, Addr, Port} | term()</v>
+ <v>SnmpInfo = snmp_gen_info()</v>
+ <v>SecInfo = term()</v>
<v>Addr = ip_address()</v>
<v>Port = integer()</v>
<v>UserData = term()</v>
- <v>Reply = ignore</v>
</type>
<desc>
<p>This function is called when the manager needs to
- communicate an "asynchronous" error, to the user:
- e.g. failure to send an asynchronous message (i.e. encoding
- error), a received message was discarded due to security
- error, the manager failed to generate a response message to
- a received inform-request, or when receiving an unexpected
- PDU from an agent (could be an expired async request). </p>
- <p>If <c>ReqId</c> is less then 0, it means that this
- information was not available to the manager (that info was
- never retrieved before the message was discarded).
- </p>
+ communicate an "asynchronous" error to the user:
+ e.g. failure to send an asynchronous message (i.e. encoding
+ error), a received message was discarded due to security
+ error, the manager failed to generate a response message to
+ a received inform-request, or when receiving an unexpected
+ PDU from an agent (could be an expired async request). </p>
+	    <p>If <c>ReqId</c> is less than 0, it means that this 
+ information was not available to the manager (that info was
+ never retrieved before the message was discarded). </p>
<p>For <c>SnmpInfo</c> see handle_agent below.</p>
<marker id="handle_agent"></marker>
@@ -104,22 +123,22 @@
<v>Port = integer()</v>
<v>Type = pdu | trap | report | inform</v>
<v>SnmpInfo = SnmpPduInfo | SnmpTrapInfo | SnmpReportInfo | SnmpInformInfo</v>
- <v>ErrorStatus = atom()</v>
- <v>ErrorIndex = integer()</v>
- <v>Varbinds = [varbind()]</v>
- <v>varbind() = #varbind</v>
+ <v>SnmpPduInfo = snmp_gen_info()</v>
+ <v>SnmpTrapInfo = snmp_v1_trap_info()</v>
+ <v>SnmpReportInfo = snmp_gen_info()</v>
+ <v>SnmpInformInfo = snmp_gen_info()</v>
<v>UserData = term()</v>
- <v>Reply = ignore | {register, UserId, TargetName, agent_info()}</v>
+ <v>Reply = ignore | {register, UserId, TargetName, AgentConfig}</v>
<v>UserId = term()</v>
<v>TargetName = target_name()</v>
- <v>agent_info() = [{agent_info_item(), agent_info_value()}]</v>
+ <v>AgentConfig = [agent_config()]</v>
</type>
<desc>
- <p>This function is called when a message is received from an
- unknown agent.</p>
+ <p>This function is called when a message is received from an
+ unknown agent.</p>
<p>Note that this will always be the default user that is called.</p>
- <p>For more info about the <c>agent_info()</c>, see
- <seealso marker="snmpm#register_agent">register_agent</seealso>.</p>
+ <p>For more info about the <c>agent_config()</c>, see
+ <seealso marker="snmpm#register_agent">register_agent</seealso>.</p>
<p>The arguments <c>Type</c> and <c>SnmpInfo</c> relates in the
following way: </p>
@@ -148,7 +167,7 @@
</list>
<p>The only user which would return
- <c>{register, UserId, TargetName, agent_info()}</c> is the
+ <c>{register, UserId, TargetName, AgentConfig}</c> is the
<em>default user</em>.</p>
<marker id="handle_pdu"></marker>
@@ -156,18 +175,13 @@
</func>
<func>
- <name>handle_pdu(TargetName, ReqId, SnmpPduInfo, UserData) -> Reply</name>
+ <name>handle_pdu(TargetName, ReqId, SnmpPduInfo, UserData) -> void()</name>
<fsummary>Handle the reply to an asynchronous request</fsummary>
<type>
<v>TargetName = target_name()</v>
<v>ReqId = term()</v>
- <v>SnmpPduInfo = {ErrorStatus, ErrorIndex, Varbinds}</v>
- <v>ErrorStatus = atom()</v>
- <v>ErrorIndex = integer()</v>
- <v>Varbinds = [varbind()]</v>
- <v>varbind() = #varbind</v>
+ <v>SnmpPduInfo = snmp_gen_info()</v>
<v>UserData = term()</v>
- <v>Reply = ignore</v>
</type>
<desc>
<p>Handle the reply to an asynchronous request, such as
@@ -186,27 +200,19 @@
<fsummary>Handle a trap/notification message</fsummary>
<type>
<v>TargetName = TargetName2 = target_name()</v>
- <v>SnmpTrapInfo = {Enteprise, Generic, Spec, Timestamp, Varbinds} | {ErrorStatus, ErrorIndex, Varbinds}</v>
- <v>Enterprise = oid()</v>
- <v>Generic = integer()</v>
- <v>Spec = integer()</v>
- <v>Timestamp = integer()</v>
- <v>ErrorStatus = atom()</v>
- <v>ErrorIndex = integer()</v>
- <v>Varbinds = [varbind()]</v>
- <v>varbind() = #varbind</v>
+ <v>SnmpTrapInfo = snmp_v1_trap_info() | snmp_gen_info()</v>
<v>UserData = term()</v>
- <v>Reply = ignore | unregister | {register, UserId, TargetName2, agent_info()}</v>
+ <v>Reply = ignore | unregister | {register, UserId, TargetName2, AgentConfig}</v>
<v>UserId = term()</v>
- <v>agent_info() = [{agent_info_item(), agent_info_value()}]</v>
+ <v>AgentConfig = [agent_config()]</v>
</type>
<desc>
<p>Handle a trap/notification message from an agent.</p>
- <p>For more info about the <c>agent_info()</c>, see
- <seealso marker="snmpm#register_agent">register_agent</seealso></p>
+ <p>For more info about the <c>agent_config()</c>, see
+ <seealso marker="snmpm#register_agent">register_agent</seealso></p>
<p>The only user which would return
- <c>{register, UserId, TargetName2, agent_info()}</c> is the
- <em>default user</em>.</p>
+	  <c>{register, UserId, TargetName2, AgentConfig}</c> is the 
+ <em>default user</em>.</p>
<marker id="handle_inform"></marker>
</desc>
@@ -217,29 +223,25 @@
<fsummary>Handle a inform message</fsummary>
<type>
<v>TargetName = TargetName2 = target_name()</v>
- <v>SnmpInformInfo = {ErrorStatus, ErrorIndex, Varbinds}</v>
- <v>ErrorStatus = atom()</v>
- <v>ErrorIndex = integer()</v>
- <v>Varbinds = [varbind()]</v>
- <v>varbind() = #varbind</v>
+ <v>SnmpInformInfo = snmp_gen_info()</v>
<v>UserData = term()</v>
- <v>Reply = ignore | unregister | {register, UserId, TargetName2, agent_info()}</v>
+ <v>Reply = ignore | no_reply | unregister | {register, UserId, TargetName2, AgentConfig}</v>
<v>UserId = term()</v>
- <v>agent_info() = [{agent_info_item(), agent_info_value()}]</v>
+ <v>AgentConfig = [agent_config()]</v>
</type>
<desc>
<p>Handle a inform message.</p>
- <p>For more info about the <c>agent_info()</c>, see
- <seealso marker="snmpm#register_agent">register_agent</seealso></p>
+ <p>For more info about the <c>agent_config()</c>, see
+ <seealso marker="snmpm#register_agent">register_agent</seealso></p>
<p>The only user which would return
- <c>{register, UserId, TargetName2, agent_info()}</c> is the
- <em>default user</em>.</p>
- <p>If the
- <seealso marker="snmp_app">inform request behaviour</seealso>
- configuration option is set to <c>user</c> or
- <c>{user, integer()}</c>, the response (acknowledgment) to this
- inform-request will be sent when this function returns.</p>
-
+ <c>{register, UserId, TargetName2, AgentConfig}</c> is the
+ <em>default user</em>.</p>
+ <p>If the
+ <seealso marker="snmp_app">inform request behaviour</seealso>
+ configuration option is set to <c>user</c> or
+ <c>{user, integer()}</c>, the response (acknowledgment) to this
+ inform-request will be sent when this function returns.</p>
+
<marker id="handle_report"></marker>
</desc>
</func>
@@ -251,23 +253,46 @@
<v>TargetName = TargetName2 = target_name()</v>
<v>Addr = ip_address()</v>
<v>Port = integer()</v>
- <v>SnmpReportInfo = {ErrorStatus, ErrorIndex, Varbinds}</v>
- <v>ErrorStatus = atom()</v>
- <v>ErrorIndex = integer()</v>
- <v>Varbinds = [varbind()]</v>
- <v>varbind() = #varbind</v>
+ <v>SnmpReportInfo = snmp_gen_info()</v>
<v>UserData = term()</v>
- <v>Reply = ignore | unregister | {register, UserId, TargetName2, agent_info()}</v>
+ <v>Reply = ignore | unregister | {register, UserId, TargetName2, AgentConfig}</v>
<v>UserId = term()</v>
- <v>agent_info() = [{agent_info_item(), agent_info_value()}]</v>
+ <v>AgentConfig = [agent_config()]</v>
</type>
<desc>
<p>Handle a report message.</p>
- <p>For more info about the <c>agent_info()</c>, see
- <seealso marker="snmpm#register_agent">register_agent</seealso></p>
+ <p>For more info about the <c>agent_config()</c>, see
+ <seealso marker="snmpm#register_agent">register_agent</seealso></p>
<p>The only user which would return
- <c>{register, UserId, TargetName2, agent_info()}</c> is the
- <em>default user</em>.</p>
+ <c>{register, UserId, TargetName2, AgentConfig}</c> is the
+ <em>default user</em>.</p>
+
+ <marker id="handle_invalid_result"></marker>
+ </desc>
+ </func>
+
+ <func>
+ <name>handle_invalid_result(IN, OUT) -> void()</name>
+      <fsummary>Handle an invalid result</fsummary>
+ <type>
+ <v>IN = {Func, Args}</v>
+ <v>Func = atom()</v>
+ <v>Args = list()</v>
+ <v>OUT = {crash, CrashInfo} | {result, InvalidResult}</v>
+ <v>CrashInfo = {ErrorType, Error, Stacktrace}</v>
+ <v>ErrorType = atom()</v>
+ <v>Error = term()</v>
+ <v>Stacktrace = list()</v>
+ <v>InvalidResult = term()</v>
+ </type>
+ <desc>
+	  <p>If <em>any</em> of the <em>other</em> callback functions crashes 
+	  (exit, throw or a plain crash) or returns an invalid result (if a valid 
+	  return value has been specified), this function is called. 
+	  The purpose is to allow the user to handle this 
+	  error (for instance, to issue an error report).</p>
+	  <p><c>IN</c> represents the function called (and its arguments). 
+	  <c>OUT</c> represents the unexpected/invalid result. </p>
</desc>
</func>
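A minimal sketch of this callback, assuming the user simply wants the problem reported, might be:

    %% Sketch only: log the offending callback and its unexpected outcome.
    handle_invalid_result({Func, Args} = _IN, OUT) ->
        error_logger:error_msg("snmpm_user callback ~p/~p failed: ~p~n",
                               [Func, length(Args), OUT]),
        ok.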
</funcs>
diff --git a/lib/snmp/examples/ex2/snmp_ex2_manager.erl b/lib/snmp/examples/ex2/snmp_ex2_manager.erl
index 1b247d713d..a9dcc09b77 100644
--- a/lib/snmp/examples/ex2/snmp_ex2_manager.erl
+++ b/lib/snmp/examples/ex2/snmp_ex2_manager.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2006-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2006-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -43,7 +43,8 @@
handle_pdu/4,
handle_trap/3,
handle_inform/3,
- handle_report/3]).
+ handle_report/3,
+ handle_invalid_result/3]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
@@ -406,5 +407,9 @@ handle_report(TargetName, SnmpReport, Server) when is_pid(Server) ->
report_callback(Server, handle_inform, {TargetName, SnmpReport}),
ok.
+handle_invalid_result(In, Out, Server) when is_pid(Server) ->
+ report_callback(Server, handle_invalid_result, {In, Out}),
+ ok.
+
report_callback(Pid, Tag, Info) ->
Pid ! {snmp_callback, Tag, Info}.
diff --git a/lib/snmp/src/agent/snmpa.erl b/lib/snmp/src/agent/snmpa.erl
index a95e41ea42..aea63effe6 100644
--- a/lib/snmp/src/agent/snmpa.erl
+++ b/lib/snmp/src/agent/snmpa.erl
@@ -85,11 +85,10 @@
-export([add_agent_caps/2, del_agent_caps/1, get_agent_caps/0]).
%% Audit Trail Log functions
--export([log_to_txt/1,
- log_to_txt/2, log_to_txt/3, log_to_txt/4,
- log_to_txt/5, log_to_txt/6, log_to_txt/7,
- log_to_io/1, log_to_io/2, log_to_io/3,
- log_to_io/4, log_to_io/5, log_to_io/6,
+-export([log_to_txt/1, log_to_txt/2, log_to_txt/3, log_to_txt/4,
+ log_to_txt/5, log_to_txt/6, log_to_txt/7, log_to_txt/8,
+ log_to_io/1, log_to_io/2, log_to_io/3, log_to_io/4,
+ log_to_io/5, log_to_io/6, log_to_io/7,
log_info/0,
change_log_size/1,
get_log_type/0, get_log_type/1,
@@ -130,7 +129,8 @@
-include("snmpa_internal.hrl").
-include_lib("snmp/include/snmp_types.hrl"). % type of me needed.
--define(DISCO_EXTRA_INFO, undefined).
+-define(DISCO_EXTRA_INFO, undefined).
+-define(ATL_BLOCK_DEFAULT, true).
%%-----------------------------------------------------------------
@@ -872,43 +872,207 @@ get_agent_caps() ->
%%% Audit Trail Log functions
%%%-----------------------------------------------------------------
+-spec log_to_txt(LogDir :: snmp:dir()) ->
+ snmp:void().
+
log_to_txt(LogDir) ->
log_to_txt(LogDir, []).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Block :: boolean()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()]) ->
+ snmp:void().
+
+log_to_txt(LogDir, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ Mibs = [],
+ OutFile = "snmpa_log.txt",
+ LogName = ?audit_trail_log_name,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block);
+
log_to_txt(LogDir, Mibs) ->
+ Block = ?ATL_BLOCK_DEFAULT,
OutFile = "snmpa_log.txt",
LogName = ?audit_trail_log_name,
LogFile = ?audit_trail_log_file,
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile).
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ Block :: boolean()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ OutFile = "snmpa_log.txt",
+ LogName = ?audit_trail_log_name,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block);
log_to_txt(LogDir, Mibs, OutFile) ->
+ Block = ?ATL_BLOCK_DEFAULT,
+ LogName = ?audit_trail_log_name,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ Block :: boolean()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, OutFile, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
LogName = ?audit_trail_log_name,
LogFile = ?audit_trail_log_file,
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile).
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block);
log_to_txt(LogDir, Mibs, OutFile, LogName) ->
+ Block = ?ATL_BLOCK_DEFAULT,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ Block :: boolean()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, OutFile, LogName, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
LogFile = ?audit_trail_log_file,
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile).
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block);
log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile) ->
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string(),
+ Block :: boolean()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string(),
+ Start :: snmp_log:log_time()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block);
log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start) ->
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string(),
+ Block :: boolean(),
+ Start :: snmp_log:log_time()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string(),
+ Start :: snmp_log:log_time(),
+ Stop :: snmp_log:log_time()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start);
+
log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop) ->
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string(),
+ Block :: boolean(),
+ Start :: snmp_log:log_time(),
+ Stop :: snmp_log:log_time()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop) ->
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop).
log_to_io(LogDir) ->
log_to_io(LogDir, []).
+
+log_to_io(LogDir, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ Mibs = [],
+ LogName = ?audit_trail_log_name,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block);
log_to_io(LogDir, Mibs) ->
+ Block = ?ATL_BLOCK_DEFAULT,
LogName = ?audit_trail_log_name,
LogFile = ?audit_trail_log_file,
- snmp:log_to_io(LogDir, Mibs, LogName, LogFile).
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block).
+
+log_to_io(LogDir, Mibs, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ LogName = ?audit_trail_log_name,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block);
log_to_io(LogDir, Mibs, LogName) ->
+ Block = ?ATL_BLOCK_DEFAULT,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block).
+
+log_to_io(LogDir, Mibs, LogName, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
LogFile = ?audit_trail_log_file,
- snmp:log_to_io(LogDir, Mibs, LogName, LogFile).
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block);
log_to_io(LogDir, Mibs, LogName, LogFile) ->
- snmp:log_to_io(LogDir, Mibs, LogName, LogFile).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block).
+
+log_to_io(LogDir, Mibs, LogName, LogFile, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block);
log_to_io(LogDir, Mibs, LogName, LogFile, Start) ->
- snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Start).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start).
+
+log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start);
log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) ->
- snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop).
+
+log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop) ->
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop).
log_info() ->
diff --git a/lib/snmp/src/agent/snmpa_local_db.erl b/lib/snmp/src/agent/snmpa_local_db.erl
index 5198c6ec4e..f991244287 100644
--- a/lib/snmp/src/agent/snmpa_local_db.erl
+++ b/lib/snmp/src/agent/snmpa_local_db.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -191,6 +191,12 @@ dets_open(DbDir, DbInitError, Opts) ->
end
end;
_ ->
+ case DbInitError of
+ create_db_and_dir ->
+ ok = filelib:ensure_dir(Filename);
+ _ ->
+ ok
+ end,
case do_dets_open(Name, Filename, Opts) of
{ok, Dets} ->
?vdebug("dets open done",[]),
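Note that filelib:ensure_dir/1 creates the missing parent directories of the given name, not the file itself; the dets file is then created by the subsequent open. A hedged illustration (path invented):

    %% Creates /var/db/snmp/agent/ if needed; the .dat file itself is not created here.
    ok = filelib:ensure_dir("/var/db/snmp/agent/snmpa_local_db.dat").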
diff --git a/lib/snmp/src/agent/snmpa_mpd.erl b/lib/snmp/src/agent/snmpa_mpd.erl
index 2d37ea56f0..11ae806866 100644
--- a/lib/snmp/src/agent/snmpa_mpd.erl
+++ b/lib/snmp/src/agent/snmpa_mpd.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -657,7 +657,7 @@ generate_response_msg(Vsn, RePdu, Type,
?SEC_USM ->
snmpa_usm
end,
- SecEngineID = LocalEngineID,
+ SecEngineID = LocalEngineID, % 3.1.1a
?vtrace("generate_response_msg -> SecEngineID: ~w", [SecEngineID]),
case (catch SecModule:generate_outgoing_msg(Message,
SecEngineID,
diff --git a/lib/snmp/src/agent/snmpa_supervisor.erl b/lib/snmp/src/agent/snmpa_supervisor.erl
index aebcdbaa84..77ed54bee4 100644
--- a/lib/snmp/src/agent/snmpa_supervisor.erl
+++ b/lib/snmp/src/agent/snmpa_supervisor.erl
@@ -356,7 +356,7 @@ init([AgentType, Opts]) ->
SymStoreSpec =
worker_spec(snmpa_symbolic_store, SymStoreArgs, Restart, 2000),
- LdbArgs = [Prio, DbDir, LdbOpts],
+ LdbArgs = [Prio, DbDir, DbInitError, LdbOpts],
LocalDbSpec =
worker_spec(snmpa_local_db, LdbArgs, Restart, 5000),
diff --git a/lib/snmp/src/agent/snmpa_symbolic_store.erl b/lib/snmp/src/agent/snmpa_symbolic_store.erl
index 00178f4bcd..a922d62ba8 100644
--- a/lib/snmp/src/agent/snmpa_symbolic_store.erl
+++ b/lib/snmp/src/agent/snmpa_symbolic_store.erl
@@ -642,10 +642,10 @@ code_change(_Vsn, S, _Extra) ->
{ok, S}.
-stop_backup_server(undefined) ->
- ok;
-stop_backup_server({Pid, _}) when is_pid(Pid) ->
- exit(Pid, kill).
+%% stop_backup_server(undefined) ->
+%% ok;
+%% stop_backup_server({Pid, _}) when is_pid(Pid) ->
+%% exit(Pid, kill).
diff --git a/lib/snmp/src/agent/snmpa_usm.erl b/lib/snmp/src/agent/snmpa_usm.erl
index 6f54307f9f..719ea4e356 100644
--- a/lib/snmp/src/agent/snmpa_usm.erl
+++ b/lib/snmp/src/agent/snmpa_usm.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -16,6 +16,9 @@
%%
%% %CopyrightEnd%
%%
+%% AES: RFC 3826
+%%
+
-module(snmpa_usm).
%% Avoid warning for local function error/1 clashing with autoimported BIF.
@@ -652,7 +655,10 @@ get_des_salt() ->
[?i32(EngineBoots), ?i32(SaltInt)].
aes_encrypt(PrivKey, Data) ->
- snmp_usm:aes_encrypt(PrivKey, Data, fun get_aes_salt/0).
+ EngineBoots = snmp_framework_mib:get_engine_boots(),
+ EngineTime = snmp_framework_mib:get_engine_time(),
+ snmp_usm:aes_encrypt(PrivKey, Data, fun get_aes_salt/0,
+ EngineBoots, EngineTime).
aes_decrypt(PrivKey, UsmSecParams, EncData) ->
#usmSecurityParameters{msgPrivacyParameters = PrivParams,
diff --git a/lib/snmp/src/app/Makefile b/lib/snmp/src/app/Makefile
index 716add8b9e..b8cc4b8754 100644
--- a/lib/snmp/src/app/Makefile
+++ b/lib/snmp/src/app/Makefile
@@ -2,7 +2,7 @@
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2003-2012. All Rights Reserved.
+# Copyright Ericsson AB 2003-2013. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -79,6 +79,8 @@ endif
# FLAGS
# ----------------------------------------------------
+ERL_COMPILE_FLAGS += -pa $(ERL_TOP)/lib/snmp/ebin
+
ifeq ($(WARN_UNUSED_VARS),true)
ERL_COMPILE_FLAGS += +warn_unused_vars
endif
diff --git a/lib/snmp/src/app/snmp.appup.src b/lib/snmp/src/app/snmp.appup.src
index 6edcf7e833..fa4b72ab68 100644
--- a/lib/snmp/src/app/snmp.appup.src
+++ b/lib/snmp/src/app/snmp.appup.src
@@ -27,26 +27,10 @@
%% {load_module, snmp_pdus, soft_purge, soft_purge, []}
%% {update, snmpa_local_db, soft, soft_purge, soft_purge, []}
%% {add_module, snmpm_net_if_mt}
-
[
- {"4.24.1",
- [
- {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent]},
- {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_agent]},
- {update, snmpa_mib, soft, soft_purge, soft_purge, []}
- ]
- },
- {"4.24",
- [
- {load_module, snmp_conf, soft_purge, soft_purge, []},
- {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge,
- [snmp_conf]},
- {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent]},
- {update, snmpa_local_db, soft, soft_purge, soft_purge, []},
- {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_agent]},
- {update, snmpa_mib, soft, soft_purge, soft_purge, []}
- ]
- },
+ {"4.24.2", [{restart_application, snmp}]},
+ {"4.24.1", [{restart_application, snmp}]},
+ {"4.24", [{restart_application, snmp}]},
{"4.23.1", [{restart_application, snmp}]},
{"4.23", [{restart_application, snmp}]}
],
@@ -57,24 +41,9 @@
%% {remove, {snmpm_net_if_mt, soft_purge, soft_purge}}
[
- {"4.24.1",
- [
- {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent]},
- {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_agent]},
- {update, snmpa_mib, soft, soft_purge, soft_purge, []}
- ]
- },
- {"4.24",
- [
- {load_module, snmp_conf, soft_purge, soft_purge, []},
- {load_module, snmp_view_based_acm_mib, soft_purge, soft_purge,
- [snmp_conf]},
- {load_module, snmpa, soft_purge, soft_purge, [snmpa_agent]},
- {update, snmpa_local_db, soft, soft_purge, soft_purge, []},
- {update, snmpa_agent, soft, soft_purge, soft_purge, [snmpa_agent]},
- {update, snmpa_mib, soft, soft_purge, soft_purge, []}
- ]
- },
+ {"4.24.2", [{restart_application, snmp}]},
+ {"4.24.1", [{restart_application, snmp}]},
+ {"4.24", [{restart_application, snmp}]},
{"4.23.1", [{restart_application, snmp}]},
{"4.23", [{restart_application, snmp}]}
]
diff --git a/lib/snmp/src/app/snmp.erl b/lib/snmp/src/app/snmp.erl
index 1bb562654a..8b3a8af77d 100644
--- a/lib/snmp/src/app/snmp.erl
+++ b/lib/snmp/src/app/snmp.erl
@@ -49,8 +49,8 @@
read_mib/1,
- log_to_txt/5, log_to_txt/6, log_to_txt/7,
- log_to_io/4, log_to_io/5, log_to_io/6,
+ log_to_txt/5, log_to_txt/6, log_to_txt/7, log_to_txt/8,
+ log_to_io/4, log_to_io/5, log_to_io/6, log_to_io/7,
change_log_size/2,
octet_string_to_bits/1, bits_to_octet_string/1,
@@ -91,7 +91,31 @@
]).
-export_type([
+ dir/0,
+ snmp_timer/0,
+
+ engine_id/0,
+ tdomain/0,
+ community/0,
+ mms/0,
+ version/0,
+ sec_model/0,
+ sec_name/0,
+ sec_level/0,
+
oid/0,
+ varbind/0,
+ ivarbind/0,
+ asn1_type/0,
+ table_info/0,
+ variable_info/0,
+ me/0,
+ trap/0,
+ notification/0,
+ pdu/0,
+ trappdu/0,
+ mib/0,
+ mib_name/0,
void/0
]).
@@ -148,15 +172,42 @@
-define(APPLICATION, snmp).
+-define(ATL_BLOCK_DEFAULT, true).
+-include_lib("snmp/include/snmp_types.hrl").
%%-----------------------------------------------------------------
%% Types
%%-----------------------------------------------------------------
--type oid() :: [non_neg_integer()].
--type void() :: term().
+-type dir() :: string().
+-type snmp_timer() :: #snmp_incr_timer{}.
+
+-type engine_id() :: string().
+-type tdomain() :: transportDomainUdpIpv4 | transportDomainUdpIpv6.
+-type community() :: string().
+-type mms() :: non_neg_integer().
+-type version() :: v1 | v2 | v3.
+-type sec_model() :: any | v1 | v2c | usm.
+-type sec_name() :: string().
+-type sec_level() :: noAuthNoPriv | authNoPriv | authPriv.
+
+-type oid() :: [non_neg_integer()].
+-type varbind() :: #varbind{}.
+-type ivarbind() :: #ivarbind{}.
+-type asn1_type() :: #asn1_type{}.
+-type table_info() :: #table_info{}.
+-type variable_info() :: #variable_info{}.
+-type me() :: #me{}.
+-type trap() :: #trap{}.
+-type notification() :: #notification{}.
+-type mib() :: #mib{}.
+-type mib_name() :: string().
+-type pdu() :: #pdu{}.
+-type trappdu() :: #trappdu{}.
+
+-type void() :: term().
%%-----------------------------------------------------------------
@@ -854,18 +905,60 @@ read_mib(FileName) ->
%%%-----------------------------------------------------------------
log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile) ->
- snmp_log:log_to_txt(LogName, LogFile, LogDir, Mibs, OutFile).
+ Block = ?ATL_BLOCK_DEFAULT,
+ Start = null,
+ Stop = null,
+ log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop).
+
+log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ Start = null,
+ Stop = null,
+ log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop);
log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start) ->
- snmp_log:log_to_txt(LogName, LogFile, LogDir, Mibs, OutFile, Start).
+ Block = ?ATL_BLOCK_DEFAULT,
+ Stop = null,
+ log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop).
+
+log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ Stop = null,
+ log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop);
log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop) ->
- snmp_log:log_to_txt(LogName, LogFile, LogDir, Mibs, OutFile, Start, Stop).
+ Block = ?ATL_BLOCK_DEFAULT,
+ log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop).
+
+log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop) ->
+ snmp_log:log_to_txt(LogName, Block, LogFile, LogDir, Mibs, OutFile,
+ Start, Stop).
+
log_to_io(LogDir, Mibs, LogName, LogFile) ->
- snmp_log:log_to_io(LogName, LogFile, LogDir, Mibs).
+ Block = ?ATL_BLOCK_DEFAULT,
+ Start = null,
+ Stop = null,
+ log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop).
+
+log_to_io(LogDir, Mibs, LogName, LogFile, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ Start = null,
+ Stop = null,
+ log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop);
log_to_io(LogDir, Mibs, LogName, LogFile, Start) ->
- snmp_log:log_to_io(LogName, LogFile, LogDir, Mibs, Start).
+ Block = ?ATL_BLOCK_DEFAULT,
+ Stop = null,
+ log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop).
+
+log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ Stop = null,
+ log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop);
log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) ->
- snmp_log:log_to_io(LogName, LogFile, LogDir, Mibs, Start, Stop).
+ Block = ?ATL_BLOCK_DEFAULT,
+ log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop).
+
+log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop) ->
+ snmp_log:log_to_io(LogName, Block, LogFile, LogDir, Mibs, Start, Stop).
change_log_size(LogName, NewSize) ->
snmp_log:change_size(LogName, NewSize).
@@ -878,12 +971,12 @@ change_log_size(LogName, NewSize) ->
%% Usage: erl -s snmp str_apply '{Mod,Func,ArgList}'
str_apply([Atom]) ->
Str = atom_to_list(Atom),
- {Mod,Func,Args} = to_erlang_term(Str),
- apply(Mod,Func,Args).
+ {Mod, Func, Args} = to_erlang_term(Str),
+ apply(Mod, Func, Args).
to_erlang_term(String) ->
{ok, Tokens, _} = erl_scan:string(lists:append([String, ". "])),
- {ok,Term} = erl_parse:parse_term(Tokens),
+ {ok, Term} = erl_parse:parse_term(Tokens),
Term.
diff --git a/lib/snmp/src/app/snmp_internal.hrl b/lib/snmp/src/app/snmp_internal.hrl
index 5ff715e0b7..f04fa4dd53 100644
--- a/lib/snmp/src/app/snmp_internal.hrl
+++ b/lib/snmp/src/app/snmp_internal.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2006-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2006-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -24,6 +24,8 @@
-define(APPLICATION, snmp).
-endif.
+-define(STACK(), erlang:get_stacktrace()).
+
-define(snmp_info(C, F, A), ?snmp_msg(info_msg, C, F, A)).
-define(snmp_warning(C, F, A), ?snmp_msg(warning_msg, C, F, A)).
-define(snmp_error(C, F, A), ?snmp_msg(error_msg, C, F, A)).
diff --git a/lib/snmp/src/manager/snmpm.erl b/lib/snmp/src/manager/snmpm.erl
index 6ac0115dad..c97b635fc6 100644
--- a/lib/snmp/src/manager/snmpm.erl
+++ b/lib/snmp/src/manager/snmpm.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2004-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2004-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -75,11 +75,10 @@
%%
%% Logging
- log_to_txt/1,
- log_to_txt/2, log_to_txt/3, log_to_txt/4,
- log_to_txt/5, log_to_txt/6, log_to_txt/7,
- log_to_io/1, log_to_io/2, log_to_io/3,
- log_to_io/4, log_to_io/5, log_to_io/6,
+ log_to_txt/1, log_to_txt/2, log_to_txt/3, log_to_txt/4,
+ log_to_txt/5, log_to_txt/6, log_to_txt/7, log_to_txt/8,
+ log_to_io/1, log_to_io/2, log_to_io/3, log_to_io/4,
+ log_to_io/5, log_to_io/6, log_to_io/7,
change_log_size/1,
get_log_type/0,
set_log_type/1,
@@ -111,6 +110,12 @@
-export([start_link/3, snmpm_start_verify/2, snmpm_start_verify/3]).
-export([target_name/1, target_name/2]).
+-export_type([
+ register_timeout/0,
+ agent_config/0,
+ target_name/0
+ ]).
+
-include_lib("snmp/src/misc/snmp_debug.hrl").
-include_lib("snmp/include/snmp_types.hrl").
@@ -119,6 +124,26 @@
-include("snmp_verbosity.hrl").
-define(DEFAULT_AGENT_PORT, 161).
+-define(ATL_BLOCK_DEFAULT, true).
+
+
+%%-----------------------------------------------------------------
+%% Types
+%%-----------------------------------------------------------------
+
+-type register_timeout() :: pos_integer() | snmp:snmp_timer().
+-type agent_config() :: {engine_id, snmp:engine_id()} | % Mandatory
+ {address, inet:ip_address()} | % Mandatory
+ {port, inet:port_number()} | % Optional
+ {tdomain, snmp:tdomain()} | % Optional
+ {community, snmp:community()} | % Optional
+ {timeout, register_timeout()} | % Optional
+ {max_message_size, snmp:mms()} | % Optional
+ {version, snmp:version()} | % Optional
+		 	  {sec_model,        snmp:sec_model()} |           % Optional
+ {sec_name, snmp:sec_name()} | % Optional
+ {sec_level, snmp:sec_level()}. % Optional
+-type target_name() :: string().
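For orientation, registering an agent with such a config list might look like the following sketch (user id, target name, engine id and address are invented values):

    %% Sketch only: all identifiers and the address below are made up.
    Config = [{engine_id, "agentEngine"},
              {address,   {192,168,0,17}},
              {port,      161},
              {version,   v2},
              {community, "public"}],
    snmpm:register_agent("mgr_user", "node17", Config).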
%% This function is called when the snmp application
@@ -762,43 +787,204 @@ cancel_async_request(UserId, ReqId) ->
%%% Audit Trail Log functions (for backward compatibility)
%%%-----------------------------------------------------------------
+-spec log_to_txt(LogDir :: snmp:dir()) ->
+ snmp:void().
+
log_to_txt(LogDir) ->
log_to_txt(LogDir, []).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Block :: boolean()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()]) ->
+ snmp:void().
+
+log_to_txt(LogDir, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ Mibs = [],
+ OutFile = "snmpm_log.txt",
+ LogName = ?audit_trail_log_name,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block);
log_to_txt(LogDir, Mibs) ->
+ Block = ?ATL_BLOCK_DEFAULT,
+ OutFile = "snmpm_log.txt",
+ LogName = ?audit_trail_log_name,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ Block :: boolean()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
OutFile = "snmpm_log.txt",
LogName = ?audit_trail_log_name,
LogFile = ?audit_trail_log_file,
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile).
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block);
log_to_txt(LogDir, Mibs, OutFile) ->
+ Block = ?ATL_BLOCK_DEFAULT,
+ LogName = ?audit_trail_log_name,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ Block :: boolean()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, OutFile, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
LogName = ?audit_trail_log_name,
LogFile = ?audit_trail_log_file,
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile).
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block);
log_to_txt(LogDir, Mibs, OutFile, LogName) ->
+ Block = ?ATL_BLOCK_DEFAULT,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ Block :: boolean()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, OutFile, LogName, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
LogFile = ?audit_trail_log_file,
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile).
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block);
log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile) ->
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string(),
+ Block :: boolean()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string(),
+ Start :: snmp_log:log_time()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block);
log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start) ->
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string(),
+ Block :: boolean(),
+ Start :: snmp_log:log_time()) ->
+ snmp:void();
+ (LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string(),
+ Start :: snmp_log:log_time(),
+ Stop :: snmp_log:log_time()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start);
log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop) ->
- snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Start, Stop).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop).
+
+-spec log_to_txt(LogDir :: snmp:dir(),
+ Mibs :: [snmp:mib_name()],
+ OutFile :: file:filename(),
+ LogName :: string(),
+ LogFile :: string(),
+ Block :: boolean(),
+ Start :: snmp_log:log_time(),
+ Stop :: snmp_log:log_time()) ->
+ snmp:void().
+
+log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop) ->
+ snmp:log_to_txt(LogDir, Mibs, OutFile, LogName, LogFile, Block, Start, Stop).
log_to_io(LogDir) ->
log_to_io(LogDir, []).
+
+log_to_io(LogDir, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ Mibs = [],
+ LogName = ?audit_trail_log_name,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block);
log_to_io(LogDir, Mibs) ->
LogName = ?audit_trail_log_name,
LogFile = ?audit_trail_log_file,
snmp:log_to_io(LogDir, Mibs, LogName, LogFile).
+
+log_to_io(LogDir, Mibs, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ LogName = ?audit_trail_log_name,
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block);
log_to_io(LogDir, Mibs, LogName) ->
+ Block = ?ATL_BLOCK_DEFAULT,
LogFile = ?audit_trail_log_file,
- snmp:log_to_io(LogDir, Mibs, LogName, LogFile).
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block).
+
+log_to_io(LogDir, Mibs, LogName, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ LogFile = ?audit_trail_log_file,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block);
log_to_io(LogDir, Mibs, LogName, LogFile) ->
- snmp:log_to_io(LogDir, Mibs, LogName, LogFile).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block).
+
+log_to_io(LogDir, Mibs, LogName, LogFile, Block)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block);
log_to_io(LogDir, Mibs, LogName, LogFile, Start) ->
- snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Start).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start).
+
+log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start);
log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop) ->
- snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Start, Stop).
+ Block = ?ATL_BLOCK_DEFAULT,
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop).
+
+log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop) ->
+ snmp:log_to_io(LogDir, Mibs, LogName, LogFile, Block, Start, Stop).
change_log_size(NewSize) ->
diff --git a/lib/snmp/src/manager/snmpm_config.erl b/lib/snmp/src/manager/snmpm_config.erl
index 736debe544..2101ad46e1 100644
--- a/lib/snmp/src/manager/snmpm_config.erl
+++ b/lib/snmp/src/manager/snmpm_config.erl
@@ -1215,6 +1215,12 @@ dets_open(Dir, DbInitError, Repair, AutoSave) ->
end
end;
_ ->
+ case DbInitError of
+ create_db_and_dir ->
+ ok = filelib:ensure_dir(Filename);
+ _ ->
+ ok
+ end,
case do_dets_open(Name, Filename, Repair, AutoSave) of
{ok, _Dets} ->
ok;
@@ -1316,7 +1322,14 @@ verify_option({server, ServerOpts}) ->
verify_server_opts(ServerOpts);
verify_option({note_store, NoteStoreOpts}) ->
verify_note_store_opts(NoteStoreOpts);
-verify_option({config, ConfOpts}) ->
+verify_option({config, ConfOpts0}) ->
+ %% Make sure any db_dir option is first in the options list to make it
+ %% easier to check if the db_init_error option specifies that a missing
+ %% db_dir should be created.
+ ConfOpts = case lists:keytake(db_dir, 1, ConfOpts0) of
+ false -> ConfOpts0;
+ {value, Result, OtherOpts} -> [Result|OtherOpts]
+ end,
verify_config_opts(ConfOpts);
verify_option({versions, Vsns}) ->
verify_versions(Vsns);
@@ -1365,7 +1378,12 @@ verify_config_opts([{dir, Dir}|Opts]) ->
verify_conf_dir(Dir),
verify_config_opts(Opts);
verify_config_opts([{db_dir, Dir}|Opts]) ->
- verify_conf_db_dir(Dir),
+ case lists:keyfind(db_init_error, 1, Opts) of
+ {db_init_error, create_db_and_dir} ->
+ verify_conf_db_dir(Dir, false);
+ _ ->
+ verify_conf_db_dir(Dir, true)
+ end,
verify_config_opts(Opts);
verify_config_opts([{db_init_error, DbInitErr}|Opts]) ->
verify_conf_db_init_error(DbInitErr),
@@ -1443,7 +1461,7 @@ verify_conf_dir(Dir) ->
error({invalid_conf_dir, Dir})
end.
-verify_conf_db_dir(Dir) ->
+verify_conf_db_dir(Dir, true) ->
case (catch verify_dir(Dir)) of
ok ->
ok;
@@ -1451,13 +1469,16 @@ verify_conf_db_dir(Dir) ->
error({invalid_conf_db_dir, Dir, Reason});
_ ->
error({invalid_conf_db_dir, Dir})
- end.
-
+ end;
+verify_conf_db_dir(_Dir, false) ->
+ ok.
verify_conf_db_init_error(terminate) ->
ok;
verify_conf_db_init_error(create) ->
ok;
+verify_conf_db_init_error(create_db_and_dir) ->
+ ok;
verify_conf_db_init_error(InvalidDbInitError) ->
error({invalid_conf_db_init_error, InvalidDbInitError}).
diff --git a/lib/snmp/src/manager/snmpm_server.erl b/lib/snmp/src/manager/snmpm_server.erl
index 61d22362cc..9c79df2748 100644
--- a/lib/snmp/src/manager/snmpm_server.erl
+++ b/lib/snmp/src/manager/snmpm_server.erl
@@ -488,7 +488,7 @@ cancel_async_request(UserId, ReqId) ->
%% discovery(UserId, BAddr, Port, Config, Expire, ExtraInfo) ->
%% call({discovery, self(), UserId, BAddr, Port, Config, Expire, ExtraInfo}).
-
+
verbosity(Verbosity) ->
case ?vvalidate(Verbosity) of
Verbosity ->
@@ -1851,7 +1851,17 @@ handle_snmp_error(Addr, Port, ReqId, Reason, State) ->
handle_error(_UserId, Mod, Reason, ReqId, Data, _State) ->
?vtrace("handle_error -> entry when"
"~n Mod: ~p", [Mod]),
- F = fun() -> (catch Mod:handle_error(ReqId, Reason, Data)) end,
+ F = fun() ->
+ try
+ begin
+ Mod:handle_error(ReqId, Reason, Data)
+ end
+ catch
+ T:E ->
+ CallbackArgs = [ReqId, Reason, Data],
+ handle_invalid_result(handle_error, CallbackArgs, T, E)
+ end
+ end,
handle_callback(F),
ok.
@@ -2031,7 +2041,15 @@ handle_pdu(_UserId, Mod, target_name = _RegType, TargetName, _Addr, _Port,
?vtrace("handle_pdu(target_name) -> entry when"
"~n Mod: ~p", [Mod]),
F = fun() ->
- (catch Mod:handle_pdu(TargetName, ReqId, SnmpResponse, Data))
+ try
+ begin
+ Mod:handle_pdu(TargetName, ReqId, SnmpResponse, Data)
+ end
+ catch
+ T:E ->
+ CallbackArgs = [TargetName, ReqId, SnmpResponse, Data],
+ handle_invalid_result(handle_pdu, CallbackArgs, T, E)
+ end
end,
handle_callback(F),
ok;
@@ -2064,8 +2082,37 @@ do_handle_agent(DefUserId, DefMod,
SnmpInfo, DefData, State) ->
?vdebug("do_handle_agent -> entry when"
"~n DefUserId: ~p", [DefUserId]),
- case (catch DefMod:handle_agent(Addr, Port, Type, SnmpInfo, DefData)) of
- {'EXIT', {undef, _}} when Type =:= pdu ->
+ try DefMod:handle_agent(Addr, Port, Type, SnmpInfo, DefData) of
+ {register, UserId2, TargetName, Config} ->
+ ?vtrace("do_handle_agent -> register: "
+ "~n UserId2: ~p"
+ "~n TargetName: ~p"
+ "~n Config: ~p",
+ [UserId2, TargetName, Config]),
+ Config2 = ensure_present([{address, Addr}, {port, Port}], Config),
+ Config3 = [{reg_type, target_name} | Config2],
+ case snmpm_config:register_agent(UserId2,
+ TargetName, Config3) of
+ ok ->
+ ok;
+ {error, Reason} ->
+ error_msg("failed registering agent - "
+ "handling agent "
+ "~p <~p,~p>: ~n~w",
+ [TargetName, Addr, Port, Reason]),
+ ok
+ end;
+
+ ignore ->
+ ?vdebug("do_handle_agent -> ignore", []),
+ ok;
+
+ InvalidResult ->
+ CallbackArgs = [Addr, Port, Type, SnmpInfo, DefData],
+ handle_invalid_result(handle_agent, CallbackArgs, InvalidResult)
+
+ catch
+ error:{undef, _} when Type =:= pdu ->
%% Maybe, still on the old API
?vdebug("do_handle_agent -> maybe still on the old api", []),
case (catch DefMod:handle_agent(Addr, Port, SnmpInfo, DefData)) of
@@ -2113,10 +2160,10 @@ do_handle_agent(DefUserId, DefMod,
ok
end;
- {'EXIT', {undef, _}} ->
+ error:{undef, _} ->
%% If the user does not implement the new API (but the
%% old), then this clause catches all non-pdu handle_agent
- %% calls. These calls was previously never made,so we make
+ %% calls. These calls were previously never made, so we make
 %% a best-effort call (using reg-type target_name) to the
%% various callback functions, and leave it to the user to
%% figure out
@@ -2148,31 +2195,11 @@ do_handle_agent(DefUserId, DefMod,
"regarding agent "
"<~p,~p>: ~n~w", [Type, Addr, Port, SnmpInfo])
end;
-
- {register, UserId2, TargetName, Config} ->
- ?vtrace("do_handle_agent -> register: "
- "~n UserId2: ~p"
- "~n TargetName: ~p"
- "~n Config: ~p",
- [UserId2, TargetName, Config]),
- Config2 = ensure_present([{address, Addr}, {port, Port}], Config),
- Config3 = [{reg_type, target_name} | Config2],
- case snmpm_config:register_agent(UserId2,
- TargetName, Config3) of
- ok ->
- ok;
- {error, Reason} ->
- error_msg("failed registering agent - "
- "handling agent "
- "~p <~p,~p>: ~n~w",
- [TargetName, Addr, Port, Reason]),
- ok
- end;
- _Ignore ->
- ?vdebug("do_handle_agent -> ignore", []),
- ok
-
+ T:E ->
+ CallbackArgs = [Addr, Port, Type, SnmpInfo, DefData],
+ handle_invalid_result(handle_agent, CallbackArgs, T, E)
+
end.
ensure_present([], Config) ->
@@ -2305,15 +2332,17 @@ do_handle_trap(UserId, Mod,
RegType, Target, Addr, Port, SnmpTrapInfo, Data, _State) ->
?vdebug("do_handle_trap -> entry with"
"~n UserId: ~p", [UserId]),
- HandleTrap =
+ {HandleTrap, CallbackArgs} =
case RegType of
target_name ->
- fun() -> Mod:handle_trap(Target, SnmpTrapInfo, Data) end;
+ {fun() -> Mod:handle_trap(Target, SnmpTrapInfo, Data) end,
+ [Target, SnmpTrapInfo, Data]};
addr_port ->
- fun() -> Mod:handle_trap(Addr, Port, SnmpTrapInfo, Data) end
+ {fun() -> Mod:handle_trap(Addr, Port, SnmpTrapInfo, Data) end,
+ [Addr, Port, SnmpTrapInfo, Data]}
end,
- case (catch HandleTrap()) of
+ try HandleTrap() of
{register, UserId2, Config} ->
?vtrace("do_handle_trap -> register: "
"~n UserId2: ~p"
@@ -2362,9 +2391,17 @@ do_handle_trap(UserId, Mod,
[Addr, Port, Reason]),
ok
end;
- _Ignore ->
+ ignore ->
?vtrace("do_handle_trap -> ignore", []),
- ok
+ ok;
+
+ InvalidResult ->
+ handle_invalid_result(handle_trap, CallbackArgs, InvalidResult)
+
+ catch
+ T:E ->
+ handle_invalid_result(handle_trap, CallbackArgs, T, E)
+
end.
@@ -2465,16 +2502,18 @@ do_handle_inform(UserId, Mod, Ref,
RegType, Target, Addr, Port, SnmpInform, Data, State) ->
?vdebug("do_handle_inform -> entry with"
"~n UserId: ~p", [UserId]),
- HandleInform =
+ {HandleInform, CallbackArgs} =
case RegType of
target_name ->
- fun() -> Mod:handle_inform(Target, SnmpInform, Data) end;
+ {fun() -> Mod:handle_inform(Target, SnmpInform, Data) end,
+ [Target, SnmpInform, Data]};
addr_port ->
- fun() -> Mod:handle_inform(Addr, Port, SnmpInform, Data) end
+ {fun() -> Mod:handle_inform(Addr, Port, SnmpInform, Data) end,
+ [Addr, Port, SnmpInform, Data]}
end,
Rep =
- case (catch HandleInform()) of
+ try HandleInform() of
{register, UserId2, Config} ->
?vtrace("do_handle_inform -> register: "
"~n UserId2: ~p"
@@ -2494,6 +2533,7 @@ do_handle_inform(UserId, Mod, Ref,
[Target2, Addr, Port, Reason]),
reply
end;
+
{register, UserId2, Target2, Config} ->
?vtrace("do_handle_inform -> register: "
"~n UserId2: ~p"
@@ -2512,6 +2552,7 @@ do_handle_inform(UserId, Mod, Ref,
[Target2, Addr, Port, Reason]),
reply
end;
+
unregister ->
?vtrace("do_handle_inform -> unregister", []),
case snmpm_config:unregister_agent(UserId,
@@ -2525,12 +2566,25 @@ do_handle_inform(UserId, Mod, Ref,
[Addr, Port, Reason]),
reply
end;
+
no_reply ->
?vtrace("do_handle_inform -> no_reply", []),
no_reply;
- _Ignore ->
+
+ ignore ->
?vtrace("do_handle_inform -> ignore", []),
+ reply;
+
+ InvalidResult ->
+ handle_invalid_result(handle_inform, CallbackArgs,
+ InvalidResult),
reply
+
+ catch
+ T:E ->
+ handle_invalid_result(handle_inform, CallbackArgs, T, E),
+ reply
+
end,
handle_inform_response(Rep, Ref, Addr, Port, State),
ok.
@@ -2760,15 +2814,17 @@ do_handle_report(UserId, Mod,
RegType, Target, Addr, Port, SnmpReport, Data, _State) ->
?vdebug("do_handle_report -> entry with"
"~n UserId: ~p", [UserId]),
- HandleReport =
+ {HandleReport, CallbackArgs} =
case RegType of
target_name ->
- fun() -> Mod:handle_report(Target, SnmpReport, Data) end;
+ {fun() -> Mod:handle_report(Target, SnmpReport, Data) end,
+ [Target, SnmpReport, Data]};
addr_port ->
- fun() -> Mod:handle_report(Addr, Port, SnmpReport, Data) end
+ {fun() -> Mod:handle_report(Addr, Port, SnmpReport, Data) end,
+ [Addr, Port, SnmpReport, Data]}
end,
- case (catch HandleReport()) of
+ try HandleReport() of
{register, UserId2, Config} ->
?vtrace("do_handle_report -> register: "
"~n UserId2: ~p"
@@ -2788,6 +2844,7 @@ do_handle_report(UserId, Mod,
[Addr, Port, Reason]),
ok
end;
+
{register, UserId2, Target2, Config} ->
?vtrace("do_handle_report -> register: "
"~n UserId2: ~p"
@@ -2806,6 +2863,7 @@ do_handle_report(UserId, Mod,
[Target2, Addr, Port, Reason]),
reply
end;
+
unregister ->
?vtrace("do_handle_trap -> unregister", []),
case snmpm_config:unregister_agent(UserId,
@@ -2819,9 +2877,20 @@ do_handle_report(UserId, Mod,
[Addr, Port, Reason]),
ok
end;
- _Ignore ->
+
+ ignore ->
?vtrace("do_handle_report -> ignore", []),
- ok
+ ok;
+
+ InvalidResult ->
+ handle_invalid_result(handle_report, CallbackArgs, InvalidResult),
+ reply
+
+ catch
+ T:E ->
+ handle_invalid_result(handle_report, CallbackArgs, T, E),
+ reply
+
end.
@@ -2835,6 +2904,25 @@ handle_callback(F) ->
end).
+
+handle_invalid_result(Func, Args, T, E) ->
+ Stacktrace = ?STACK(),
+ error_msg("Callback function failed: "
+ "~n Function: ~p"
+ "~n Args: ~p"
+ "~n Error Type: ~p"
+ "~n Error: ~p"
+ "~n Stacktrace: ~p",
+ [Func, Args, T, E, Stacktrace]).
+
+handle_invalid_result(Func, Args, InvalidResult) ->
+ error_msg("Callback function returned invalid result: "
+ "~n Function: ~p"
+ "~n Args: ~p"
+ "~n Invalid result: ~p",
+ [Func, Args, InvalidResult]).
+
+
handle_down(MonRef) ->
(catch do_handle_down(MonRef)).
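The recurring change in snmpm_server above replaces `(catch Mod:Callback(...))` with a try expression, so a callback that returns an unexpected term and a callback that raises an exception are reported separately through handle_invalid_result/3,4. A condensed, stand-alone sketch of the same pattern (not a verbatim copy of the module) is shown below.

%% Sketch of the wrapping pattern: the 'of' clauses match the documented return
%% values, any other return is logged as an invalid result, and exceptions are
%% logged with their class and reason.
call_user_callback(Mod, Func, Args) ->
    try apply(Mod, Func, Args) of
        ignore ->
            ok;
        Invalid ->
            error_logger:error_msg("callback ~w returned invalid result:~n   ~p~n",
                                   [Func, Invalid])
    catch
        Class:Reason ->
            error_logger:error_msg("callback ~w failed: ~w:~p~n",
                                   [Func, Class, Reason])
    end.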
diff --git a/lib/snmp/src/manager/snmpm_user.erl b/lib/snmp/src/manager/snmpm_user.erl
index 78aa560b2e..e6b0b6943e 100644
--- a/lib/snmp/src/manager/snmpm_user.erl
+++ b/lib/snmp/src/manager/snmpm_user.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2004-2009. All Rights Reserved.
+%% Copyright Ericsson AB 2004-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -19,79 +19,100 @@
-module(snmpm_user).
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{handle_error, 3},
- {handle_agent, 5},
- {handle_pdu, 4},
- {handle_trap, 3},
- {handle_inform, 3},
- {handle_report, 3}];
-behaviour_info(_) ->
- undefined.
-
-
-%% handle_error(ReqId, Reason, UserData) -> Reply
-%% ReqId -> integer()
-%% Reason -> term()
-%% UserData -> term() (supplied when the user register)
-%% Reply -> ignore
-
-%% handle_agent(Addr, Port, Type, SnmpInfo, UserData) -> Reply
-%% Addr -> term()
-%% Port -> integer()
-%% Type -> pdu | trap | inform | report
-%% SnmpInfo -> {ErrorStatus, ErrorIndex, Varbinds}
-%% UserId -> term()
-%% ErrorStatus -> atom()
-%% ErrorIndex -> integer()
-%% Varbinds -> [varbind()]
-%% UserData -> term() (supplied when the user register)
-%% Reply -> ignore | {register, UserId, agent_info()}
-%% agent_info() -> [{agent_info_item(), agent_info_value()}]
-%% This is the same info as in update_agent_info/4
-
-%% handle_pdu(TargetName, ReqId, SnmpResponse, UserData) -> Reply
-%% TargetName -> target_name()
-%% ReqId -> term() (returned when calling ag(...), ...)
-%% SnmpResponse -> {ErrorStatus, ErrorIndex, Varbinds}
-%% ErrorStatus -> atom()
-%% ErrorIndex -> integer()
-%% Varbinds -> [varbind()]
-%% UserData -> term() (supplied when the user register)
-%% Reply -> ignore
-
-%% handle_trap(TargetName, SnmpTrapInfo, UserData) -> Reply
-%% TargetName -> target_name()
-%% SnmpTrapInfo -> {Enteprise, Generic, Spec, Timestamp, Varbinds} |
-%% {ErrorStatus, ErrorIndex, Varbinds}
-%% Enteprise -> oid()
-%% Generic -> integer()
-%% Spec -> integer()
-%% Timestamp -> integer()
-%% ErrorStatus -> atom()
-%% ErrorIndex -> integer()
-%% Varbinds -> [varbind()]
-%% UserData -> term() (supplied when the user register)
-%% Reply -> ignore | unregister | {register, UserId, agent_info()}
-
-%% handle_inform(TargetName, SnmpInform, UserData) -> Reply
-%% TargetName -> target_name()
-%% SnmpInform -> {ErrorStatus, ErrorIndex, Varbinds}
-%% ErrorStatus -> atom()
-%% ErrorIndex -> integer()
-%% Varbinds -> [varbind()]
-%% UserData -> term() (supplied when the user register)
-%% Reply -> ignore | unregister | {register, UserId, agent_info()}
-%%
-
-%% handle_report(TargetName, SnmpReport, UserData) -> Reply
-%% TargetName -> target_name()
-%% SnmpReport -> {ErrorStatus, ErrorIndex, Varbinds}
-%% ErrorStatus -> integer()
-%% ErrorIndex -> integer()
-%% Varbinds -> [varbind()]
-%% UserData -> term() (supplied when the user register)
-%% Reply -> ignore | unregister | {register, UserId, agent_info()}
+-export_type([
+ snmp_gen_info/0,
+ snmp_v1_trap_info/0
+ ]).
+
+-type snmp_gen_info() :: {ErrorStatus :: atom(),
+ ErrorIndex :: pos_integer(),
+ Varbinds :: [snmp:varbind()]}.
+-type snmp_v1_trap_info() :: {Enterprise :: snmp:oid(),
+ Generic :: integer(),
+ Spec :: integer(),
+ Timestamp :: integer(),
+ Varbinds :: [snmp:varbind()]}.
+-type ip_address() :: inet:ip_address().
+-type port_number() :: inet:port_number().
+
+
+%% *** handle_error ***
+%% An "asynchronous" error has been detected
+
+-callback handle_error(ReqId :: integer(),
+ Reason :: {unexpected_pdu, SnmpInfo :: snmp_gen_info()} |
+ {invalid_sec_info, SecInfo :: term(), SnmpInfo :: snmp_gen_info()} |
+ {empty_message, Addr :: ip_address(), Port :: port_number()} |
+ term(),
+ UserData :: term()) ->
+ snmp:void().
+
+
+%% *** handle_agent ***
+%% A message was received from an unknown agent
+
+-callback handle_agent(Addr :: term(),
+ Port :: pos_integer(),
+ Type :: pdu | trap | inform | report,
+ SnmpInfo :: snmp_gen_info() | snmp_v1_trap_info(),
+ UserData :: term()) ->
+ Reply :: ignore |
+ {register,
+ UserId :: term(),
+ RTargetName :: snmpm:target_name(),
+ AgentConfig :: [snmpm:agent_config()]}.
+
+
+%% *** handle_pdu ***
+%% Handle the reply to an async request (such as get, get-next and set).
+
+-callback handle_pdu(TargetName :: snmpm:target_name(),
+ ReqId :: term(),
+ SnmpResponse :: snmp_gen_info(),
+ UserData :: term()) ->
+ snmp:void().
+
+
+%% *** handle_trap ***
+%% Handle a trap/notification message received from an agent
+
+-callback handle_trap(TargetName :: snmpm:target_name(),
+ SnmpTrapInfo :: snmp_gen_info() | snmp_v1_trap_info(),
+ UserData :: term()) ->
+ Reply :: ignore |
+ unregister |
+ {register,
+ UserId :: term(),
+ RTargetName :: snmpm:target_name(),
+ AgentConfig :: [snmpm:agent_config()]}.
+
+
+%% *** handle_inform ***
+%% Handle an inform message received from an agent
+
+-callback handle_inform(TargetName :: snmpm:target_name(),
+ SnmpInform :: snmp_gen_info(),
+ UserData :: term()) ->
+ Reply :: ignore | no_reply |
+ unregister |
+ {register,
+ UserId :: term(),
+ RTargetName :: snmpm:target_name(),
+ AgentConfig :: [snmpm:agent_config()]}.
+
+
+%% *** handle_report ***
+%% Handle a report message received from an agent
+
+-callback handle_report(TargetName :: snmpm:target_name(),
+ SnmpReport :: snmp_gen_info(),
+ UserData :: term()) ->
+ Reply :: ignore |
+ unregister |
+ {register,
+ UserId :: term(),
+ RTargetName :: snmpm:target_name(),
+ AgentConfig :: [snmpm:agent_config()]}.
+
+
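With behaviour_info/1 replaced by -callback attributes, a module that declares -behaviour(snmpm_user) now gets compile-time checking of the callbacks it exports. A minimal illustrative implementation follows; the module name and the ignore-everything behaviour are examples only, not part of this commit.

%% Sketch of a user module written against the -callback attributes above.
-module(my_snmpm_user).
-behaviour(snmpm_user).

-export([handle_error/3, handle_agent/5, handle_pdu/4,
         handle_trap/3, handle_inform/3, handle_report/3]).

%% Every callback simply ignores the incoming event.
handle_error(_ReqId, _Reason, _UserData)             -> ignore.
handle_agent(_Addr, _Port, _Type, _Info, _UserData)  -> ignore.
handle_pdu(_TargetName, _ReqId, _Response, _UserData) -> ignore.
handle_trap(_TargetName, _TrapInfo, _UserData)       -> ignore.
handle_inform(_TargetName, _Inform, _UserData)       -> ignore.
handle_report(_TargetName, _Report, _UserData)       -> ignore.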
diff --git a/lib/snmp/src/manager/snmpm_usm.erl b/lib/snmp/src/manager/snmpm_usm.erl
index 497d6d6102..0a8a6436a3 100644
--- a/lib/snmp/src/manager/snmpm_usm.erl
+++ b/lib/snmp/src/manager/snmpm_usm.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2004-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2004-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -19,6 +19,9 @@
%%-----------------------------------------------------------------
%% This module implements the User Based Security Model for SNMP,
%% as defined in rfc2274.
+%%
+%% AES: RFC 3826
+%%
%%-----------------------------------------------------------------
-module(snmpm_usm).
@@ -416,11 +419,14 @@ get_des_salt() ->
[?i32(EngineBoots), ?i32(SaltInt)].
aes_encrypt(PrivKey, Data) ->
- snmp_usm:aes_encrypt(PrivKey, Data, fun get_aes_salt/0).
+ EngineBoots = get_engine_boots(),
+ EngineTime = get_engine_time(),
+ snmp_usm:aes_encrypt(PrivKey, Data, fun get_aes_salt/0,
+ EngineBoots, EngineTime).
aes_decrypt(PrivKey, UsmSecParams, EncData) ->
- #usmSecurityParameters{msgPrivacyParameters = MsgPrivParams,
- msgAuthoritativeEngineTime = EngineTime,
+ #usmSecurityParameters{msgPrivacyParameters = MsgPrivParams,
+ msgAuthoritativeEngineTime = EngineTime,
msgAuthoritativeEngineBoots = EngineBoots} =
UsmSecParams,
snmp_usm:aes_decrypt(PrivKey, MsgPrivParams, EncData,
diff --git a/lib/snmp/src/misc/snmp_config.erl b/lib/snmp/src/misc/snmp_config.erl
index 945b8719fc..a222f842e5 100644
--- a/lib/snmp/src/misc/snmp_config.erl
+++ b/lib/snmp/src/misc/snmp_config.erl
@@ -233,16 +233,18 @@ config_agent_sys() ->
fun verify_verbosity/1),
DbDir = ask("5. Database directory (absolute path)?", DefDir,
fun verify_dir/1),
- MibStorageType = ask("6. Mib storage type (ets/dets/mnesia)?", "ets",
+ DbInitError = ask("6. How to handle DB init error?",
+ "terminate", fun verify_db_init_error/1),
+ MibStorageType = ask("7. Mib storage type (ets/dets/mnesia)?", "ets",
fun verify_mib_storage_type/1),
MibStorage =
case MibStorageType of
ets ->
[{module, snmpa_mib_storage_ets}];
dets ->
- DetsDir = ask("6b. Mib storage directory (absolute path)?",
+ DetsDir = ask("7b. Mib storage directory (absolute path)?",
DbDir, fun verify_dir/1),
- DetsAction = ask("6c. Mib storage [dets] database start "
+ DetsAction = ask("7c. Mib storage [dets] database start "
"action "
"(default/clear/keep)?",
"default", fun verify_mib_storage_action/1),
@@ -257,7 +259,7 @@ config_agent_sys() ->
end;
mnesia ->
Nodes = [],
- MnesiaAction = ask("6b. Mib storage [mnesia] database start "
+ MnesiaAction = ask("7b. Mib storage [mnesia] database start "
"action "
"(default/clear/keep)?",
"default", fun verify_mib_storage_action/1),
@@ -275,80 +277,80 @@ config_agent_sys() ->
%% Here we should ask about mib-server data module,
%% but as we only have one at the moment...
- TargetCacheVerb = ask("7. Target cache verbosity "
+ TargetCacheVerb = ask("8. Target cache verbosity "
"(silence/info/log/debug/trace)?", "silence",
fun verify_verbosity/1),
- SymStoreVerb = ask("8. Symbolic store verbosity "
+ SymStoreVerb = ask("9. Symbolic store verbosity "
"(silence/info/log/debug/trace)?", "silence",
fun verify_verbosity/1),
- LocalDbVerb = ask("9. Local DB verbosity "
+ LocalDbVerb = ask("10. Local DB verbosity "
"(silence/info/log/debug/trace)?", "silence",
fun verify_verbosity/1),
- LocalDbRepair = ask("10. Local DB repair (true/false/force)?", "true",
+ LocalDbRepair = ask("11. Local DB repair (true/false/force)?", "true",
fun verify_dets_repair/1),
- LocalDbAutoSave = ask("11. Local DB auto save (infinity/milli seconds)?",
+ LocalDbAutoSave = ask("12. Local DB auto save (infinity/milli seconds)?",
"5000", fun verify_dets_auto_save/1),
- ErrorMod = ask("12. Error report module?", "snmpa_error_logger", fun verify_module/1),
- Type = ask("13. Agent type (master/sub)?", "master",
+ ErrorMod = ask("13. Error report module?", "snmpa_error_logger", fun verify_module/1),
+ Type = ask("14. Agent type (master/sub)?", "master",
fun verify_agent_type/1),
AgentConfig =
case Type of
master ->
- MasterAgentVerb = ask("14. Master-agent verbosity "
+ MasterAgentVerb = ask("15. Master-agent verbosity "
"(silence/info/log/debug/trace)?",
"silence",
fun verify_verbosity/1),
- ForceLoad = ask("15. Shall the agent re-read the "
+ ForceLoad = ask("16. Shall the agent re-read the "
"configuration files during startup ~n"
" (and ignore the configuration "
"database) (true/false)?", "true",
fun verify_bool/1),
- MultiThreaded = ask("16. Multi threaded agent (true/false)?",
+ MultiThreaded = ask("17. Multi threaded agent (true/false)?",
"false",
fun verify_bool/1),
- MeOverride = ask("17. Check for duplicate mib entries when "
+ MeOverride = ask("18. Check for duplicate mib entries when "
"installing a mib (true/false)?", "false",
fun verify_bool/1),
- TrapOverride = ask("18. Check for duplicate trap names when "
+ TrapOverride = ask("19. Check for duplicate trap names when "
"installing a mib (true/false)?", "false",
fun verify_bool/1),
- MibServerVerb = ask("19. Mib server verbosity "
+ MibServerVerb = ask("20. Mib server verbosity "
"(silence/info/log/debug/trace)?",
"silence",
fun verify_verbosity/1),
- MibServerCache = ask("20. Mib server cache "
+ MibServerCache = ask("21. Mib server cache "
"(true/false)?",
"true",
fun verify_bool/1),
- NoteStoreVerb = ask("21. Note store verbosity "
+ NoteStoreVerb = ask("22. Note store verbosity "
"(silence/info/log/debug/trace)?",
"silence",
fun verify_verbosity/1),
- NoteStoreTimeout = ask("22. Note store GC timeout?", "30000",
+ NoteStoreTimeout = ask("23. Note store GC timeout?", "30000",
fun verify_timeout/1),
ATL =
- case ask("23. Shall the agent use an audit trail log "
+ case ask("24. Shall the agent use an audit trail log "
"(y/n)?",
"n", fun verify_yes_or_no/1) of
yes ->
- ATLType = ask("23b. Audit trail log type "
+ ATLType = ask("24b. Audit trail log type "
"(write/read_write)?",
"read_write", fun verify_atl_type/1),
- ATLDir = ask("23c. Where to store the "
+ ATLDir = ask("24c. Where to store the "
"audit trail log?",
DefDir, fun verify_dir/1),
- ATLMaxFiles = ask("23d. Max number of files?",
+ ATLMaxFiles = ask("24d. Max number of files?",
"10",
fun verify_pos_integer/1),
- ATLMaxBytes = ask("23e. Max size (in bytes) "
+ ATLMaxBytes = ask("24e. Max size (in bytes) "
"of each file?",
"10240",
fun verify_pos_integer/1),
ATLSize = {ATLMaxBytes, ATLMaxFiles},
- ATLRepair = ask("23f. Audit trail log repair "
+ ATLRepair = ask("24f. Audit trail log repair "
"(true/false/truncate/snmp_repair)?", "true",
fun verify_atl_repair/1),
- ATLSeqNo = ask("23g. Audit trail log "
+ ATLSeqNo = ask("24g. Audit trail log "
"sequence-numbering (true/false)?",
"false",
fun verify_atl_seqno/1),
@@ -360,33 +362,33 @@ config_agent_sys() ->
no ->
[]
end,
- NetIfVerb = ask("24. Network interface verbosity "
+ NetIfVerb = ask("25. Network interface verbosity "
"(silence/info/log/debug/trace)?",
"silence",
fun verify_verbosity/1),
- NetIfMod = ask("25. Which network interface module shall be used?",
+ NetIfMod = ask("26. Which network interface module shall be used?",
"snmpa_net_if", fun verify_module/1),
NetIfOpts =
case NetIfMod of
snmpa_net_if ->
NetIfBindTo =
- ask("25a. Bind the agent IP address "
+ ask("26a. Bind the agent IP address "
"(true/false)?",
"false", fun verify_bool/1),
NetIfNoReuse =
- ask("25b. Shall the agents "
+ ask("26b. Shall the agents "
"IP address "
"and port be not reusable "
"(true/false)?",
"false", fun verify_bool/1),
NetIfReqLimit =
- ask("25c. Agent request limit "
+ ask("26c. Agent request limit "
"(used for flow control) "
"(infinity/pos integer)?",
"infinity",
fun verify_netif_req_limit/1),
NetIfRecbuf =
- case ask("25d. Receive buffer size of the "
+ case ask("26d. Receive buffer size of the "
"agent (in bytes) "
"(default/pos integer)?",
"default",
@@ -397,7 +399,7 @@ config_agent_sys() ->
[{recbuf, RecBufSz}]
end,
NetIfSndbuf =
- case ask("25e. Send buffer size of the agent "
+ case ask("26e. Send buffer size of the agent "
"(in bytes) (default/pos integer)?",
"default",
fun verify_netif_sndbuf/1) of
@@ -407,7 +409,7 @@ config_agent_sys() ->
[{sndbuf, SndBufSz}]
end,
NetIfFilter =
- case ask("25f. Do you wish to specify a "
+ case ask("26f. Do you wish to specify a "
"network interface filter module "
"(or use default)",
"default", fun verify_module/1) of
@@ -426,18 +428,18 @@ config_agent_sys() ->
NetIf = [{module, NetIfMod},
{verbosity, NetIfVerb},
{options, NetIfOpts}],
- TermDiscoEnable = ask("26a. Allow terminating discovery "
+ TermDiscoEnable = ask("27. Allow terminating discovery "
"(true/false)?", "true",
fun verify_bool/1),
TermDiscoConf =
case TermDiscoEnable of
true ->
TermDiscoStage2 =
- ask("26b. Second stage behaviour "
+ ask("27a. Second stage behaviour "
"(discovery/plain)?", "discovery",
fun verify_term_disco_behaviour/1),
TermDiscoTrigger =
- ask("26c. Trigger username "
+ ask("27b. Trigger username "
"(default/a string)?", "default",
fun verify_term_disco_trigger_username/1),
[{enable, TermDiscoEnable},
@@ -448,7 +450,7 @@ config_agent_sys() ->
{stage2, discovery},
{trigger_username, ""}]
end,
- OrigDiscoEnable = ask("27a. Allow originating discovery "
+ OrigDiscoEnable = ask("28. Allow originating discovery "
"(true/false)?", "true",
fun verify_bool/1),
OrigDiscoConf =
@@ -471,7 +473,7 @@ config_agent_sys() ->
{verbosity, NoteStoreVerb}]},
{net_if, NetIf}] ++ ATL;
sub ->
- SubAgentVerb = ask("14. Sub-agent verbosity "
+ SubAgentVerb = ask("15. Sub-agent verbosity "
"(silence/info/log/debug/trace)?",
"silence",
fun verify_verbosity/1),
@@ -480,11 +482,12 @@ config_agent_sys() ->
{config, [{dir, ConfigDir}]}]
end,
SysConfig =
- [{priority, Prio},
- {versions, Vsns},
- {db_dir, DbDir},
- {mib_storage, MibStorage},
- {target_cache, [{verbosity, TargetCacheVerb}]},
+ [{priority, Prio},
+ {versions, Vsns},
+ {db_dir, DbDir},
+ {db_init_error, DbInitError},
+ {mib_storage, MibStorage},
+ {target_cache, [{verbosity, TargetCacheVerb}]},
{symbolic_store, [{verbosity, SymStoreVerb}]},
{local_db, [{repair, LocalDbRepair},
{auto_save, LocalDbAutoSave},
@@ -630,19 +633,21 @@ config_manager_sys() ->
fun verify_verbosity/1),
ConfigDbDir = ask("5. Database directory (absolute path)?",
DefDir, fun verify_dir/1),
- ConfigDbRepair = ask("6. Database repair "
+ ConfigDbInitError = ask("6. How to handle DB init error?",
+ "terminate", fun verify_db_init_error/1),
+ ConfigDbRepair = ask("7. Database repair "
"(true/false/force)?", "true",
fun verify_dets_repair/1),
- ConfigDbAutoSave = ask("7. Database auto save "
+ ConfigDbAutoSave = ask("8. Database auto save "
"(infinity/milli seconds)?",
"5000", fun verify_dets_auto_save/1),
IRB =
- case ask("8. Inform request behaviour (auto/user)?",
+ case ask("9. Inform request behaviour (auto/user)?",
"auto", fun verify_irb/1) of
auto ->
auto;
user ->
- case ask("8b. Use default GC timeout"
+ case ask("9b. Use default GC timeout"
"(default/seconds)?",
"default", fun verify_irb_user/1) of
default ->
@@ -651,31 +656,31 @@ config_manager_sys() ->
{user, IrbGcTo}
end
end,
- ServerVerb = ask("9. Server verbosity "
+ ServerVerb = ask("10. Server verbosity "
"(silence/info/log/debug/trace)?",
"silence",
fun verify_verbosity/1),
- ServerTimeout = ask("10. Server GC timeout?", "30000",
+ ServerTimeout = ask("11. Server GC timeout?", "30000",
fun verify_timeout/1),
- NoteStoreVerb = ask("11. Note store verbosity "
+ NoteStoreVerb = ask("12. Note store verbosity "
"(silence/info/log/debug/trace)?",
"silence",
fun verify_verbosity/1),
- NoteStoreTimeout = ask("12. Note store GC timeout?", "30000",
+ NoteStoreTimeout = ask("13. Note store GC timeout?", "30000",
fun verify_timeout/1),
- NetIfMod = ask("13. Which network interface module shall be used?",
+ NetIfMod = ask("14. Which network interface module shall be used?",
"snmpm_net_if", fun verify_module/1),
- NetIfVerb = ask("14. Network interface verbosity "
+ NetIfVerb = ask("15. Network interface verbosity "
"(silence/info/log/debug/trace)?", "silence",
fun verify_verbosity/1),
- NetIfBindTo = ask("15. Bind the manager IP address "
+ NetIfBindTo = ask("16. Bind the manager IP address "
"(true/false)?",
"false", fun verify_bool/1),
- NetIfNoReuse = ask("16. Shall the manager IP address and port "
+ NetIfNoReuse = ask("17. Shall the manager IP address and port "
"be not reusable (true/false)?",
"false", fun verify_bool/1),
NetIfRecbuf =
- case ask("17. Receive buffer size of the manager (in bytes) "
+ case ask("18. Receive buffer size of the manager (in bytes) "
"(default/pos integer)?", "default",
fun verify_netif_recbuf/1) of
default ->
@@ -684,7 +689,7 @@ config_manager_sys() ->
[{recbuf, RecBufSz}]
end,
NetIfSndbuf =
- case ask("18. Send buffer size of the manager (in bytes) "
+ case ask("19. Send buffer size of the manager (in bytes) "
"(default/pos integer)?", "default",
fun verify_netif_sndbuf/1) of
default ->
@@ -700,28 +705,28 @@ config_manager_sys() ->
{verbosity, NetIfVerb},
{options, NetIfOpts}],
ATL =
- case ask("19. Shall the manager use an audit trail log "
+ case ask("20. Shall the manager use an audit trail log "
"(y/n)?",
"n", fun verify_yes_or_no/1) of
yes ->
- ATLType = ask("19b. Audit trail log type "
+ ATLType = ask("20b. Audit trail log type "
"(write/read_write)?",
"read_write", fun verify_atl_type/1),
- ATLDir = ask("19c. Where to store the "
+ ATLDir = ask("20c. Where to store the "
"audit trail log?",
DefDir, fun verify_dir/1),
- ATLMaxFiles = ask("19d. Max number of files?",
+ ATLMaxFiles = ask("20d. Max number of files?",
"10",
fun verify_pos_integer/1),
- ATLMaxBytes = ask("19e. Max size (in bytes) "
+ ATLMaxBytes = ask("20e. Max size (in bytes) "
"of each file?",
"10240",
fun verify_pos_integer/1),
ATLSize = {ATLMaxBytes, ATLMaxFiles},
- ATLRepair = ask("19f. Audit trail log repair "
+ ATLRepair = ask("20f. Audit trail log repair "
"(true/false/truncate/snmp_repair)?", "true",
fun verify_atl_repair/1),
- ATLSeqNo = ask("19g. Audit trail log sequence-numbering "
+ ATLSeqNo = ask("20g. Audit trail log sequence-numbering "
"(true/false)?", "false",
fun verify_atl_seqno/1),
[{audit_trail_log, [{type, ATLType},
@@ -733,14 +738,14 @@ config_manager_sys() ->
[]
end,
DefUser =
- case ask("20. Do you wish to assign a default user [yes] or use~n"
+ case ask("21. Do you wish to assign a default user [yes] or use~n"
" the default settings [no] (y/n)?", "n",
fun verify_yes_or_no/1) of
yes ->
- DefUserMod = ask("20b. Default user module?",
+ DefUserMod = ask("21b. Default user module?",
"snmpm_user_default",
fun verify_module/1),
- DefUserData = ask("20c. Default user data?", "undefined",
+ DefUserData = ask("21c. Default user data?", "undefined",
fun verify_user_data/1),
[{def_user_mod, DefUserMod},
{def_user_data, DefUserData}];
@@ -750,11 +755,12 @@ config_manager_sys() ->
SysConfig =
[{priority, Prio},
{versions, Vsns},
- {config, [{dir, ConfigDir},
- {verbosity, ConfigVerb},
- {db_dir, ConfigDbDir},
- {repair, ConfigDbRepair},
- {auto_save, ConfigDbAutoSave}]},
+ {config, [{dir, ConfigDir},
+ {db_dir, ConfigDbDir},
+ {db_init_error, ConfigDbInitError},
+ {repair, ConfigDbRepair},
+ {auto_save, ConfigDbAutoSave},
+ {verbosity, ConfigVerb}]},
{inform_request_behaviour, IRB},
{mibs, []},
{server, [{timeout, ServerTimeout},
@@ -1069,6 +1075,16 @@ verify_dir(Dir) ->
_E ->
{error, "invalid directory (not absolute): " ++ Dir}
end.
+
+
+verify_db_init_error("terminate") ->
+ {ok, terminate};
+verify_db_init_error("create") ->
+ {ok, create};
+verify_db_init_error("create_db_and_dir") ->
+ {ok, create_db_and_dir};
+verify_db_init_error(R) ->
+ {error, "invalid DB init error: " ++ R}.
verify_notif_type("trap") -> {ok, trap};
@@ -1164,13 +1180,20 @@ verify_dets_auto_save(I0) ->
%% I know that this is a little off the edge, but...
+verify_module(M) when is_atom(M) ->
+ {ok, M};
+verify_module(M0) when is_list(M0) ->
+ {ok, list_to_atom(M0)};
verify_module(M0) ->
- case (catch list_to_atom(M0)) of
- M when is_atom(M) ->
- {ok, M};
- _ ->
- {error, "invalid module: " ++ M0}
- end.
+ {error, lists:flatten(io_lib:format("invalid module: ~p", [M0]))}.
+
+%% verify_module(M0) ->
+%% case (catch list_to_atom(M0)) of
+%% M when is_atom(M) ->
+%% {ok, M};
+%% _ ->
+%% {error, "invalid module: " ++ M0}
+%% end.
verify_agent_type("master") ->
@@ -2168,6 +2191,8 @@ write_sys_config_file_agent_opt(Fid, {config, Opts}) ->
ok = io:format(Fid, "}", []);
write_sys_config_file_agent_opt(Fid, {db_dir, Dir}) ->
ok = io:format(Fid, " {db_dir, \"~s\"}", [Dir]);
+write_sys_config_file_agent_opt(Fid, {db_init_error, Action}) ->
+ ok = io:format(Fid, " {db_init_error, ~w}", [Action]);
write_sys_config_file_agent_opt(Fid, {mib_storage, ets}) ->
ok = io:format(Fid, " {mib_storage, ets}", []);
write_sys_config_file_agent_opt(Fid, {mib_storage, {dets, Dir}}) ->
@@ -2344,6 +2369,8 @@ write_sys_config_file_manager_config_opt(Fid, {dir, Dir}) ->
ok = io:format(Fid, "{dir, \"~s\"}", [Dir]);
write_sys_config_file_manager_config_opt(Fid, {db_dir, Dir}) ->
ok = io:format(Fid, "{db_dir, \"~s\"}", [Dir]);
+write_sys_config_file_manager_config_opt(Fid, {db_init_error, Action}) ->
+ ok = io:format(Fid, "{db_init_error, ~w}", [Action]);
write_sys_config_file_manager_config_opt(Fid, {repair, Rep}) ->
ok = io:format(Fid, "{repair, ~w}", [Rep]);
write_sys_config_file_manager_config_opt(Fid, {auto_save, As}) ->
diff --git a/lib/snmp/src/misc/snmp_log.erl b/lib/snmp/src/misc/snmp_log.erl
index a8c5df0b64..ae28df37fa 100644
--- a/lib/snmp/src/misc/snmp_log.erl
+++ b/lib/snmp/src/misc/snmp_log.erl
@@ -24,8 +24,8 @@
create/4, create/5, create/6, open/1, open/2,
change_size/2, close/1, sync/1, info/1,
log/4,
- log_to_txt/5, log_to_txt/6, log_to_txt/7,
- log_to_io/4, log_to_io/5, log_to_io/6
+ log_to_txt/6, log_to_txt/7, log_to_txt/8,
+ log_to_io/5, log_to_io/6, log_to_io/7
]).
-export([
upgrade/1, upgrade/2,
@@ -34,7 +34,17 @@
-export([
validate/1, validate/2
]).
+%% <BACKWARD-COMPAT>
+-export([
+ log_to_txt/5,
+ log_to_io/4
+ ]).
+%% </BACKWARD-COMPAT>
+-export_type([
+ log/0,
+ log_time/0
+ ]).
-define(SNMP_USE_V3, true).
-include("snmp_types.hrl").
@@ -42,12 +52,24 @@
-define(VMODULE,"LOG").
-include("snmp_verbosity.hrl").
--define(LOG_FORMAT, internal).
--define(LOG_TYPE, wrap).
+-define(LOG_FORMAT, internal).
+-define(LOG_TYPE, wrap).
+-define(BLOCK_DEFAULT, true).
-record(snmp_log, {id, seqno}).
+%%-----------------------------------------------------------------
+%% Types
+%%-----------------------------------------------------------------
+
+-opaque log() :: #snmp_log{}.
+-type log_time() :: null |
+ calendar:datetime() |
+ {local_time, calendar:datetime()} |
+ {universal_time, calendar:datetime()}.
+
+
%% --------------------------------------------------------------------
%% Exported functions
%% --------------------------------------------------------------------
@@ -322,7 +344,6 @@ validate_loop(Error, _Log, _Write, _PrevTS, _PrevSN) ->
%% log(Log, Packet, Addr, Port)
%%-----------------------------------------------------------------
-
log(#snmp_log{id = Log, seqno = SeqNo}, Packet, Addr, Port) ->
?vtrace("log -> entry with"
"~n Log: ~p"
@@ -378,53 +399,86 @@ do_change_size(Log, NewSize) ->
%% -- log_to_txt ---
+%% <BACKWARD-COMPAT>
log_to_txt(Log, FileName, Dir, Mibs, TextFile) ->
- log_to_txt(Log, FileName, Dir, Mibs, TextFile, null, null).
+ log_to_txt(Log, ?BLOCK_DEFAULT, FileName, Dir, Mibs, TextFile).
+%% </BACKWARD-COMPAT>
+log_to_txt(Log, Block, FileName, Dir, Mibs, TextFile)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ log_to_txt(Log, Block, FileName, Dir, Mibs, TextFile, null, null);
+%% <BACKWARD-COMPAT>
log_to_txt(Log, FileName, Dir, Mibs, TextFile, Start) ->
- log_to_txt(Log, FileName, Dir, Mibs, TextFile, Start, null).
-
-log_to_txt(Log, FileName, Dir, Mibs, TextFile, Start, Stop)
- when is_list(Mibs) andalso is_list(TextFile) ->
+ log_to_txt(Log, ?BLOCK_DEFAULT, FileName, Dir, Mibs, TextFile, Start, null).
+%% </BACKWARD-COMPAT>
+
+log_to_txt(Log, Block, FileName, Dir, Mibs, TextFile, Start)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ log_to_txt(Log, Block, FileName, Dir, Mibs, TextFile, Start, null);
+%% <BACKWARD-COMPAT>
+log_to_txt(Log, FileName, Dir, Mibs, TextFile, Start, Stop) ->
+ log_to_txt(Log, ?BLOCK_DEFAULT, FileName, Dir, Mibs, TextFile, Start, Stop).
+%% </BACKWARD-COMPAT>
+
+log_to_txt(Log, Block, FileName, Dir, Mibs, TextFile, Start, Stop)
+ when (((Block =:= true) orelse (Block =:= false)) andalso
+ is_list(Mibs) andalso is_list(TextFile)) ->
?vtrace("log_to_txt -> entry with"
"~n Log: ~p"
+ "~n Block: ~p"
"~n FileName: ~p"
"~n Dir: ~p"
"~n Mibs: ~p"
"~n TextFile: ~p"
"~n Start: ~p"
"~n Stop: ~p",
- [Log, FileName, Dir, Mibs, TextFile, Start, Stop]),
+ [Log, Block, FileName, Dir, Mibs, TextFile, Start, Stop]),
File = filename:join(Dir, FileName),
Converter = fun(L) ->
do_log_to_file(L, TextFile, Mibs, Start, Stop)
end,
- log_convert(Log, File, Converter).
+ log_convert(Log, Block, File, Converter).
%% -- log_to_io ---
+%% <BACKWARD-COMPAT>
log_to_io(Log, FileName, Dir, Mibs) ->
- log_to_io(Log, FileName, Dir, Mibs, null, null).
+ log_to_io(Log, ?BLOCK_DEFAULT, FileName, Dir, Mibs, null, null).
+%% </BACKWARD-COMPAT>
+log_to_io(Log, Block, FileName, Dir, Mibs)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ log_to_io(Log, Block, FileName, Dir, Mibs, null, null);
+%% <BACKWARD-COMPAT>
log_to_io(Log, FileName, Dir, Mibs, Start) ->
- log_to_io(Log, FileName, Dir, Mibs, Start, null).
-
-log_to_io(Log, FileName, Dir, Mibs, Start, Stop)
+ log_to_io(Log, ?BLOCK_DEFAULT, FileName, Dir, Mibs, Start, null).
+%% </BACKWARD-COMPAT>
+
+log_to_io(Log, Block, FileName, Dir, Mibs, Start)
+ when ((Block =:= true) orelse (Block =:= false)) ->
+ log_to_io(Log, Block, FileName, Dir, Mibs, Start, null);
+%% <BACKWARD-COMPAT>
+log_to_io(Log, FileName, Dir, Mibs, Start, Stop) ->
+ log_to_io(Log, ?BLOCK_DEFAULT, FileName, Dir, Mibs, Start, Stop).
+%% </BACKWARD-COMPAT>
+
+log_to_io(Log, Block, FileName, Dir, Mibs, Start, Stop)
when is_list(Mibs) ->
?vtrace("log_to_io -> entry with"
"~n Log: ~p"
+ "~n Block: ~p"
"~n FileName: ~p"
"~n Dir: ~p"
"~n Mibs: ~p"
"~n Start: ~p"
"~n Stop: ~p",
- [Log, FileName, Dir, Mibs, Start, Stop]),
+ [Log, Block, FileName, Dir, Mibs, Start, Stop]),
File = filename:join(Dir, FileName),
Converter = fun(L) ->
do_log_to_io(L, Mibs, Start, Stop)
end,
- log_convert(Log, File, Converter).
+ log_convert(Log, Block, File, Converter).
%% --------------------------------------------------------------------
@@ -433,53 +487,121 @@ log_to_io(Log, FileName, Dir, Mibs, Start, Stop)
%% -- log_convert ---
-log_convert(#snmp_log{id = Log}, File, Converter) ->
- do_log_convert(Log, File, Converter);
-log_convert(Log, File, Converter) ->
- do_log_convert(Log, File, Converter).
+log_convert(#snmp_log{id = Log}, Block, File, Converter) ->
+ do_log_convert(Log, Block, File, Converter);
+log_convert(Log, Block, File, Converter) ->
+ do_log_convert(Log, Block, File, Converter).
-do_log_convert(Log, File, Converter) ->
+do_log_convert(Log, Block, File, Converter) ->
%% ?vtrace("do_log_converter -> entry with"
- %% "~n Log: ~p"
- %% "~n File: ~p"
- %% "~n disk_log:info(Log): ~p", [Log, File, disk_log:info(Log)]),
+ %% "~n Log: ~p"
+ %% "~n Block: ~p"
+ %% "~n File: ~p"
+ %% [Log, Block, File]),
+ Verbosity = get(verbosity),
{Pid, Ref} =
erlang:spawn_monitor(
fun() ->
- Result = do_log_convert2(Log, File, Converter),
+ put(sname, lc),
+ put(verbosity, Verbosity),
+ ?vlog("begin converting", []),
+ Result = do_log_convert2(Log, Block, File, Converter),
+ ?vlog("convert result: ~p", [Result]),
exit(Result)
end),
receive
{'DOWN', Ref, process, Pid, Result} ->
%% ?vtrace("do_log_converter -> received result"
- %% "~n Result: ~p"
- %% "~n disk_log:info(Log): ~p",
- %% [Result, disk_log:info(Log)]),
+ %% "~n Result: ~p", [Result]),
Result
end.
-do_log_convert2(Log, File, Converter) ->
+do_log_convert2(Log, Block, File, Converter) ->
+
+ %% ?vtrace("do_log_converter2 -> entry with"
+ %% "~n Log: ~p"
+ %% "~n Block: ~p"
+ %% "~n File: ~p"
+ %% "~n disk_log:info(Log): ~p",
+ %% [Log, Block, File, disk_log:info(Log)]),
+
%% First check if the caller process has already opened the
%% log, because if we close an already open log we will cause
%% a runtime error.
+
+ ?vtrace("do_log_convert2 -> entry - check if owner", []),
case is_owner(Log) of
true ->
- Converter(Log);
+ ?vtrace("do_log_converter2 -> convert an already owned log", []),
+ maybe_block(Log, Block),
+ Res = Converter(Log),
+ maybe_unblock(Log, Block),
+ Res;
false ->
%% Not yet member of the ruling party, apply for membership...
+ ?vtrace("do_log_converter2 -> convert log", []),
case log_open(Log, File) of
{ok, _} ->
+ ?vdebug("do_log_convert2 -> opened - now convert", []),
+ maybe_block(Log, Block),
Res = Converter(Log),
+ maybe_unblock(Log, Block),
disk_log:close(Log),
+ ?vdebug("do_log_convert2 -> converted - done: "
+ "~n Result: ~p", [Res]),
Res;
{error, {name_already_open, _}} ->
- Converter(Log);
+ ?vdebug("do_log_convert2 -> "
+ "already opened - now convert", []),
+ maybe_block(Log, Block),
+ Res = Converter(Log),
+ maybe_unblock(Log, Block),
+ ?vdebug("do_log_convert2 -> converted - done: "
+ "~n Result: ~p", [Res]),
+ Res;
{error, Reason} ->
+ ?vinfo("do_log_converter2 -> "
+ "failed converting log - open failed: "
+ "~n Reason: ~p", [Reason]),
{error, {Log, Reason}}
end
end.
+maybe_block(_Log, false = _Block) ->
+ %% ?vtrace("maybe_block(false) -> entry", []),
+ ok;
+maybe_block(Log, true = _Block) ->
+ %% ?vtrace("maybe_block(true) -> entry when"
+ %% "~n Log Status: ~p", [log_status(Log)]),
+ Res = disk_log:block(Log, true),
+ %% ?vtrace("maybe_block(true) -> "
+ %% "~n Log Status: ~p"
+ %% "~n Res: ~p", [log_status(Log), Res]),
+ Res.
+
+maybe_unblock(_Log, false = _Block) ->
+ %% ?vtrace("maybe_unblock(false) -> entry", []),
+ ok;
+maybe_unblock(Log, true = _Block) ->
+ %% ?vtrace("maybe_unblock(true) -> entry when"
+ %% "~n Log Status: ~p", [log_status(Log)]),
+ Res = disk_log:unblock(Log),
+ %% ?vtrace("maybe_unblock(true) -> "
+ %% "~n Log Status: ~p"
+ %% "~n Res: ~p", [log_status(Log), Res]),
+ Res.
+
+%% log_status(Log) ->
+%% Info = disk_log:info(Log),
+%% case lists:keysearch(status, 1, Info) of
+%% {value, {status, Status}} ->
+%% Status;
+%% false ->
+%% undefined
+%% end.
+
+
%% -- do_log_to_text ---
do_log_to_file(Log, TextFile, Mibs, Start, Stop) ->
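The Block argument threaded through snmp_log above is also exposed via the snmp module wrappers (the snmp_log_test changes further down call snmp:log_to_txt/6 and snmp:log_to_io/5 with an explicit Block). A small usage sketch, with placeholder log and file names, is shown below.

%% Sketch only: "snmp_log"/"snmp_log.LOG" are placeholder names for an audit
%% trail log. With Block = true the conversion is wrapped in disk_log
%% block/unblock, so concurrent writers cannot interleave entries with it.
convert_audit_log(LogDir) ->
    TxtFile = filename:join(LogDir, "snmp_log.txt"),
    ok = snmp:log_to_txt(LogDir, [], TxtFile, "snmp_log", "snmp_log.LOG", true),
    %% Or print the entries directly, without blocking the log:
    ok = snmp:log_to_io(LogDir, [], "snmp_log", "snmp_log.LOG", false).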
diff --git a/lib/snmp/src/misc/snmp_usm.erl b/lib/snmp/src/misc/snmp_usm.erl
index 67e3476816..32198deb8b 100644
--- a/lib/snmp/src/misc/snmp_usm.erl
+++ b/lib/snmp/src/misc/snmp_usm.erl
@@ -16,6 +16,8 @@
%%
%% %CopyrightEnd%
%%
+%% AES: RFC 3826
+%%
-module(snmp_usm).
@@ -24,7 +26,7 @@
-export([passwd2localized_key/3, localize_key/3]).
-export([auth_in/4, auth_out/4, set_msg_auth_params/3]).
-export([des_encrypt/3, des_decrypt/3]).
--export([aes_encrypt/3, aes_decrypt/5]).
+-export([aes_encrypt/5, aes_decrypt/5]).
-define(SNMP_USE_V3, true).
@@ -42,6 +44,9 @@
-define(i32(Int), (Int bsr 24) band 255, (Int bsr 16) band 255, (Int bsr 8) band 255, Int band 255).
+-define(BLOCK_CIPHER_AES, aes_cfb128).
+-define(BLOCK_CIPHER_DES, des_cbc).
+
%%-----------------------------------------------------------------
%% Func: passwd2localized_key/3
@@ -210,7 +215,8 @@ des_encrypt(PrivKey, Data, SaltFun) ->
IV = list_to_binary(snmp_misc:str_xor(PreIV, Salt)),
TailLen = (8 - (length(Data) rem 8)) rem 8,
Tail = mk_tail(TailLen),
- EncData = crypto:block_encrypt(des_cbc, DesKey, IV, [Data,Tail]),
+ EncData = crypto:block_encrypt(?BLOCK_CIPHER_DES,
+ DesKey, IV, [Data,Tail]),
{ok, binary_to_list(EncData), Salt}.
des_decrypt(PrivKey, MsgPrivParams, EncData)
@@ -224,7 +230,8 @@ des_decrypt(PrivKey, MsgPrivParams, EncData)
Salt = MsgPrivParams,
IV = list_to_binary(snmp_misc:str_xor(PreIV, Salt)),
%% What about errors here??? E.g. not a multiple of 8!
- Data = binary_to_list(crypto:block_decrypt(des_cbc, DesKey, IV, EncData)),
+ Data = binary_to_list(crypto:block_decrypt(?BLOCK_CIPHER_DES,
+ DesKey, IV, EncData)),
Data2 = snmp_pdus:strip_encrypted_scoped_pdu_data(Data),
{ok, Data2};
des_decrypt(PrivKey, BadMsgPrivParams, EncData) ->
@@ -236,13 +243,12 @@ des_decrypt(PrivKey, BadMsgPrivParams, EncData) ->
throw({error, {bad_msgPrivParams, PrivKey, BadMsgPrivParams, EncData}}).
-aes_encrypt(PrivKey, Data, SaltFun) ->
+aes_encrypt(PrivKey, Data, SaltFun, EngineBoots, EngineTime) ->
AesKey = PrivKey,
Salt = SaltFun(),
- EngineBoots = snmp_framework_mib:get_engine_boots(),
- EngineTime = snmp_framework_mib:get_engine_time(),
IV = list_to_binary([?i32(EngineBoots), ?i32(EngineTime) | Salt]),
- EncData = crypto:block_encrypt(aes_cbf128, AesKey, IV, Data),
+ EncData = crypto:block_encrypt(?BLOCK_CIPHER_AES,
+ AesKey, IV, Data),
{ok, binary_to_list(EncData), Salt}.
aes_decrypt(PrivKey, MsgPrivParams, EncData, EngineBoots, EngineTime)
@@ -251,7 +257,8 @@ aes_decrypt(PrivKey, MsgPrivParams, EncData, EngineBoots, EngineTime)
Salt = MsgPrivParams,
IV = list_to_binary([?i32(EngineBoots), ?i32(EngineTime) | Salt]),
%% What about errors here??? E.g. not a multiple of 8!
- Data = binary_to_list(crypto:block_decrypt(aes_cbf128, AesKey, IV, EncData)),
+ Data = binary_to_list(crypto:block_decrypt(?BLOCK_CIPHER_AES,
+ AesKey, IV, EncData)),
Data2 = snmp_pdus:strip_encrypted_scoped_pdu_data(Data),
{ok, Data2}.
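The aes_encrypt change above makes the caller supply EngineBoots and EngineTime instead of reading them from the agent's snmp_framework_mib, which is what allows the manager's snmpm_usm to reuse the function. For reference, a stand-alone sketch of the RFC 3826 IV construction used by both aes_encrypt/5 and aes_decrypt/5 (assuming an 8-octet salt list, as produced by the salt funs above):

%% The IV is the 32-bit EngineBoots, the 32-bit EngineTime and the 64-bit salt,
%% concatenated; ?i32/1 is the same byte-splitting macro the module defines.
-define(i32(Int), (Int bsr 24) band 255, (Int bsr 16) band 255,
                  (Int bsr 8) band 255, Int band 255).

aes_iv(EngineBoots, EngineTime, Salt) when length(Salt) =:= 8 ->
    list_to_binary([?i32(EngineBoots), ?i32(EngineTime) | Salt]).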
diff --git a/lib/snmp/src/misc/snmp_verbosity.erl b/lib/snmp/src/misc/snmp_verbosity.erl
index df5986b7bc..f27c31db03 100644
--- a/lib/snmp/src/misc/snmp_verbosity.erl
+++ b/lib/snmp/src/misc/snmp_verbosity.erl
@@ -148,6 +148,8 @@ image_of_sname(mnifl) -> "M-NET-IF-LOGGER";
image_of_sname(mnifw) -> io_lib:format("M-NET-IF-worker(~p)", [self()]);
image_of_sname(mconf) -> "M-CONF";
+image_of_sname(lc) -> io_lib:format("LOG-CONVERTER(~p)", [self()]);
+
image_of_sname(mgr) -> "MGR";
image_of_sname(mgr_misc) -> "MGR_MISC";
diff --git a/lib/snmp/test/snmp_agent_test.erl b/lib/snmp/test/snmp_agent_test.erl
index 7e683e315a..b7d34eb198 100644
--- a/lib/snmp/test/snmp_agent_test.erl
+++ b/lib/snmp/test/snmp_agent_test.erl
@@ -34,6 +34,7 @@
%% all_tcs - misc
app_info/1,
info_test/1,
+ create_local_db_dir/1,
%% all_tcs - test_v1
simple/1,
@@ -1506,7 +1507,8 @@ finish_misc(Config) ->
misc_cases() ->
[
app_info,
- info_test
+ info_test,
+ create_local_db_dir
].
app_info(suite) -> [];
@@ -1539,7 +1541,75 @@ app_dir(App) ->
"undefined"
end.
+create_local_db_dir(Config) when is_list(Config) ->
+ ?P(create_local_db_dir),
+ DataDir = snmp_test_lib:lookup(data_dir, Config),
+ T = erlang:now(),
+ [As,Bs,Cs] = [integer_to_list(I) || I <- tuple_to_list(T)],
+ DbDir = filename:join([DataDir, As, Bs, Cs]),
+ ok = del_dir(DbDir, 3),
+ Name = list_to_atom(atom_to_list(create_local_db_dir)
+ ++"-"++As++"-"++Bs++"-"++Cs),
+ Pa = filename:dirname(code:which(?MODULE)),
+ {ok,Node} = ?t:start_node(Name, slave, [{args, "-pa "++Pa}]),
+
+ %% first start with a nonexisting DbDir
+ Fun1 = fun() ->
+ false = filelib:is_dir(DbDir),
+ process_flag(trap_exit,true),
+ {error, {error, {failed_open_dets, {file_error, _, _}}}} =
+ snmpa_local_db:start_link(normal, DbDir, [{verbosity,trace}]),
+ false = filelib:is_dir(DbDir),
+ {ok, not_found}
+ end,
+ {ok, not_found} = nodecall(Node, Fun1),
+ %% now start with a nonexisting DbDir but pass the
+ %% create_local_db_dir option as well
+ Fun2 = fun() ->
+ false = filelib:is_dir(DbDir),
+ process_flag(trap_exit,true),
+ {ok, _Pid} =
+ snmpa_local_db:start_link(normal, DbDir,
+ create_db_and_dir, [{verbosity,trace}]),
+ snmpa_local_db:stop(),
+ true = filelib:is_dir(DbDir),
+ {ok, found}
+ end,
+ {ok, found} = nodecall(Node, Fun2),
+ %% cleanup
+ ?t:stop_node(Node),
+ ok = del_dir(DbDir, 3),
+ ok.
+
+nodecall(Node, Fun) ->
+ Parent = self(),
+ Ref = make_ref(),
+ spawn_link(Node,
+ fun() ->
+ Res = Fun(),
+ unlink(Parent),
+ Parent ! {Ref, Res}
+ end),
+ receive
+ {Ref, Res} ->
+ Res
+ end.
+del_dir(_Dir, 0) ->
+ ok;
+del_dir(Dir, Depth) ->
+ case filelib:is_dir(Dir) of
+ true ->
+ {ok, Files} = file:list_dir(Dir),
+ lists:map(fun(F) ->
+ Nm = filename:join(Dir,F),
+ ok = file:delete(Nm)
+ end, Files),
+ ok = file:del_dir(Dir),
+ del_dir(filename:dirname(Dir), Depth-1);
+ false ->
+ ok
+ end.
%v1_cases() -> [loop_mib];
v1_cases() ->
diff --git a/lib/snmp/test/snmp_log_test.erl b/lib/snmp/test/snmp_log_test.erl
index e9345b44cc..fb7285110f 100644
--- a/lib/snmp/test/snmp_log_test.erl
+++ b/lib/snmp/test/snmp_log_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -331,7 +331,7 @@ log_to_io1(doc) -> "Log to io from the same process that opened "
log_to_io1(Config) when is_list(Config) ->
p(log_to_io1),
put(sname,l2i1),
- put(verbosity,trace),
+ put(verbosity,debug),
?DBG("log_to_io1 -> start", []),
Dir = ?config(log_dir, Config),
Name = "snmp_test_l2i1",
@@ -365,7 +365,7 @@ log_to_io1(Config) when is_list(Config) ->
display_info(Info),
?DBG("log_to_io1 -> do the convert to io (stdout)", []),
- ? line ok = snmp_log:log_to_io(Log, File, Dir, []),
+ ? line ok = snmp:log_to_io(Dir, [], Name, File, false),
?DBG("log_to_io1 -> close log", []),
?line ok = snmp_log:close(Log),
@@ -377,7 +377,7 @@ log_to_io1(Config) when is_list(Config) ->
%%======================================================================
%% Start a logger-process that logs messages with a certain interval.
%% Start a reader-process that reads messages from the log at a certain
-%% point of time.
+%% point in time.
log_to_io2(suite) -> [];
log_to_io2(doc) -> "Log to io from a different process than which "
@@ -386,7 +386,7 @@ log_to_io2(Config) when is_list(Config) ->
process_flag(trap_exit, true),
p(log_to_io2),
put(sname, l2i2),
- put(verbosity,trace),
+ put(verbosity,debug),
?DBG("log_to_io2 -> start", []),
Dir = ?config(log_dir, Config),
Name = "snmp_test_l2i2",
@@ -414,13 +414,13 @@ log_to_io2(Config) when is_list(Config) ->
log_reader_log_to(Reader,
fun() ->
I = disk_log:info(Log),
- R = snmp_log:log_to_io(Log, File, Dir, []),
+ R = snmp:log_to_io(Dir, [], Name, File, true),
{R, I}
end),
case Res of
- {ok, Info} ->
- ?DBG("log_to_io2 -> ~n Info: ~p", [Info]),
+ {ok, _Info} ->
+ ?DBG("log_to_io2 -> ~n Info: ~p", [_Info]),
ok;
{Error, Info} ->
?DBG("log_to_io2 -> log to io failed: "
@@ -445,7 +445,7 @@ log_to_txt1(suite) -> [];
log_to_txt1(Config) when is_list(Config) ->
p(log_to_txt1),
put(sname,l2t1),
- put(verbosity,trace),
+ put(verbosity,debug),
?DBG("log_to_txt1 -> start", []),
Name = "snmp_test_l2t1",
@@ -463,7 +463,7 @@ log_to_txt2(suite) -> [];
log_to_txt2(Config) when is_list(Config) ->
p(log_to_txt2),
put(sname,l2t2),
- put(verbosity,trace),
+ put(verbosity,debug),
?DBG("log_to_txt2 -> start", []),
Name = "snmp_test_l2t2",
@@ -520,14 +520,21 @@ log_to_txt(Name, SeqNoGen, Config) when is_list(Config) ->
?line {ok, Info} = snmp_log:info(Log),
display_info(Info),
- Out1 = join(Dir, "snmp_text-1.txt"),
- ?DBG("log_to_txt -> do the convert to a text file when"
- "~n Out1: ~p", [Out1]),
- ?line ok = snmp:log_to_txt(Dir, [], Out1, Log, File),
+ Out1a = join(Dir, "snmp_text-1-unblocked.txt"),
+ ?DBG("log_to_txt -> do the convert to a text file (~s) unblocked", [Out1a]),
+ ?line ok = snmp:log_to_txt(Dir, [], Out1a, Log, File, false),
+
+ ?line {ok, #file_info{size = Size1a}} = file:read_file_info(Out1a),
+ ?DBG("log_to_txt -> text file size: ~p", [Size1a]),
+ validate_size(Size1a),
+
+ Out1b = join(Dir, "snmp_text-1-blocked.txt"),
+ ?DBG("log_to_txt -> do the convert to a text file (~s) blocked", [Out1b]),
+ ?line ok = snmp:log_to_txt(Dir, [], Out1b, Log, File, true),
- ?line {ok, #file_info{size = Size1}} = file:read_file_info(Out1),
- ?DBG("log_to_txt -> text file size: ~p", [Size1]),
- validate_size(Size1),
+ ?line {ok, #file_info{size = Size1b}} = file:read_file_info(Out1b),
+ ?DBG("log_to_txt -> text file size: ~p", [Size1b]),
+ validate_size(Size1b, {eq, Size1a}),
Out2 = join(Dir, "snmp_text-2.txt"),
?DBG("log_to_txt -> do the convert to a text file when"
@@ -538,7 +545,7 @@ log_to_txt(Name, SeqNoGen, Config) when is_list(Config) ->
?line {ok, #file_info{size = Size2}} = file:read_file_info(Out2),
?DBG("log_to_txt -> text file size: ~p", [Size2]),
- validate_size(Size2, {le, Size1}),
+ validate_size(Size2, {le, Size1a}),
%% Calculate new start / stop times...
GStart = calendar:datetime_to_gregorian_seconds(Start),
@@ -568,7 +575,7 @@ log_to_txt(Name, SeqNoGen, Config) when is_list(Config) ->
?line {ok, #file_info{size = Size3}} = file:read_file_info(Out3),
?DBG("log_to_txt -> text file size: ~p", [Size3]),
- validate_size(Size3, {l, Size1}),
+ validate_size(Size3, {l, Size1a}),
?DBG("log_to_txt -> close log", []),
?line ok = snmp_log:close(Log),
@@ -593,7 +600,7 @@ log_to_txt3(Config) when is_list(Config) ->
process_flag(trap_exit, true),
p(log_to_txt3),
put(sname,l2t3),
- put(verbosity,trace),
+ put(verbosity,debug),
?DBG("log_to_txt3 -> start", []),
Dir = ?config(log_dir, Config),
Name = "snmp_test_l2t3",
@@ -637,8 +644,8 @@ log_to_txt3(Config) when is_list(Config) ->
end),
case Res of
- {ok, Info} ->
- ?DBG("log_to_txt3 -> ~n Info: ~p", [Info]),
+ {ok, _Info} ->
+ ?DBG("log_to_txt3 -> ~n Info: ~p", [_Info]),
?line {ok, #file_info{size = FileSize}} =
file:read_file_info(TxtFile),
?DBG("log_to_txt3 -> text file size: ~p", [FileSize]),
@@ -667,6 +674,8 @@ validate_size(_) ->
validate_size(0, _) ->
?FAIL(invalid_size);
+validate_size(A, {eq, A}) ->
+ ok;
validate_size(A, {le, B}) when A =< B ->
ok;
validate_size(A, {l, B}) when A < B ->
@@ -695,11 +704,11 @@ log_writer_start(Name, File, Size, Repair) ->
log_writer_stop(Pid) ->
Pid ! {stop, self()},
- T1 = t(),
+ _T1 = t(),
receive
{'EXIT', Pid, normal} ->
- T2 = t(),
- ?DBG("it took ~w ms to stop the writer", [T2 - T1]),
+ _T2 = t(),
+ ?DBG("it took ~w ms to stop the writer", [_T2 - _T1]),
ok
after 60000 ->
Msg = receive Any -> Any after 0 -> nothing end,
@@ -712,11 +721,11 @@ log_writer_info(Pid) ->
log_writer_sleep(Pid, Time) ->
Pid ! {sleep, Time, self()},
- T1 = t(),
+ _T1 = t(),
receive
{sleeping, Pid} ->
- T2 = t(),
- ?DBG("it took ~w ms to put the writer to sleep", [T2 - T1]),
+ _T2 = t(),
+ ?DBG("it took ~w ms to put the writer to sleep", [_T2 - _T1]),
ok;
{'EXIT', Pid, Reason} ->
{error, Reason}
@@ -784,11 +793,11 @@ lp(F, A) ->
log_reader_start() ->
Pid = spawn_link(?MODULE, log_reader_main, [self()]),
- T1 = t(),
+ _T1 = t(),
receive
{started, Pid} ->
- T2 = t(),
- ?DBG("it took ~w ms to start the reader", [T2 - T1]),
+ _T2 = t(),
+ ?DBG("it took ~w ms to start the reader", [_T2 - _T1]),
{ok, Pid};
{'EXIT', Pid, Reason} ->
{error, Reason}
@@ -798,11 +807,11 @@ log_reader_start() ->
log_reader_stop(Pid) ->
Pid ! {stop, self()},
- T1 = t(),
+ _T1 = t(),
receive
{'EXIT', Pid, normal} ->
- T2 = t(),
- ?DBG("it took ~w ms to put the reader to eleep", [T2 - T1]),
+ _T2 = t(),
+ ?DBG("it took ~w ms to put the reader to eleep", [_T2 - _T1]),
ok
after 1000 ->
Msg = receive Any -> Any after 0 -> nothing end,
diff --git a/lib/snmp/test/snmp_manager_config_test.erl b/lib/snmp/test/snmp_manager_config_test.erl
index 3192fe1b40..7b9924b83c 100644
--- a/lib/snmp/test/snmp_manager_config_test.erl
+++ b/lib/snmp/test/snmp_manager_config_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2004-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2004-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -57,6 +57,7 @@
start_with_invalid_users_conf_file1/1,
start_with_invalid_agents_conf_file1/1,
start_with_invalid_usm_conf_file1/1,
+ start_with_create_db_and_dir_opt/1,
@@ -139,8 +140,13 @@ init_per_testcase(Case, Config) when is_list(Config) ->
file:make_dir(MgrTopDir = filename:join(CaseTopDir, "manager/")),
?line ok =
file:make_dir(MgrConfDir = filename:join(MgrTopDir, "conf/")),
- ?line ok =
- file:make_dir(MgrDbDir = filename:join(MgrTopDir, "db/")),
+ MgrDbDir = filename:join(MgrTopDir, "db/"),
+ case Case of
+ start_with_create_db_and_dir_opt ->
+ ok;
+ _ ->
+ ?line ok = file:make_dir(MgrDbDir)
+ end,
?line ok =
file:make_dir(MgrLogDir = filename:join(MgrTopDir, "log/")),
[{case_top_dir, CaseTopDir},
@@ -174,6 +180,7 @@ groups() ->
start_without_mandatory_opts2,
start_with_all_valid_opts, start_with_unknown_opts,
start_with_incorrect_opts,
+ start_with_create_db_and_dir_opt,
start_with_invalid_manager_conf_file1,
start_with_invalid_users_conf_file1,
start_with_invalid_agents_conf_file1,
@@ -332,7 +339,8 @@ start_with_all_valid_opts(Conf) when is_list(Conf) ->
{no_reuse, false}]}],
ServerOpts = [{timeout, 10000}, {verbosity, trace}],
NoteStoreOpts = [{timeout, 20000}, {verbosity, trace}],
- ConfigOpts = [{dir, ConfDir}, {verbosity, trace}, {db_dir, DbDir}],
+ ConfigOpts = [{dir, ConfDir}, {verbosity, trace},
+ {db_dir, DbDir}, {db_init_error, create}],
Mibs = [join(StdMibDir, "SNMP-NOTIFICATION-MIB"),
join(StdMibDir, "SNMP-USER-BASED-SM-MIB")],
Prio = normal,
@@ -1674,7 +1682,34 @@ start_with_invalid_usm_conf_file1(Conf) when is_list(Conf) ->
%% ---
%%
+start_with_create_db_and_dir_opt(suite) -> [];
+start_with_create_db_and_dir_opt(doc) ->
+ "Start the snmp manager config process with the\n"
+ "create_db_and_dir option.";
+start_with_create_db_and_dir_opt(Conf) when is_list(Conf) ->
+ put(tname, swcdado),
+ p("start"),
+ process_flag(trap_exit, true),
+ ConfDir = ?config(manager_conf_dir, Conf),
+ DbDir = ?config(manager_db_dir, Conf),
+ true = not filelib:is_dir(DbDir) and not filelib:is_file(DbDir),
+ write_manager_conf(ConfDir),
+
+ p("verify nonexistent db_dir"),
+ ConfigOpts01 = [{verbosity,trace}, {dir, ConfDir}, {db_dir, DbDir}],
+ {error, Reason01} = config_start([{config, ConfigOpts01}]),
+ p("nonexistent db_dir res: ~p", [Reason01]),
+ {invalid_conf_db_dir, _, not_found} = Reason01,
+ p("verify nonexistent db_dir gets created"),
+ ConfigOpts02 = [{db_init_error, create_db_and_dir} | ConfigOpts01],
+ {ok, _Pid} = config_start([{config, ConfigOpts02}]),
+ true = filelib:is_dir(DbDir),
+ p("verified: nonexistent db_dir was correctly created"),
+ ok = config_stop(),
+
+ p("done"),
+ ok.
%%
%% ---
diff --git a/lib/snmp/test/snmp_manager_test.erl b/lib/snmp/test/snmp_manager_test.erl
index dedbae5ce4..5fe18980bc 100644
--- a/lib/snmp/test/snmp_manager_test.erl
+++ b/lib/snmp/test/snmp_manager_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -1615,10 +1615,10 @@ simple_sync_get1(Config) when is_list(Config) ->
ok.
do_simple_sync_get(Node, Addr, Port, Oids) ->
- ?line {ok, Reply, Rem} = mgr_user_sync_get(Node, Addr, Port, Oids),
+ ?line {ok, Reply, _Rem} = mgr_user_sync_get(Node, Addr, Port, Oids),
?DBG("~n Reply: ~p"
- "~n Rem: ~w", [Reply, Rem]),
+ "~n Rem: ~w", [Reply, _Rem]),
%% verify that the operation actually worked:
%% The order should be the same, so no need to search
@@ -1682,10 +1682,10 @@ do_simple_sync_get2(Config, Get, PostVerify) ->
do_simple_sync_get2(Node, TargetName, Oids, Get, PostVerify)
when is_function(Get, 3) andalso is_function(PostVerify, 0) ->
- ?line {ok, Reply, Rem} = Get(Node, TargetName, Oids),
+ ?line {ok, Reply, _Rem} = Get(Node, TargetName, Oids),
?DBG("~n Reply: ~p"
- "~n Rem: ~w", [Reply, Rem]),
+ "~n Rem: ~w", [Reply, _Rem]),
%% verify that the operation actually worked:
%% The order should be the same, so no need to seach
@@ -2061,10 +2061,10 @@ simple_sync_get_next1(Config) when is_list(Config) ->
do_simple_get_next(N, Node, Addr, Port, Oids, Verify) ->
p("issue get-next command ~w", [N]),
case mgr_user_sync_get_next(Node, Addr, Port, Oids) of
- {ok, Reply, Rem} ->
+ {ok, Reply, _Rem} ->
?DBG("get-next ok:"
"~n Reply: ~p"
- "~n Rem: ~w", [Reply, Rem]),
+ "~n Rem: ~w", [Reply, _Rem]),
Verify(Reply);
Error ->
@@ -2217,10 +2217,10 @@ do_simple_sync_get_next2(Config, GetNext, PostVerify)
do_simple_get_next(N, Node, TargetName, Oids, Verify, GetNext, PostVerify) ->
p("issue get-next command ~w", [N]),
case GetNext(Node, TargetName, Oids) of
- {ok, Reply, Rem} ->
+ {ok, Reply, _Rem} ->
?DBG("get-next ok:"
"~n Reply: ~p"
- "~n Rem: ~w", [Reply, Rem]),
+ "~n Rem: ~w", [Reply, _Rem]),
PostVerify(Verify(Reply));
Error ->
@@ -2551,10 +2551,10 @@ simple_sync_set1(Config) when is_list(Config) ->
do_simple_set1(Node, Addr, Port, VAVs) ->
[SysName, SysLoc] = value_of_vavs(VAVs),
- ?line {ok, Reply, Rem} = mgr_user_sync_set(Node, Addr, Port, VAVs),
+ ?line {ok, Reply, _Rem} = mgr_user_sync_set(Node, Addr, Port, VAVs),
?DBG("~n Reply: ~p"
- "~n Rem: ~w", [Reply, Rem]),
+ "~n Rem: ~w", [Reply, _Rem]),
%% verify that the operation actually worked:
%% The order should be the same, so no need to seach
@@ -2631,10 +2631,10 @@ do_simple_sync_set2(Config, Set, PostVerify)
do_simple_set2(Node, TargetName, VAVs, Set, PostVerify) ->
[SysName, SysLoc] = value_of_vavs(VAVs),
- ?line {ok, Reply, Rem} = Set(Node, TargetName, VAVs),
+ ?line {ok, Reply, _Rem} = Set(Node, TargetName, VAVs),
?DBG("~n Reply: ~p"
- "~n Rem: ~w", [Reply, Rem]),
+ "~n Rem: ~w", [Reply, _Rem]),
%% verify that the operation actually worked:
%% The order should be the same, so no need to seach
@@ -3026,10 +3026,10 @@ fl(L) ->
do_simple_get_bulk1(N, Node, Addr, Port, NonRep, MaxRep, Oids, Verify) ->
p("issue get-bulk command ~w", [N]),
case mgr_user_sync_get_bulk(Node, Addr, Port, NonRep, MaxRep, Oids) of
- {ok, Reply, Rem} ->
+ {ok, Reply, _Rem} ->
?DBG("get-bulk ok:"
"~n Reply: ~p"
- "~n Rem: ~w", [Reply, Rem]),
+ "~n Rem: ~w", [Reply, _Rem]),
Verify(Reply);
Error ->
@@ -3213,10 +3213,10 @@ do_simple_get_bulk2(N,
is_function(PostVerify) ->
p("issue get-bulk command ~w", [N]),
case GetBulk(NonRep, MaxRep, Oids) of
- {ok, Reply, Rem} ->
+ {ok, Reply, _Rem} ->
?DBG("get-bulk ok:"
"~n Reply: ~p"
- "~n Rem: ~w", [Reply, Rem]),
+ "~n Rem: ~w", [Reply, _Rem]),
PostVerify(Verify(Reply));
Error ->
@@ -5609,11 +5609,11 @@ init_mgr_user_data1(Conf) ->
[{address, Addr},
{port, Port},
{engine_id, "agentEngine"}]),
- Agents = mgr_user_which_own_agents(Node),
- ?DBG("Own agents: ~p", [Agents]),
+ _Agents = mgr_user_which_own_agents(Node),
+ ?DBG("Own agents: ~p", [_Agents]),
- ?line {ok, DefAgentConf} = mgr_user_agent_info(Node, TargetName, all),
- ?DBG("Default agent config: ~n~p", [DefAgentConf]),
+ ?line {ok, _DefAgentConf} = mgr_user_agent_info(Node, TargetName, all),
+ ?DBG("Default agent config: ~n~p", [_DefAgentConf]),
?line ok = mgr_user_update_agent_info(Node, TargetName,
community, "all-rights"),
@@ -5624,8 +5624,8 @@ init_mgr_user_data1(Conf) ->
?line ok = mgr_user_update_agent_info(Node, TargetName,
max_message_size, 1024),
- ?line {ok, AgentConf} = mgr_user_agent_info(Node, TargetName, all),
- ?DBG("Updated agent config: ~n~p", [AgentConf]),
+ ?line {ok, _AgentConf} = mgr_user_agent_info(Node, TargetName, all),
+ ?DBG("Updated agent config: ~n~p", [_AgentConf]),
Conf.
init_mgr_user_data2(Conf) ->
@@ -5639,11 +5639,11 @@ init_mgr_user_data2(Conf) ->
[{address, Addr},
{port, Port},
{engine_id, "agentEngine"}]),
- Agents = mgr_user_which_own_agents(Node),
- ?DBG("Own agents: ~p", [Agents]),
+ _Agents = mgr_user_which_own_agents(Node),
+ ?DBG("Own agents: ~p", [_Agents]),
- ?line {ok, DefAgentConf} = mgr_user_agent_info(Node, TargetName, all),
- ?DBG("Default agent config: ~n~p", [DefAgentConf]),
+ ?line {ok, _DefAgentConf} = mgr_user_agent_info(Node, TargetName, all),
+ ?DBG("Default agent config: ~n~p", [_DefAgentConf]),
?line ok = mgr_user_update_agent_info(Node, TargetName,
community, "all-rights"),
@@ -5652,8 +5652,8 @@ init_mgr_user_data2(Conf) ->
?line ok = mgr_user_update_agent_info(Node, TargetName,
max_message_size, 1024),
- ?line {ok, AgentConf} = mgr_user_agent_info(Node, TargetName, all),
- ?DBG("Updated agent config: ~n~p", [AgentConf]),
+ ?line {ok, _AgentConf} = mgr_user_agent_info(Node, TargetName, all),
+ ?DBG("Updated agent config: ~n~p", [_AgentConf]),
Conf.
fin_mgr_user_data1(Conf) ->
@@ -5853,12 +5853,12 @@ mgr_user_name_to_oid(Node, Name) ->
start_manager(Node, Vsns, Config) ->
start_manager(Node, Vsns, Config, []).
-start_manager(Node, Vsns, Conf0, Opts) ->
+start_manager(Node, Vsns, Conf0, _Opts) ->
?DBG("start_manager -> entry with"
"~n Node: ~p"
"~n Vsns: ~p"
"~n Conf0: ~p"
- "~n Opts: ~p", [Node, Vsns, Conf0, Opts]),
+ "~n Opts: ~p", [Node, Vsns, Conf0, _Opts]),
AtlDir = ?config(manager_log_dir, Conf0),
ConfDir = ?config(manager_conf_dir, Conf0),
@@ -5908,12 +5908,12 @@ stop_manager(Node, Conf) ->
start_agent(Node, Vsns, Config) ->
start_agent(Node, Vsns, Config, []).
-start_agent(Node, Vsns, Conf0, Opts) ->
+start_agent(Node, Vsns, Conf0, _Opts) ->
?DBG("start_agent -> entry with"
"~n Node: ~p"
"~n Vsns: ~p"
"~n Conf0: ~p"
- "~n Opts: ~p", [Node, Vsns, Conf0, Opts]),
+ "~n Opts: ~p", [Node, Vsns, Conf0, _Opts]),
AtlDir = ?config(agent_log_dir, Conf0),
ConfDir = ?config(agent_conf_dir, Conf0),
diff --git a/lib/snmp/test/snmp_manager_user.erl b/lib/snmp/test/snmp_manager_user.erl
index 4e789bbaec..ddbe156130 100644
--- a/lib/snmp/test/snmp_manager_user.erl
+++ b/lib/snmp/test/snmp_manager_user.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -681,6 +681,15 @@ loop(#state{parent = Parent, id = Id} = S) ->
Parent ! {async_event, TargetName, {report, SnmpReport}},
loop(S);
+ {handle_invalid_result, _Pid, In, Out} ->
+ d("loop -> received invalid result callback from manager for "
+ "~n In: ~p",
+ "~n Out: ~p", [In, Out]),
+ info("received invalid result message: "
+ "~n In: ~p"
+ "~n Out: ~p", [In, Out]),
+ loop(S);
+
{'EXIT', Parent, Reason} ->
d("received exit signal from parent: ~n~p", [Reason]),
info("received exit signal from parent: ~n~p", [Reason]),
@@ -770,7 +779,7 @@ handle_pdu(TargetName, ReqId, SnmpResponse, UserPid) ->
handle_trap(TargetName, SnmpTrap, UserPid) ->
UserPid ! {handle_trap, self(), TargetName, SnmpTrap},
- ok.
+ ignore.
handle_inform(TargetName, SnmpInform, UserPid) ->
UserPid ! {handle_inform, self(), TargetName, SnmpInform},
@@ -778,12 +787,12 @@ handle_inform(TargetName, SnmpInform, UserPid) ->
{handle_inform_no_response, TargetName} ->
no_reply;
{handle_inform_response, TargetName} ->
- ok
+ ignore
end.
handle_report(TargetName, SnmpReport, UserPid) ->
UserPid ! {handle_report, self(), TargetName, SnmpReport},
- ok.
+ ignore.
%%----------------------------------------------------------------------
diff --git a/lib/snmp/test/snmp_test_manager.erl b/lib/snmp/test/snmp_test_manager.erl
index 1f3383a7a8..925ae77ab5 100644
--- a/lib/snmp/test/snmp_test_manager.erl
+++ b/lib/snmp/test/snmp_test_manager.erl
@@ -47,7 +47,8 @@
handle_pdu/4,
handle_trap/3,
handle_inform/3,
- handle_report/3
+ handle_report/3,
+ handle_invalid_result/3
]).
@@ -279,12 +280,18 @@ handle_info({snmp_inform, TargetName, Info, Pid},
handle_info({snmp_report, TargetName, Info, Pid},
#state{parent = P} = State) ->
info_msg("received snmp report: "
- "~n TargetName: ~p"
- "~n Info: ~p", [TargetName, Info]),
+ "~n TargetName: ~p"
+ "~n Info: ~p", [TargetName, Info]),
Pid ! {snmp_report_reply, ignore, self()},
P ! {snmp_report, TargetName, Info},
{noreply, State};
+handle_info({snmp_invalid_result, In, Out}, State) ->
+ error_msg("Callback failure: "
+ "~n In: ~p"
+ "~n Out: ~p", [In, Out]),
+ {noreply, State};
+
handle_info(Info, State) ->
error_msg("received unknown info: "
"~n Info: ~p", [Info]),
@@ -369,7 +376,12 @@ handle_report(TargetName, SnmpInfo, Pid) ->
after 10000 ->
ignore
end.
-
+
+
+handle_invalid_result(In, Out, Pid) ->
+ Pid ! {snmp_invalid_result, In, Out},
+ ignore.
+
%%----------------------------------------------------------------------
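The handle_invalid_result callback added above is invoked when an earlier user
callback returned a value the manager could not interpret; In identifies the
failing callback and Out is what it returned. A user callback clause along the
lines of the test modules above might look like the sketch below (the exact
behaviour contract and the UserData argument are assumptions based on that code):

handle_invalid_result(In, Out, _UserData) ->
    %% Log the failing callback and its bad return value, then tell the
    %% manager to carry on.
    error_logger:error_msg("snmpm callback failure:~n   In:  ~p~n   Out: ~p~n",
                           [In, Out]),
    ignore.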
diff --git a/lib/snmp/vsn.mk b/lib/snmp/vsn.mk
index 2164121e86..70f7c2b19a 100644
--- a/lib/snmp/vsn.mk
+++ b/lib/snmp/vsn.mk
@@ -18,6 +18,6 @@
# %CopyrightEnd%
APPLICATION = snmp
-SNMP_VSN = 4.24.2
+SNMP_VSN = 4.25
PRE_VSN =
APP_VSN = "$(APPLICATION)-$(SNMP_VSN)$(PRE_VSN)"
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index 141d3df38e..c1a51d57fc 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -53,8 +53,7 @@
returned by ssh:daemon/[1,2,3]</c></p>
<p><c>ssh_connection_ref() - opaque to the user
returned by ssh:connect/3</c></p>
- <p><c>ip_address() - {N1,N2,N3,N4} % IPv4 |
- {K1,K2,K3,K4,K5,K6,K7,K8} % IPv6</c></p>
+ <p><c>ip_address() - inet:ip_address()</c></p>
<p><c>subsystem_spec() = {subsystem_name(),
{channel_callback(), channel_init_args()}} </c></p>
<p><c>subsystem_name() = string() </c></p>
@@ -141,7 +140,7 @@
<p>Sets the preferred public key algorithm to use for user
authentication. If the the preferred algorithm fails for
some reason, the other algorithm is tried. The default is
- to try <c><![CDATA[ssh_rsa]]></c> first.</p>
+ to try <c><![CDATA['ssh-rsa']]></c> first.</p>
</item>
<tag><c><![CDATA[{pref_public_key_algs, list()}]]></c></tag>
<item>
@@ -181,10 +180,6 @@
<item>
<p>Allow an existing file descriptor to be used
(simply passed on to the transport protocol).</p></item>
- <tag><c><![CDATA[{ipv6_disabled, boolean()}]]></c></tag>
- <item>
- <p>Determines if SSH shall use IPv6 or not.</p>
- </item>
<tag><c><![CDATA[{rekey_limit, integer()}]]></c></tag>
<item>
<p>Provide, in bytes, when rekeying should be initiated,
@@ -202,8 +197,11 @@
Value}] </name>
<fsummary> Retrieves information about a connection. </fsummary>
<type>
- <v>Option = client_version | server_version | peer</v>
- <v>Value = term() </v>
+ <v>Option = client_version | server_version | user | peer | sockname </v>
+ <v>Value = [option_value()] </v>
+ <v>option_value() = {{Major::integer(), Minor::integer()}, VersionString::string()} | User::string() |
+ Peer::{inet:hostname(), {inet:ip_address(), inet:port_number()}} |
+ Sockname::{inet:ip_address(), inet:port_number()} </v>
</type>
<desc>
<p> Retrieves information about a connection.
@@ -248,14 +246,15 @@
requested by the client. Default is to use the erlang shell:
<c><![CDATA[{shell, start, []}]]></c>
</item>
- <tag><c><![CDATA[{ssh_cli,{channel_callback(),
- channel_init_args()}}]]></c></tag>
+ <tag><c><![CDATA[{ssh_cli, {channel_callback(),
+ channel_init_args()} | no_cli}]]></c></tag>
<item>
- Provides your own cli implementation, i.e. a channel callback
+ Provides your own CLI implementation, i.e. a channel callback
module that implements a shell and command execution. Note
that you may customize the shell read-eval-print loop using the
option <c>shell</c> which is much less work than implementing
- your own cli channel.
+ your own CLI channel. If set to <c>no_cli</c> you will disable
+ CLI channels and only subsystem channels will be allowed.
</item>
<tag><c><![CDATA[{user_dir, String}]]></c></tag>
<item>
@@ -296,7 +295,7 @@
user. From a security perspective this option makes
the server very vulnerable.</p>
</item>
- <tag><c><![CDATA[{pwdfun, fun(User::string(), password::string() -> boolean()}]]></c></tag>
+ <tag><c><![CDATA[{pwdfun, fun(User::string(), password::string()) -> boolean()}]]></c></tag>
<item>
<p>Provide a function for password validation. This is called
with user and password as strings, and should return
@@ -313,26 +312,23 @@
<item>
<p>Allow an existing file-descriptor to be used
(simply passed on to the transport protocol).</p></item>
- <tag><c><![CDATA[{ip_v6_disabled, boolean()}]]></c></tag>
+ <tag><c><![CDATA[{failfun, fun(User::string(), PeerAddress::ip_address(), Reason::term()) -> _}]]></c></tag>
<item>
- <p>Determines if SSH shall use IPv6 or not (only used when
- HostAddress is set to any).</p></item>
- <tag><c><![CDATA[{failfun, fun()}]]></c></tag>
- <item>
- <p>Provide a fun() to implement your own logging when a user fails to authenticate.</p>
+ <p>Provide a fun to implement your own logging when a user fails to authenticate.</p>
</item>
- <tag><c><![CDATA[{connectfun, fun()}]]></c></tag>
+ <tag><c><![CDATA[{connectfun, fun(User::string(), PeerAddress::ip_address(), Method::string()) ->_}]]></c></tag>
<item>
- <p>Provide a fun() to implement your own logging when a user authenticates to the server.</p>
+ <p>Provide a fun to implement your own logging when a user authenticates to the server.</p>
</item>
- <tag><c><![CDATA[{disconnectfun, fun()}]]></c></tag>
+ <tag><c><![CDATA[{disconnectfun, fun(Reason::term()) -> _}]]></c></tag>
<item>
- <p>Provide a fun() to implement your own logging when a user disconnects from the server.</p>
+ <p>Provide a fun to implement your own logging when a user disconnects from the server.</p>
</item>
</taglist>
</desc>
</func>
+
<func>
<name>shell(Host) -> </name>
<name>shell(Host, Option) -> </name>
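Taken together, the daemon options documented above (ssh_cli set to no_cli, the
typed pwdfun, and the failfun/connectfun/disconnectfun logging funs) can be
combined roughly as follows; a sketch only, with port, paths and the password
check as placeholders:

%% Subsystem-only daemon (no CLI channels) with simple auth logging.
{ok, _DaemonRef} =
    ssh:daemon(2222,
               [{system_dir, "/etc/ssh"},
                {ssh_cli, no_cli},
                {subsystems, [ssh_sftpd:subsystem_spec([])]},
                {pwdfun, fun(_User, Pwd) -> Pwd =:= "secret" end},
                {failfun, fun(User, PeerAddr, Reason) ->
                                  error_logger:info_msg("auth failed: ~p ~p ~p~n",
                                                        [User, PeerAddr, Reason])
                          end},
                {connectfun, fun(User, PeerAddr, Method) ->
                                     error_logger:info_msg("~p connected from ~p (~p)~n",
                                                           [User, PeerAddr, Method])
                             end},
                {disconnectfun, fun(Reason) ->
                                        error_logger:info_msg("disconnected: ~p~n",
                                                              [Reason])
                                end}]).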
diff --git a/lib/ssh/doc/src/ssh_client_key_api.xml b/lib/ssh/doc/src/ssh_client_key_api.xml
index b9b1ec4efa..2fa06f8bf1 100644
--- a/lib/ssh/doc/src/ssh_client_key_api.xml
+++ b/lib/ssh/doc/src/ssh_client_key_api.xml
@@ -41,12 +41,14 @@
<p>Type definitions that are used more than once in this module
and/or abstractions to indicate the intended use of the data
- type:</p>
+ type. For more details on public key data types
+ see the <seealso marker="public_key:public_key_records"> public_key user's guide.</seealso>
+ </p>
<p> boolean() = true | false</p>
<p> string() = [byte()] </p>
<p> public_key() = #'RSAPublicKey'{}| {integer(), #'Dss-Parms'{}}| term()</p>
- <p> private_key() = #'RSAPublicKey'{}| {integer(), #'Dss-Parms'{}}| term()</p>
+ <p> private_key() = #'RSAPrivateKey'{} | #'DSAPrivateKey'{} | term()</p>
<p> public_key_algorithm() = 'ssh-rsa'| 'ssh-dss' | atom()</p>
</section>
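With the corrected private_key() type above, a client key callback module that
supplies an RSA user key might look like the following sketch (the key location
and the permissive host-key handling are assumptions, for illustration only):

-module(my_client_key).
-behaviour(ssh_client_key_api).
-export([user_key/2, is_host_key/4, add_host_key/3]).

%% Return the user's private key as an #'RSAPrivateKey'{} record.
user_key('ssh-rsa', ConnectOptions) ->
    Dir = proplists:get_value(user_dir, ConnectOptions, "/home/otpuser/.ssh"),
    {ok, Pem} = file:read_file(filename:join(Dir, "id_rsa")),
    [Entry] = public_key:pem_decode(Pem),
    {ok, public_key:pem_entry_decode(Entry)};
user_key(_Algorithm, _ConnectOptions) ->
    {error, notsup}.

%% Accept and remember any host key -- do not do this outside of examples.
is_host_key(_Key, _Host, _Algorithm, _ConnectOptions) -> true.
add_host_key(_Host, _Key, _ConnectOptions) -> ok.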
diff --git a/lib/ssh/doc/src/ssh_server_key_api.xml b/lib/ssh/doc/src/ssh_server_key_api.xml
index c4562e1211..ee537f2f60 100644
--- a/lib/ssh/doc/src/ssh_server_key_api.xml
+++ b/lib/ssh/doc/src/ssh_server_key_api.xml
@@ -29,7 +29,7 @@
-behaviour(ssh_server_key_api).
</modulesummary>
<description>
- <p> Behaviour describing the API for an SSH server's public key handling.By implementing the callbacks defined
+ <p> Behaviour describing the API for an SSH server's public key handling. By implementing the callbacks defined
in this behavior it is possible to customize the SSH server's public key
handling. By default the SSH application implements this behavior
with help of the standard openssh files, see <seealso marker="SSH_app"> ssh(6)</seealso>.</p>
@@ -40,13 +40,15 @@
<p>Type definitions that are used more than once in this module
and/or abstractions to indicate the intended use of the data
- type:</p>
+ type. For more details on public key data types
+ see the <seealso marker="public_key:public_key_records"> public_key user's guide.</seealso>
+ </p>
<p> boolean() = true | false</p>
<p> string() = [byte()]</p>
- <p> public_key() = #'RSAPublicKey'{}| {integer(), #'Dss-Parms'{}}| term()</p>
- <p> private_key() = #'RSAPublicKey'{}| {integer(), #'Dss-Parms'{}}| term()</p>
- <p> public_key_algorithm() = 'ssh-rsa'| 'ssh-dss' | atom()</p>
+ <p> public_key() = #'RSAPublicKey'{} | {integer(), #'Dss-Parms'{}} | term()</p>
+ <p> private_key() = #'RSAPrivateKey'{} | #'DSAPrivateKey'{} | term()</p>
+ <p> public_key_algorithm() = 'ssh-rsa' | 'ssh-dss' | atom()</p>
</section>
<funcs>
@@ -56,7 +58,7 @@
<fsummary>Fetches the hosts private key </fsummary>
<type>
<v>Algorithm = public_key_algorithm()</v>
- <d> Host key algorithm. Should support 'ssh-rsa'| 'ssh-dss' but additional algorithms
+ <d> Host key algorithm. Should support 'ssh-rsa' | 'ssh-dss' but additional algorithms
can be handled.</d>
<v> DaemonOptions = proplists:proplist() </v>
<d>Options provided to <seealso marker="ssh#daemon-2">ssh:daemon/[2,3]</seealso></d>
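The host_key/2 callback described above can be implemented along these lines; a
sketch that reads the host RSA key from system_dir (paths are placeholders and
the DSA case is left unsupported):

-module(my_server_key).
-behaviour(ssh_server_key_api).
-export([host_key/2, is_auth_key/3]).

%% Fetch the host's private key for the requested algorithm.
host_key('ssh-rsa', DaemonOptions) ->
    Dir = proplists:get_value(system_dir, DaemonOptions, "/etc/ssh"),
    {ok, Pem} = file:read_file(filename:join(Dir, "ssh_host_rsa_key")),
    [Entry] = public_key:pem_decode(Pem),
    {ok, public_key:pem_entry_decode(Entry)};
host_key(_Algorithm, _DaemonOptions) ->
    {error, notsup}.

%% Authorize no public-key users in this sketch.
is_auth_key(_PublicUserKey, _User, _DaemonOptions) ->
    false.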
diff --git a/lib/ssh/src/Makefile b/lib/ssh/src/Makefile
index 93d0b54f57..2ef2859fd7 100644
--- a/lib/ssh/src/Makefile
+++ b/lib/ssh/src/Makefile
@@ -53,7 +53,6 @@ MODULES= \
ssh_connection_sup \
ssh_connection \
ssh_connection_handler \
- ssh_connection_manager \
ssh_shell \
ssh_system_sup \
ssh_subsystem_sup \
@@ -67,12 +66,12 @@ MODULES= \
ssh_file \
ssh_io \
ssh_math \
+ ssh_message \
ssh_no_io \
ssh_sftp \
ssh_sftpd \
ssh_sftpd_file\
ssh_transport \
- ssh_userreg \
ssh_xfer
PUBLIC_HRL_FILES= ssh.hrl ssh_userauth.hrl ssh_xfer.hrl
diff --git a/lib/ssh/src/ssh.app.src b/lib/ssh/src/ssh.app.src
index 49707f3378..74d7293be0 100644
--- a/lib/ssh/src/ssh.app.src
+++ b/lib/ssh/src/ssh.app.src
@@ -8,6 +8,7 @@
ssh_acceptor,
ssh_acceptor_sup,
ssh_auth,
+ ssh_message,
ssh_bits,
ssh_cli,
ssh_client_key_api,
@@ -15,7 +16,6 @@
ssh_channel_sup,
ssh_connection,
ssh_connection_handler,
- ssh_connection_manager,
ssh_connection_sup,
ssh_daemon_channel,
ssh_shell,
@@ -34,7 +34,6 @@
ssh_sup,
ssh_system_sup,
ssh_transport,
- ssh_userreg,
ssh_xfer]},
{registered, []},
{applications, [kernel, stdlib, crypto, public_key]},
diff --git a/lib/ssh/src/ssh.erl b/lib/ssh/src/ssh.erl
index 7d5478c3f6..2685b1553b 100644
--- a/lib/ssh/src/ssh.erl
+++ b/lib/ssh/src/ssh.erl
@@ -32,9 +32,8 @@
shell/1, shell/2, shell/3]).
%%--------------------------------------------------------------------
-%% Function: start([, Type]) -> ok
-%%
-%% Type = permanent | transient | temporary
+-spec start() -> ok.
+-spec start(permanent | transient | temporary) -> ok.
%%
%% Description: Starts the ssh application. Default type
%% is temporary. see application(3)
@@ -52,7 +51,7 @@ start(Type) ->
application:start(ssh, Type).
%%--------------------------------------------------------------------
-%% Function: stop() -> ok
+-spec stop() -> ok.
%%
%% Description: Stops the ssh application.
%%--------------------------------------------------------------------
@@ -60,13 +59,8 @@ stop() ->
application:stop(ssh).
%%--------------------------------------------------------------------
-%% Function: connect(Host, Port, Options) ->
-%% connect(Host, Port, Options, Timeout -> ConnectionRef | {error, Reason}
-%%
-%% Host - string()
-%% Port - integer()
-%% Options - [{Option, Value}]
-%% Timeout - infinity | integer().
+-spec connect(string(), integer(), proplists:proplist()) -> {ok, pid()} | {error, term()}.
+-spec connect(string(), integer(), proplists:proplist(), timeout()) -> {ok, pid()} | {error, term()}.
%%
%% Description: Starts an ssh connection.
%%--------------------------------------------------------------------
@@ -77,83 +71,52 @@ connect(Host, Port, Options, Timeout) ->
{error, _Reason} = Error ->
Error;
{SocketOptions, SshOptions} ->
- DisableIpv6 = proplists:get_value(ipv6_disabled, SshOptions, false),
- Inet = inetopt(DisableIpv6),
- do_connect(Host, Port, [Inet | SocketOptions],
- [{user_pid, self()}, {host, Host} | fix_idle_time(SshOptions)], Timeout, DisableIpv6)
+ {_, Transport, _} = TransportOpts =
+ proplists:get_value(transport, Options, {tcp, gen_tcp, tcp_closed}),
+ Inet = proplists:get_value(inet, SshOptions, inet),
+ try Transport:connect(Host, Port, [ {active, false}, Inet | SocketOptions], Timeout) of
+ {ok, Socket} ->
+ Opts = [{user_pid, self()}, {host, Host} | fix_idle_time(SshOptions)],
+ ssh_connection_handler:start_connection(client, Socket, Opts, Timeout);
+ {error, Reason} ->
+ {error, Reason}
+ catch
+ exit:{function_clause, _} ->
+ {error, {options, {transport, TransportOpts}}};
+ exit:badarg ->
+ {error, {options, {socket_options, SocketOptions}}}
+ end
end.
-do_connect(Host, Port, SocketOptions, SshOptions, Timeout, DisableIpv6) ->
- try sshc_sup:start_child([[{address, Host}, {port, Port},
- {role, client},
- {channel_pid, self()},
- {socket_opts, SocketOptions},
- {ssh_opts, SshOptions}]]) of
- {ok, ConnectionSup} ->
- {ok, Manager} =
- ssh_connection_sup:connection_manager(ConnectionSup),
- msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout)
- catch
- exit:{noproc, _} ->
- {error, ssh_not_started}
- end.
-msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout) ->
- receive
- {Manager, is_connected} ->
- {ok, Manager};
- %% When the connection fails
- %% ssh_connection_sup:connection_manager
- %% might return undefined as the connection manager
- %% could allready have terminated, so we will not
- %% match the Manager in this case
- {_, not_connected, {error, econnrefused}} when DisableIpv6 == false ->
- do_connect(Host, Port, proplists:delete(inet6, SocketOptions),
- SshOptions, Timeout, true);
- {_, not_connected, {error, Reason}} ->
- {error, Reason};
- {_, not_connected, Other} ->
- {error, Other};
- {From, user_password} ->
- Pass = io:get_password(),
- From ! Pass,
- msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout);
- {From, question} ->
- Answer = io:get_line(""),
- From ! Answer,
- msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout)
- after Timeout ->
- ssh_connection_manager:stop(Manager),
- {error, timeout}
- end.
%%--------------------------------------------------------------------
-%% Function: close(ConnectionRef) -> ok
+-spec close(pid()) -> ok.
%%
%% Description: Closes an ssh connection.
%%--------------------------------------------------------------------
close(ConnectionRef) ->
- ssh_connection_manager:stop(ConnectionRef).
+ ssh_connection_handler:stop(ConnectionRef).
%%--------------------------------------------------------------------
-%% Function: connection_info(ConnectionRef) -> [{Option, Value}]
+-spec connection_info(pid(), [atom()]) -> [{atom(), term()}].
%%
%% Description: Retrieves information about a connection.
%%--------------------------------------------------------------------
connection_info(ConnectionRef, Options) ->
- ssh_connection_manager:connection_info(ConnectionRef, Options).
+ ssh_connection_handler:connection_info(ConnectionRef, Options).
%%--------------------------------------------------------------------
-%% Function: channel_info(ConnectionRef) -> [{Option, Value}]
+-spec channel_info(pid(), channel_id(), [atom()]) -> [{atom(), term()}].
%%
%% Description: Retrieves information about a connection.
%%--------------------------------------------------------------------
channel_info(ConnectionRef, ChannelId, Options) ->
- ssh_connection_manager:channel_info(ConnectionRef, ChannelId, Options).
+ ssh_connection_handler:channel_info(ConnectionRef, ChannelId, Options).
%%--------------------------------------------------------------------
-%% Function: daemon(Port) ->
-%% daemon(Port, Options) ->
-%% daemon(Address, Port, Options) -> SshSystemRef
-%%
+-spec daemon(integer()) -> {ok, pid()}.
+-spec daemon(integer(), proplists:proplist()) -> {ok, pid()}.
+-spec daemon(any | inet:ip_address(), integer(), proplists:proplist()) -> {ok, pid()}.
+
%% Description: Starts a server listening for SSH connections
%% on the given port.
%%--------------------------------------------------------------------
@@ -170,11 +133,11 @@ daemon(HostAddr, Port, Options0) ->
_ ->
Options0
end,
- DisableIpv6 = proplists:get_value(ipv6_disabled, Options0, false),
+
{Host, Inet, Options} = case HostAddr of
any ->
{ok, Host0} = inet:gethostname(),
- {Host0, inetopt(DisableIpv6), Options1};
+ {Host0, proplists:get_value(inet, Options1, inet), Options1};
{_,_,_,_} ->
{HostAddr, inet,
[{ip, HostAddr} | Options1]};
@@ -185,9 +148,8 @@ daemon(HostAddr, Port, Options0) ->
start_daemon(Host, Port, Options, Inet).
%%--------------------------------------------------------------------
-%% Function: stop_listener(SysRef) -> ok
-%% stop_listener(Address, Port) -> ok
-%%
+-spec stop_listener(pid()) -> ok.
+-spec stop_listener(inet:ip_address(), integer()) -> ok.
%%
%% Description: Stops the listener, but leaves
%% existing connections started by the listener up and running.
@@ -198,9 +160,8 @@ stop_listener(Address, Port) ->
ssh_system_sup:stop_listener(Address, Port).
%%--------------------------------------------------------------------
-%% Function: stop_daemon(SysRef) -> ok
-%%% stop_daemon(Address, Port) -> ok
-%%
+-spec stop_daemon(pid()) -> ok.
+-spec stop_daemon(inet:ip_address(), integer()) -> ok.
%%
%% Description: Stops the listener and all connections started by
%% the listener.
@@ -211,9 +172,10 @@ stop_daemon(Address, Port) ->
ssh_system_sup:stop_system(Address, Port).
%%--------------------------------------------------------------------
-%% Function: shell(Host [,Port,Options]) -> {ok, ConnectionRef} |
-%% {error, Reason}
-%%
+-spec shell(string()) -> _.
+-spec shell(string(), proplists:proplist()) -> _.
+-spec shell(string(), integer(), proplists:proplist()) -> _.
+
%% Host = string()
%% Port = integer()
%% Options = [{Option, Value}]
@@ -379,9 +341,9 @@ handle_ssh_option({user_dir, Value} = Opt) when is_list(Value) ->
Opt;
handle_ssh_option({user_dir_fun, Value} = Opt) when is_function(Value) ->
Opt;
-handle_ssh_option({silently_accept_hosts, Value} = Opt) when Value == true; Value == false ->
+handle_ssh_option({silently_accept_hosts, Value} = Opt) when is_boolean(Value) ->
Opt;
-handle_ssh_option({user_interaction, Value} = Opt) when Value == true; Value == false ->
+handle_ssh_option({user_interaction, Value} = Opt) when is_boolean(Value) ->
Opt;
handle_ssh_option({public_key_alg, ssh_dsa}) ->
{public_key_alg, 'ssh-dss'};
@@ -429,9 +391,8 @@ handle_ssh_option({disconnectfun , Value} = Opt) when is_function(Value) ->
handle_ssh_option({failfun, Value} = Opt) when is_function(Value) ->
Opt;
-handle_ssh_option({ipv6_disabled, Value} = Opt) when Value == true;
- Value == false ->
- Opt;
+handle_ssh_option({ipv6_disabled, Value} = Opt) when is_boolean(Value) ->
+ throw({error, {{ipv6_disabled, Opt}, option_no_longer_valid_use_inet_option_instead}});
handle_ssh_option({transport, {Protocol, Cb, ClosTag}} = Opt) when is_atom(Protocol),
is_atom(Cb),
is_atom(ClosTag) ->
@@ -440,13 +401,14 @@ handle_ssh_option({subsystems, Value} = Opt) when is_list(Value) ->
Opt;
handle_ssh_option({ssh_cli, {Cb, _}}= Opt) when is_atom(Cb) ->
Opt;
+handle_ssh_option({ssh_cli, no_cli} = Opt) ->
+ Opt;
handle_ssh_option({shell, {Module, Function, _}} = Opt) when is_atom(Module),
is_atom(Function) ->
Opt;
handle_ssh_option({shell, Value} = Opt) when is_function(Value) ->
Opt;
-handle_ssh_option({quiet_mode, Value} = Opt) when Value == true;
- Value == false ->
+handle_ssh_option({quiet_mode, Value} = Opt) when is_boolean(Value) ->
Opt;
handle_ssh_option({idle_time, Value} = Opt) when is_integer(Value), Value > 0 ->
Opt;
@@ -459,10 +421,8 @@ handle_inet_option({active, _} = Opt) ->
throw({error, {{eoptions, Opt}, "Ssh has built in flow control, "
"and activ is handled internaly user is not allowd"
"to specify this option"}});
-handle_inet_option({inet, _} = Opt) ->
- throw({error, {{eoptions, Opt},"Is set internaly use ipv6_disabled to"
- " enforce iv4 in the server, client will fallback to ipv4 if"
- " it can not use ipv6"}});
+handle_inet_option({inet, Value} = Opt) when (Value == inet) or (Value == inet6) ->
+ Opt;
handle_inet_option({reuseaddr, _} = Opt) ->
throw({error, {{eoptions, Opt},"Is set internaly user is not allowd"
"to specify this option"}});
@@ -485,18 +445,3 @@ handle_pref_algs([H|T], Acc) ->
_ ->
false
end.
-%% Has IPv6 been disabled?
-inetopt(true) ->
- inet;
-inetopt(false) ->
- case gen_tcp:listen(0, [inet6]) of
- {ok, Dummyport} ->
- gen_tcp:close(Dummyport),
- inet6;
- _ ->
- inet
- end.
-
-%%%
-%% Deprecated
-%%%
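Since ipv6_disabled is now rejected with an explicit error, the address family
is chosen with the inet option that handle_inet_option/1 accepts above. A usage
sketch (host, port, user and system_dir are placeholders):

%% IPv6 client connection and daemon, using the inet option instead of
%% the removed ipv6_disabled option.
{ok, _ConnRef} = ssh:connect("host.example.com", 22,
                             [{inet, inet6}, {user, "otpuser"}]),
{ok, _Daemon}  = ssh:daemon(any, 2222,
                            [{inet, inet6}, {system_dir, "/etc/ssh"}]).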
diff --git a/lib/ssh/src/ssh.hrl b/lib/ssh/src/ssh.hrl
index 4fd347ba8f..94ced9da6f 100644
--- a/lib/ssh/src/ssh.hrl
+++ b/lib/ssh/src/ssh.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2004-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2004-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -29,6 +29,8 @@
-define(SSH_DEFAULT_PORT, 22).
-define(SSH_MAX_PACKET_SIZE, (256*1024)).
-define(SSH_LENGHT_INDICATOR_SIZE, 4).
+-define(REKEY_TIMOUT, 3600000).
+-define(REKEY_DATA_TIMOUT, 60000).
-define(FALSE, 0).
-define(TRUE, 1).
diff --git a/lib/ssh/src/ssh_acceptor.erl b/lib/ssh/src/ssh_acceptor.erl
index d023656c32..91905b2eaf 100644
--- a/lib/ssh/src/ssh_acceptor.erl
+++ b/lib/ssh/src/ssh_acceptor.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -25,7 +25,6 @@
-export([start_link/5]).
%% spawn export
-%% TODO: system messages
-export([acceptor_init/6, acceptor_loop/6]).
-define(SLEEP_TIME, 200).
@@ -81,17 +80,15 @@ acceptor_loop(Callback, Port, Address, Opts, ListenSocket, AcceptTimeout) ->
ListenSocket, AcceptTimeout)
end.
-handle_connection(Callback, Address, Port, Options, Socket) ->
+handle_connection(_Callback, Address, Port, Options, Socket) ->
SystemSup = ssh_system_sup:system_supervisor(Address, Port),
{ok, SubSysSup} = ssh_system_sup:start_subsystem(SystemSup, Options),
- ConnectionSup = ssh_system_sup:connection_supervisor(SystemSup),
- {ok, Pid} =
- ssh_connection_sup:start_manager_child(ConnectionSup,
- [server, Socket, Options]),
- Callback:controlling_process(Socket, Pid),
- SshOpts = proplists:get_value(ssh_opts, Options),
- Pid ! {start_connection, server, [Address, Port, Socket, SshOpts, SubSysSup]}.
-
+ ConnectionSup = ssh_subsystem_sup:connection_supervisor(SubSysSup),
+ ssh_connection_handler:start_connection(server, Socket,
+ [{supervisors, [{system_sup, SystemSup},
+ {subsystem_sup, SubSysSup},
+ {connection_sup, ConnectionSup}]}
+ | Options], infinity).
handle_error(timeout) ->
ok;
diff --git a/lib/ssh/src/ssh_acceptor_sup.erl b/lib/ssh/src/ssh_acceptor_sup.erl
index f37e1fe4ff..2be729d305 100644
--- a/lib/ssh/src/ssh_acceptor_sup.erl
+++ b/lib/ssh/src/ssh_acceptor_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -84,8 +84,8 @@ child_spec(ServerOpts) ->
[{active, false},
{reuseaddr, true}] ++ SocketOpts,
ServerOpts, Timeout]},
- Restart = permanent,
- Shutdown = 3600,
+ Restart = transient,
+ Shutdown = brutal_kill,
Modules = [ssh_acceptor],
Type = worker,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
diff --git a/lib/ssh/src/ssh_auth.erl b/lib/ssh/src/ssh_auth.erl
index cb0c7751f0..1fa3df847f 100644
--- a/lib/ssh/src/ssh_auth.erl
+++ b/lib/ssh/src/ssh_auth.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -30,8 +30,7 @@
-export([publickey_msg/1, password_msg/1, keyboard_interactive_msg/1,
service_request_msg/1, init_userauth_request_msg/1,
userauth_request_msg/1, handle_userauth_request/3,
- handle_userauth_info_request/3, handle_userauth_info_response/2,
- userauth_messages/0
+ handle_userauth_info_request/3, handle_userauth_info_response/2
]).
%%--------------------------------------------------------------------
@@ -43,7 +42,6 @@ publickey_msg([Alg, #ssh{user = User,
opts = Opts} = Ssh]) ->
Hash = sha, %% Maybe option?!
- ssh_bits:install_messages(userauth_pk_messages()),
KeyCb = proplists:get_value(key_cb, Opts, ssh_file),
case KeyCb:user_key(Alg, Opts) of
@@ -69,7 +67,6 @@ publickey_msg([Alg, #ssh{user = User,
password_msg([#ssh{opts = Opts, io_cb = IoCb,
user = User, service = Service} = Ssh]) ->
- ssh_bits:install_messages(userauth_passwd_messages()),
Password = case proplists:get_value(password, Opts) of
undefined ->
user_interaction(IoCb, Ssh);
@@ -99,7 +96,6 @@ user_interaction(IoCb, Ssh) ->
%% See RFC 4256 for info on keyboard-interactive
keyboard_interactive_msg([#ssh{user = User,
service = Service} = Ssh]) ->
- ssh_bits:install_messages(userauth_keyboard_interactive_messages()),
ssh_transport:ssh_packet(
#ssh_msg_userauth_request{user = User,
service = Service,
@@ -239,7 +235,6 @@ handle_userauth_request(#ssh_msg_userauth_request{user = User,
partial_success = false}, Ssh)}
end;
?FALSE ->
- ssh_bits:install_messages(userauth_pk_messages()),
{not_authorized, {User, undefined},
ssh_transport:ssh_packet(
#ssh_msg_userauth_pk_ok{algorithm_name = Alg,
@@ -275,26 +270,10 @@ handle_userauth_info_request(
handle_userauth_info_response(#ssh_msg_userauth_info_response{},
_Auth) ->
throw(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_SERVICE_NOT_AVAILABLE,
- description = "Server does not support"
- "keyboard-interactive",
+ description = "Server does not support"
+ "keyboard-interactive",
language = "en"}).
-userauth_messages() ->
- [ {ssh_msg_userauth_request, ?SSH_MSG_USERAUTH_REQUEST,
- [string,
- string,
- string,
- '...']},
-
- {ssh_msg_userauth_failure, ?SSH_MSG_USERAUTH_FAILURE,
- [string,
- boolean]},
-
- {ssh_msg_userauth_success, ?SSH_MSG_USERAUTH_SUCCESS,
- []},
-
- {ssh_msg_userauth_banner, ?SSH_MSG_USERAUTH_BANNER,
- [string,
- string]}].
+
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
@@ -386,13 +365,8 @@ algorithm_string('ssh-rsa') ->
algorithm_string('ssh-dss') ->
"ssh-dss".
-decode_keyboard_interactive_prompts(NumPrompts, Data) ->
- Types = lists:append(lists:duplicate(NumPrompts, [string, boolean])),
- pairwise_tuplify(ssh_bits:decode(Data, Types)).
-
-pairwise_tuplify([E1, E2 | Rest]) -> [{E1, E2} | pairwise_tuplify(Rest)];
-pairwise_tuplify([]) -> [].
-
+decode_keyboard_interactive_prompts(_NumPrompts, Data) ->
+ ssh_message:decode_keyboard_interactive_prompts(Data, []).
keyboard_interact_get_responses(IoCb, Opts, Name, Instr, PromptInfos) ->
NumPrompts = length(PromptInfos),
@@ -431,50 +405,29 @@ keyboard_interact(IoCb, Name, Instr, Prompts, Opts) ->
end,
Prompts).
-userauth_passwd_messages() ->
- [
- {ssh_msg_userauth_passwd_changereq, ?SSH_MSG_USERAUTH_PASSWD_CHANGEREQ,
- [string,
- string]}
- ].
-
-userauth_keyboard_interactive_messages() ->
- [ {ssh_msg_userauth_info_request, ?SSH_MSG_USERAUTH_INFO_REQUEST,
- [string,
- string,
- string,
- uint32,
- '...']},
-
- {ssh_msg_userauth_info_response, ?SSH_MSG_USERAUTH_INFO_RESPONSE,
- [uint32,
- '...']}
- ].
-
-userauth_pk_messages() ->
- [ {ssh_msg_userauth_pk_ok, ?SSH_MSG_USERAUTH_PK_OK,
- [string, % algorithm name
- binary]} % key blob
- ].
-
other_alg('ssh-rsa') ->
'ssh-dss';
other_alg('ssh-dss') ->
'ssh-rsa'.
-decode_public_key_v2(K_S, "ssh-rsa") ->
- case ssh_bits:decode(K_S,[string,mpint,mpint]) of
- ["ssh-rsa", E, N] ->
- {ok, #'RSAPublicKey'{publicExponent = E, modulus = N}};
- _ ->
- {error, bad_format}
- end;
-decode_public_key_v2(K_S, "ssh-dss") ->
- case ssh_bits:decode(K_S,[string,mpint,mpint,mpint,mpint]) of
- ["ssh-dss",P,Q,G,Y] ->
- {ok, {Y, #'Dss-Parms'{p = P, q = Q, g = G}}};
- _ ->
- {error, bad_format}
- end;
+decode_public_key_v2(<<?UINT32(Len0), _:Len0/binary,
+ ?UINT32(Len1), BinE:Len1/binary,
+ ?UINT32(Len2), BinN:Len2/binary>>
+ ,"ssh-rsa") ->
+ E = ssh_bits:erlint(Len1, BinE),
+ N = ssh_bits:erlint(Len2, BinN),
+ {ok, #'RSAPublicKey'{publicExponent = E, modulus = N}};
+decode_public_key_v2(<<?UINT32(Len0), _:Len0/binary,
+ ?UINT32(Len1), BinP:Len1/binary,
+ ?UINT32(Len2), BinQ:Len2/binary,
+ ?UINT32(Len3), BinG:Len3/binary,
+ ?UINT32(Len4), BinY:Len4/binary>>
+ , "ssh-dss") ->
+ P = ssh_bits:erlint(Len1, BinP),
+ Q = ssh_bits:erlint(Len2, BinQ),
+ G = ssh_bits:erlint(Len3, BinG),
+ Y = ssh_bits:erlint(Len4, BinY),
+ {ok, {Y, #'Dss-Parms'{p = P, q = Q, g = G}}};
+
decode_public_key_v2(_, _) ->
{error, bad_format}.
diff --git a/lib/ssh/src/ssh_bits.erl b/lib/ssh/src/ssh_bits.erl
index fc6efc817f..2b0241cb83 100644
--- a/lib/ssh/src/ssh_bits.erl
+++ b/lib/ssh/src/ssh_bits.erl
@@ -25,19 +25,9 @@
-include("ssh.hrl").
--export([encode/1, encode/2]).
--export([decode/1, decode/2, decode/3]).
--export([mpint/1, bignum/1, string/1, name_list/1]).
--export([b64_encode/1, b64_decode/1]).
--export([install_messages/1, uninstall_messages/1]).
-
-%% integer utils
--export([isize/1]).
+-export([encode/2]).
+-export([mpint/1, erlint/2, string/1, name_list/1]).
-export([random/1]).
--export([xor_bits/2, fill_bits/2]).
--export([i2bin/2, bin2i/1]).
-
--import(lists, [foreach/2, reverse/1]).
-define(name_list(X),
(fun(B) -> ?binary(B) end)(list_to_binary(name_concat(X)))).
@@ -95,38 +85,6 @@ mpint_pos(X,I,Ds) ->
mpint_pos(X bsr 8,I+1,[(X band 255)|Ds]).
-%% BIGNUM representation SSH1
-bignum(X) ->
- XSz = isize(X),
- Pad = (8 - (XSz rem 8)) rem 8,
- <<?UINT16(XSz),0:Pad/unsigned-integer,X:XSz/big-unsigned-integer>>.
-
-
-install_messages(Codes) ->
- foreach(fun({Name, Code, Ts}) ->
- put({msg_name,Code}, {Name,Ts}),
- put({msg_code,Name}, {Code,Ts})
- end, Codes).
-
-uninstall_messages(Codes) ->
- foreach(fun({Name, Code, _Ts}) ->
- erase({msg_name,Code}),
- erase({msg_code,Name})
- end, Codes).
-
-%%
-%% Encode a record, the type spec is expected to be
-%% in process dictionary under the key {msg_code, RecodeName}
-%%
-encode(Record) ->
- case get({msg_code, element(1, Record)}) of
- undefined ->
- {error, unimplemented};
- {Code, Ts} ->
- Data = enc(tl(tuple_to_list(Record)), Ts),
- list_to_binary([Code, Data])
- end.
-
encode(List, Types) ->
list_to_binary(enc(List, Types)).
@@ -136,230 +94,58 @@ encode(List, Types) ->
enc(Xs, Ts) ->
enc(Xs, Ts, 0).
-enc(Xs, [Type|Ts], Offset) ->
- case Type of
- boolean ->
- X=hd(Xs),
- [?boolean(X) | enc(tl(Xs), Ts, Offset+1)];
- byte ->
- X=hd(Xs),
- [?byte(X) | enc(tl(Xs), Ts,Offset+1)];
- uint16 ->
- X=hd(Xs),
- [?uint16(X) | enc(tl(Xs), Ts,Offset+2)];
- uint32 ->
- X=hd(Xs),
- [?uint32(X) | enc(tl(Xs), Ts,Offset+4)];
- uint64 ->
- X=hd(Xs),
- [?uint64(X) | enc(tl(Xs), Ts,Offset+8)];
- mpint ->
- Y=mpint(hd(Xs)),
- [Y | enc(tl(Xs), Ts,Offset+size(Y))];
- bignum ->
- Y=bignum(hd(Xs)),
- [Y | enc(tl(Xs),Ts,Offset+size(Y))];
- string ->
- X0=hd(Xs),
- Y=?string(X0),
- [Y | enc(tl(Xs),Ts,Offset+size(Y))];
- binary ->
- X0=hd(Xs),
- Y=?binary(X0),
- [Y | enc(tl(Xs), Ts,Offset+size(Y))];
- name_list ->
- X0=hd(Xs),
- Y=?name_list(X0),
- [Y | enc(tl(Xs), Ts, Offset+size(Y))];
- cookie ->
- [random(16) | enc(tl(Xs), Ts, Offset+16)];
- {pad,N} ->
- K = (N - (Offset rem N)) rem N,
- [fill_bits(K,0) | enc(Xs, Ts, Offset+K)];
- '...' when Ts==[] ->
- X=hd(Xs),
- if is_binary(X) ->
- [X];
- is_list(X) ->
- [list_to_binary(X)];
- X==undefined ->
- []
- end
+enc(Xs, [boolean|Ts], Offset) ->
+ X = hd(Xs),
+ [?boolean(X) | enc(tl(Xs), Ts, Offset+1)];
+enc(Xs, [byte|Ts], Offset) ->
+ X = hd(Xs),
+ [?byte(X) | enc(tl(Xs), Ts,Offset+1)];
+enc(Xs, [uint16|Ts], Offset) ->
+ X = hd(Xs),
+ [?uint16(X) | enc(tl(Xs), Ts,Offset+2)];
+enc(Xs, [uint32 |Ts], Offset) ->
+ X = hd(Xs),
+ [?uint32(X) | enc(tl(Xs), Ts,Offset+4)];
+enc(Xs, [uint64|Ts], Offset) ->
+ X = hd(Xs),
+ [?uint64(X) | enc(tl(Xs), Ts,Offset+8)];
+enc(Xs, [mpint|Ts], Offset) ->
+ Y = mpint(hd(Xs)),
+ [Y | enc(tl(Xs), Ts,Offset+size(Y))];
+enc(Xs, [string|Ts], Offset) ->
+ X0 = hd(Xs),
+ Y = ?string(X0),
+ [Y | enc(tl(Xs),Ts,Offset+size(Y))];
+enc(Xs, [binary|Ts], Offset) ->
+ X0 = hd(Xs),
+ Y = ?binary(X0),
+ [Y | enc(tl(Xs), Ts,Offset+size(Y))];
+enc(Xs, [name_list|Ts], Offset) ->
+ X0 = hd(Xs),
+ Y = ?name_list(X0),
+ [Y | enc(tl(Xs), Ts, Offset+size(Y))];
+enc(Xs, [cookie|Ts], Offset) ->
+ [random(16) | enc(tl(Xs), Ts, Offset+16)];
+enc(Xs, [{pad,N}|Ts], Offset) ->
+ K = (N - (Offset rem N)) rem N,
+ [fill_bits(K,0) | enc(Xs, Ts, Offset+K)];
+enc(Xs, ['...'| []], _Offset) ->
+ X = hd(Xs),
+ if is_binary(X) ->
+ [X];
+ is_list(X) ->
+ [list_to_binary(X)];
+ X==undefined ->
+ []
end;
enc([], [],_) ->
[].
-
-
-%%
-%% Decode a SSH record the type is encoded as the first byte
-%% and the type spec MUST be installed in {msg_name, ID}
-%%
-
-decode(Binary = <<?BYTE(ID), _/binary>>) ->
- case get({msg_name, ID}) of
- undefined ->
- {unknown, Binary};
- {Name, Ts} ->
- {_, Elems} = decode(Binary,1,Ts),
- list_to_tuple([Name | Elems])
- end.
-
-%%
-%% Decode a binary form offset 0
-%%
-
-decode(Binary, Types) when is_binary(Binary) andalso is_list(Types) ->
- {_,Elems} = decode(Binary, 0, Types),
- Elems.
-
-
-%%
-%% Decode a binary from byte offset Offset
-%% return {UpdatedOffset, DecodedElements}
-%%
-decode(Binary, Offset, Types) ->
- decode(Binary, Offset, Types, []).
-
-decode(Binary, Offset, [Type|Ts], Acc) ->
- case Type of
- boolean ->
- <<_:Offset/binary, ?BOOLEAN(X0), _/binary>> = Binary,
- X = if X0 == 0 -> false; true -> true end,
- decode(Binary, Offset+1, Ts, [X | Acc]);
-
- byte ->
- <<_:Offset/binary, ?BYTE(X), _/binary>> = Binary,
- decode(Binary, Offset+1, Ts, [X | Acc]);
-
- uint16 ->
- <<_:Offset/binary, ?UINT16(X), _/binary>> = Binary,
- decode(Binary, Offset+2, Ts, [X | Acc]);
-
- uint32 ->
- <<_:Offset/binary, ?UINT32(X), _/binary>> = Binary,
- decode(Binary, Offset+4, Ts, [X | Acc]);
-
- uint64 ->
- <<_:Offset/binary, ?UINT64(X), _/binary>> = Binary,
- decode(Binary, Offset+8, Ts, [X | Acc]);
-
- mpint ->
- <<_:Offset/binary, ?UINT32(L), X0:L/binary,_/binary>> = Binary,
- Sz = L*8,
- <<X:Sz/big-signed-integer>> = X0,
- decode(Binary, Offset+4+L, Ts, [X | Acc]);
-
- bignum ->
- <<_:Offset/binary, ?UINT16(Bits),_/binary>> = Binary,
- L = (Bits+7) div 8,
- Pad = (8 - (Bits rem 8)) rem 8,
- <<_:Offset/binary, _:16, _:Pad, X:Bits/big-unsigned-integer,
- _/binary>> = Binary,
- decode(Binary, Offset+2+L, Ts, [X | Acc]);
-
- string ->
- Size = size(Binary),
- if Size < Offset + 4 ->
- %% empty string at end
- {Size, reverse(["" | Acc])};
- true ->
- <<_:Offset/binary,?UINT32(L), X:L/binary,_/binary>> =
- Binary,
- decode(Binary, Offset+4+L, Ts, [binary_to_list(X) |
- Acc])
- end;
-
- binary ->
- <<_:Offset/binary,?UINT32(L), X:L/binary,_/binary>> = Binary,
- decode(Binary, Offset+4+L, Ts, [X | Acc]);
-
- name_list ->
- <<_:Offset/binary,?UINT32(L), X:L/binary,_/binary>> = Binary,
- List = string:tokens(binary_to_list(X), ","),
- decode(Binary, Offset+4+L, Ts, [List | Acc]);
-
- cookie ->
- <<_:Offset/binary, X:16/binary, _/binary>> = Binary,
- decode(Binary, Offset+16, Ts, [X | Acc]);
-
- {pad,N} -> %% pad offset to a multiple of N
- K = (N - (Offset rem N)) rem N,
- decode(Binary, Offset+K, Ts, Acc);
-
+erlint(Len, BinInt) ->
+ Sz = Len*8,
+ <<Int:Sz/big-signed-integer>> = BinInt,
+ Int.
- '...' when Ts==[] ->
- <<_:Offset/binary, X/binary>> = Binary,
- {Offset+size(X), reverse([X | Acc])}
- end;
-decode(_Binary, Offset, [], Acc) ->
- {Offset, reverse(Acc)}.
-
-
-
-%% HACK WARNING :-)
--define(VERSION_MAGIC, 131).
--define(SMALL_INTEGER_EXT, $a).
--define(INTEGER_EXT, $b).
--define(SMALL_BIG_EXT, $n).
--define(LARGE_BIG_EXT, $o).
-
-isize(N) when N > 0 ->
- case term_to_binary(N) of
- <<?VERSION_MAGIC, ?SMALL_INTEGER_EXT, X>> ->
- isize_byte(X);
- <<?VERSION_MAGIC, ?INTEGER_EXT, X3,X2,X1,X0>> ->
- isize_bytes([X3,X2,X1,X0]);
- <<?VERSION_MAGIC, ?SMALL_BIG_EXT, S:8/big-unsigned-integer, 0,
- Ds:S/binary>> ->
- K = S - 1,
- <<_:K/binary, Top>> = Ds,
- isize_byte(Top)+K*8;
- <<?VERSION_MAGIC, ?LARGE_BIG_EXT, S:32/big-unsigned-integer, 0,
- Ds:S/binary>> ->
- K = S - 1,
- <<_:K/binary, Top>> = Ds,
- isize_byte(Top)+K*8
- end;
-isize(0) -> 0.
-
-%% big endian byte list
-isize_bytes([0|L]) ->
- isize_bytes(L);
-isize_bytes([Top|L]) ->
- isize_byte(Top) + length(L)*8.
-
-%% Well could be improved
-isize_byte(X) ->
- if X >= 2#10000000 -> 8;
- X >= 2#1000000 -> 7;
- X >= 2#100000 -> 6;
- X >= 2#10000 -> 5;
- X >= 2#1000 -> 4;
- X >= 2#100 -> 3;
- X >= 2#10 -> 2;
- X >= 2#1 -> 1;
- true -> 0
- end.
-
-%% Convert integer into binary
-%% When XLen is the wanted size in octets of the output
-i2bin(X, XLen) ->
- XSz = isize(X),
- Sz = XLen*8,
- if Sz < XSz ->
- exit(integer_to_large);
- true ->
- (<<X:Sz/big-unsigned-integer>>)
- end.
-
-%% Convert a binary into an integer
-%%
-bin2i(X) ->
- Sz = size(X)*8,
- <<Y:Sz/big-unsigned-integer>> = X,
- Y.
-
%%
%% Create a binary with constant bytes
%%
@@ -377,15 +163,6 @@ fill(N,C) ->
[C,Cs,Cs]
end.
-%% xor 2 binaries
-xor_bits(XBits, YBits) ->
- XSz = size(XBits)*8,
- YSz = size(YBits)*8,
- Sz = if XSz < YSz -> XSz; true -> YSz end, %% min
- <<X:Sz, _/binary>> = XBits,
- <<Y:Sz, _/binary>> = YBits,
- <<(X bxor Y):Sz>>.
-
%% random/1
%% Generate N random bytes
@@ -393,18 +170,5 @@ xor_bits(XBits, YBits) ->
random(N) ->
crypto:strong_rand_bytes(N).
-%%
-%% Base 64 encode/decode
-%%
-
-b64_encode(Bs) when is_list(Bs) ->
- base64:encode(Bs);
-b64_encode(Bin) when is_binary(Bin) ->
- base64:encode(Bin).
-
-b64_decode(Bin) when is_binary(Bin) ->
- base64:mime_decode(Bin);
-b64_decode(Cs) when is_list(Cs) ->
- base64:mime_decode(Cs).
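After this cleanup ssh_bits keeps only the list-based encoder plus a few helpers
(encode/2, mpint/1, erlint/2, string/1, name_list/1, random/1). A small usage
sketch of the kept API (values chosen for illustration):

%% Encode a name string and a multi-precision integer, then read the
%% integer back from its length-prefixed representation with erlint/2.
Bin = ssh_bits:encode(["ssh-rsa", 16#10001], [string, mpint]),
<<AlgLen:32, _Alg:AlgLen/binary, Len:32, IntBin:Len/binary>> = Bin,
65537 = ssh_bits:erlint(Len, IntBin).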
diff --git a/lib/ssh/src/ssh_channel.erl b/lib/ssh/src/ssh_channel.erl
index 062ed764ca..508ae637cf 100644
--- a/lib/ssh/src/ssh_channel.erl
+++ b/lib/ssh/src/ssh_channel.erl
@@ -284,7 +284,7 @@ handle_info(Msg, #state{cm = ConnectionManager, channel_cb = Module,
terminate(Reason, #state{cm = ConnectionManager,
channel_id = ChannelId,
close_sent = false} = State) ->
- ssh_connection:close(ConnectionManager, ChannelId),
+ catch ssh_connection:close(ConnectionManager, ChannelId),
terminate(Reason, State#state{close_sent = true});
terminate(_, #state{channel_cb = Cb, channel_state = ChannelState}) ->
catch Cb:terminate(Cb, ChannelState),
diff --git a/lib/ssh/src/ssh_channel_sup.erl b/lib/ssh/src/ssh_channel_sup.erl
index 0093bce9c2..ee37ed35f8 100644
--- a/lib/ssh/src/ssh_channel_sup.erl
+++ b/lib/ssh/src/ssh_channel_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -31,7 +31,7 @@
-export([init/1]).
%%%=========================================================================
-%%% API
+%%% Internal API
%%%=========================================================================
start_link(Args) ->
supervisor:start_link(?MODULE, [Args]).
diff --git a/lib/ssh/src/ssh_cli.erl b/lib/ssh/src/ssh_cli.erl
index 54911e757c..5cb1e133d3 100644
--- a/lib/ssh/src/ssh_cli.erl
+++ b/lib/ssh/src/ssh_cli.erl
@@ -32,9 +32,6 @@
%% ssh_channel callbacks
-export([init/1, handle_ssh_msg/2, handle_msg/2, terminate/2]).
-%% backwards compatibility
--export([listen/1, listen/2, listen/3, listen/4, stop/1]).
-
%% state
-record(state, {
cm,
@@ -65,14 +62,14 @@ init([Shell]) ->
%%
%% Description: Handles channel messages received on the ssh-connection.
%%--------------------------------------------------------------------
-handle_ssh_msg({ssh_cm, _ConnectionManager,
+handle_ssh_msg({ssh_cm, _ConnectionHandler,
{data, _ChannelId, _Type, Data}},
#state{group = Group} = State) ->
List = binary_to_list(Data),
to_group(List, Group),
{ok, State};
-handle_ssh_msg({ssh_cm, ConnectionManager,
+handle_ssh_msg({ssh_cm, ConnectionHandler,
{pty, ChannelId, WantReply,
{TermName, Width, Height, PixWidth, PixHeight, Modes}}},
State0) ->
@@ -85,53 +82,53 @@ handle_ssh_msg({ssh_cm, ConnectionManager,
modes = Modes},
buf = empty_buf()},
set_echo(State),
- ssh_connection:reply_request(ConnectionManager, WantReply,
+ ssh_connection:reply_request(ConnectionHandler, WantReply,
success, ChannelId),
{ok, State};
-handle_ssh_msg({ssh_cm, ConnectionManager,
+handle_ssh_msg({ssh_cm, ConnectionHandler,
{env, ChannelId, WantReply, _Var, _Value}}, State) ->
- ssh_connection:reply_request(ConnectionManager,
+ ssh_connection:reply_request(ConnectionHandler,
WantReply, failure, ChannelId),
{ok, State};
-handle_ssh_msg({ssh_cm, ConnectionManager,
+handle_ssh_msg({ssh_cm, ConnectionHandler,
{window_change, ChannelId, Width, Height, PixWidth, PixHeight}},
#state{buf = Buf, pty = Pty0} = State) ->
Pty = Pty0#ssh_pty{width = Width, height = Height,
pixel_width = PixWidth,
pixel_height = PixHeight},
{Chars, NewBuf} = io_request({window_change, Pty0}, Buf, Pty),
- write_chars(ConnectionManager, ChannelId, Chars),
+ write_chars(ConnectionHandler, ChannelId, Chars),
{ok, State#state{pty = Pty, buf = NewBuf}};
-handle_ssh_msg({ssh_cm, ConnectionManager,
+handle_ssh_msg({ssh_cm, ConnectionHandler,
{shell, ChannelId, WantReply}}, State) ->
- NewState = start_shell(ConnectionManager, State),
- ssh_connection:reply_request(ConnectionManager, WantReply,
+ NewState = start_shell(ConnectionHandler, State),
+ ssh_connection:reply_request(ConnectionHandler, WantReply,
success, ChannelId),
{ok, NewState#state{channel = ChannelId,
- cm = ConnectionManager}};
+ cm = ConnectionHandler}};
-handle_ssh_msg({ssh_cm, ConnectionManager,
+handle_ssh_msg({ssh_cm, ConnectionHandler,
{exec, ChannelId, WantReply, Cmd}}, #state{exec=undefined} = State) ->
{Reply, Status} = exec(Cmd),
- write_chars(ConnectionManager,
+ write_chars(ConnectionHandler,
ChannelId, io_lib:format("~p\n", [Reply])),
- ssh_connection:reply_request(ConnectionManager, WantReply,
+ ssh_connection:reply_request(ConnectionHandler, WantReply,
success, ChannelId),
- ssh_connection:exit_status(ConnectionManager, ChannelId, Status),
- ssh_connection:send_eof(ConnectionManager, ChannelId),
- {stop, ChannelId, State#state{channel = ChannelId, cm = ConnectionManager}};
-handle_ssh_msg({ssh_cm, ConnectionManager,
+ ssh_connection:exit_status(ConnectionHandler, ChannelId, Status),
+ ssh_connection:send_eof(ConnectionHandler, ChannelId),
+ {stop, ChannelId, State#state{channel = ChannelId, cm = ConnectionHandler}};
+handle_ssh_msg({ssh_cm, ConnectionHandler,
{exec, ChannelId, WantReply, Cmd}}, State) ->
- NewState = start_shell(ConnectionManager, Cmd, State),
- ssh_connection:reply_request(ConnectionManager, WantReply,
+ NewState = start_shell(ConnectionHandler, Cmd, State),
+ ssh_connection:reply_request(ConnectionHandler, WantReply,
success, ChannelId),
{ok, NewState#state{channel = ChannelId,
- cm = ConnectionManager}};
+ cm = ConnectionHandler}};
-handle_ssh_msg({ssh_cm, _ConnectionManager, {eof, _ChannelId}}, State) ->
+handle_ssh_msg({ssh_cm, _ConnectionHandler, {eof, _ChannelId}}, State) ->
{ok, State};
handle_ssh_msg({ssh_cm, _, {signal, _, _}}, State) ->
@@ -159,16 +156,16 @@ handle_ssh_msg({ssh_cm, _, {exit_status, ChannelId, Status}}, State) ->
%%
%% Description: Handles other channel messages.
%%--------------------------------------------------------------------
-handle_msg({ssh_channel_up, ChannelId, ConnectionManager},
+handle_msg({ssh_channel_up, ChannelId, ConnectionHandler},
#state{channel = ChannelId,
- cm = ConnectionManager} = State) ->
+ cm = ConnectionHandler} = State) ->
{ok, State};
handle_msg({Group, Req}, #state{group = Group, buf = Buf, pty = Pty,
- cm = ConnectionManager,
+ cm = ConnectionHandler,
channel = ChannelId} = State) ->
{Chars, NewBuf} = io_request(Req, Buf, Pty),
- write_chars(ConnectionManager, ChannelId, Chars),
+ write_chars(ConnectionHandler, ChannelId, Chars),
{ok, State#state{buf = NewBuf}};
handle_msg({'EXIT', Group, _Reason}, #state{group = Group,
@@ -399,12 +396,12 @@ move_cursor(From, To, #ssh_pty{width=Width, term=Type}) ->
%% %%% write out characters
%% %%% make sure that there is data to send
%% %%% before calling ssh_connection:send
-write_chars(ConnectionManager, ChannelId, Chars) ->
+write_chars(ConnectionHandler, ChannelId, Chars) ->
case erlang:iolist_size(Chars) of
0 ->
ok;
_ ->
- ssh_connection:send(ConnectionManager, ChannelId,
+ ssh_connection:send(ConnectionHandler, ChannelId,
?SSH_EXTENDED_DATA_DEFAULT, Chars)
end.
@@ -434,18 +431,20 @@ bin_to_list(L) when is_list(L) ->
bin_to_list(I) when is_integer(I) ->
I.
-start_shell(ConnectionManager, State) ->
+start_shell(ConnectionHandler, State) ->
Shell = State#state.shell,
+ ConnectionInfo = ssh_connection_handler:info(ConnectionHandler,
+ [peer, user]),
ShellFun = case is_function(Shell) of
true ->
{ok, User} =
- ssh_userreg:lookup_user(ConnectionManager),
+ proplists:get_value(user, ConnectionInfo),
case erlang:fun_info(Shell, arity) of
{arity, 1} ->
fun() -> Shell(User) end;
{arity, 2} ->
- {ok, PeerAddr} =
- ssh_connection_manager:peer_addr(ConnectionManager),
+ [{_, PeerAddr}] =
+ proplists:get_value(peer, ConnectionInfo),
fun() -> Shell(User, PeerAddr) end;
_ ->
Shell
@@ -457,12 +456,15 @@ start_shell(ConnectionManager, State) ->
Group = group:start(self(), ShellFun, [{echo, Echo}]),
State#state{group = Group, buf = empty_buf()}.
-start_shell(_ConnectionManager, Cmd, #state{exec={M, F, A}} = State) ->
+start_shell(_ConnectionHandler, Cmd, #state{exec={M, F, A}} = State) ->
Group = group:start(self(), {M, F, A++[Cmd]}, [{echo, false}]),
State#state{group = Group, buf = empty_buf()};
-start_shell(ConnectionManager, Cmd, #state{exec=Shell} = State) when is_function(Shell) ->
+start_shell(ConnectionHandler, Cmd, #state{exec=Shell} = State) when is_function(Shell) ->
+
+ ConnectionInfo = ssh_connection_handler:info(ConnectionHandler,
+ [peer, user]),
{ok, User} =
- ssh_userreg:lookup_user(ConnectionManager),
+ proplists:get_value(user, ConnectionInfo),
ShellFun =
case erlang:fun_info(Shell, arity) of
{arity, 1} ->
@@ -470,8 +472,8 @@ start_shell(ConnectionManager, Cmd, #state{exec=Shell} = State) when is_function
{arity, 2} ->
fun() -> Shell(Cmd, User) end;
{arity, 3} ->
- {ok, PeerAddr} =
- ssh_connection_manager:peer_addr(ConnectionManager),
+ [{_, PeerAddr}] =
+ proplists:get_value(peer, ConnectionInfo),
fun() -> Shell(Cmd, User, PeerAddr) end;
_ ->
Shell
@@ -505,31 +507,3 @@ not_zero(0, B) ->
not_zero(A, _) ->
A.
-%%% Backwards compatibility
-
-%%--------------------------------------------------------------------
-%% Function: listen(...) -> {ok,Pid} | ignore | {error,Error}
-%% Description: Starts a listening server
-%% Note that the pid returned is NOT the pid of this gen_server;
-%% this server is started when an SSH connection is made on the
-%% listening port
-%%--------------------------------------------------------------------
-listen(Shell) ->
- listen(Shell, 22).
-
-listen(Shell, Port) ->
- listen(Shell, Port, []).
-
-listen(Shell, Port, Opts) ->
- listen(Shell, any, Port, Opts).
-
-listen(Shell, HostAddr, Port, Opts) ->
- ssh:daemon(HostAddr, Port, [{shell, Shell} | Opts]).
-
-
-%%--------------------------------------------------------------------
-%% Function: stop(Pid) -> ok
-%% Description: Stops the listener
-%%--------------------------------------------------------------------
-stop(Pid) ->
- ssh:stop_listener(Pid).
diff --git a/lib/ssh/src/ssh_connect.hrl b/lib/ssh/src/ssh_connect.hrl
index 932b0642f1..8421b07167 100644
--- a/lib/ssh/src/ssh_connect.hrl
+++ b/lib/ssh/src/ssh_connect.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -21,6 +21,8 @@
%%% Description : SSH connection protocol
+-type channel_id() :: integer().
+
-define(DEFAULT_PACKET_SIZE, 32768).
-define(DEFAULT_WINDOW_SIZE, 2*?DEFAULT_PACKET_SIZE).
-define(DEFAULT_TIMEOUT, 5000).
@@ -260,6 +262,7 @@
port,
options,
exec,
+ system_supervisor,
sub_system_supervisor,
connection_supervisor
}).
diff --git a/lib/ssh/src/ssh_connection.erl b/lib/ssh/src/ssh_connection.erl
index 9424cdd423..03dddae3c8 100644
--- a/lib/ssh/src/ssh_connection.erl
+++ b/lib/ssh/src/ssh_connection.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -29,232 +29,205 @@
-include("ssh_connect.hrl").
-include("ssh_transport.hrl").
+%% API
-export([session_channel/2, session_channel/4,
exec/4, shell/2, subsystem/4, send/3, send/4, send/5,
- send_eof/2, adjust_window/3, open_pty/3, open_pty/7,
- open_pty/9, setenv/5, window_change/4, window_change/6,
+ send_eof/2, adjust_window/3, setenv/5, close/2, reply_request/4]).
+
+%% Potential API currently unsupported and not tested
+-export([open_pty/3, open_pty/7,
+ open_pty/9, window_change/4, window_change/6,
direct_tcpip/6, direct_tcpip/8, tcpip_forward/3,
- cancel_tcpip_forward/3, signal/3, exit_status/3, encode_ip/1, close/2,
- reply_request/4]).
+ cancel_tcpip_forward/3, signal/3, exit_status/3]).
--export([channel_data/6, handle_msg/4, channel_eof_msg/1,
+%% Internal application API
+-export([channel_data/5, handle_msg/3, channel_eof_msg/1,
channel_close_msg/1, channel_success_msg/1, channel_failure_msg/1,
+ channel_status_msg/1,
channel_adjust_window_msg/2, channel_data_msg/3,
channel_open_msg/5, channel_open_confirmation_msg/4,
channel_open_failure_msg/4, channel_request_msg/4,
global_request_msg/3, request_failure_msg/0,
request_success_msg/1, bind/4, unbind/3, unbind_channel/2,
- bound_channel/3, messages/0]).
+ bound_channel/3, encode_ip/1]).
%%--------------------------------------------------------------------
-%%% Internal application API
+%%% API
%%--------------------------------------------------------------------
%%--------------------------------------------------------------------
-%% Function: session_channel(ConnectionManager
-%% [, InitialWindowSize, MaxPacketSize],
-%% Timeout) -> {ok, }
-%% ConnectionManager = pid()
-%% InitialWindowSize = integer()
-%% MaxPacketSize = integer()
-%%
+-spec session_channel(pid(), timeout()) -> {ok, channel_id()} | {error, term()}.
+-spec session_channel(pid(), integer(), integer(), timeout()) -> {ok, channel_id()} | {error, term()}.
+
%% Description: Opens a channel for an ssh session. A session is a
%% remote execution of a program. The program may be a shell, an
%% application, a system command, or some built-in subsystem.
%% --------------------------------------------------------------------
-session_channel(ConnectionManager, Timeout) ->
- session_channel(ConnectionManager,
+
+session_channel(ConnectionHandler, Timeout) ->
+ session_channel(ConnectionHandler,
?DEFAULT_WINDOW_SIZE, ?DEFAULT_PACKET_SIZE,
Timeout).
-session_channel(ConnectionManager, InitialWindowSize,
+
+session_channel(ConnectionHandler, InitialWindowSize,
MaxPacketSize, Timeout) ->
- ssh_connection_manager:open_channel(ConnectionManager, "session", <<>>,
+ case ssh_connection_handler:open_channel(ConnectionHandler, "session", <<>>,
InitialWindowSize,
- MaxPacketSize, Timeout).
+ MaxPacketSize, Timeout) of
+ {open, Channel} ->
+ {ok, Channel};
+ Error ->
+ Error
+ end.
+
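
A client-side sketch of the session API specified above (host, port and credentials are illustrative only): open a session channel on an established connection, run a command, and collect the data messages delivered to the calling process.

    {ok, Conn} = ssh:connect("localhost", 2222,
                             [{user, "alice"}, {password, "secret"},
                              {silently_accept_hosts, true}]),
    {ok, Chan} = ssh_connection:session_channel(Conn, 5000),
    success = ssh_connection:exec(Conn, Chan, "echo hello", 5000),
    receive
        {ssh_cm, Conn, {data, Chan, 0, Data}} ->
            io:format("~s", [Data])
    after 5000 ->
            {error, timeout}
    end.
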
%%--------------------------------------------------------------------
-%% Function: exec(ConnectionManager, ChannelId, Command, Timeout) ->
-%%
-%% ConnectionManager = pid()
-%% ChannelId = integer()
-%% Cmd = string()
-%% Timeout = integer()
-%%
+-spec exec(pid(), channel_id(), string(), timeout()) -> success | failure.
+
%% Description: Will request that the server start the
%% execution of the given command.
%%--------------------------------------------------------------------
-exec(ConnectionManager, ChannelId, Command, TimeOut) ->
- ssh_connection_manager:request(ConnectionManager, self(), ChannelId, "exec",
- true, [?string(Command)], TimeOut).
+exec(ConnectionHandler, ChannelId, Command, TimeOut) ->
+ ssh_connection_handler:request(ConnectionHandler, self(), ChannelId, "exec",
+ true, [?string(Command)], TimeOut).
+
%%--------------------------------------------------------------------
-%% Function: shell(ConnectionManager, ChannelId) ->
-%%
-%% ConnectionManager = pid()
-%% ChannelId = integer()
-%%
+-spec shell(pid(), channel_id()) -> _.
+
%% Description: Will request that the user's default shell (typically
%% defined in /etc/passwd in UNIX systems) be started at the other
%% end.
%%--------------------------------------------------------------------
-shell(ConnectionManager, ChannelId) ->
- ssh_connection_manager:request(ConnectionManager, self(), ChannelId,
+shell(ConnectionHandler, ChannelId) ->
+ ssh_connection_handler:request(ConnectionHandler, self(), ChannelId,
"shell", false, <<>>, 0).
%%--------------------------------------------------------------------
-%% Function: subsystem(ConnectionManager, ChannelId, SubSystem, TimeOut) ->
-%%
-%% ConnectionManager = pid()
-%% ChannelId = integer()
-%% SubSystem = string()
-%% TimeOut = integer()
-%%
+-spec subsystem(pid(), channel_id(), string(), timeout()) ->
+ success | failure | {error, timeout}.
%%
%% Description: Executes a predefined subsystem.
%%--------------------------------------------------------------------
-subsystem(ConnectionManager, ChannelId, SubSystem, TimeOut) ->
- ssh_connection_manager:request(ConnectionManager, self(),
+subsystem(ConnectionHandler, ChannelId, SubSystem, TimeOut) ->
+ ssh_connection_handler:request(ConnectionHandler, self(),
ChannelId, "subsystem",
true, [?string(SubSystem)], TimeOut).
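
A sketch of how a subsystem request fits together; the subsystem name and its ssh_channel callback module below are hypothetical. The daemon registers the callback under a name, and the client asks for that name on a session channel.

    %% Server side: my_subsys_channel is a hypothetical ssh_channel callback module
    {ok, _Daemon} = ssh:daemon(2222,
                               [{subsystems, [{"my_subsys", {my_subsys_channel, []}}]}]),

    %% Client side
    {ok, Conn} = ssh:connect("localhost", 2222, [{silently_accept_hosts, true}]),
    {ok, Chan} = ssh_connection:session_channel(Conn, 5000),
    success = ssh_connection:subsystem(Conn, Chan, "my_subsys", 5000).
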
%%--------------------------------------------------------------------
-%% Function: send(ConnectionManager, ChannelId, Type, Data, [TimeOut]) ->
+-spec send(pid(), channel_id(), iodata()) ->
+ ok | {error, closed}.
+-spec send(pid(), channel_id(), integer() | iodata(), timeout() | iodata()) ->
+ ok | {error, timeout} | {error, closed}.
+-spec send(pid(), channel_id(), integer(), iodata(), timeout()) ->
+ ok | {error, timeout} | {error, closed}.
%%
%%
%% Description: Sends channel data.
%%--------------------------------------------------------------------
-send(ConnectionManager, ChannelId, Data) ->
- send(ConnectionManager, ChannelId, 0, Data, infinity).
-send(ConnectionManager, ChannelId, Data, TimeOut) when is_integer(TimeOut) ->
- send(ConnectionManager, ChannelId, 0, Data, TimeOut);
-send(ConnectionManager, ChannelId, Data, infinity) ->
- send(ConnectionManager, ChannelId, 0, Data, infinity);
-send(ConnectionManager, ChannelId, Type, Data) ->
- send(ConnectionManager, ChannelId, Type, Data, infinity).
-send(ConnectionManager, ChannelId, Type, Data, TimeOut) ->
- ssh_connection_manager:send(ConnectionManager, ChannelId,
+send(ConnectionHandler, ChannelId, Data) ->
+ send(ConnectionHandler, ChannelId, 0, Data, infinity).
+send(ConnectionHandler, ChannelId, Data, TimeOut) when is_integer(TimeOut) ->
+ send(ConnectionHandler, ChannelId, 0, Data, TimeOut);
+send(ConnectionHandler, ChannelId, Data, infinity) ->
+ send(ConnectionHandler, ChannelId, 0, Data, infinity);
+send(ConnectionHandler, ChannelId, Type, Data) ->
+ send(ConnectionHandler, ChannelId, Type, Data, infinity).
+send(ConnectionHandler, ChannelId, Type, Data, TimeOut) ->
+ ssh_connection_handler:send(ConnectionHandler, ChannelId,
Type, Data, TimeOut).
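
A brief usage sketch of the send variants specified above (Conn and Chan as obtained from session_channel/2): data type 0 is ordinary output, type 1 is extended (stderr) data, and the timeout variants may return {error, timeout} or {error, closed}.

    ok = ssh_connection:send(Conn, Chan, <<"to stdout">>),           %% type 0, no timeout
    ok = ssh_connection:send(Conn, Chan, 1, <<"to stderr">>, 5000),  %% extended data, 5 s timeout
    ok = ssh_connection:send_eof(Conn, Chan).
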
%%--------------------------------------------------------------------
-%% Function: send_eof(ConnectionManager, ChannelId) ->
+-spec send_eof(pid(), channel_id()) -> ok | {error, closed}.
%%
%%
%% Description: Sends eof on the channel <ChannelId>.
%%--------------------------------------------------------------------
-send_eof(ConnectionManager, Channel) ->
- ssh_connection_manager:send_eof(ConnectionManager, Channel).
+send_eof(ConnectionHandler, Channel) ->
+ ssh_connection_handler:send_eof(ConnectionHandler, Channel).
%%--------------------------------------------------------------------
-%% Function: adjust_window(ConnectionManager, Channel, Bytes) ->
+-spec adjust_window(pid(), channel_id(), integer()) -> ok.
%%
%%
%% Description: Adjusts the ssh flowcontrol window.
%%--------------------------------------------------------------------
-adjust_window(ConnectionManager, Channel, Bytes) ->
- ssh_connection_manager:adjust_window(ConnectionManager, Channel, Bytes).
+adjust_window(ConnectionHandler, Channel, Bytes) ->
+ ssh_connection_handler:adjust_window(ConnectionHandler, Channel, Bytes).
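
adjust_window/3 is normally called from the channel process after it has consumed received data, so that the peer is allowed to send more. A sketch of the usual pattern in an ssh_channel callback (State is the callback's own state):

    handle_ssh_msg({ssh_cm, Conn, {data, ChannelId, _Type, Data}}, State) ->
        %% process Data, then hand the consumed window space back to the peer
        ssh_connection:adjust_window(Conn, ChannelId, size(Data)),
        {ok, State};
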
%%--------------------------------------------------------------------
-%% Function: setenv(ConnectionManager, ChannelId, Var, Value, TimeOut) ->
+-spec setenv(pid(), channel_id(), string(), string(), timeout()) -> success | failure.
%%
%%
%% Description: Environment variables may be passed to the shell/command to be
%% started later.
%%--------------------------------------------------------------------
-setenv(ConnectionManager, ChannelId, Var, Value, TimeOut) ->
- ssh_connection_manager:request(ConnectionManager, ChannelId,
+setenv(ConnectionHandler, ChannelId, Var, Value, TimeOut) ->
+ ssh_connection_handler:request(ConnectionHandler, ChannelId,
"env", true, [?string(Var), ?string(Value)], TimeOut).
%%--------------------------------------------------------------------
-%% Function: close(ConnectionManager, ChannelId) ->
+-spec close(pid(), channel_id()) -> ok.
%%
%%
%% Description: Sends a close message on the channel <ChannelId>.
%%--------------------------------------------------------------------
-close(ConnectionManager, ChannelId) ->
- ssh_connection_manager:close(ConnectionManager, ChannelId).
-
+close(ConnectionHandler, ChannelId) ->
+ ssh_connection_handler:close(ConnectionHandler, ChannelId).
%%--------------------------------------------------------------------
-%% Function: reply_request(ConnectionManager, WantReply, Status, CannelId) ->_
+-spec reply_request(pid(), boolean(), success | failure, channel_id()) -> ok.
%%
%%
%% Description: Send status replies to requests that want such replies.
%%--------------------------------------------------------------------
-reply_request(ConnectionManager, true, Status, ChannelId) ->
- ssh_connection_manager:reply_request(ConnectionManager, Status, ChannelId),
- ok;
+reply_request(ConnectionHandler, true, Status, ChannelId) ->
+ ssh_connection_handler:reply_request(ConnectionHandler, Status, ChannelId);
reply_request(_,false, _, _) ->
ok.
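
reply_request/4 is intended to be called from the channel process that received a want-reply request. A sketch of a clause in a custom CLI channel callback (for instance one registered with the daemon's ssh_cli option) acknowledging a shell request:

    handle_ssh_msg({ssh_cm, Conn, {shell, ChannelId, WantReply}}, State) ->
        ssh_connection:reply_request(Conn, WantReply, success, ChannelId),
        {ok, State};
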
-
%%--------------------------------------------------------------------
-%% Function: window_change(ConnectionManager, Channel, Width, Height) ->
-%%
-%%
-%% Description: Not yet officialy supported.
+%% Not yet officially supported! The following functions are part of the
+%% initial contributed ssh application. They are untested. Do we want them?
+%% Should they be documented and tested?
%%--------------------------------------------------------------------
-window_change(ConnectionManager, Channel, Width, Height) ->
- window_change(ConnectionManager, Channel, Width, Height, 0, 0).
-window_change(ConnectionManager, Channel, Width, Height,
+window_change(ConnectionHandler, Channel, Width, Height) ->
+ window_change(ConnectionHandler, Channel, Width, Height, 0, 0).
+window_change(ConnectionHandler, Channel, Width, Height,
PixWidth, PixHeight) ->
- ssh_connection_manager:request(ConnectionManager, Channel,
+ ssh_connection_handler:request(ConnectionHandler, Channel,
"window-change", false,
[?uint32(Width), ?uint32(Height),
?uint32(PixWidth), ?uint32(PixHeight)], 0).
-%%--------------------------------------------------------------------
-%% Function: signal(ConnectionManager, Channel, Sig) ->
-%%
-%%
-%% Description: Not yet officialy supported.
-%%--------------------------------------------------------------------
-signal(ConnectionManager, Channel, Sig) ->
- ssh_connection_manager:request(ConnectionManager, Channel,
+
+signal(ConnectionHandler, Channel, Sig) ->
+ ssh_connection_handler:request(ConnectionHandler, Channel,
"signal", false, [?string(Sig)], 0).
-%%--------------------------------------------------------------------
-%% Function: signal(ConnectionManager, Channel, Status) ->
-%%
-%%
-%% Description: Not yet officialy supported.
-%%--------------------------------------------------------------------
-exit_status(ConnectionManager, Channel, Status) ->
- ssh_connection_manager:request(ConnectionManager, Channel,
- "exit-status", false, [?uint32(Status)], 0).
+exit_status(ConnectionHandler, Channel, Status) ->
+ ssh_connection_handler:request(ConnectionHandler, Channel,
+ "exit-status", false, [?uint32(Status)], 0).
-%%--------------------------------------------------------------------
-%% Function: open_pty(ConnectionManager, Channel, TimeOut) ->
-%%
-%%
-%% Description: Not yet officialy supported.
-%%--------------------------------------------------------------------
-open_pty(ConnectionManager, Channel, TimeOut) ->
- open_pty(ConnectionManager, Channel,
+open_pty(ConnectionHandler, Channel, TimeOut) ->
+ open_pty(ConnectionHandler, Channel,
os:getenv("TERM"), 80, 24, [], TimeOut).
-open_pty(ConnectionManager, Channel, Term, Width, Height, PtyOpts, TimeOut) ->
- open_pty(ConnectionManager, Channel, Term, Width,
+open_pty(ConnectionHandler, Channel, Term, Width, Height, PtyOpts, TimeOut) ->
+ open_pty(ConnectionHandler, Channel, Term, Width,
Height, 0, 0, PtyOpts, TimeOut).
-open_pty(ConnectionManager, Channel, Term, Width, Height,
+open_pty(ConnectionHandler, Channel, Term, Width, Height,
PixWidth, PixHeight, PtyOpts, TimeOut) ->
- ssh_connection_manager:request(ConnectionManager,
+ ssh_connection_handler:request(ConnectionHandler,
Channel, "pty-req", true,
[?string(Term),
?uint32(Width), ?uint32(Height),
?uint32(PixWidth),?uint32(PixHeight),
encode_pty_opts(PtyOpts)], TimeOut).
-
-%%--------------------------------------------------------------------
-%% Function: direct_tcpip(ConnectionManager, RemoteHost,
-%% RemotePort, OrigIP, OrigPort, Timeout) ->
-%%
-%%
-%% Description: Not yet officialy supported.
-%%--------------------------------------------------------------------
-direct_tcpip(ConnectionManager, RemoteHost,
+direct_tcpip(ConnectionHandler, RemoteHost,
RemotePort, OrigIP, OrigPort, Timeout) ->
- direct_tcpip(ConnectionManager, RemoteHost, RemotePort, OrigIP, OrigPort,
+ direct_tcpip(ConnectionHandler, RemoteHost, RemotePort, OrigIP, OrigPort,
?DEFAULT_WINDOW_SIZE, ?DEFAULT_PACKET_SIZE, Timeout).
-direct_tcpip(ConnectionManager, RemoteIP, RemotePort, OrigIP, OrigPort,
+direct_tcpip(ConnectionHandler, RemoteIP, RemotePort, OrigIP, OrigPort,
InitialWindowSize, MaxPacketSize, Timeout) ->
case {encode_ip(RemoteIP), encode_ip(OrigIP)} of
{false, _} ->
@@ -262,7 +235,7 @@ direct_tcpip(ConnectionManager, RemoteIP, RemotePort, OrigIP, OrigPort,
{_, false} ->
{error, einval};
{RIP, OIP} ->
- ssh_connection_manager:open_channel(ConnectionManager,
+ ssh_connection_handler:open_channel(ConnectionHandler,
"direct-tcpip",
[?string(RIP),
?uint32(RemotePort),
@@ -272,34 +245,24 @@ direct_tcpip(ConnectionManager, RemoteIP, RemotePort, OrigIP, OrigPort,
MaxPacketSize,
Timeout)
end.
-%%--------------------------------------------------------------------
-%% Function: tcpip_forward(ConnectionManager, BindIP, BindPort) ->
-%%
-%%
-%% Description: Not yet officialy supported.
-%%--------------------------------------------------------------------
-tcpip_forward(ConnectionManager, BindIP, BindPort) ->
+
+tcpip_forward(ConnectionHandler, BindIP, BindPort) ->
case encode_ip(BindIP) of
false ->
{error, einval};
IPStr ->
- ssh_connection_manager:global_request(ConnectionManager,
+ ssh_connection_handler:global_request(ConnectionHandler,
"tcpip-forward", true,
[?string(IPStr),
?uint32(BindPort)])
end.
-%%--------------------------------------------------------------------
-%% Function: cancel_tcpip_forward(ConnectionManager, BindIP, Port) ->
-%%
-%%
-%% Description: Not yet officialy supported.
-%%--------------------------------------------------------------------
-cancel_tcpip_forward(ConnectionManager, BindIP, Port) ->
+
+cancel_tcpip_forward(ConnectionHandler, BindIP, Port) ->
case encode_ip(BindIP) of
false ->
{error, einval};
IPStr ->
- ssh_connection_manager:global_request(ConnectionManager,
+ ssh_connection_handler:global_request(ConnectionHandler,
"cancel-tcpip-forward", true,
[?string(IPStr),
?uint32(Port)])
@@ -308,22 +271,23 @@ cancel_tcpip_forward(ConnectionManager, BindIP, Port) ->
%%--------------------------------------------------------------------
%%% Internal API
%%--------------------------------------------------------------------
-channel_data(ChannelId, DataType, Data, Connection, ConnectionPid, From)
+channel_data(ChannelId, DataType, Data, Connection, From)
when is_list(Data)->
channel_data(ChannelId, DataType,
- list_to_binary(Data), Connection, ConnectionPid, From);
+ list_to_binary(Data), Connection, From);
channel_data(ChannelId, DataType, Data,
- #connection{channel_cache = Cache} = Connection, ConnectionPid,
+ #connection{channel_cache = Cache} = Connection,
From) ->
case ssh_channel:cache_lookup(Cache, ChannelId) of
#channel{remote_id = Id, sent_close = false} = Channel0 ->
- {SendList, Channel} = update_send_window(Channel0#channel{flow_control = From}, DataType,
- Data, Connection),
+ {SendList, Channel} =
+ update_send_window(Channel0#channel{flow_control = From}, DataType,
+ Data, Connection),
Replies =
lists:map(fun({SendDataType, SendData}) ->
- {connection_reply, ConnectionPid,
+ {connection_reply,
channel_data_msg(Id,
SendDataType,
SendData)}
@@ -333,7 +297,7 @@ channel_data(ChannelId, DataType, Data,
Cache),
{{replies, Replies ++ FlowCtrlMsgs}, Connection};
_ ->
- gen_server:reply(From, {error, closed}),
+ gen_fsm:reply(From, {error, closed}),
{noreply, Connection}
end.
@@ -341,7 +305,7 @@ handle_msg(#ssh_msg_channel_open_confirmation{recipient_channel = ChannelId,
sender_channel = RemoteId,
initial_window_size = WindowSz,
maximum_packet_size = PacketSz},
- #connection{channel_cache = Cache} = Connection0, _, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
#channel{remote_id = undefined} = Channel =
ssh_channel:cache_lookup(Cache, ChannelId),
@@ -357,7 +321,7 @@ handle_msg(#ssh_msg_channel_open_failure{recipient_channel = ChannelId,
reason = Reason,
description = Descr,
lang = Lang},
- #connection{channel_cache = Cache} = Connection0, _, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
ssh_channel:cache_delete(Cache, ChannelId),
{Reply, Connection} =
@@ -365,51 +329,59 @@ handle_msg(#ssh_msg_channel_open_failure{recipient_channel = ChannelId,
{{replies, [Reply]}, Connection};
handle_msg(#ssh_msg_channel_success{recipient_channel = ChannelId},
- #connection{channel_cache = Cache} = Connection0, _, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
- {Reply, Connection} = reply_msg(Channel, Connection0, success),
- {{replies, [Reply]}, Connection};
+ case reply_msg(Channel, Connection0, success) of
+ {[], Connection} ->
+ {noreply, Connection};
+ {Reply, Connection} ->
+ {{replies, [Reply]}, Connection}
+ end;
handle_msg(#ssh_msg_channel_failure{recipient_channel = ChannelId},
- #connection{channel_cache = Cache} = Connection0, _, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
- {Reply, Connection} = reply_msg(Channel, Connection0, failure),
- {{replies, [Reply]}, Connection};
+ case reply_msg(Channel, Connection0, failure) of
+ {[], Connection} ->
+ {noreply, Connection};
+ {Reply, Connection} ->
+ {{replies, [Reply]}, Connection}
+ end;
+
handle_msg(#ssh_msg_channel_eof{recipient_channel = ChannelId},
- #connection{channel_cache = Cache} = Connection0, _, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
{Reply, Connection} = reply_msg(Channel, Connection0, {eof, ChannelId}),
{{replies, [Reply]}, Connection};
handle_msg(#ssh_msg_channel_close{recipient_channel = ChannelId},
- #connection{channel_cache = Cache} = Connection0,
- ConnectionPid, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
case ssh_channel:cache_lookup(Cache, ChannelId) of
- #channel{sent_close = Closed, remote_id = RemoteId, flow_control = FlowControl} = Channel ->
+ #channel{sent_close = Closed, remote_id = RemoteId,
+ flow_control = FlowControl} = Channel ->
ssh_channel:cache_delete(Cache, ChannelId),
{CloseMsg, Connection} =
reply_msg(Channel, Connection0, {closed, ChannelId}),
-
- ConnReplyMsgs =
- case Closed of
- true -> [];
- false ->
- RemoteCloseMsg = channel_close_msg(RemoteId),
- [{connection_reply, ConnectionPid, RemoteCloseMsg}]
- end,
-
- %% if there was a send() in progress, make it fail
- SendReplyMsgs =
- case FlowControl of
- undefined -> [];
- From ->
- [{flow_control, From, {error, closed}}]
- end,
-
- Replies = ConnReplyMsgs ++ [CloseMsg] ++ SendReplyMsgs,
- {{replies, Replies}, Connection};
+ ConnReplyMsgs =
+ case Closed of
+ true -> [];
+ false ->
+ RemoteCloseMsg = channel_close_msg(RemoteId),
+ [{connection_reply, RemoteCloseMsg}]
+ end,
+
+ %% if there was a send() in progress, make it fail
+ SendReplyMsgs =
+ case FlowControl of
+ undefined -> [];
+ From ->
+ [{flow_control, From, {error, closed}}]
+ end,
+
+ Replies = ConnReplyMsgs ++ [CloseMsg] ++ SendReplyMsgs,
+ {{replies, Replies}, Connection};
undefined ->
{{replies, []}, Connection0}
@@ -417,21 +389,24 @@ handle_msg(#ssh_msg_channel_close{recipient_channel = ChannelId},
handle_msg(#ssh_msg_channel_data{recipient_channel = ChannelId,
data = Data},
- #connection{channel_cache = Cache} = Connection0, _, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
- #channel{recv_window_size = Size} = Channel =
- ssh_channel:cache_lookup(Cache, ChannelId),
- WantedSize = Size - size(Data),
- ssh_channel:cache_update(Cache, Channel#channel{
- recv_window_size = WantedSize}),
- {Replies, Connection} =
- channel_data_reply(Cache, Channel, Connection0, 0, Data),
- {{replies, Replies}, Connection};
+ case ssh_channel:cache_lookup(Cache, ChannelId) of
+ #channel{recv_window_size = Size} = Channel ->
+ WantedSize = Size - size(Data),
+ ssh_channel:cache_update(Cache, Channel#channel{
+ recv_window_size = WantedSize}),
+ {Replies, Connection} =
+ channel_data_reply(Cache, Channel, Connection0, 0, Data),
+ {{replies, Replies}, Connection};
+ undefined ->
+ {noreply, Connection0}
+ end;
handle_msg(#ssh_msg_channel_extended_data{recipient_channel = ChannelId,
data_type_code = DataType,
data = Data},
- #connection{channel_cache = Cache} = Connection0, _, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
#channel{recv_window_size = Size} = Channel =
ssh_channel:cache_lookup(Cache, ChannelId),
@@ -444,9 +419,7 @@ handle_msg(#ssh_msg_channel_extended_data{recipient_channel = ChannelId,
handle_msg(#ssh_msg_channel_window_adjust{recipient_channel = ChannelId,
bytes_to_add = Add},
- #connection{channel_cache = Cache} = Connection,
- ConnectionPid, _) ->
-
+ #connection{channel_cache = Cache} = Connection, _) ->
#channel{send_window_size = Size, remote_id = RemoteId} =
Channel0 = ssh_channel:cache_lookup(Cache, ChannelId),
@@ -455,8 +428,7 @@ handle_msg(#ssh_msg_channel_window_adjust{recipient_channel = ChannelId,
0, undefined, Connection),
Replies = lists:map(fun({Type, Data}) ->
- {connection_reply, ConnectionPid,
- channel_data_msg(RemoteId, Type, Data)}
+ {connection_reply, channel_data_msg(RemoteId, Type, Data)}
end, SendList),
FlowCtrlMsgs = flow_control(Channel, Cache),
{{replies, Replies ++ FlowCtrlMsgs}, Connection};
@@ -464,10 +436,9 @@ handle_msg(#ssh_msg_channel_window_adjust{recipient_channel = ChannelId,
handle_msg(#ssh_msg_channel_open{channel_type = "session" = Type,
sender_channel = RemoteId,
initial_window_size = WindowSz,
- maximum_packet_size = PacketSz}, Connection0,
- ConnectionPid, server) ->
+ maximum_packet_size = PacketSz}, Connection0, server) ->
- try setup_session(Connection0, ConnectionPid, RemoteId,
+ try setup_session(Connection0, RemoteId,
Type, WindowSz, PacketSz) of
Result ->
Result
@@ -475,20 +446,20 @@ handle_msg(#ssh_msg_channel_open{channel_type = "session" = Type,
FailMsg = channel_open_failure_msg(RemoteId,
?SSH_OPEN_CONNECT_FAILED,
"Connection refused", "en"),
- {{replies, [{connection_reply, ConnectionPid, FailMsg}]},
+ {{replies, [{connection_reply, FailMsg}]},
Connection0}
end;
handle_msg(#ssh_msg_channel_open{channel_type = "session",
sender_channel = RemoteId},
- Connection, ConnectionPid, client) ->
+ Connection, client) ->
%% Client implementations SHOULD reject any session channel open
%% requests to make it more difficult for a corrupt server to attack the
	    %% client. See RFC 4254 6.1.
FailMsg = channel_open_failure_msg(RemoteId,
?SSH_OPEN_CONNECT_FAILED,
"Connection refused", "en"),
- {{replies, [{connection_reply, ConnectionPid, FailMsg}]},
+ {{replies, [{connection_reply, FailMsg}]},
Connection};
handle_msg(#ssh_msg_channel_open{channel_type = "forwarded-tcpip" = Type,
@@ -496,8 +467,7 @@ handle_msg(#ssh_msg_channel_open{channel_type = "forwarded-tcpip" = Type,
initial_window_size = RWindowSz,
maximum_packet_size = RPacketSz,
data = Data},
- #connection{channel_cache = Cache} = Connection0,
- ConnectionPid, server) ->
+ #connection{channel_cache = Cache} = Connection0, server) ->
<<?UINT32(ALen), Address:ALen/binary, ?UINT32(Port),
?UINT32(OLen), Orig:OLen/binary, ?UINT32(OrigPort)>> = Data,
@@ -507,7 +477,7 @@ handle_msg(#ssh_msg_channel_open{channel_type = "forwarded-tcpip" = Type,
?SSH_OPEN_CONNECT_FAILED,
"Connection refused", "en"),
{{replies,
- [{connection_reply, ConnectionPid, FailMsg}]}, Connection0};
+ [{connection_reply, FailMsg}]}, Connection0};
ChannelPid ->
{ChannelId, Connection1} = new_channel_id(Connection0),
LWindowSz = ?DEFAULT_WINDOW_SIZE,
@@ -528,32 +498,31 @@ handle_msg(#ssh_msg_channel_open{channel_type = "forwarded-tcpip" = Type,
{open, Channel, {forwarded_tcpip,
decode_ip(Address), Port,
decode_ip(Orig), OrigPort}}),
- {{replies, [{connection_reply, ConnectionPid, OpenConfMsg},
+ {{replies, [{connection_reply, OpenConfMsg},
OpenMsg]}, Connection}
end;
handle_msg(#ssh_msg_channel_open{channel_type = "forwarded-tcpip",
sender_channel = RemoteId},
- Connection, ConnectionPid, client) ->
+ Connection, client) ->
%% Client implementations SHOULD reject direct TCP/IP open requests for
%% security reasons. See RFC 4254 7.2.
FailMsg = channel_open_failure_msg(RemoteId,
?SSH_OPEN_CONNECT_FAILED,
"Connection refused", "en"),
- {{replies, [{connection_reply, ConnectionPid, FailMsg}]}, Connection};
+ {{replies, [{connection_reply, FailMsg}]}, Connection};
-handle_msg(#ssh_msg_channel_open{sender_channel = RemoteId}, Connection,
- ConnectionPid, _) ->
+handle_msg(#ssh_msg_channel_open{sender_channel = RemoteId}, Connection, _) ->
FailMsg = channel_open_failure_msg(RemoteId,
?SSH_OPEN_ADMINISTRATIVELY_PROHIBITED,
"Not allowed", "en"),
- {{replies, [{connection_reply, ConnectionPid, FailMsg}]}, Connection};
+ {{replies, [{connection_reply, FailMsg}]}, Connection};
handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
request_type = "exit-status",
data = Data},
- #connection{channel_cache = Cache} = Connection, _, _) ->
+ #connection{channel_cache = Cache} = Connection, _) ->
<<?UINT32(Status)>> = Data,
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
{Reply, Connection} =
@@ -564,8 +533,7 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
request_type = "exit-signal",
want_reply = false,
data = Data},
- #connection{channel_cache = Cache} = Connection0,
- ConnectionPid, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
<<?UINT32(SigLen), SigName:SigLen/binary,
?BOOLEAN(_Core),
?UINT32(ErrLen), Err:ErrLen/binary,
@@ -578,14 +546,14 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
binary_to_list(Err),
binary_to_list(Lang)}),
CloseMsg = channel_close_msg(RemoteId),
- {{replies, [{connection_reply, ConnectionPid, CloseMsg}, Reply]},
+ {{replies, [{connection_reply, CloseMsg}, Reply]},
Connection};
handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
request_type = "xon-xoff",
want_reply = false,
data = Data},
- #connection{channel_cache = Cache} = Connection, _, _) ->
+ #connection{channel_cache = Cache} = Connection, _) ->
<<?BOOLEAN(CDo)>> = Data,
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
{Reply, Connection} =
@@ -596,7 +564,7 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
request_type = "window-change",
want_reply = false,
data = Data},
- #connection{channel_cache = Cache} = Connection0, _, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
<<?UINT32(Width),?UINT32(Height),
?UINT32(PixWidth), ?UINT32(PixHeight)>> = Data,
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
@@ -609,7 +577,7 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
request_type = "signal",
data = Data},
- #connection{channel_cache = Cache} = Connection0, _, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
<<?UINT32(SigLen), SigName:SigLen/binary>> = Data,
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
@@ -622,8 +590,7 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
request_type = "subsystem",
want_reply = WantReply,
data = Data},
- #connection{channel_cache = Cache} = Connection,
- ConnectionPid, server) ->
+ #connection{channel_cache = Cache} = Connection, server) ->
<<?UINT32(SsLen), SsName:SsLen/binary>> = Data,
#channel{remote_id = RemoteId} = Channel0 =
@@ -631,22 +598,23 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
ReplyMsg = {subsystem, ChannelId, WantReply, binary_to_list(SsName)},
- try start_subsytem(SsName, Connection, Channel0, ReplyMsg) of
- {ok, Pid} ->
- erlang:monitor(process, Pid),
- Channel = Channel0#channel{user = Pid},
- ssh_channel:cache_update(Cache, Channel),
- Reply = {connection_reply, ConnectionPid,
- channel_success_msg(RemoteId)},
- {{replies, [Reply]}, Connection}
- catch _:_ ->
- Reply = {connection_reply, ConnectionPid,
- channel_failure_msg(RemoteId)},
- {{replies, [Reply]}, Connection}
+ try
+ {ok, Pid} = start_subsytem(SsName, Connection, Channel0, ReplyMsg),
+ erlang:monitor(process, Pid),
+ Channel = Channel0#channel{user = Pid},
+ ssh_channel:cache_update(Cache, Channel),
+ Reply = {connection_reply,
+ channel_success_msg(RemoteId)},
+ {{replies, [Reply]}, Connection}
+ catch
+ _:_ ->
+ ErrorReply = {connection_reply,
+ channel_failure_msg(RemoteId)},
+ {{replies, [ErrorReply]}, Connection}
end;
handle_msg(#ssh_msg_channel_request{request_type = "subsystem"},
- Connection, _, client) ->
+ Connection, client) ->
%% The client SHOULD ignore subsystem requests. See RFC 4254 6.5.
{{replies, []}, Connection};
@@ -654,8 +622,7 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
request_type = "pty-req",
want_reply = WantReply,
data = Data},
- #connection{channel_cache = Cache} = Connection,
- ConnectionPid, server) ->
+ #connection{channel_cache = Cache} = Connection, server) ->
<<?UINT32(TermLen), BTermName:TermLen/binary,
?UINT32(Width),?UINT32(Height),
?UINT32(PixWidth), ?UINT32(PixHeight),
@@ -667,27 +634,26 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
- handle_cli_msg(Connection, ConnectionPid, Channel,
+ handle_cli_msg(Connection, Channel,
{pty, ChannelId, WantReply, PtyRequest});
handle_msg(#ssh_msg_channel_request{request_type = "pty-req"},
- Connection, _, client) ->
+ Connection, client) ->
%% The client SHOULD ignore pty requests. See RFC 4254 6.2.
{{replies, []}, Connection};
handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
request_type = "shell",
want_reply = WantReply},
- #connection{channel_cache = Cache} = Connection,
- ConnectionPid, server) ->
+ #connection{channel_cache = Cache} = Connection, server) ->
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
- handle_cli_msg(Connection, ConnectionPid, Channel,
+ handle_cli_msg(Connection, Channel,
{shell, ChannelId, WantReply});
handle_msg(#ssh_msg_channel_request{request_type = "shell"},
- Connection, _, client) ->
+ Connection, client) ->
%% The client SHOULD ignore shell requests. See RFC 4254 6.5.
{{replies, []}, Connection};
@@ -695,17 +661,16 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
request_type = "exec",
want_reply = WantReply,
data = Data},
- #connection{channel_cache = Cache} = Connection,
- ConnectionPid, server) ->
+ #connection{channel_cache = Cache} = Connection, server) ->
<<?UINT32(Len), Command:Len/binary>> = Data,
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
- handle_cli_msg(Connection, ConnectionPid, Channel,
+ handle_cli_msg(Connection, Channel,
{exec, ChannelId, WantReply, binary_to_list(Command)});
handle_msg(#ssh_msg_channel_request{request_type = "exec"},
- Connection, _, client) ->
+ Connection, client) ->
%% The client SHOULD ignore exec requests. See RFC 4254 6.5.
{{replies, []}, Connection};
@@ -713,31 +678,30 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
request_type = "env",
want_reply = WantReply,
data = Data},
- #connection{channel_cache = Cache} = Connection,
- ConnectionPid, server) ->
+ #connection{channel_cache = Cache} = Connection, server) ->
<<?UINT32(VarLen),
Var:VarLen/binary, ?UINT32(ValueLen), Value:ValueLen/binary>> = Data,
Channel = ssh_channel:cache_lookup(Cache, ChannelId),
- handle_cli_msg(Connection, ConnectionPid, Channel,
+ handle_cli_msg(Connection, Channel,
{env, ChannelId, WantReply, Var, Value});
handle_msg(#ssh_msg_channel_request{request_type = "env"},
- Connection, _, client) ->
+ Connection, client) ->
%% The client SHOULD ignore env requests.
{{replies, []}, Connection};
handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
request_type = _Other,
- want_reply = WantReply}, #connection{channel_cache = Cache} = Connection,
- ConnectionPid, _) ->
+ want_reply = WantReply},
+ #connection{channel_cache = Cache} = Connection, _) ->
if WantReply == true ->
case ssh_channel:cache_lookup(Cache, ChannelId) of
#channel{remote_id = RemoteId} ->
FailMsg = channel_failure_msg(RemoteId),
- {{replies, [{connection_reply, ConnectionPid, FailMsg}]},
+ {{replies, [{connection_reply, FailMsg}]},
Connection};
			undefined -> %% Channel has been closed
{noreply, Connection}
@@ -748,61 +712,74 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
handle_msg(#ssh_msg_global_request{name = _Type,
want_reply = WantReply,
- data = _Data}, Connection,
- ConnectionPid, _) ->
+ data = _Data}, Connection, _) ->
if WantReply == true ->
FailMsg = request_failure_msg(),
- {{replies, [{connection_reply, ConnectionPid, FailMsg}]},
+ {{replies, [{connection_reply, FailMsg}]},
Connection};
true ->
{noreply, Connection}
end;
-%%% This transport message will also be handled at the connection level
+handle_msg(#ssh_msg_request_failure{},
+ #connection{requests = [{_, From} | Rest]} = Connection, _) ->
+ {{replies, [{channel_requst_reply, From, {failure, <<>>}}]},
+ Connection#connection{requests = Rest}};
+handle_msg(#ssh_msg_request_success{data = Data},
+ #connection{requests = [{_, From} | Rest]} = Connection, _) ->
+ {{replies, [{channel_requst_reply, From, {success, Data}}]},
+ Connection#connection{requests = Rest}};
+
handle_msg(#ssh_msg_disconnect{code = Code,
description = Description,
language = _Lang },
- #connection{channel_cache = Cache} = Connection0, _, _) ->
+ #connection{channel_cache = Cache} = Connection0, _) ->
{Connection, Replies} =
ssh_channel:cache_foldl(fun(Channel, {Connection1, Acc}) ->
{Reply, Connection2} =
reply_msg(Channel,
- Connection1, {closed, Channel#channel.local_id}),
+ Connection1,
+ {closed, Channel#channel.local_id}),
{Connection2, [Reply | Acc]}
end, {Connection0, []}, Cache),
ssh_channel:cache_delete(Cache),
{disconnect, {Code, Description}, {{replies, Replies}, Connection}}.
-handle_cli_msg(#connection{channel_cache = Cache} = Connection0,
- ConnectionPid,
+handle_cli_msg(#connection{channel_cache = Cache} = Connection,
#channel{user = undefined,
+ remote_id = RemoteId,
local_id = ChannelId} = Channel0, Reply0) ->
- case (catch start_cli(Connection0, ChannelId)) of
+ case (catch start_cli(Connection, ChannelId)) of
{ok, Pid} ->
erlang:monitor(process, Pid),
Channel = Channel0#channel{user = Pid},
ssh_channel:cache_update(Cache, Channel),
- {Reply, Connection} = reply_msg(Channel, Connection0, Reply0),
- {{replies, [Reply]}, Connection};
- _ ->
- Reply = {connection_reply, ConnectionPid,
- request_failure_msg()},
- {{replies, [Reply]}, Connection0}
+ Reply = {connection_reply,
+ channel_success_msg(RemoteId)},
+ {{replies, [{channel_data, Pid, Reply0}, Reply]}, Connection};
+ _Other ->
+ Reply = {connection_reply,
+ channel_failure_msg(RemoteId)},
+ {{replies, [Reply]}, Connection}
end;
-handle_cli_msg(Connection0, _, Channel, Reply0) ->
+handle_cli_msg(Connection0, Channel, Reply0) ->
{Reply, Connection} = reply_msg(Channel, Connection0, Reply0),
{{replies, [Reply]}, Connection}.
-
channel_eof_msg(ChannelId) ->
#ssh_msg_channel_eof{recipient_channel = ChannelId}.
channel_close_msg(ChannelId) ->
#ssh_msg_channel_close {recipient_channel = ChannelId}.
+channel_status_msg({success, ChannelId}) ->
+ channel_success_msg(ChannelId);
+channel_status_msg({failure, ChannelId}) ->
+ channel_failure_msg(ChannelId).
+
channel_success_msg(ChannelId) ->
#ssh_msg_channel_success{recipient_channel = ChannelId}.
@@ -880,70 +857,6 @@ bound_channel(IP, Port, Connection) ->
_ -> undefined
end.
-messages() ->
- [ {ssh_msg_global_request, ?SSH_MSG_GLOBAL_REQUEST,
- [string,
- boolean,
- '...']},
-
- {ssh_msg_request_success, ?SSH_MSG_REQUEST_SUCCESS,
- ['...']},
-
- {ssh_msg_request_failure, ?SSH_MSG_REQUEST_FAILURE,
- []},
-
- {ssh_msg_channel_open, ?SSH_MSG_CHANNEL_OPEN,
- [string,
- uint32,
- uint32,
- uint32,
- '...']},
-
- {ssh_msg_channel_open_confirmation, ?SSH_MSG_CHANNEL_OPEN_CONFIRMATION,
- [uint32,
- uint32,
- uint32,
- uint32,
- '...']},
-
- {ssh_msg_channel_open_failure, ?SSH_MSG_CHANNEL_OPEN_FAILURE,
- [uint32,
- uint32,
- string,
- string]},
-
- {ssh_msg_channel_window_adjust, ?SSH_MSG_CHANNEL_WINDOW_ADJUST,
- [uint32,
- uint32]},
-
- {ssh_msg_channel_data, ?SSH_MSG_CHANNEL_DATA,
- [uint32,
- binary]},
-
- {ssh_msg_channel_extended_data, ?SSH_MSG_CHANNEL_EXTENDED_DATA,
- [uint32,
- uint32,
- binary]},
-
- {ssh_msg_channel_eof, ?SSH_MSG_CHANNEL_EOF,
- [uint32]},
-
- {ssh_msg_channel_close, ?SSH_MSG_CHANNEL_CLOSE,
- [uint32]},
-
- {ssh_msg_channel_request, ?SSH_MSG_CHANNEL_REQUEST,
- [uint32,
- string,
- boolean,
- '...']},
-
- {ssh_msg_channel_success, ?SSH_MSG_CHANNEL_SUCCESS,
- [uint32]},
-
- {ssh_msg_channel_failure, ?SSH_MSG_CHANNEL_FAILURE,
- [uint32]}
- ].
-
encode_ip(Addr) when is_tuple(Addr) ->
case catch inet_parse:ntoa(Addr) of
{'EXIT',_} -> false;
@@ -965,14 +878,14 @@ start_channel(Cb, Id, Args, SubSysSup) ->
start_channel(Cb, Id, Args, SubSysSup, Exec) ->
ChildSpec = child_spec(Cb, Id, Args, Exec),
- ChannelSup =ssh_subsystem_sup:channel_supervisor(SubSysSup),
+ ChannelSup = ssh_subsystem_sup:channel_supervisor(SubSysSup),
ssh_channel_sup:start_child(ChannelSup, ChildSpec).
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
setup_session(#connection{channel_cache = Cache} = Connection0,
- ConnectionPid, RemoteId,
+ RemoteId,
Type, WindowSize, PacketSize) ->
{ChannelId, Connection} = new_channel_id(Connection0),
@@ -990,7 +903,7 @@ setup_session(#connection{channel_cache = Cache} = Connection0,
?DEFAULT_WINDOW_SIZE,
?DEFAULT_PACKET_SIZE),
- {{replies, [{connection_reply, ConnectionPid, OpenConfMsg}]}, Connection}.
+ {{replies, [{connection_reply, OpenConfMsg}]}, Connection}.
check_subsystem("sftp"= SsName, Options) ->
@@ -1019,35 +932,21 @@ child_spec(Callback, Id, Args, Exec) ->
Type = worker,
{Name, StartFunc, Restart, Shutdown, Type, [ssh_channel]}.
-%% Backwards compatibility
-start_cli(#connection{address = Address, port = Port, cli_spec = {Fun, [Shell]},
- options = Options},
- _ChannelId) when is_function(Fun) ->
- case Fun(Shell, Address, Port, Options) of
- NewFun when is_function(NewFun) ->
- {ok, NewFun()};
- Pid when is_pid(Pid) ->
- {ok, Pid}
- end;
-
+start_cli(#connection{cli_spec = no_cli}, _) ->
+ {error, cli_disabled};
start_cli(#connection{cli_spec = {CbModule, Args}, exec = Exec,
sub_system_supervisor = SubSysSup}, ChannelId) ->
start_channel(CbModule, ChannelId, Args, SubSysSup, Exec).
-start_subsytem(BinName, #connection{address = Address, port = Port,
- options = Options,
+start_subsytem(BinName, #connection{options = Options,
sub_system_supervisor = SubSysSup},
- #channel{local_id = ChannelId, remote_id = RemoteChannelId},
- ReplyMsg) ->
+ #channel{local_id = ChannelId}, _ReplyMsg) ->
Name = binary_to_list(BinName),
case check_subsystem(Name, Options) of
{Callback, Opts} when is_atom(Callback), Callback =/= none ->
start_channel(Callback, ChannelId, Opts, SubSysSup);
{Other, _} when Other =/= none ->
- handle_backwards_compatibility(Other, self(),
- ChannelId, RemoteChannelId,
- Options, Address, Port,
- {ssh_cm, self(), ReplyMsg})
+ {error, legacy_option_not_supported}
end.
channel_data_reply(_, #channel{local_id = ChannelId} = Channel,
@@ -1070,9 +969,12 @@ reply_msg(Channel, Connection, failure = Reply) ->
request_reply_or_data(Channel, Connection, Reply);
reply_msg(Channel, Connection, {closed, _} = Reply) ->
request_reply_or_data(Channel, Connection, Reply);
+reply_msg(undefined, Connection, _Reply) ->
+ {noreply, Connection};
reply_msg(#channel{user = ChannelPid}, Connection, Reply) ->
{{channel_data, ChannelPid, Reply}, Connection}.
+
request_reply_or_data(#channel{local_id = ChannelId, user = ChannelPid},
#connection{requests = Requests} =
Connection, Reply) ->
@@ -1080,10 +982,13 @@ request_reply_or_data(#channel{local_id = ChannelId, user = ChannelPid},
{value, {ChannelId, From}} ->
{{channel_requst_reply, From, Reply},
Connection#connection{requests =
- lists:keydelete(ChannelId, 1, Requests)}};
+ lists:keydelete(ChannelId, 1, Requests)}};
+ false when (Reply == success) or (Reply == failure) ->
+ {[], Connection};
false ->
{{channel_data, ChannelPid, Reply}, Connection}
end.
+
update_send_window(Channel, _, undefined,
#connection{channel_cache = Cache}) ->
do_update_send_window(Channel, Channel#channel.send_buf, Cache);
@@ -1139,7 +1044,7 @@ flow_control([], Channel, Cache) ->
[];
flow_control([_|_], #channel{flow_control = From,
- send_buf = []} = Channel, Cache) when From =/= undefined ->
+ send_buf = []} = Channel, Cache) when From =/= undefined ->
[{flow_control, Cache, Channel, From, ok}];
flow_control(_,_,_) ->
[].
@@ -1341,43 +1246,3 @@ decode_ip(Addr) when is_binary(Addr) ->
{ok,A} -> A
end.
-%% This is really awful and that is why it is beeing phased out.
-handle_backwards_compatibility({_,_,_,_,_,_} = ChildSpec, _, _, _, _,
- Address, Port, _) ->
- SystemSup = ssh_system_sup:system_supervisor(Address, Port),
- ChannelSup = ssh_system_sup:channel_supervisor(SystemSup),
- ssh_channel_sup:start_child(ChannelSup, ChildSpec);
-
-handle_backwards_compatibility(Module, ConnectionManager, ChannelId,
- RemoteChannelId, Opts,
- _, _, Msg) when is_atom(Module) ->
- {ok, SubSystemPid} = gen_server:start_link(Module, [Opts], []),
- SubSystemPid !
- {ssh_cm, ConnectionManager,
- {open, ChannelId, RemoteChannelId, {session}}},
- SubSystemPid ! Msg,
- {ok, SubSystemPid};
-
-handle_backwards_compatibility(Fun, ConnectionManager, ChannelId,
- RemoteChannelId,
- _, _, _, Msg) when is_function(Fun) ->
- SubSystemPid = Fun(),
- SubSystemPid !
- {ssh_cm, ConnectionManager,
- {open, ChannelId, RemoteChannelId, {session}}},
- SubSystemPid ! Msg,
- {ok, SubSystemPid};
-
-handle_backwards_compatibility(ChildSpec,
- ConnectionManager,
- ChannelId, RemoteChannelId, _,
- Address, Port, Msg) ->
- SystemSup = ssh_system_sup:system_supervisor(Address, Port),
- ChannelSup = ssh_system_sup:channel_supervisor(SystemSup),
- {ok, SubSystemPid}
- = ssh_channel_sup:start_child(ChannelSup, ChildSpec),
- SubSystemPid !
- {ssh_cm, ConnectionManager,
- {open, ChannelId, RemoteChannelId, {session}}},
- SubSystemPid ! Msg,
- {ok, SubSystemPid}.
diff --git a/lib/ssh/src/ssh_connection_controler.erl b/lib/ssh/src/ssh_connection_controler.erl
deleted file mode 100644
index ca3e62dc83..0000000000
--- a/lib/ssh/src/ssh_connection_controler.erl
+++ /dev/null
@@ -1,137 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2009-2010. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%--------------------------------------------------------------------
-%% File : ssh_connection_controler.erl
-%% Description :
-%%
-%%--------------------------------------------------------------------
-
--module(ssh_connection_controler).
-
--behaviour(gen_server).
-
-%%-----------------------------------------------------------------
-%% External exports
-%%-----------------------------------------------------------------
--export([start_link/1, start_handler_child/2, start_manager_child/2,
- connection_manager/1]).
-
-%%-----------------------------------------------------------------
-%% Internal exports
-%%-----------------------------------------------------------------
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- code_change/3, terminate/2, stop/1]).
-
--record(state, {role, manager, handler, timeout}).
-
-%%-----------------------------------------------------------------
-%% External interface functions
-%%-----------------------------------------------------------------
-%%-----------------------------------------------------------------
-%% Func: start/0
-%%-----------------------------------------------------------------
-start_link(Args) ->
- gen_server:start_link(?MODULE, [Args], []).
-
-%% Will be called from the manager child process
-start_handler_child(ServerRef, Args) ->
- gen_server:call(ServerRef, {handler, self(), Args}, infinity).
-
-%% Will be called from the acceptor process
-start_manager_child(ServerRef, Args) ->
- gen_server:call(ServerRef, {manager, Args}, infinity).
-
-connection_manager(ServerRef) ->
- {ok, gen_server:call(ServerRef, manager, infinity)}.
-
-%%-----------------------------------------------------------------
-%% Internal interface functions
-%%-----------------------------------------------------------------
-%%-----------------------------------------------------------------
-%% Func: stop/1
-%%-----------------------------------------------------------------
-stop(Pid) ->
- gen_server:cast(Pid, stop).
-
-%%-----------------------------------------------------------------
-%% Server functions
-%%-----------------------------------------------------------------
-%%-----------------------------------------------------------------
-%% Func: init/1
-%%-----------------------------------------------------------------
-init([Opts]) ->
- process_flag(trap_exit, true),
- case proplists:get_value(role, Opts) of
- client ->
- {ok, Manager} = ssh_connection_manager:start_link([client, Opts]),
- {ok, #state{role = client, manager = Manager}};
- _server ->
- %% Children started by acceptor process
- {ok, #state{role = server}}
- end.
-
-
-%%-----------------------------------------------------------------
-%% Func: terminate/2
-%%-----------------------------------------------------------------
-terminate(_Reason, #state{}) ->
- ok.
-
-%%-----------------------------------------------------------------
-%% Func: handle_call/3
-%%-----------------------------------------------------------------
-handle_call({handler, Pid, [Role, Socket, Opts]}, _From, State) ->
- {ok, Handler} = ssh_connection_handler:start_link(Role, Pid, Socket, Opts),
- {reply, {ok, Handler}, State#state{handler = Handler}};
-handle_call({manager, [server = Role, Socket, Opts, SubSysSup]}, _From, State) ->
- {ok, Manager} = ssh_connection_manager:start_link([Role, Socket, Opts, SubSysSup]),
- {reply, {ok, Manager}, State#state{manager = Manager}};
-handle_call({manager, [client = Role | Opts]}, _From, State) ->
- {ok, Manager} = ssh_connection_manager:start_link([Role, Opts]),
- {reply, {ok, Manager}, State#state{manager = Manager}};
-handle_call(manager, _From, State) ->
- {reply, State#state.manager, State};
-handle_call(stop, _From, State) ->
- {stop, normal, ok, State};
-handle_call(_, _, State) ->
- {noreply, State, State#state.timeout}.
-
-%%-----------------------------------------------------------------
-%% Func: handle_cast/2
-%%-----------------------------------------------------------------
-handle_cast(stop, State) ->
- {stop, normal, State};
-handle_cast(_, State) ->
- {noreply, State, State#state.timeout}.
-
-%%-----------------------------------------------------------------
-%% Func: handle_info/2
-%%-----------------------------------------------------------------
-%% handle_info(ssh_connected, State) ->
-%% {stop, normal, State};
-%% Servant termination.
-handle_info({'EXIT', _Pid, Reason}, State) ->
- {stop, Reason, State}.
-
-%%-----------------------------------------------------------------
-%% Func: code_change/3
-%%-----------------------------------------------------------------
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 9de4dd5967..3462b98172 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -18,10 +18,11 @@
%%
%%
%%----------------------------------------------------------------------
-%% Purpose: Handles the setup of an ssh connection, e.i. both the
-%% setup SSH Transport Layer Protocol (RFC 4253) and Authentication
-%% Protocol (RFC 4252). Details of the different protocols are
-%% implemented in ssh_transport.erl, ssh_auth.erl
+%% Purpose: Handles an ssh connection, i.e. the SSH Transport Layer
+%% Protocol (RFC 4253), the Authentication Protocol (RFC 4252) and the
+%% SSH Connection Protocol (RFC 4254). Details of the different
+%% protocols are implemented in ssh_transport.erl, ssh_auth.erl and
+%% ssh_connection.erl.
%% ----------------------------------------------------------------------
-module(ssh_connection_handler).
@@ -33,10 +34,14 @@
-include("ssh_auth.hrl").
-include("ssh_connect.hrl").
--export([start_link/4, send/2, renegotiate/1, send_event/2,
- connection_info/3,
- peer_address/1,
- renegotiate_data/1]).
+-export([start_link/3]).
+
+%% Internal application API
+-export([open_channel/6, reply_request/3, request/6, request/7,
+ global_request/4, send/5, send_eof/2, info/1, info/2,
+ connection_info/2, channel_info/3,
+ adjust_window/3, close/2, stop/1, renegotiate/1, renegotiate_data/1,
+ start_connection/4]).
%% gen_fsm callbacks
-export([hello/2, kexinit/2, key_exchange/2, new_keys/2,
@@ -45,10 +50,14 @@
-export([init/1, handle_event/3,
handle_sync_event/4, handle_info/3, terminate/3, code_change/4]).
-%% spawn export
--export([ssh_info_handler/3]).
-
-record(state, {
+ role,
+ client,
+ starter,
+ auth_user,
+ connection_state,
+ latest_channel_id = 0,
+ idle_timer_ref,
transport_protocol, % ex: tcp
transport_cb,
transport_close_tag,
@@ -59,105 +68,234 @@
undecoded_packet_length, % integer()
key_exchange_init_msg, % #ssh_msg_kexinit{}
renegotiate = false, % boolean()
- manager, % pid()
connection_queue,
address,
port,
opts
}).
--define(DBG_MESSAGE, true).
+-type state_name() :: hello | kexinit | key_exchange | new_keys | userauth | connection.
+-type gen_fsm_state_return() :: {next_state, state_name(), term()} |
+ {next_state, state_name(), term(), timeout()} |
+ {stop, term(), term()}.
%%====================================================================
%% Internal application API
%%====================================================================
+
%%--------------------------------------------------------------------
-%% Function: start_link() -> ok,Pid} | ignore | {error,Error}
-%% Description:Creates a gen_fsm process which calls Module:init/1 to
-%% initialize. To ensure a synchronized start-up procedure, this function
-%% does not return until Module:init/1 has returned.
+-spec start_connection(client | server, port(), proplists:proplist(),
+ timeout()) -> {ok, pid()} | {error, term()}.
%%--------------------------------------------------------------------
-start_link(Role, Manager, Socket, Options) ->
- gen_fsm:start_link(?MODULE, [Role, Manager, Socket, Options], []).
-
-send(ConnectionHandler, Data) ->
- send_all_state_event(ConnectionHandler, {send, Data}).
+start_connection(client = Role, Socket, Options, Timeout) ->
+ try
+ {ok, Pid} = sshc_sup:start_child([Role, Socket, Options]),
+ {_, Callback, _} =
+ proplists:get_value(transport, Options, {tcp, gen_tcp, tcp_closed}),
+ ok = socket_control(Socket, Pid, Callback),
+ Ref = erlang:monitor(process, Pid),
+ handshake(Pid, Ref, Timeout)
+ catch
+ exit:{noproc, _} ->
+ {error, ssh_not_started};
+ _:Error ->
+ {error, Error}
+ end;
-renegotiate(ConnectionHandler) ->
- send_all_state_event(ConnectionHandler, renegotiate).
-
-renegotiate_data(ConnectionHandler) ->
- send_all_state_event(ConnectionHandler, data_size).
-connection_info(ConnectionHandler, From, Options) ->
- send_all_state_event(ConnectionHandler, {info, From, Options}).
+start_connection(server = Role, Socket, Options, Timeout) ->
+ try
+ Sups = proplists:get_value(supervisors, Options),
+ ConnectionSup = proplists:get_value(connection_sup, Sups),
+ Opts = [{supervisors, Sups}, {user_pid, self()} | proplists:get_value(ssh_opts, Options, [])],
+ {ok, Pid} = ssh_connection_sup:start_child(ConnectionSup, [Role, Socket, Opts]),
+ {_, Callback, _} = proplists:get_value(transport, Options, {tcp, gen_tcp, tcp_closed}),
+ socket_control(Socket, Pid, Callback),
+ Ref = erlang:monitor(process, Pid),
+ handshake(Pid, Ref, Timeout)
+ catch
+ exit:{noproc, _} ->
+ {error, ssh_not_started};
+ _:Error ->
+ {error, Error}
+ end.
-%% Replaced with option to connection_info/3. For now keep
-%% for backwards compatibility
-peer_address(ConnectionHandler) ->
- sync_send_all_state_event(ConnectionHandler, peer_address).
+start_link(Role, Socket, Options) ->
+ {ok, proc_lib:spawn_link(?MODULE, init, [[Role, Socket, Options]])}.
-%%====================================================================
-%% gen_fsm callbacks
-%%====================================================================
-%%--------------------------------------------------------------------
-%% Function: init(Args) -> {ok, StateName, State} |
-%% {ok, StateName, State, Timeout} |
-%% ignore |
-%% {stop, StopReason}
-%% Description:Whenever a gen_fsm is started using gen_fsm:start/[3,4] or
-%% gen_fsm:start_link/3,4, this function is called by the new process to
-%% initialize.
-%%--------------------------------------------------------------------
-init([Role, Manager, Socket, SshOpts]) ->
+init([Role, Socket, SshOpts]) ->
process_flag(trap_exit, true),
{NumVsn, StrVsn} = ssh_transport:versions(Role, SshOpts),
- ssh_bits:install_messages(ssh_transport:transport_messages(NumVsn)),
{Protocol, Callback, CloseTag} =
proplists:get_value(transport, SshOpts, {tcp, gen_tcp, tcp_closed}),
+ Cache = ssh_channel:cache_create(),
+ State0 = #state{
+ role = Role,
+ connection_state = #connection{channel_cache = Cache,
+ channel_id_seed = 0,
+ port_bindings = [],
+ requests = [],
+ options = SshOpts},
+ socket = Socket,
+ decoded_data_buffer = <<>>,
+ encoded_data_buffer = <<>>,
+ transport_protocol = Protocol,
+ transport_cb = Callback,
+ transport_close_tag = CloseTag,
+ opts = SshOpts
+ },
+
+ State = init_role(State0),
+
try init_ssh(Role, NumVsn, StrVsn, SshOpts, Socket) of
Ssh ->
- {ok, hello, #state{ssh_params =
- Ssh#ssh{send_sequence = 0, recv_sequence = 0},
- socket = Socket,
- decoded_data_buffer = <<>>,
- encoded_data_buffer = <<>>,
- transport_protocol = Protocol,
- transport_cb = Callback,
- transport_close_tag = CloseTag,
- manager = Manager,
- opts = SshOpts
- }}
+ gen_fsm:enter_loop(?MODULE, [], hello,
+ State#state{ssh_params = Ssh})
catch
- exit:Reason ->
- {stop, {shutdown, Reason}}
+ _:Error ->
+ gen_fsm:enter_loop(?MODULE, [], error, {Error, State0})
+ end.
+
+%%--------------------------------------------------------------------
+-spec open_channel(pid(), string(), iodata(), integer(), integer(),
+ timeout()) -> {open, channel_id()} | {open_error, term(), string(), string()}.
+%%--------------------------------------------------------------------
+open_channel(ConnectionHandler, ChannelType, ChannelSpecificData,
+ InitialWindowSize,
+ MaxPacketSize, Timeout) ->
+ sync_send_all_state_event(ConnectionHandler, {open, self(), ChannelType,
+ InitialWindowSize, MaxPacketSize,
+ ChannelSpecificData,
+ Timeout}).
+%%--------------------------------------------------------------------
+-spec request(pid(), pid(), channel_id(), string(), boolean(), iodata(),
+ timeout()) -> success | failure | ok | {error, term()}.
+%%--------------------------------------------------------------------
+request(ConnectionHandler, ChannelPid, ChannelId, Type, true, Data, Timeout) ->
+ sync_send_all_state_event(ConnectionHandler, {request, ChannelPid, ChannelId, Type, Data,
+ Timeout});
+request(ConnectionHandler, ChannelPid, ChannelId, Type, false, Data, _) ->
+ send_all_state_event(ConnectionHandler, {request, ChannelPid, ChannelId, Type, Data}).
+
+%%--------------------------------------------------------------------
+-spec request(pid(), channel_id(), string(), boolean(), iodata(),
+ timeout()) -> success | failure | {error, timeout}.
+%%--------------------------------------------------------------------
+request(ConnectionHandler, ChannelId, Type, true, Data, Timeout) ->
+ sync_send_all_state_event(ConnectionHandler, {request, ChannelId, Type, Data, Timeout});
+request(ConnectionHandler, ChannelId, Type, false, Data, _) ->
+ send_all_state_event(ConnectionHandler, {request, ChannelId, Type, Data}).
+
+%%--------------------------------------------------------------------
+-spec reply_request(pid(), success | failure, channel_id()) -> ok.
+%%--------------------------------------------------------------------
+reply_request(ConnectionHandler, Status, ChannelId) ->
+ send_all_state_event(ConnectionHandler, {reply_request, Status, ChannelId}).
+
+%%--------------------------------------------------------------------
+-spec global_request(pid(), string(), boolean(), iolist()) -> ok | error.
+%%--------------------------------------------------------------------
+global_request(ConnectionHandler, Type, true = Reply, Data) ->
+ case sync_send_all_state_event(ConnectionHandler,
+ {global_request, self(), Type, Reply, Data}) of
+ {ssh_cm, ConnectionHandler, {success, _}} ->
+ ok;
+ {ssh_cm, ConnectionHandler, {failure, _}} ->
+ error
+ end;
+global_request(ConnectionHandler, Type, false = Reply, Data) ->
+ send_all_state_event(ConnectionHandler, {global_request, self(), Type, Reply, Data}).
+
+%%--------------------------------------------------------------------
+-spec send(pid(), channel_id(), integer(), iolist(), timeout()) ->
+ ok | {error, timeout} | {error, closed}.
+%%--------------------------------------------------------------------
+send(ConnectionHandler, ChannelId, Type, Data, Timeout) ->
+ sync_send_all_state_event(ConnectionHandler, {data, ChannelId, Type, Data, Timeout}).
+
+%%--------------------------------------------------------------------
+-spec send_eof(pid(), channel_id()) -> ok | {error, closed}.
+%%--------------------------------------------------------------------
+send_eof(ConnectionHandler, ChannelId) ->
+ sync_send_all_state_event(ConnectionHandler, {eof, ChannelId}).
+
+%%--------------------------------------------------------------------
+-spec connection_info(pid(), [atom()]) -> proplists:proplist().
+%%--------------------------------------------------------------------
+connection_info(ConnectionHandler, Options) ->
+ sync_send_all_state_event(ConnectionHandler, {connection_info, Options}).
+
+%%--------------------------------------------------------------------
+-spec channel_info(pid(), channel_id(), [atom()]) -> proplists:proplist().
+%%--------------------------------------------------------------------
+channel_info(ConnectionHandler, ChannelId, Options) ->
+ sync_send_all_state_event(ConnectionHandler, {channel_info, ChannelId, Options}).
+
+%%--------------------------------------------------------------------
+-spec adjust_window(pid(), channel_id(), integer()) -> ok.
+%%--------------------------------------------------------------------
+adjust_window(ConnectionHandler, Channel, Bytes) ->
+ send_all_state_event(ConnectionHandler, {adjust_window, Channel, Bytes}).
+%%--------------------------------------------------------------------
+-spec renegotiate(pid()) -> ok.
+%%--------------------------------------------------------------------
+renegotiate(ConnectionHandler) ->
+ send_all_state_event(ConnectionHandler, renegotiate).
+
+%%--------------------------------------------------------------------
+-spec renegotiate_data(pid()) -> ok.
+%%--------------------------------------------------------------------
+renegotiate_data(ConnectionHandler) ->
+ send_all_state_event(ConnectionHandler, data_size).
+
+%%--------------------------------------------------------------------
+-spec close(pid(), channel_id()) -> ok.
+%%--------------------------------------------------------------------
+close(ConnectionHandler, ChannelId) ->
+ sync_send_all_state_event(ConnectionHandler, {close, ChannelId}).
+
+%%--------------------------------------------------------------------
+-spec stop(pid()) -> ok | {error, term()}.
+%%--------------------------------------------------------------------
+stop(ConnectionHandler)->
+ case sync_send_all_state_event(ConnectionHandler, stop) of
+ {error, closed} ->
+ ok;
+ Other ->
+ Other
end.
+
+info(ConnectionHandler) ->
+ info(ConnectionHandler, {info, all}).
+
+info(ConnectionHandler, ChannelProcess) ->
+ sync_send_all_state_event(ConnectionHandler, {info, ChannelProcess}).
+
+
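%% Editorial sketch (illustrative, not part of this patch): how a channel
%% process might drive the API above. ConnectionHandler is assumed to be the
%% pid returned by handshake/3; the channel type, window and packet sizes are
%% arbitrary example values, and error returns are ignored for brevity.
example_session(ConnectionHandler) ->
    {open, ChannelId} =
        open_channel(ConnectionHandler, "session", <<>>, 655360, 65536, 5000),
    ok = send(ConnectionHandler, ChannelId, 0, [<<"example data">>], 5000),
    ok = send_eof(ConnectionHandler, ChannelId),
    close(ConnectionHandler, ChannelId).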
+%%====================================================================
+%% gen_fsm callbacks
+%%====================================================================
+
%%--------------------------------------------------------------------
-%% Function:
-%% state_name(Event, State) -> {next_state, NextStateName, NextState}|
-%% {next_state, NextStateName,
-%% NextState, Timeout} |
-%% {stop, Reason, NewState}
-%% Description:There should be one instance of this function for each possible
-%% state name. Whenever a gen_fsm receives an event sent using
-%% gen_fsm:send_event/2, the instance of this function with the same name as
-%% the current state name StateName is called to handle the event. It is also
-%% called if a timeout occurs.
+-spec hello(socket_control | {info_line, list()} | {version_exchange, list()},
+ #state{}) -> gen_fsm_state_return().
%%--------------------------------------------------------------------
+
hello(socket_control, #state{socket = Socket, ssh_params = Ssh} = State) ->
VsnMsg = ssh_transport:hello_version_msg(string_version(Ssh)),
send_msg(VsnMsg, State),
- inet:setopts(Socket, [{packet, line}]),
- {next_state, hello, next_packet(State)};
+ inet:setopts(Socket, [{packet, line}, {active, once}]),
+ {next_state, hello, State};
-hello({info_line, _Line}, State) ->
- {next_state, hello, next_packet(State)};
+hello({info_line, _Line},#state{socket = Socket} = State) ->
+ inet:setopts(Socket, [{active, once}]),
+ {next_state, hello, State};
hello({version_exchange, Version}, #state{ssh_params = Ssh0,
socket = Socket} = State) ->
{NumVsn, StrVsn} = ssh_transport:handle_hello_version(Version),
case handle_version(NumVsn, StrVsn, Ssh0) of
{ok, Ssh1} ->
- inet:setopts(Socket, [{packet,0}, {mode,binary}]),
+ inet:setopts(Socket, [{packet,0}, {mode,binary}, {active, once}]),
{KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(Ssh1),
send_msg(SshPacket, State),
{next_state, kexinit, next_packet(State#state{ssh_params = Ssh,
@@ -173,12 +311,15 @@ hello({version_exchange, Version}, #state{ssh_params = Ssh0,
handle_disconnect(DisconnectMsg, State)
end.
+%%--------------------------------------------------------------------
+-spec kexinit({#ssh_msg_kexinit{}, binary()}, #state{}) -> gen_fsm_state_return().
+%%--------------------------------------------------------------------
kexinit({#ssh_msg_kexinit{} = Kex, Payload},
#state{ssh_params = #ssh{role = Role} = Ssh0,
- key_exchange_init_msg = OwnKex} =
- State) ->
+ key_exchange_init_msg = OwnKex} =
+ State) ->
Ssh1 = ssh_transport:key_init(opposite_role(Role), Ssh0, Payload),
- try ssh_transport:handle_kexinit_msg(Kex, OwnKex, Ssh1) of
+ case ssh_transport:handle_kexinit_msg(Kex, OwnKex, Ssh1) of
{ok, NextKexMsg, Ssh} when Role == client ->
send_msg(NextKexMsg, State),
{next_state, key_exchange,
@@ -186,157 +327,75 @@ kexinit({#ssh_msg_kexinit{} = Kex, Payload},
{ok, Ssh} when Role == server ->
{next_state, key_exchange,
next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_KEY_EXCHANGE_FAILED,
- description = Desc,
- language = "en"}, State)
end.
-
+
+%%--------------------------------------------------------------------
+-spec key_exchange(#ssh_msg_kexdh_init{} | #ssh_msg_kexdh_reply{} |
+		    #ssh_msg_kex_dh_gex_group{} | #ssh_msg_kex_dh_gex_request{} |
+		    #ssh_msg_kex_dh_gex_reply{}, #state{})
+ -> gen_fsm_state_return().
+%%--------------------------------------------------------------------
+
key_exchange(#ssh_msg_kexdh_init{} = Msg,
- #state{ssh_params = #ssh{role = server} =Ssh0} = State) ->
- try ssh_transport:handle_kexdh_init(Msg, Ssh0) of
+ #state{ssh_params = #ssh{role = server} = Ssh0} = State) ->
+ case ssh_transport:handle_kexdh_init(Msg, Ssh0) of
{ok, KexdhReply, Ssh1} ->
send_msg(KexdhReply, State),
{ok, NewKeys, Ssh} = ssh_transport:new_keys_message(Ssh1),
send_msg(NewKeys, State),
{next_state, new_keys, next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_KEY_EXCHANGE_FAILED,
- description = Desc,
- language = "en"}, State)
end;
-key_exchange({#ssh_msg_kexinit{} = Kex, Payload},
- #state{ssh_params = #ssh{role = Role} = Ssh0,
- key_exchange_init_msg = OwnKex} =
- State) ->
- Ssh1 = ssh_transport:key_init(opposite_role(Role), Ssh0, Payload),
- try ssh_transport:handle_kexinit_msg(Kex, OwnKex, Ssh1) of
- {ok, NextKexMsg, Ssh} when Role == client ->
- send_msg(NextKexMsg, State),
- {next_state, key_exchange,
- next_packet(State#state{ssh_params = Ssh})};
- {ok, Ssh} when Role == server ->
- {next_state, key_exchange,
- next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_KEY_EXCHANGE_FAILED,
- description = Desc,
- language = "en"}, State)
- end;
-
key_exchange(#ssh_msg_kexdh_reply{} = Msg,
#state{ssh_params = #ssh{role = client} = Ssh0} = State) ->
- try ssh_transport:handle_kexdh_reply(Msg, Ssh0) of
- {ok, NewKeys, Ssh} ->
- send_msg(NewKeys, State),
- {next_state, new_keys, next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- {ErrorToDisplay, #ssh_msg_disconnect{} = DisconnectMsg} ->
- handle_disconnect(DisconnectMsg, State, ErrorToDisplay);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_KEY_EXCHANGE_FAILED,
- description = Desc,
- language = "en"}, State)
- end;
+ {ok, NewKeys, Ssh} = ssh_transport:handle_kexdh_reply(Msg, Ssh0),
+ send_msg(NewKeys, State),
+ {next_state, new_keys, next_packet(State#state{ssh_params = Ssh})};
key_exchange(#ssh_msg_kex_dh_gex_group{} = Msg,
#state{ssh_params = #ssh{role = server} = Ssh0} = State) ->
- try ssh_transport:handle_kex_dh_gex_group(Msg, Ssh0) of
- {ok, NextKexMsg, Ssh1} ->
- send_msg(NextKexMsg, State),
- {ok, NewKeys, Ssh} = ssh_transport:new_keys_message(Ssh1),
- send_msg(NewKeys, State),
- {next_state, new_keys, next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_KEY_EXCHANGE_FAILED,
- description = Desc,
- language = "en"}, State)
- end;
+ {ok, NextKexMsg, Ssh1} = ssh_transport:handle_kex_dh_gex_group(Msg, Ssh0),
+ send_msg(NextKexMsg, State),
+ {ok, NewKeys, Ssh} = ssh_transport:new_keys_message(Ssh1),
+ send_msg(NewKeys, State),
+ {next_state, new_keys, next_packet(State#state{ssh_params = Ssh})};
key_exchange(#ssh_msg_kex_dh_gex_request{} = Msg,
#state{ssh_params = #ssh{role = client} = Ssh0} = State) ->
- try ssh_transport:handle_kex_dh_gex_request(Msg, Ssh0) of
- {ok, NextKexMsg, Ssh} ->
- send_msg(NextKexMsg, State),
- {next_state, new_keys, next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_KEY_EXCHANGE_FAILED,
- description = Desc,
- language = "en"}, State)
- end;
+ {ok, NextKexMsg, Ssh} = ssh_transport:handle_kex_dh_gex_request(Msg, Ssh0),
+ send_msg(NextKexMsg, State),
+ {next_state, new_keys, next_packet(State#state{ssh_params = Ssh})};
+
key_exchange(#ssh_msg_kex_dh_gex_reply{} = Msg,
#state{ssh_params = #ssh{role = client} = Ssh0} = State) ->
- try ssh_transport:handle_kex_dh_gex_reply(Msg, Ssh0) of
- {ok, NewKeys, Ssh} ->
- send_msg(NewKeys, State),
- {next_state, new_keys, next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_KEY_EXCHANGE_FAILED,
- description = Desc,
- language = "en"}, State)
- end.
+ {ok, NewKeys, Ssh} = ssh_transport:handle_kex_dh_gex_reply(Msg, Ssh0),
+ send_msg(NewKeys, State),
+ {next_state, new_keys, next_packet(State#state{ssh_params = Ssh})}.
+
+%%--------------------------------------------------------------------
+-spec new_keys(#ssh_msg_newkeys{}, #state{}) -> gen_fsm_state_return().
+%%--------------------------------------------------------------------
new_keys(#ssh_msg_newkeys{} = Msg, #state{ssh_params = Ssh0} = State0) ->
- try ssh_transport:handle_new_keys(Msg, Ssh0) of
- {ok, Ssh} ->
- {NextStateName, State} =
- after_new_keys(State0#state{ssh_params = Ssh}),
- {next_state, NextStateName, next_packet(State)}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State0);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_KEY_EXCHANGE_FAILED,
- description = Desc,
- language = "en"}, State0)
- end.
+ {ok, Ssh} = ssh_transport:handle_new_keys(Msg, Ssh0),
+ {NextStateName, State} =
+ after_new_keys(State0#state{ssh_params = Ssh}),
+ {next_state, NextStateName, next_packet(State)}.
+
+%%--------------------------------------------------------------------
+-spec userauth(#ssh_msg_service_request{} | #ssh_msg_service_accept{} |
+ #ssh_msg_userauth_request{} | #ssh_msg_userauth_info_request{} |
+ #ssh_msg_userauth_info_response{} | #ssh_msg_userauth_success{} |
+ #ssh_msg_userauth_failure{} | #ssh_msg_userauth_banner{},
+ #state{}) -> gen_fsm_state_return().
+%%--------------------------------------------------------------------
userauth(#ssh_msg_service_request{name = "ssh-userauth"} = Msg,
#state{ssh_params = #ssh{role = server,
session_id = SessionId} = Ssh0} = State) ->
- ssh_bits:install_messages(ssh_auth:userauth_messages()),
- try ssh_auth:handle_userauth_request(Msg, SessionId, Ssh0) of
- {ok, {Reply, Ssh}} ->
- send_msg(Reply, State),
- {next_state, userauth, next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_SERVICE_NOT_AVAILABLE,
- description = Desc,
- language = "en"}, State)
- end;
+ {ok, {Reply, Ssh}} = ssh_auth:handle_userauth_request(Msg, SessionId, Ssh0),
+ send_msg(Reply, State),
+ {next_state, userauth, next_packet(State#state{ssh_params = Ssh})};
userauth(#ssh_msg_service_accept{name = "ssh-userauth"},
#state{ssh_params = #ssh{role = client,
@@ -344,93 +403,55 @@ userauth(#ssh_msg_service_accept{name = "ssh-userauth"},
State) ->
{Msg, Ssh} = ssh_auth:init_userauth_request_msg(Ssh0),
send_msg(Msg, State),
- {next_state, userauth, next_packet(State#state{ssh_params = Ssh})};
+ {next_state, userauth, next_packet(State#state{auth_user = Ssh#ssh.user, ssh_params = Ssh})};
userauth(#ssh_msg_userauth_request{service = "ssh-connection",
method = "none"} = Msg,
#state{ssh_params = #ssh{session_id = SessionId, role = server,
service = "ssh-connection"} = Ssh0
} = State) ->
- try ssh_auth:handle_userauth_request(Msg, SessionId, Ssh0) of
- {not_authorized, {_User, _Reason}, {Reply, Ssh}} ->
- send_msg(Reply, State),
- {next_state, userauth, next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_SERVICE_NOT_AVAILABLE,
- description = Desc,
- language = "en"}, State)
- end;
+ {not_authorized, {_User, _Reason}, {Reply, Ssh}} =
+ ssh_auth:handle_userauth_request(Msg, SessionId, Ssh0),
+ send_msg(Reply, State),
+ {next_state, userauth, next_packet(State#state{ssh_params = Ssh})};
userauth(#ssh_msg_userauth_request{service = "ssh-connection",
method = Method} = Msg,
#state{ssh_params = #ssh{session_id = SessionId, role = server,
service = "ssh-connection",
peer = {_, Address}} = Ssh0,
- opts = Opts, manager = Pid} = State) ->
- try ssh_auth:handle_userauth_request(Msg, SessionId, Ssh0) of
+ opts = Opts, starter = Pid} = State) ->
+ case ssh_auth:handle_userauth_request(Msg, SessionId, Ssh0) of
{authorized, User, {Reply, Ssh}} ->
send_msg(Reply, State),
- ssh_userreg:register_user(User, Pid),
Pid ! ssh_connected,
connected_fun(User, Address, Method, Opts),
{next_state, connected,
- next_packet(State#state{ssh_params = Ssh})};
+ next_packet(State#state{auth_user = User, ssh_params = Ssh})};
{not_authorized, {User, Reason}, {Reply, Ssh}} ->
- retry_fun(User, Reason, Opts),
+ retry_fun(User, Address, Reason, Opts),
send_msg(Reply, State),
{next_state, userauth, next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_SERVICE_NOT_AVAILABLE,
- description = Desc,
- language = "en"}, State)
end;
userauth(#ssh_msg_userauth_info_request{} = Msg,
#state{ssh_params = #ssh{role = client,
io_cb = IoCb} = Ssh0} = State) ->
- try ssh_auth:handle_userauth_info_request(Msg, IoCb, Ssh0) of
- {ok, {Reply, Ssh}} ->
- send_msg(Reply, State),
- {next_state, userauth, next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_SERVICE_NOT_AVAILABLE,
- description = Desc,
- language = "en"}, State)
- end;
+ {ok, {Reply, Ssh}} = ssh_auth:handle_userauth_info_request(Msg, IoCb, Ssh0),
+ send_msg(Reply, State),
+ {next_state, userauth, next_packet(State#state{ssh_params = Ssh})};
userauth(#ssh_msg_userauth_info_response{} = Msg,
#state{ssh_params = #ssh{role = server} = Ssh0} = State) ->
- try ssh_auth:handle_userauth_info_response(Msg, Ssh0) of
- {ok, {Reply, Ssh}} ->
- send_msg(Reply, State),
- {next_state, userauth, next_packet(State#state{ssh_params = Ssh})}
- catch
- #ssh_msg_disconnect{} = DisconnectMsg ->
- handle_disconnect(DisconnectMsg, State);
- _:Error ->
- Desc = log_error(Error),
- handle_disconnect(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_SERVICE_NOT_AVAILABLE,
- description = Desc,
- language = "en"}, State)
- end;
+ {ok, {Reply, Ssh}} = ssh_auth:handle_userauth_info_response(Msg, Ssh0),
+ send_msg(Reply, State),
+ {next_state, userauth, next_packet(State#state{ssh_params = Ssh})};
userauth(#ssh_msg_userauth_success{}, #state{ssh_params = #ssh{role = client} = Ssh,
- manager = Pid} = State) ->
+ starter = Pid} = State) ->
Pid ! ssh_connected,
- {next_state, connected, next_packet(State#state{ssh_params = Ssh#ssh{authenticated = true}})};
-
+ {next_state, connected, next_packet(State#state{ssh_params =
+ Ssh#ssh{authenticated = true}})};
userauth(#ssh_msg_userauth_failure{},
#state{ssh_params = #ssh{role = client,
userauth_methods = []}}
@@ -479,31 +500,28 @@ userauth(#ssh_msg_userauth_banner{message = Msg},
io:format("~s", [Msg]),
{next_state, userauth, next_packet(State)}.
+%%--------------------------------------------------------------------
+-spec connected({#ssh_msg_kexinit{}, binary()}, %% | #ssh_msg_kexdh_init{},
+ #state{}) -> gen_fsm_state_return().
+%%--------------------------------------------------------------------
connected({#ssh_msg_kexinit{}, _Payload} = Event, State) ->
- kexinit(Event, State#state{renegotiate = true});
-connected({#ssh_msg_kexdh_init{}, _Payload} = Event, State) ->
- key_exchange(Event, State#state{renegotiate = true}).
+ kexinit(Event, State#state{renegotiate = true}).
+%% ;
+%% connected(#ssh_msg_kexdh_init{} = Event, State) ->
+%% key_exchange(Event, State#state{renegotiate = true}).
%%--------------------------------------------------------------------
-%% Function:
-%% handle_event(Event, StateName, State) -> {next_state, NextStateName,
-%% NextState} |
-%% {next_state, NextStateName,
-%% NextState, Timeout} |
-%% {stop, Reason, NewState}
-%% Description: Whenever a gen_fsm receives an event sent using
-%% gen_fsm:send_all_state_event/2, this function is called to handle
-%% the event.
-%%--------------------------------------------------------------------
-handle_event({send, Data}, StateName, #state{ssh_params = Ssh0} = State) ->
- {Packet, Ssh} = ssh_transport:pack(Data, Ssh0),
- send_msg(Packet, State),
- {next_state, StateName, next_packet(State#state{ssh_params = Ssh})};
+-spec handle_event(#ssh_msg_disconnect{} | #ssh_msg_ignore{} | #ssh_msg_debug{} |
+ #ssh_msg_unimplemented{} | {adjust_window, integer(), integer()} |
+ {reply_request, success | failure, integer()} | renegotiate |
+ data_size | {request, pid(), integer(), integer(), iolist()} |
+ {request, integer(), integer(), iolist()}, state_name(),
+ #state{}) -> gen_fsm_state_return().
-handle_event(#ssh_msg_disconnect{} = Msg, _StateName,
- #state{manager = Pid} = State) ->
- (catch ssh_connection_manager:event(Pid, Msg)),
- {stop, normal, State};
+%%--------------------------------------------------------------------
+handle_event(#ssh_msg_disconnect{description = Desc} = DisconnectMsg, _StateName, #state{} = State) ->
+ handle_disconnect(DisconnectMsg, State),
+ {stop, {shutdown, Desc}, State};
handle_event(#ssh_msg_ignore{}, StateName, State) ->
{next_state, StateName, next_packet(State)};
@@ -519,30 +537,58 @@ handle_event(#ssh_msg_debug{}, StateName, State) ->
handle_event(#ssh_msg_unimplemented{}, StateName, State) ->
{next_state, StateName, next_packet(State)};
+handle_event({adjust_window, ChannelId, Bytes}, StateName,
+ #state{connection_state =
+ #connection{channel_cache = Cache}} = State0) ->
+ State =
+ case ssh_channel:cache_lookup(Cache, ChannelId) of
+ #channel{recv_window_size = WinSize, remote_id = Id} = Channel ->
+ ssh_channel:cache_update(Cache, Channel#channel{recv_window_size =
+ WinSize + Bytes}),
+ Msg = ssh_connection:channel_adjust_window_msg(Id, Bytes),
+ send_replies([{connection_reply, Msg}], State0);
+ undefined ->
+ State0
+ end,
+ {next_state, StateName, next_packet(State)};
+
+handle_event({reply_request, success, ChannelId}, StateName,
+ #state{connection_state =
+ #connection{channel_cache = Cache}} = State0) ->
+ State = case ssh_channel:cache_lookup(Cache, ChannelId) of
+ #channel{remote_id = RemoteId} ->
+ Msg = ssh_connection:channel_success_msg(RemoteId),
+ send_replies([{connection_reply, Msg}], State0);
+ undefined ->
+ State0
+ end,
+ {next_state, StateName, State};
+
handle_event(renegotiate, connected, #state{ssh_params = Ssh0}
= State) ->
{KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(Ssh0),
send_msg(SshPacket, State),
- {next_state, connected,
+ timer:apply_after(?REKEY_TIMOUT, gen_fsm, send_all_state_event, [self(), renegotiate]),
+ {next_state, kexinit,
next_packet(State#state{ssh_params = Ssh,
key_exchange_init_msg = KeyInitMsg,
renegotiate = true})};
handle_event(renegotiate, StateName, State) ->
+    timer:apply_after(?REKEY_TIMOUT, gen_fsm, send_all_state_event, [self(), renegotiate]),
    %% Already in key exchange, so ignore
{next_state, StateName, State};
-handle_event({info, From, Options}, StateName, #state{ssh_params = Ssh} = State) ->
- spawn(?MODULE, ssh_info_handler, [Options, Ssh, From]),
- {next_state, StateName, State};
+%% Rekey due to sent data limit reached?
handle_event(data_size, connected, #state{ssh_params = Ssh0} = State) ->
{ok, [{send_oct,Sent}]} = inet:getstat(State#state.socket, [send_oct]),
MaxSent = proplists:get_value(rekey_limit, State#state.opts, 1024000000),
+ timer:apply_after(?REKEY_DATA_TIMOUT, gen_fsm, send_all_state_event, [self(), data_size]),
case Sent >= MaxSent of
true ->
{KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(Ssh0),
send_msg(SshPacket, State),
- {next_state, connected,
+ {next_state, kexinit,
next_packet(State#state{ssh_params = Ssh,
key_exchange_init_msg = KeyInitMsg,
renegotiate = true})};
@@ -551,42 +597,196 @@ handle_event(data_size, connected, #state{ssh_params = Ssh0} = State) ->
end;
handle_event(data_size, StateName, State) ->
{next_state, StateName, State};
+
+handle_event({request, ChannelPid, ChannelId, Type, Data}, StateName, State0) ->
+ {{replies, Replies}, State1} = handle_request(ChannelPid, ChannelId,
+ Type, Data,
+ false, none, State0),
+ State = send_replies(Replies, State1),
+ {next_state, StateName, next_packet(State)};
+
+handle_event({request, ChannelId, Type, Data}, StateName, State0) ->
+ {{replies, Replies}, State1} = handle_request(ChannelId, Type, Data,
+ false, none, State0),
+ State = send_replies(Replies, State1),
+ {next_state, StateName, next_packet(State)};
+
handle_event({unknown, Data}, StateName, State) ->
Msg = #ssh_msg_unimplemented{sequence = Data},
send_msg(Msg, State),
{next_state, StateName, next_packet(State)}.
+
%%--------------------------------------------------------------------
-%% Function:
-%% handle_sync_event(Event, From, StateName,
-%% State) -> {next_state, NextStateName, NextState} |
-%% {next_state, NextStateName, NextState,
-%% Timeout} |
-%% {reply, Reply, NextStateName, NextState}|
-%% {reply, Reply, NextStateName, NextState,
-%% Timeout} |
-%% {stop, Reason, NewState} |
-%% {stop, Reason, Reply, NewState}
-%% Description: Whenever a gen_fsm receives an event sent using
-%% gen_fsm:sync_send_all_state_event/2,3, this function is called to handle
-%% the event.
+-spec handle_sync_event({request, pid(), channel_id(), integer(), binary(), timeout()} |
+ {request, channel_id(), integer(), binary(), timeout()} |
+ {global_request, pid(), integer(), boolean(), binary()} | {eof, integer()} |
+ {open, pid(), integer(), channel_id(), integer(), binary(), _} |
+ {send_window, channel_id()} | {recv_window, channel_id()} |
+ {connection_info, [client_version | server_version | peer |
+ sockname]} | {channel_info, channel_id(), [recv_window |
+ send_window]} |
+ {close, channel_id()} | stop, term(), state_name(), #state{})
+ -> gen_fsm_state_return().
%%--------------------------------------------------------------------
+handle_sync_event({request, ChannelPid, ChannelId, Type, Data, Timeout}, From, StateName, State0) ->
+ {{replies, Replies}, State1} = handle_request(ChannelPid,
+ ChannelId, Type, Data,
+ true, From, State0),
+    %% Note: the reply to the channel will happen later, when
+    %% the reply is received from the peer on the socket
+ State = send_replies(Replies, State1),
+ start_timeout(ChannelId, From, Timeout),
+ handle_idle_timeout(State),
+ {next_state, StateName, next_packet(State)};
+
+handle_sync_event({request, ChannelId, Type, Data, Timeout}, From, StateName, State0) ->
+ {{replies, Replies}, State1} = handle_request(ChannelId, Type, Data,
+ true, From, State0),
+    %% Note: the reply to the channel will happen later, when
+    %% the reply is received from the peer on the socket
+ State = send_replies(Replies, State1),
+ start_timeout(ChannelId, From, Timeout),
+ handle_idle_timeout(State),
+ {next_state, StateName, next_packet(State)};
+
+handle_sync_event({global_request, Pid, _, _, _} = Request, From, StateName,
+ #state{connection_state =
+ #connection{channel_cache = Cache}} = State0) ->
+ State1 = handle_global_request(Request, State0),
+ Channel = ssh_channel:cache_find(Pid, Cache),
+ State = add_request(true, Channel#channel.local_id, From, State1),
+ {next_state, StateName, next_packet(State)};
+
+handle_sync_event({data, ChannelId, Type, Data, Timeout}, From, StateName,
+ #state{connection_state = #connection{channel_cache = _Cache}
+ = Connection0} = State0) ->
+
+ case ssh_connection:channel_data(ChannelId, Type, Data, Connection0, From) of
+ {{replies, Replies}, Connection} ->
+ State = send_replies(Replies, State0#state{connection_state = Connection}),
+ start_timeout(ChannelId, From, Timeout),
+ {next_state, StateName, next_packet(State)};
+ {noreply, Connection} ->
+ start_timeout(ChannelId, From, Timeout),
+ {next_state, StateName, next_packet(State0#state{connection_state = Connection})}
+ end;
+
+handle_sync_event({eof, ChannelId}, _From, StateName,
+ #state{connection_state =
+ #connection{channel_cache = Cache}} = State0) ->
+ case ssh_channel:cache_lookup(Cache, ChannelId) of
+ #channel{remote_id = Id, sent_close = false} ->
+ State = send_replies([{connection_reply,
+ ssh_connection:channel_eof_msg(Id)}], State0),
+ {reply, ok, StateName, next_packet(State)};
+ _ ->
+ {reply, {error,closed}, StateName, State0}
+ end;
-%% Replaced with option to connection_info/3. For now keep
-%% for backwards compatibility
-handle_sync_event(peer_address, _From, StateName,
- #state{ssh_params = #ssh{peer = {_, Address}}} = State) ->
- {reply, {ok, Address}, StateName, State}.
+handle_sync_event({open, ChannelPid, Type, InitialWindowSize, MaxPacketSize, Data, Timeout},
+ From, StateName, #state{connection_state =
+ #connection{channel_cache = Cache}} = State0) ->
+ erlang:monitor(process, ChannelPid),
+ {ChannelId, State1} = new_channel_id(State0),
+ Msg = ssh_connection:channel_open_msg(Type, ChannelId,
+ InitialWindowSize,
+ MaxPacketSize, Data),
+ State2 = send_replies([{connection_reply, Msg}], State1),
+ Channel = #channel{type = Type,
+ sys = "none",
+ user = ChannelPid,
+ local_id = ChannelId,
+ recv_window_size = InitialWindowSize,
+ recv_packet_size = MaxPacketSize},
+ ssh_channel:cache_update(Cache, Channel),
+ State = add_request(true, ChannelId, From, State2),
+ start_timeout(ChannelId, From, Timeout),
+ {next_state, StateName, next_packet(remove_timer_ref(State))};
+
+handle_sync_event({send_window, ChannelId}, _From, StateName,
+ #state{connection_state =
+ #connection{channel_cache = Cache}} = State) ->
+ Reply = case ssh_channel:cache_lookup(Cache, ChannelId) of
+ #channel{send_window_size = WinSize,
+ send_packet_size = Packsize} ->
+ {ok, {WinSize, Packsize}};
+ undefined ->
+ {error, einval}
+ end,
+ {reply, Reply, StateName, next_packet(State)};
+
+handle_sync_event({recv_window, ChannelId}, _From, StateName,
+ #state{connection_state = #connection{channel_cache = Cache}}
+ = State) ->
+
+ Reply = case ssh_channel:cache_lookup(Cache, ChannelId) of
+ #channel{recv_window_size = WinSize,
+ recv_packet_size = Packsize} ->
+ {ok, {WinSize, Packsize}};
+ undefined ->
+ {error, einval}
+ end,
+ {reply, Reply, StateName, next_packet(State)};
+
+handle_sync_event({connection_info, Options}, _From, StateName, State) ->
+ Info = ssh_info(Options, State, []),
+ {reply, Info, StateName, State};
+
+handle_sync_event({channel_info, ChannelId, Options}, _From, StateName,
+ #state{connection_state = #connection{channel_cache = Cache}} = State) ->
+ case ssh_channel:cache_lookup(Cache, ChannelId) of
+ #channel{} = Channel ->
+ Info = ssh_channel_info(Options, Channel, []),
+ {reply, Info, StateName, State};
+ undefined ->
+ {reply, [], StateName, State}
+ end;
+
+handle_sync_event({info, ChannelPid}, _From, StateName,
+ #state{connection_state =
+ #connection{channel_cache = Cache}} = State) ->
+ Result = ssh_channel:cache_foldl(
+ fun(Channel, Acc) when ChannelPid == all;
+ Channel#channel.user == ChannelPid ->
+ [Channel | Acc];
+ (_, Acc) ->
+ Acc
+ end, [], Cache),
+ {reply, {ok, Result}, StateName, State};
+
+handle_sync_event({close, ChannelId}, _, StateName,
+ #state{connection_state =
+ #connection{channel_cache = Cache}} = State0) ->
+ State =
+ case ssh_channel:cache_lookup(Cache, ChannelId) of
+ #channel{remote_id = Id} = Channel ->
+ State1 = send_replies([{connection_reply,
+ ssh_connection:channel_close_msg(Id)}], State0),
+ ssh_channel:cache_update(Cache, Channel#channel{sent_close = true}),
+ handle_idle_timeout(State1),
+ State1;
+ undefined ->
+ State0
+ end,
+ {reply, ok, StateName, next_packet(State)};
+
+handle_sync_event(stop, _, _StateName, #state{connection_state = Connection0,
+ role = Role,
+ opts = Opts} = State0) ->
+ {disconnect, Reason, {{replies, Replies}, Connection}} =
+ ssh_connection:handle_msg(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_BY_APPLICATION,
+ description = "User closed down connection",
+ language = "en"}, Connection0, Role),
+ State = send_replies(Replies, State0),
+ SSHOpts = proplists:get_value(ssh_opts, Opts),
+ disconnect_fun(Reason, SSHOpts),
+ {stop, normal, ok, State#state{connection_state = Connection}}.
%%--------------------------------------------------------------------
-%% Function:
-%% handle_info(Info,StateName,State)-> {next_state, NextStateName, NextState}|
-%% {next_state, NextStateName, NextState,
-%% Timeout} |
-%% {stop, Reason, NewState}
-%% Description: This function is called by a gen_fsm when it receives any
-%% other message than a synchronous or asynchronous event
-%% (or a system message).
+-spec handle_info({atom(), port(), binary()} | {atom(), port()} |
+		   term(), state_name(), #state{}) -> gen_fsm_state_return().
%%--------------------------------------------------------------------
+
handle_info({Protocol, Socket, "SSH-" ++ _ = Version}, hello,
#state{socket = Socket,
transport_protocol = Protocol} = State ) ->
@@ -651,15 +851,39 @@ handle_info({Protocol, Socket, Data}, Statename,
handle_info({CloseTag, _Socket}, _StateName,
#state{transport_close_tag = CloseTag,
ssh_params = #ssh{role = _Role, opts = _Opts}} = State) ->
- DisconnectMsg =
- #ssh_msg_disconnect{code = ?SSH_DISCONNECT_CONNECTION_LOST,
- description = "Connection Lost",
+ DisconnectMsg =
+ #ssh_msg_disconnect{code = ?SSH_DISCONNECT_BY_APPLICATION,
+ description = "Connection closed",
language = "en"},
- {stop, {shutdown, DisconnectMsg}, State};
+ handle_disconnect(DisconnectMsg, State);
+
+handle_info({timeout, {_, From} = Request}, Statename,
+ #state{connection_state = #connection{requests = Requests} = Connection} = State) ->
+ case lists:member(Request, Requests) of
+ true ->
+ gen_fsm:reply(From, {error, timeout}),
+ {next_state, Statename,
+ State#state{connection_state =
+ Connection#connection{requests =
+ lists:delete(Request, Requests)}}};
+ false ->
+ {next_state, Statename, State}
+ end;
+
+%%% Handle the case where an ssh channel's user process goes down
+handle_info({'DOWN', _Ref, process, ChannelPid, _Reason}, Statename, State0) ->
+ {{replies, Replies}, State1} = handle_channel_down(ChannelPid, State0),
+ State = send_replies(Replies, State1),
+ {next_state, Statename, next_packet(State)};
%%% So that terminate will be run when supervisor is shutdown
handle_info({'EXIT', _Sup, Reason}, _StateName, State) ->
- {stop, Reason, State};
+ {stop, {shutdown, Reason}, State};
+
+handle_info({check_cache, _ , _},
+ StateName, #state{connection_state =
+ #connection{channel_cache = Cache}} = State) ->
+ {next_state, StateName, check_cache(State, Cache)};
handle_info(UnexpectedMessage, StateName, #state{ssh_params = SshParams} = State) ->
Msg = lists:flatten(io_lib:format(
@@ -673,20 +897,16 @@ handle_info(UnexpectedMessage, StateName, #state{ssh_params = SshParams} = State
{next_state, StateName, State}.
%%--------------------------------------------------------------------
-%% Function: terminate(Reason, StateName, State) -> void()
-%% Description:This function is called by a gen_fsm when it is about
-%% to terminate. It should be the opposite of Module:init/1 and do any
-%% necessary cleaning up. When it returns, the gen_fsm terminates with
-%% Reason. The return value is ignored.
+-spec terminate(Reason::term(), state_name(), #state{}) -> _.
%%--------------------------------------------------------------------
terminate(normal, _, #state{transport_cb = Transport,
- socket = Socket,
- manager = Pid}) ->
- (catch ssh_userreg:delete_user(Pid)),
+ connection_state = Connection,
+ socket = Socket}) ->
+ terminate_subsytem(Connection),
(catch Transport:close(Socket)),
ok;
-%% Terminated as manager terminated
+%% Terminated by supervisor
terminate(shutdown, StateName, #state{ssh_params = Ssh0} = State) ->
DisconnectMsg =
#ssh_msg_disconnect{code = ?SSH_DISCONNECT_BY_APPLICATION,
@@ -696,31 +916,34 @@ terminate(shutdown, StateName, #state{ssh_params = Ssh0} = State) ->
send_msg(SshPacket, State),
terminate(normal, StateName, State#state{ssh_params = Ssh});
-terminate({shutdown, #ssh_msg_disconnect{} = Msg}, StateName, #state{ssh_params = Ssh0, manager = Pid} = State) ->
- {SshPacket, Ssh} = ssh_transport:ssh_packet(Msg, Ssh0),
- send_msg(SshPacket, State),
- ssh_connection_manager:event(Pid, Msg),
- terminate(normal, StateName, State#state{ssh_params = Ssh});
-terminate({shutdown, {#ssh_msg_disconnect{} = Msg, ErrorMsg}}, StateName, #state{ssh_params = Ssh0, manager = Pid} = State) ->
- {SshPacket, Ssh} = ssh_transport:ssh_packet(Msg, Ssh0),
+terminate({shutdown, #ssh_msg_disconnect{} = Msg}, StateName,
+ #state{ssh_params = Ssh0} = State) ->
+ {SshPacket, Ssh} = ssh_transport:ssh_packet(Msg, Ssh0),
send_msg(SshPacket, State),
- ssh_connection_manager:event(Pid, Msg, ErrorMsg),
- terminate(normal, StateName, State#state{ssh_params = Ssh});
-terminate(Reason, StateName, #state{ssh_params = Ssh0, manager = Pid} = State) ->
+ terminate(normal, StateName, State#state{ssh_params = Ssh});
+terminate({shutdown, _}, StateName, State) ->
+ terminate(normal, StateName, State);
+terminate(Reason, StateName, #state{ssh_params = Ssh0, starter = _Pid,
+ connection_state = Connection} = State) ->
+ terminate_subsytem(Connection),
log_error(Reason),
DisconnectMsg =
#ssh_msg_disconnect{code = ?SSH_DISCONNECT_BY_APPLICATION,
description = "Internal error",
language = "en"},
{SshPacket, Ssh} = ssh_transport:ssh_packet(DisconnectMsg, Ssh0),
- ssh_connection_manager:event(Pid, DisconnectMsg),
send_msg(SshPacket, State),
terminate(normal, StateName, State#state{ssh_params = Ssh}).
+terminate_subsytem(#connection{system_supervisor = SysSup,
+ sub_system_supervisor = SubSysSup}) when is_pid(SubSysSup) ->
+ ssh_system_sup:stop_subsystem(SysSup, SubSysSup);
+terminate_subsytem(_) ->
+ ok.
+
%%--------------------------------------------------------------------
-%% Function:
-%% code_change(OldVsn, StateName, State, Extra) -> {ok, StateName, NewState}
-%% Description: Convert process state when code is changed
+-spec code_change(OldVsn::term(), state_name(), Oldstate::term(), Extra::term()) ->
+ {ok, state_name(), #state{}}.
%%--------------------------------------------------------------------
code_change(_OldVsn, StateName, State, _Extra) ->
{ok, StateName, State}.
@@ -728,6 +951,39 @@ code_change(_OldVsn, StateName, State, _Extra) ->
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
+init_role(#state{role = client, opts = Opts} = State0) ->
+ Pid = proplists:get_value(user_pid, Opts),
+ TimerRef = get_idle_time(Opts),
+ timer:apply_after(?REKEY_TIMOUT, gen_fsm, send_all_state_event, [self(), renegotiate]),
+ timer:apply_after(?REKEY_DATA_TIMOUT, gen_fsm, send_all_state_event,
+ [self(), data_size]),
+ State0#state{starter = Pid,
+ idle_timer_ref = TimerRef};
+init_role(#state{role = server, opts = Opts, connection_state = Connection} = State) ->
+ Sups = proplists:get_value(supervisors, Opts),
+ Pid = proplists:get_value(user_pid, Opts),
+ SystemSup = proplists:get_value(system_sup, Sups),
+ SubSystemSup = proplists:get_value(subsystem_sup, Sups),
+ ConnectionSup = proplists:get_value(connection_sup, Sups),
+ Shell = proplists:get_value(shell, Opts),
+ Exec = proplists:get_value(exec, Opts),
+ CliSpec = proplists:get_value(ssh_cli, Opts, {ssh_cli, [Shell]}),
+ State#state{starter = Pid, connection_state = Connection#connection{
+ cli_spec = CliSpec,
+ exec = Exec,
+ system_supervisor = SystemSup,
+ sub_system_supervisor = SubSystemSup,
+ connection_supervisor = ConnectionSup
+ }}.
+
+get_idle_time(SshOptions) ->
+ case proplists:get_value(idle_time, SshOptions) of
+ infinity ->
+ infinity;
+	_IdleTime -> %% We don't want to set the timeout on the first connect
+ undefined
+ end.
+
init_ssh(client = Role, Vsn, Version, Options, Socket) ->
IOCb = case proplists:get_value(user_interaction, Options, true) of
true ->
@@ -845,7 +1101,15 @@ send_all_state_event(FsmPid, Event) ->
gen_fsm:send_all_state_event(FsmPid, Event).
sync_send_all_state_event(FsmPid, Event) ->
- gen_fsm:sync_send_all_state_event(FsmPid, Event).
+ try gen_fsm:sync_send_all_state_event(FsmPid, Event, infinity)
+ catch
+ exit:{noproc, _} ->
+ {error, closed};
+ exit:{normal, _} ->
+ {error, closed};
+ exit:{{shutdown, _},_} ->
+ {error, closed}
+ end.
%% simulate send_all_state_event(self(), Event)
event(#ssh_msg_disconnect{} = Event, StateName, State) ->
@@ -858,10 +1122,33 @@ event(#ssh_msg_unimplemented{} = Event, StateName, State) ->
handle_event(Event, StateName, State);
%% simulate send_event(self(), Event)
event(Event, StateName, State) ->
- ?MODULE:StateName(Event, State).
+ try
+ ?MODULE:StateName(Event, State)
+ catch
+ throw:#ssh_msg_disconnect{} = DisconnectMsg ->
+ handle_disconnect(DisconnectMsg, State);
+ throw:{ErrorToDisplay, #ssh_msg_disconnect{} = DisconnectMsg} ->
+ handle_disconnect(DisconnectMsg, State, ErrorToDisplay);
+ _:Error ->
+ log_error(Error),
+ handle_disconnect(#ssh_msg_disconnect{code = error_code(StateName),
+ description = "Internal error",
+ language = "en"}, State)
+ end.
+error_code(key_exchange) ->
+ ?SSH_DISCONNECT_KEY_EXCHANGE_FAILED;
+error_code(new_keys) ->
+ ?SSH_DISCONNECT_KEY_EXCHANGE_FAILED;
+error_code(_) ->
+ ?SSH_DISCONNECT_SERVICE_NOT_AVAILABLE.
generate_event(<<?BYTE(Byte), _/binary>> = Msg, StateName,
- #state{manager = Pid} = State0, EncData)
+ #state{
+ role = Role,
+ starter = User,
+ opts = Opts,
+ renegotiate = Renegotiation,
+ connection_state = Connection0} = State0, EncData)
when Byte == ?SSH_MSG_GLOBAL_REQUEST;
Byte == ?SSH_MSG_REQUEST_SUCCESS;
Byte == ?SSH_MSG_REQUEST_FAILURE;
@@ -876,18 +1163,40 @@ generate_event(<<?BYTE(Byte), _/binary>> = Msg, StateName,
Byte == ?SSH_MSG_CHANNEL_REQUEST;
Byte == ?SSH_MSG_CHANNEL_SUCCESS;
Byte == ?SSH_MSG_CHANNEL_FAILURE ->
-
- try
- ssh_connection_manager:event(Pid, Msg),
- State = generate_event_new_state(State0, EncData),
- next_packet(State),
- {next_state, StateName, State}
+ ConnectionMsg = ssh_message:decode(Msg),
+ State1 = generate_event_new_state(State0, EncData),
+ try ssh_connection:handle_msg(ConnectionMsg, Connection0, Role) of
+ {{replies, Replies}, Connection} ->
+ State = send_replies(Replies, State1#state{connection_state = Connection}),
+ {next_state, StateName, next_packet(State)};
+ {noreply, Connection} ->
+ {next_state, StateName, next_packet(State1#state{connection_state = Connection})};
+ {disconnect, {_, Reason}, {{replies, Replies}, Connection}} when
+ Role == client andalso ((StateName =/= connected) and (not Renegotiation)) ->
+ State = send_replies(Replies, State1#state{connection_state = Connection}),
+ User ! {self(), not_connected, Reason},
+ {stop, {shutdown, normal},
+ next_packet(State#state{connection_state = Connection})};
+ {disconnect, Reason, {{replies, Replies}, Connection}} ->
+ State = send_replies(Replies, State1#state{connection_state = Connection}),
+ SSHOpts = proplists:get_value(ssh_opts, Opts),
+ disconnect_fun(Reason, SSHOpts),
+ {stop, {shutdown, normal}, State#state{connection_state = Connection}}
catch
- exit:{noproc, Reason} ->
- {stop, {shutdown, Reason}, State0}
+ _:Error ->
+ {disconnect, Reason, {{replies, Replies}, Connection}} =
+ ssh_connection:handle_msg(
+ #ssh_msg_disconnect{code = ?SSH_DISCONNECT_BY_APPLICATION,
+ description = "Internal error",
+ language = "en"}, Connection0, Role),
+ State = send_replies(Replies, State1#state{connection_state = Connection}),
+ SSHOpts = proplists:get_value(ssh_opts, Opts),
+ disconnect_fun(Reason, SSHOpts),
+ {stop, {shutdown, Error}, State#state{connection_state = Connection}}
end;
+
generate_event(Msg, StateName, State0, EncData) ->
- Event = ssh_bits:decode(Msg),
+ Event = ssh_message:decode(Msg),
State = generate_event_new_state(State0, EncData),
case Event of
#ssh_msg_kexinit{} ->
@@ -897,6 +1206,100 @@ generate_event(Msg, StateName, State0, EncData) ->
event(Event, StateName, State)
end.
+
+handle_request(ChannelPid, ChannelId, Type, Data, WantReply, From,
+ #state{connection_state =
+ #connection{channel_cache = Cache}} = State0) ->
+ case ssh_channel:cache_lookup(Cache, ChannelId) of
+ #channel{remote_id = Id} = Channel ->
+ update_sys(Cache, Channel, Type, ChannelPid),
+ Msg = ssh_connection:channel_request_msg(Id, Type,
+ WantReply, Data),
+ Replies = [{connection_reply, Msg}],
+ State = add_request(WantReply, ChannelId, From, State0),
+ {{replies, Replies}, State};
+ undefined ->
+ {{replies, []}, State0}
+ end.
+
+handle_request(ChannelId, Type, Data, WantReply, From,
+ #state{connection_state =
+ #connection{channel_cache = Cache}} = State0) ->
+ case ssh_channel:cache_lookup(Cache, ChannelId) of
+ #channel{remote_id = Id} ->
+ Msg = ssh_connection:channel_request_msg(Id, Type,
+ WantReply, Data),
+ Replies = [{connection_reply, Msg}],
+ State = add_request(WantReply, ChannelId, From, State0),
+ {{replies, Replies}, State};
+ undefined ->
+ {{replies, []}, State0}
+ end.
+
+handle_global_request({global_request, ChannelPid,
+ "tcpip-forward" = Type, WantReply,
+ <<?UINT32(IPLen),
+ IP:IPLen/binary, ?UINT32(Port)>> = Data},
+ #state{connection_state =
+ #connection{channel_cache = Cache}
+ = Connection0} = State) ->
+ ssh_channel:cache_update(Cache, #channel{user = ChannelPid,
+ type = "forwarded-tcpip",
+ sys = none}),
+ Connection = ssh_connection:bind(IP, Port, ChannelPid, Connection0),
+ Msg = ssh_connection:global_request_msg(Type, WantReply, Data),
+ send_replies([{connection_reply, Msg}], State#state{connection_state = Connection});
+
+handle_global_request({global_request, _Pid, "cancel-tcpip-forward" = Type,
+ WantReply, <<?UINT32(IPLen),
+ IP:IPLen/binary, ?UINT32(Port)>> = Data},
+ #state{connection_state = Connection0} = State) ->
+ Connection = ssh_connection:unbind(IP, Port, Connection0),
+ Msg = ssh_connection:global_request_msg(Type, WantReply, Data),
+ send_replies([{connection_reply, Msg}], State#state{connection_state = Connection});
+
+handle_global_request({global_request, _, "cancel-tcpip-forward" = Type,
+ WantReply, Data}, State) ->
+ Msg = ssh_connection:global_request_msg(Type, WantReply, Data),
+ send_replies([{connection_reply, Msg}], State).
+
+handle_idle_timeout(#state{opts = Opts}) ->
+ case proplists:get_value(idle_time, Opts, infinity) of
+ infinity ->
+ ok;
+ IdleTime ->
+ erlang:send_after(IdleTime, self(), {check_cache, [], []})
+ end.
+
+handle_channel_down(ChannelPid, #state{connection_state =
+ #connection{channel_cache = Cache}} =
+ State) ->
+ ssh_channel:cache_foldl(
+ fun(Channel, Acc) when Channel#channel.user == ChannelPid ->
+ ssh_channel:cache_delete(Cache,
+ Channel#channel.local_id),
+ Acc;
+ (_,Acc) ->
+ Acc
+ end, [], Cache),
+ {{replies, []}, check_cache(State, Cache)}.
+
+update_sys(Cache, Channel, Type, ChannelPid) ->
+ ssh_channel:cache_update(Cache,
+ Channel#channel{sys = Type, user = ChannelPid}).
+add_request(false, _ChannelId, _From, State) ->
+ State;
+add_request(true, ChannelId, From, #state{connection_state =
+ #connection{requests = Requests0} =
+ Connection} = State) ->
+ Requests = [{ChannelId, From} | Requests0],
+ State#state{connection_state = Connection#connection{requests = Requests}}.
+
+new_channel_id(#state{connection_state = #connection{channel_id_seed = Id} =
+ Connection}
+ = State) ->
+ {Id, State#state{connection_state =
+ Connection#connection{channel_id_seed = Id + 1}}}.
generate_event_new_state(#state{ssh_params =
#ssh{recv_sequence = SeqNum0}
= Ssh} = State, EncData) ->
@@ -906,7 +1309,6 @@ generate_event_new_state(#state{ssh_params =
encoded_data_buffer = EncData,
undecoded_packet_length = undefined}.
-
next_packet(#state{decoded_data_buffer = <<>>,
encoded_data_buffer = Buff,
ssh_params = #ssh{decrypt_block_size = BlockSize},
@@ -931,7 +1333,6 @@ after_new_keys(#state{renegotiate = true} = State) ->
{connected, State#state{renegotiate = false}};
after_new_keys(#state{renegotiate = false,
ssh_params = #ssh{role = client} = Ssh0} = State) ->
- ssh_bits:install_messages(ssh_auth:userauth_messages()),
{Msg, Ssh} = ssh_auth:service_request_msg(Ssh0),
send_msg(Msg, State),
{userauth, State#state{ssh_params = Ssh}};
@@ -981,10 +1382,16 @@ handle_ssh_packet(Length, StateName, #state{decoded_data_buffer = DecData0,
handle_disconnect(DisconnectMsg, State0)
end.
-handle_disconnect(#ssh_msg_disconnect{} = Msg, State) ->
- {stop, {shutdown, Msg}, State}.
-handle_disconnect(#ssh_msg_disconnect{} = Msg, State, ErrorMsg) ->
- {stop, {shutdown, {Msg, ErrorMsg}}, State}.
+handle_disconnect(#ssh_msg_disconnect{description = Desc} = Msg, #state{connection_state = Connection0,
+ role = Role} = State0) ->
+ {disconnect, _, {{replies, Replies}, Connection}} = ssh_connection:handle_msg(Msg, Connection0, Role),
+ State = send_replies(Replies, State0),
+ {stop, {shutdown, Desc}, State#state{connection_state = Connection}}.
+handle_disconnect(#ssh_msg_disconnect{description = Desc} = Msg, #state{connection_state = Connection0,
+ role = Role} = State0, ErrorMsg) ->
+ {disconnect, _, {{replies, Replies}, Connection}} = ssh_connection:handle_msg(Msg, Connection0, Role),
+ State = send_replies(Replies, State0),
+ {stop, {shutdown, {Desc, ErrorMsg}}, State#state{connection_state = Connection}}.
counterpart_versions(NumVsn, StrVsn, #ssh{role = server} = Ssh) ->
Ssh#ssh{c_vsn = NumVsn , c_version = StrVsn};
@@ -1003,45 +1410,67 @@ connected_fun(User, PeerAddr, Method, Opts) ->
catch Fun(User, PeerAddr, Method)
end.
-retry_fun(_, undefined, _) ->
+retry_fun(_, _, undefined, _) ->
ok;
-retry_fun(User, {error, Reason}, Opts) ->
+retry_fun(User, PeerAddr, {error, Reason}, Opts) ->
case proplists:get_value(failfun, Opts) of
undefined ->
ok;
Fun ->
- catch Fun(User, Reason)
+ do_retry_fun(Fun, User, PeerAddr, Reason)
end;
-retry_fun(User, Reason, Opts) ->
+retry_fun(User, PeerAddr, Reason, Opts) ->
case proplists:get_value(infofun, Opts) of
undefined ->
ok;
- Fun ->
- catch Fun(User, Reason)
+ Fun ->
+ do_retry_fun(Fun, User, PeerAddr, Reason)
end.
-ssh_info_handler(Options, Ssh, From) ->
- Info = ssh_info(Options, Ssh, []),
- ssh_connection_manager:send_msg({channel_requst_reply, From, Info}).
+do_retry_fun(Fun, User, PeerAddr, Reason) ->
+ case erlang:fun_info(Fun, arity) of
+ {arity, 2} -> %% Backwards compatible
+ catch Fun(User, Reason);
+ {arity, 3} ->
+ catch Fun(User, PeerAddr, Reason)
+ end.
-ssh_info([], _, Acc) ->
+ssh_info([], _State, Acc) ->
+ Acc;
+ssh_info([client_version | Rest], #state{ssh_params = #ssh{c_vsn = IntVsn,
+ c_version = StringVsn}} = State, Acc) ->
+ ssh_info(Rest, State, [{client_version, {IntVsn, StringVsn}} | Acc]);
+
+ssh_info([server_version | Rest], #state{ssh_params =#ssh{s_vsn = IntVsn,
+ s_version = StringVsn}} = State, Acc) ->
+ ssh_info(Rest, State, [{server_version, {IntVsn, StringVsn}} | Acc]);
+ssh_info([peer | Rest], #state{ssh_params = #ssh{peer = Peer}} = State, Acc) ->
+ ssh_info(Rest, State, [{peer, Peer} | Acc]);
+ssh_info([sockname | Rest], #state{socket = Socket} = State, Acc) ->
+ {ok, SockName} = inet:sockname(Socket),
+ ssh_info(Rest, State, [{sockname, SockName}|Acc]);
+ssh_info([user | Rest], #state{auth_user = User} = State, Acc) ->
+ ssh_info(Rest, State, [{user, User}|Acc]);
+ssh_info([ _ | Rest], State, Acc) ->
+ ssh_info(Rest, State, Acc).
+
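%% Editorial sketch (illustrative, not part of this patch): the option atoms
%% understood by ssh_info/3 above, queried through connection_info/2 from this
%% module; any unrecognized atom is silently skipped.
example_connection_info(ConnectionHandler) ->
    connection_info(ConnectionHandler,
                    [client_version, server_version, peer, sockname, user]).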
+ssh_channel_info([], _, Acc) ->
Acc;
-ssh_info([client_version | Rest], #ssh{c_vsn = IntVsn,
- c_version = StringVsn} = SshParams, Acc) ->
- ssh_info(Rest, SshParams, [{client_version, {IntVsn, StringVsn}} | Acc]);
-
-ssh_info([server_version | Rest], #ssh{s_vsn = IntVsn,
- s_version = StringVsn} = SshParams, Acc) ->
- ssh_info(Rest, SshParams, [{server_version, {IntVsn, StringVsn}} | Acc]);
-
-ssh_info([peer | Rest], #ssh{peer = Peer} = SshParams, Acc) ->
- ssh_info(Rest, SshParams, [{peer, Peer} | Acc]);
-
-ssh_info([ _ | Rest], SshParams, Acc) ->
- ssh_info(Rest, SshParams, Acc).
+ssh_channel_info([recv_window | Rest], #channel{recv_window_size = WinSize,
+ recv_packet_size = Packsize
+ } = Channel, Acc) ->
+ ssh_channel_info(Rest, Channel, [{recv_window, {{win_size, WinSize},
+ {packet_size, Packsize}}} | Acc]);
+ssh_channel_info([send_window | Rest], #channel{send_window_size = WinSize,
+ send_packet_size = Packsize
+ } = Channel, Acc) ->
+ ssh_channel_info(Rest, Channel, [{send_window, {{win_size, WinSize},
+ {packet_size, Packsize}}} | Acc]);
+ssh_channel_info([ _ | Rest], Channel, Acc) ->
+ ssh_channel_info(Rest, Channel, Acc).
log_error(Reason) ->
Report = io_lib:format("Erlang ssh connection handler failed with reason: "
@@ -1050,3 +1479,101 @@ log_error(Reason) ->
[Reason, erlang:get_stacktrace()]),
error_logger:error_report(Report),
"Internal error".
+
+send_replies([], State) ->
+ State;
+send_replies([{connection_reply, Data} | Rest], #state{ssh_params = Ssh0} = State) ->
+ {Packet, Ssh} = ssh_transport:ssh_packet(Data, Ssh0),
+ send_msg(Packet, State),
+ send_replies(Rest, State#state{ssh_params = Ssh});
+send_replies([Msg | Rest], State) ->
+ catch send_reply(Msg),
+ send_replies(Rest, State).
+
+send_reply({channel_data, Pid, Data}) ->
+ Pid ! {ssh_cm, self(), Data};
+send_reply({channel_requst_reply, From, Data}) ->
+ gen_fsm:reply(From, Data);
+send_reply({flow_control, Cache, Channel, From, Msg}) ->
+ ssh_channel:cache_update(Cache, Channel#channel{flow_control = undefined}),
+ gen_fsm:reply(From, Msg);
+send_reply({flow_control, From, Msg}) ->
+ gen_fsm:reply(From, Msg).
+
+disconnect_fun(_, undefined) ->
+ ok;
+disconnect_fun(Reason, Opts) ->
+ case proplists:get_value(disconnectfun, Opts) of
+ undefined ->
+ ok;
+ Fun ->
+ catch Fun(Reason)
+ end.
+
+check_cache(#state{opts = Opts} = State, Cache) ->
+ %% Check the number of entries in Cache
+ case proplists:get_value(size, ets:info(Cache)) of
+ 0 ->
+ case proplists:get_value(idle_time, Opts, infinity) of
+ infinity ->
+ State;
+ Time ->
+ handle_idle_timer(Time, State)
+ end;
+ _ ->
+ State
+ end.
+
+handle_idle_timer(Time, #state{idle_timer_ref = undefined} = State) ->
+ TimerRef = erlang:send_after(Time, self(), {'EXIT', [], "Timeout"}),
+ State#state{idle_timer_ref=TimerRef};
+handle_idle_timer(_, State) ->
+ State.
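%% Editorial sketch (illustrative, not part of this patch): the idle timer is
%% controlled by the idle_time ssh option read from State#state.opts; a
%% hypothetical option list enabling a 60 s idle timeout could look like this.
example_idle_opts(OtherSshOpts) ->
    [{idle_time, 60000} | OtherSshOpts].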
+
+remove_timer_ref(State) ->
+ case State#state.idle_timer_ref of
+ infinity -> %% If the timer is not activated
+ State;
+	undefined -> %% If we have already cancelled the timer
+ State;
+ TimerRef -> %% Timer is active
+ erlang:cancel_timer(TimerRef),
+ State#state{idle_timer_ref = undefined}
+ end.
+
+socket_control(Socket, Pid, Transport) ->
+ case Transport:controlling_process(Socket, Pid) of
+ ok ->
+ send_event(Pid, socket_control);
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
+handshake(Pid, Ref, Timeout) ->
+ receive
+ ssh_connected ->
+ erlang:demonitor(Ref),
+ {ok, Pid};
+ {Pid, not_connected, Reason} ->
+ {error, Reason};
+ {Pid, user_password} ->
+ Pass = io:get_password(),
+ Pid ! Pass,
+ handshake(Pid, Ref, Timeout);
+ {Pid, question} ->
+ Answer = io:get_line(""),
+ Pid ! Answer,
+ handshake(Pid, Ref, Timeout);
+ {'DOWN', _, process, Pid, {shutdown, Reason}} ->
+ {error, Reason};
+ {'DOWN', _, process, Pid, Reason} ->
+ {error, Reason}
+ after Timeout ->
+ stop(Pid),
+ {error, Timeout}
+ end.
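%% Editorial sketch (illustrative, not part of this patch): the messages the
%% starter process waits for in handshake/3 above, all sent by the newly
%% started connection handler Pid:
%%
%%   ssh_connected                      -> handshake returns {ok, Pid}
%%   {Pid, not_connected, Reason}       -> handshake returns {error, Reason}
%%   {Pid, user_password}               -> answered with io:get_password()
%%   {Pid, question}                    -> answered with io:get_line("")
%%   {'DOWN', _, process, Pid, Reason}  -> handshake returns {error, Reason}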
+
+start_timeout(_,_, infinity) ->
+ ok;
+start_timeout(Channel, From, Time) ->
+ erlang:send_after(Time, self(), {timeout, {Channel, From}}).
diff --git a/lib/ssh/src/ssh_connection_manager.erl b/lib/ssh/src/ssh_connection_manager.erl
deleted file mode 100644
index 99a0b6a7c8..0000000000
--- a/lib/ssh/src/ssh_connection_manager.erl
+++ /dev/null
@@ -1,916 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-%%
-%%----------------------------------------------------------------------
-%% Purpose: Handles multiplexing to ssh channels and global connection
-%% requests, i.e. the SSH Connection Protocol (RFC 4254), which provides
-%% interactive login sessions, remote execution of commands, forwarded
-%% TCP/IP connections, and forwarded X11 connections. Details of the
-%% protocol are implemented in ssh_connection.erl
-%% ----------------------------------------------------------------------
--module(ssh_connection_manager).
-
--behaviour(gen_server).
-
--include("ssh.hrl").
--include("ssh_connect.hrl").
--include("ssh_transport.hrl").
-
--export([start_link/1]).
-
--export([info/1, info/2,
- renegotiate/1, connection_info/2, channel_info/3,
- peer_addr/1, send_window/3, recv_window/3, adjust_window/3,
- close/2, stop/1, send/5,
- send_eof/2]).
-
--export([open_channel/6, reply_request/3, request/6, request/7, global_request/4, event/2, event/3, cast/2]).
-
-%% Internal application API and spawn
--export([send_msg/1, ssh_channel_info_handler/3]).
-
-%% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--define(DBG_MESSAGE, true).
-
--record(state,
- {
- role,
- client,
- starter,
- connection, % pid()
- connection_state, % #connection{}
- latest_channel_id = 0,
- opts,
- channel_args,
- idle_timer_ref, % timerref
- connected
- }).
-
-%%====================================================================
-%% Internal application API
-%%====================================================================
-
-start_link(Opts) ->
- gen_server:start_link(?MODULE, Opts, []).
-
-open_channel(ConnectionManager, ChannelType, ChannelSpecificData,
- InitialWindowSize, MaxPacketSize, Timeout) ->
- case (catch call(ConnectionManager, {open, self(), ChannelType,
- InitialWindowSize,
- MaxPacketSize, ChannelSpecificData},
- Timeout)) of
- {open, Channel} ->
- {ok, Channel};
- Error ->
- %% TODO: Best way?
- Error
- end.
-
-request(ConnectionManager, ChannelPid, ChannelId, Type, true, Data, Timeout) ->
- call(ConnectionManager, {request, ChannelPid, ChannelId, Type, Data}, Timeout);
-request(ConnectionManager, ChannelPid, ChannelId, Type, false, Data, _) ->
- cast(ConnectionManager, {request, ChannelPid, ChannelId, Type, Data}).
-
-request(ConnectionManager, ChannelId, Type, true, Data, Timeout) ->
- call(ConnectionManager, {request, ChannelId, Type, Data}, Timeout);
-request(ConnectionManager, ChannelId, Type, false, Data, _) ->
- cast(ConnectionManager, {request, ChannelId, Type, Data}).
-
-reply_request(ConnectionManager, Status, ChannelId) ->
- cast(ConnectionManager, {reply_request, Status, ChannelId}).
-
-global_request(ConnectionManager, Type, true = Reply, Data) ->
- case call(ConnectionManager,
- {global_request, self(), Type, Reply, Data}) of
- {ssh_cm, ConnectionManager, {success, _}} ->
- ok;
- {ssh_cm, ConnectionManager, {failure, _}} ->
- error
- end;
-
-global_request(ConnectionManager, Type, false = Reply, Data) ->
- cast(ConnectionManager, {global_request, self(), Type, Reply, Data}).
-
-event(ConnectionManager, BinMsg, ErrorMsg) ->
- call(ConnectionManager, {ssh_msg, self(), BinMsg, ErrorMsg}).
-event(ConnectionManager, BinMsg) ->
- call(ConnectionManager, {ssh_msg, self(), BinMsg}).
-info(ConnectionManager) ->
- info(ConnectionManager, {info, all}).
-
-info(ConnectionManager, ChannelProcess) ->
- call(ConnectionManager, {info, ChannelProcess}).
-
-%% TODO: Do we really want this function? Should not
-%% renegotiation be triggered by configurable timer
-%% or amount of data sent counter!
-renegotiate(ConnectionManager) ->
- cast(ConnectionManager, renegotiate).
-renegotiate_data(ConnectionManager) ->
- cast(ConnectionManager, renegotiate_data).
-connection_info(ConnectionManager, Options) ->
- call(ConnectionManager, {connection_info, Options}).
-
-channel_info(ConnectionManager, ChannelId, Options) ->
- call(ConnectionManager, {channel_info, ChannelId, Options}).
-
-%% Replaced by the peer option to connection_info/2; kept for now
-%% for backwards compatibility!
-peer_addr(ConnectionManager) ->
- call(ConnectionManager, {peer_addr, self()}).
-
-%% Backwards compatibility!
-send_window(ConnectionManager, Channel, TimeOut) ->
- call(ConnectionManager, {send_window, Channel}, TimeOut).
-%% Backwards compatibility!
-recv_window(ConnectionManager, Channel, TimeOut) ->
- call(ConnectionManager, {recv_window, Channel}, TimeOut).
-
-adjust_window(ConnectionManager, Channel, Bytes) ->
- cast(ConnectionManager, {adjust_window, Channel, Bytes}).
-
-close(ConnectionManager, ChannelId) ->
- case call(ConnectionManager, {close, ChannelId}) of
- ok ->
- ok;
- {error, channel_closed} ->
- ok
- end.
-
-stop(ConnectionManager) ->
- case call(ConnectionManager, stop) of
- ok ->
- ok;
- {error, channel_closed} ->
- ok
- end.
-
-send(ConnectionManager, ChannelId, Type, Data, Timeout) ->
- call(ConnectionManager, {data, ChannelId, Type, Data}, Timeout).
-
-send_eof(ConnectionManager, ChannelId) ->
- call(ConnectionManager, {eof, ChannelId}).
-
-%%====================================================================
-%% gen_server callbacks
-%%====================================================================
-
-%%--------------------------------------------------------------------
-%% Function: init(Args) -> {ok, State} |
-%% {ok, State, Timeout} |
-%% ignore |
-%% {stop, Reason}
-%% Description: Initiates the server
-%%--------------------------------------------------------------------
-init([server, _Socket, Opts]) ->
- process_flag(trap_exit, true),
- ssh_bits:install_messages(ssh_connection:messages()),
- Cache = ssh_channel:cache_create(),
- {ok, #state{role = server,
- connection_state = #connection{channel_cache = Cache,
- channel_id_seed = 0,
- port_bindings = [],
- requests = []},
- opts = Opts,
- connected = false}};
-
-init([client, Opts]) ->
- process_flag(trap_exit, true),
- {links, [Parent]} = process_info(self(), links),
- ssh_bits:install_messages(ssh_connection:messages()),
- Cache = ssh_channel:cache_create(),
- Address = proplists:get_value(address, Opts),
- Port = proplists:get_value(port, Opts),
- SocketOpts = proplists:get_value(socket_opts, Opts),
- Options = proplists:get_value(ssh_opts, Opts),
- ChannelPid = proplists:get_value(channel_pid, Opts),
- self() !
- {start_connection, client, [Parent, Address, Port, SocketOpts, Options]},
- TimerRef = get_idle_time(Options),
-
- {ok, #state{role = client,
- client = ChannelPid,
- connection_state = #connection{channel_cache = Cache,
- channel_id_seed = 0,
- port_bindings = [],
- connection_supervisor = Parent,
- requests = []},
- opts = Opts,
- idle_timer_ref = TimerRef,
- connected = false}}.
-
-%%--------------------------------------------------------------------
-%% Function: %% handle_call(Request, From, State) -> {reply, Reply, State} |
-%% {reply, Reply, State, Timeout} |
-%% {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, Reply, State} |
-%% {stop, Reason, State}
-%% Description: Handling call messages
-%%--------------------------------------------------------------------
-handle_call({request, ChannelPid, ChannelId, Type, Data}, From, State0) ->
- {{replies, Replies}, State} = handle_request(ChannelPid,
- ChannelId, Type, Data,
- true, From, State0),
- %% Sends message to the connection handler process, reply to
- %% channel is sent later when reply arrives from the connection
- %% handler.
- lists:foreach(fun send_msg/1, Replies),
- SshOpts = proplists:get_value(ssh_opts, State0#state.opts),
- case proplists:get_value(idle_time, SshOpts) of
- infinity ->
- ok;
- _IdleTime ->
- erlang:send_after(5000, self(), {check_cache, [], []})
- end,
- {noreply, State};
-
-handle_call({request, ChannelId, Type, Data}, From, State0) ->
- {{replies, Replies}, State} = handle_request(ChannelId, Type, Data,
- true, From, State0),
- %% Sends message to the connection handler process, reply to
- %% channel is sent later when reply arrives from the connection
- %% handler.
- lists:foreach(fun send_msg/1, Replies),
- {noreply, State};
-
-%% Message from ssh_connection_handler
-handle_call({ssh_msg, Pid, Msg}, From,
- #state{connection_state = Connection0,
- role = Role, opts = Opts, connected = IsConnected,
- client = ClientPid}
- = State) ->
-
-    %% To ensure that all data sent by the other side is processed before a
-    %% possible crash in ssh_connection_handler takes down the connection.
- gen_server:reply(From, ok),
- ConnectionMsg = decode_ssh_msg(Msg),
- try ssh_connection:handle_msg(ConnectionMsg, Connection0, Pid, Role) of
- {{replies, Replies}, Connection} ->
- lists:foreach(fun send_msg/1, Replies),
- {noreply, State#state{connection_state = Connection}};
- {noreply, Connection} ->
- {noreply, State#state{connection_state = Connection}};
- {disconnect, {_, Reason}, {{replies, Replies}, Connection}}
- when Role == client andalso (not IsConnected) ->
- lists:foreach(fun send_msg/1, Replies),
- ClientPid ! {self(), not_connected, Reason},
- {stop, {shutdown, normal}, State#state{connection = Connection}};
- {disconnect, Reason, {{replies, Replies}, Connection}} ->
- lists:foreach(fun send_msg/1, Replies),
- SSHOpts = proplists:get_value(ssh_opts, Opts),
- disconnect_fun(Reason, SSHOpts),
- {stop, {shutdown, normal}, State#state{connection_state = Connection}}
- catch
- _:Error ->
- {disconnect, Reason, {{replies, Replies}, Connection}} =
- ssh_connection:handle_msg(
- #ssh_msg_disconnect{code = ?SSH_DISCONNECT_BY_APPLICATION,
- description = "Internal error",
- language = "en"}, Connection0, undefined,
- Role),
- lists:foreach(fun send_msg/1, Replies),
- SSHOpts = proplists:get_value(ssh_opts, Opts),
- disconnect_fun(Reason, SSHOpts),
- {stop, {shutdown, Error}, State#state{connection_state = Connection}}
- end;
-handle_call({ssh_msg, Pid, Msg, ErrorMsg}, From,
- #state{connection_state = Connection0,
- role = Role, opts = Opts, connected = IsConnected,
- client = ClientPid}
- = State) ->
-
-    %% To ensure that all data sent by the other side is processed before a
-    %% possible crash in ssh_connection_handler takes down the connection.
- gen_server:reply(From, ok),
- ConnectionMsg = decode_ssh_msg(Msg),
- try ssh_connection:handle_msg(ConnectionMsg, Connection0, Pid, Role) of
- {{replies, Replies}, Connection} ->
- lists:foreach(fun send_msg/1, Replies),
- {noreply, State#state{connection_state = Connection}};
- {noreply, Connection} ->
- {noreply, State#state{connection_state = Connection}};
- {disconnect, {_, Reason}, {{replies, Replies}, Connection}}
- when Role == client andalso (not IsConnected) ->
- lists:foreach(fun send_msg/1, Replies),
- ClientPid ! {self(), not_connected, {Reason, ErrorMsg}},
- {stop, {shutdown, normal}, State#state{connection = Connection}};
- {disconnect, Reason, {{replies, Replies}, Connection}} ->
- lists:foreach(fun send_msg/1, Replies),
- SSHOpts = proplists:get_value(ssh_opts, Opts),
- disconnect_fun(Reason, SSHOpts),
- {stop, {shutdown, normal}, State#state{connection_state = Connection}}
- catch
- _:Error ->
- {disconnect, Reason, {{replies, Replies}, Connection}} =
- ssh_connection:handle_msg(
- #ssh_msg_disconnect{code = ?SSH_DISCONNECT_BY_APPLICATION,
- description = "Internal error",
- language = "en"}, Connection0, undefined,
- Role),
- lists:foreach(fun send_msg/1, Replies),
- SSHOpts = proplists:get_value(ssh_opts, Opts),
- disconnect_fun(Reason, SSHOpts),
- {stop, {shutdown, Error}, State#state{connection_state = Connection}}
- end;
-handle_call({global_request, Pid, _, _, _} = Request, From,
- #state{connection_state =
- #connection{channel_cache = Cache}} = State0) ->
- State1 = handle_global_request(Request, State0),
- Channel = ssh_channel:cache_find(Pid, Cache),
- State = add_request(true, Channel#channel.local_id, From, State1),
- {noreply, State};
-
-handle_call({data, ChannelId, Type, Data}, From,
- #state{connection_state = #connection{channel_cache = _Cache}
- = Connection0,
- connection = ConnectionPid} = State) ->
- channel_data(ChannelId, Type, Data, Connection0, ConnectionPid, From,
- State);
-
-handle_call({eof, ChannelId}, _From,
- #state{connection = Pid, connection_state =
- #connection{channel_cache = Cache}} = State) ->
- case ssh_channel:cache_lookup(Cache, ChannelId) of
- #channel{remote_id = Id, sent_close = false} ->
- send_msg({connection_reply, Pid,
- ssh_connection:channel_eof_msg(Id)}),
- {reply, ok, State};
- _ ->
- {reply, {error,closed}, State}
- end;
-
-handle_call({connection_info, Options}, From,
- #state{connection = Connection} = State) ->
- ssh_connection_handler:connection_info(Connection, From, Options),
- %% Reply will be sent by the connection handler by calling
- %% ssh_connection_handler:send_msg/1.
- {noreply, State};
-
-handle_call({channel_info, ChannelId, Options}, From,
- #state{connection_state = #connection{channel_cache = Cache}} = State) ->
-
- case ssh_channel:cache_lookup(Cache, ChannelId) of
- #channel{} = Channel ->
- spawn(?MODULE, ssh_channel_info_handler, [Options, Channel, From]),
- {noreply, State};
- undefined ->
- {reply, []}
- end;
-
-handle_call({info, ChannelPid}, _From,
- #state{connection_state =
- #connection{channel_cache = Cache}} = State) ->
- Result = ssh_channel:cache_foldl(
- fun(Channel, Acc) when ChannelPid == all;
- Channel#channel.user == ChannelPid ->
- [Channel | Acc];
- (_, Acc) ->
- Acc
- end, [], Cache),
- {reply, {ok, Result}, State};
-
-handle_call({open, ChannelPid, Type, InitialWindowSize, MaxPacketSize, Data},
- From, #state{connection = Pid,
- connection_state =
- #connection{channel_cache = Cache}} = State0) ->
- erlang:monitor(process, ChannelPid),
- {ChannelId, State1} = new_channel_id(State0),
- Msg = ssh_connection:channel_open_msg(Type, ChannelId,
- InitialWindowSize,
- MaxPacketSize, Data),
- send_msg({connection_reply, Pid, Msg}),
- Channel = #channel{type = Type,
- sys = "none",
- user = ChannelPid,
- local_id = ChannelId,
- recv_window_size = InitialWindowSize,
- recv_packet_size = MaxPacketSize},
- ssh_channel:cache_update(Cache, Channel),
- State = add_request(true, ChannelId, From, State1),
- {noreply, remove_timer_ref(State)};
-
-handle_call({send_window, ChannelId}, _From,
- #state{connection_state =
- #connection{channel_cache = Cache}} = State) ->
- Reply = case ssh_channel:cache_lookup(Cache, ChannelId) of
- #channel{send_window_size = WinSize,
- send_packet_size = Packsize} ->
- {ok, {WinSize, Packsize}};
- undefined ->
- {error, einval}
- end,
- {reply, Reply, State};
-
-handle_call({recv_window, ChannelId}, _From,
- #state{connection_state = #connection{channel_cache = Cache}}
- = State) ->
-
- Reply = case ssh_channel:cache_lookup(Cache, ChannelId) of
- #channel{recv_window_size = WinSize,
- recv_packet_size = Packsize} ->
- {ok, {WinSize, Packsize}};
- undefined ->
- {error, einval}
- end,
- {reply, Reply, State};
-
-%% Replaced by the peer option to connection_info/2; kept for now
-%% for backwards compatibility!
-handle_call({peer_addr, _ChannelId}, _From,
- #state{connection = Pid} = State) ->
- Reply = ssh_connection_handler:peer_address(Pid),
- {reply, Reply, State};
-
-handle_call(opts, _, #state{opts = Opts} = State) ->
- {reply, Opts, State};
-
-handle_call({close, ChannelId}, _,
- #state{connection = Pid, connection_state =
- #connection{channel_cache = Cache}} = State) ->
- case ssh_channel:cache_lookup(Cache, ChannelId) of
- #channel{remote_id = Id} = Channel ->
- send_msg({connection_reply, Pid,
- ssh_connection:channel_close_msg(Id)}),
- ssh_channel:cache_update(Cache, Channel#channel{sent_close = true}),
- SshOpts = proplists:get_value(ssh_opts, State#state.opts),
- case proplists:get_value(idle_time, SshOpts) of
- infinity ->
- ok;
- _IdleTime ->
- erlang:send_after(5000, self(), {check_cache, [], []})
- end,
- {reply, ok, State};
- undefined ->
- {reply, ok, State}
- end;
-
-handle_call(stop, _, #state{connection_state = Connection0,
- role = Role,
- opts = Opts} = State) ->
- {disconnect, Reason, {{replies, Replies}, Connection}} =
- ssh_connection:handle_msg(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_BY_APPLICATION,
- description = "User closed down connection",
- language = "en"}, Connection0, undefined,
- Role),
- lists:foreach(fun send_msg/1, Replies),
- SSHOpts = proplists:get_value(ssh_opts, Opts),
- disconnect_fun(Reason, SSHOpts),
- {stop, normal, ok, State#state{connection_state = Connection}};
-
-%% API violation: make it the violator's problem
-%% by ignoring it. The violating process will get
-%% a timeout or hang.
-handle_call(_, _, State) ->
- {noreply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_cast(Msg, State) -> {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, State}
-%% Description: Handling cast messages
-%%--------------------------------------------------------------------
-handle_cast({request, ChannelPid, ChannelId, Type, Data}, State0) ->
- {{replies, Replies}, State} = handle_request(ChannelPid, ChannelId,
- Type, Data,
- false, none, State0),
- lists:foreach(fun send_msg/1, Replies),
- {noreply, State};
-
-handle_cast({request, ChannelId, Type, Data}, State0) ->
- {{replies, Replies}, State} = handle_request(ChannelId, Type, Data,
- false, none, State0),
- lists:foreach(fun send_msg/1, Replies),
- {noreply, State};
-
-handle_cast({reply_request, Status, ChannelId}, #state{connection_state =
- #connection{channel_cache = Cache}} = State0) ->
- State = case ssh_channel:cache_lookup(Cache, ChannelId) of
- #channel{remote_id = RemoteId} ->
- cm_message({Status, RemoteId}, State0);
- undefined ->
- State0
- end,
- {noreply, State};
-
-handle_cast({global_request, _, _, _, _} = Request, State0) ->
- State = handle_global_request(Request, State0),
- {noreply, State};
-
-handle_cast(renegotiate, #state{connection = Pid} = State) ->
- ssh_connection_handler:renegotiate(Pid),
- {noreply, State};
-handle_cast(renegotiate_data, #state{connection = Pid} = State) ->
- ssh_connection_handler:renegotiate_data(Pid),
- {noreply, State};
-handle_cast({adjust_window, ChannelId, Bytes},
- #state{connection = Pid, connection_state =
- #connection{channel_cache = Cache}} = State) ->
- case ssh_channel:cache_lookup(Cache, ChannelId) of
- #channel{recv_window_size = WinSize, remote_id = Id} = Channel ->
- ssh_channel:cache_update(Cache, Channel#channel{recv_window_size =
- WinSize + Bytes}),
- Msg = ssh_connection:channel_adjust_window_msg(Id, Bytes),
- send_msg({connection_reply, Pid, Msg});
- undefined ->
- ignore
- end,
- {noreply, State};
-
-handle_cast({success, ChannelId}, #state{connection = Pid} = State) ->
- Msg = ssh_connection:channel_success_msg(ChannelId),
- send_msg({connection_reply, Pid, Msg}),
- {noreply, State};
-
-handle_cast({failure, ChannelId}, #state{connection = Pid} = State) ->
- Msg = ssh_connection:channel_failure_msg(ChannelId),
- send_msg({connection_reply, Pid, Msg}),
- {noreply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_info(Info, State) -> {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, State}
-%% Description: Handling all non call/cast messages
-%%--------------------------------------------------------------------
-handle_info({start_connection, server,
- [Address, Port, Socket, Options, SubSysSup]},
- #state{connection_state = CState} = State) ->
- {ok, Connection} = ssh_transport:accept(Address, Port, Socket, Options),
- Shell = proplists:get_value(shell, Options),
- Exec = proplists:get_value(exec, Options),
- CliSpec = proplists:get_value(ssh_cli, Options, {ssh_cli, [Shell]}),
- ssh_connection_handler:send_event(Connection, socket_control),
- erlang:send_after(60000, self(), rekey_data),
- {noreply, State#state{connection = Connection,
- connection_state =
- CState#connection{address = Address,
- port = Port,
- cli_spec = CliSpec,
- options = Options,
- exec = Exec,
- sub_system_supervisor = SubSysSup
- }}};
-
-handle_info({start_connection, client,
- [Parent, Address, Port, SocketOpts, Options]},
- #state{client = Pid} = State) ->
- case (catch ssh_transport:connect(Parent, Address,
- Port, SocketOpts, Options)) of
- {ok, Connection} ->
- erlang:send_after(60000, self(), rekey_data),
- erlang:send_after(3600000, self(), rekey),
- {noreply, State#state{connection = Connection}};
- Reason ->
- Pid ! {self(), not_connected, Reason},
- {stop, {shutdown, normal}, State}
- end;
-handle_info({check_cache, _ , _},
- #state{connection_state =
- #connection{channel_cache = Cache}} = State) ->
- {noreply, check_cache(State, Cache)};
-handle_info({ssh_cm, _Sender, Msg}, State0) ->
- %% Backwards compatibility!
- State = cm_message(Msg, State0),
- {noreply, State};
-
-%% Nop backwards compatibility
-handle_info({same_user, _}, State) ->
- {noreply, State};
-
-handle_info(ssh_connected, #state{role = client, client = Pid}
- = State) ->
- Pid ! {self(), is_connected},
- {noreply, State#state{connected = true, opts = handle_password(State#state.opts)}};
-
-handle_info(ssh_connected, #state{role = server} = State) ->
- {noreply, State#state{connected = true}};
-
-%%% Handle that ssh channels user process goes down
-handle_info({'DOWN', _Ref, process, ChannelPid, _Reason}, State) ->
- handle_down(handle_channel_down(ChannelPid, State));
-
-%%% So that terminate will be run when supervisor is shutdown
-handle_info({'EXIT', _Sup, Reason}, State) ->
- {stop, Reason, State};
-handle_info(rekey, State) ->
- renegotiate(self()),
- erlang:send_after(3600000, self(), rekey),
- {noreply, State};
-handle_info(rekey_data, State) ->
- renegotiate_data(self()),
- erlang:send_after(60000, self(), rekey_data),
- {noreply, State}.
-handle_password(Opts) ->
- handle_rsa_password(handle_dsa_password(handle_normal_password(Opts))).
-handle_normal_password(Opts) ->
- case proplists:get_value(ssh_opts, Opts, false) of
- false ->
- Opts;
- SshOpts ->
- case proplists:get_value(password, SshOpts, false) of
- false ->
- Opts;
- _Password ->
- NewOpts = [{password, undefined}|lists:keydelete(password, 1, SshOpts)],
- [{ssh_opts, NewOpts}|lists:keydelete(ssh_opts, 1, Opts)]
- end
- end.
-handle_dsa_password(Opts) ->
- case proplists:get_value(ssh_opts, Opts, false) of
- false ->
- Opts;
- SshOpts ->
- case proplists:get_value(dsa_pass_phrase, SshOpts, false) of
- false ->
- Opts;
- _Password ->
- NewOpts = [{dsa_pass_phrase, undefined}|lists:keydelete(dsa_pass_phrase, 1, SshOpts)],
- [{ssh_opts, NewOpts}|lists:keydelete(ssh_opts, 1, Opts)]
- end
- end.
-handle_rsa_password(Opts) ->
- case proplists:get_value(ssh_opts, Opts, false) of
- false ->
- Opts;
- SshOpts ->
- case proplists:get_value(rsa_pass_phrase, SshOpts, false) of
- false ->
- Opts;
- _Password ->
- NewOpts = [{rsa_pass_phrase, undefined}|lists:keydelete(rsa_pass_phrase, 1, SshOpts)],
- [{ssh_opts, NewOpts}|lists:keydelete(ssh_opts, 1, Opts)]
- end
- end.
-%%--------------------------------------------------------------------
-%% Function: terminate(Reason, State) -> void()
-%% Description: This function is called by a gen_server when it is about to
-%% terminate. It should be the opposite of Module:init/1 and do any necessary
-%% cleaning up. When it returns, the gen_server terminates with Reason.
-%% The return value is ignored.
-%%--------------------------------------------------------------------
-terminate(_Reason, #state{role = client,
- connection_state =
- #connection{connection_supervisor = Supervisor}}) ->
- sshc_sup:stop_child(Supervisor);
-
-terminate(_Reason, #state{role = server,
- connection_state =
- #connection{sub_system_supervisor = SubSysSup},
- opts = Opts}) ->
- Address = proplists:get_value(address, Opts),
- Port = proplists:get_value(port, Opts),
- SystemSup = ssh_system_sup:system_supervisor(Address, Port),
- ssh_system_sup:stop_subsystem(SystemSup, SubSysSup).
-
-%%--------------------------------------------------------------------
-%% Func: code_change(OldVsn, State, Extra) -> {ok, NewState}
-%% Description: Convert process state when code is changed
-%%--------------------------------------------------------------------
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%--------------------------------------------------------------------
-%%% Internal functions
-%%--------------------------------------------------------------------
-get_idle_time(SshOptions) ->
- case proplists:get_value(idle_time, SshOptions) of
- infinity ->
- infinity;
-	_IdleTime -> %% We don't want to set the timeout on first connect
- undefined
- end.
-check_cache(State, Cache) ->
- %% Check the number of entries in Cache
- case proplists:get_value(size, ets:info(Cache)) of
- 0 ->
- Opts = proplists:get_value(ssh_opts, State#state.opts),
- case proplists:get_value(idle_time, Opts) of
- infinity ->
- State;
- undefined ->
- State;
- Time ->
- case State#state.idle_timer_ref of
- undefined ->
- TimerRef = erlang:send_after(Time, self(), {'EXIT', [], "Timeout"}),
- State#state{idle_timer_ref=TimerRef};
- _ ->
- State
- end
- end;
- _ ->
- State
- end.
-remove_timer_ref(State) ->
- case State#state.idle_timer_ref of
- infinity -> %% If the timer is not activated
- State;
-	undefined -> %% If the timer has already been cancelled
- State;
- TimerRef -> %% Timer is active
- erlang:cancel_timer(TimerRef),
- State#state{idle_timer_ref = undefined}
- end.
-channel_data(Id, Type, Data, Connection0, ConnectionPid, From, State) ->
- case ssh_connection:channel_data(Id, Type, Data, Connection0,
- ConnectionPid, From) of
- {{replies, Replies}, Connection} ->
- lists:foreach(fun send_msg/1, Replies),
- {noreply, State#state{connection_state = Connection}};
- {noreply, Connection} ->
- {noreply, State#state{connection_state = Connection}}
- end.
-
-call(Pid, Msg) ->
- call(Pid, Msg, infinity).
-call(Pid, Msg, Timeout) ->
- try gen_server:call(Pid, Msg, Timeout) of
- Result ->
- Result
- catch
- exit:{timeout, _} ->
- {error, timeout};
- exit:{normal, _} ->
- {error, channel_closed};
- exit:{{shutdown, _}, _} ->
- {error, channel_closed};
- exit:{noproc,_} ->
- {error, channel_closed}
- end.
-
-cast(Pid, Msg) ->
- gen_server:cast(Pid, Msg).
-
-decode_ssh_msg(BinMsg) when is_binary(BinMsg)->
- ssh_bits:decode(BinMsg);
-decode_ssh_msg(Msg) ->
- Msg.
-
-
-send_msg(Msg) ->
- catch do_send_msg(Msg).
-do_send_msg({channel_data, Pid, Data}) ->
- Pid ! {ssh_cm, self(), Data};
-do_send_msg({channel_requst_reply, From, Data}) ->
- gen_server:reply(From, Data);
-do_send_msg({connection_reply, Pid, Data}) ->
- Msg = ssh_bits:encode(Data),
- ssh_connection_handler:send(Pid, Msg);
-do_send_msg({flow_control, Cache, Channel, From, Msg}) ->
- ssh_channel:cache_update(Cache, Channel#channel{flow_control = undefined}),
- gen_server:reply(From, Msg);
-do_send_msg({flow_control, From, Msg}) ->
- gen_server:reply(From, Msg).
-
-handle_request(ChannelPid, ChannelId, Type, Data, WantReply, From,
- #state{connection = Pid,
- connection_state =
- #connection{channel_cache = Cache}} = State0) ->
- case ssh_channel:cache_lookup(Cache, ChannelId) of
- #channel{remote_id = Id} = Channel ->
- update_sys(Cache, Channel, Type, ChannelPid),
- Msg = ssh_connection:channel_request_msg(Id, Type,
- WantReply, Data),
- Replies = [{connection_reply, Pid, Msg}],
- State = add_request(WantReply, ChannelId, From, State0),
- {{replies, Replies}, State};
- undefined ->
- {{replies, []}, State0}
- end.
-
-handle_request(ChannelId, Type, Data, WantReply, From,
- #state{connection = Pid,
- connection_state =
- #connection{channel_cache = Cache}} = State0) ->
- case ssh_channel:cache_lookup(Cache, ChannelId) of
- #channel{remote_id = Id} ->
- Msg = ssh_connection:channel_request_msg(Id, Type,
- WantReply, Data),
- Replies = [{connection_reply, Pid, Msg}],
- State = add_request(WantReply, ChannelId, From, State0),
- {{replies, Replies}, State};
- undefined ->
- {{replies, []}, State0}
- end.
-
-handle_down({{replies, Replies}, State}) ->
- lists:foreach(fun send_msg/1, Replies),
- {noreply, State}.
-
-handle_channel_down(ChannelPid, #state{connection_state =
- #connection{channel_cache = Cache}} =
- State) ->
- ssh_channel:cache_foldl(
- fun(Channel, Acc) when Channel#channel.user == ChannelPid ->
- ssh_channel:cache_delete(Cache,
- Channel#channel.local_id),
- Acc;
- (_,Acc) ->
- Acc
- end, [], Cache),
- {{replies, []}, check_cache(State, Cache)}.
-
-update_sys(Cache, Channel, Type, ChannelPid) ->
- ssh_channel:cache_update(Cache,
- Channel#channel{sys = Type, user = ChannelPid}).
-
-add_request(false, _ChannelId, _From, State) ->
- State;
-add_request(true, ChannelId, From, #state{connection_state =
- #connection{requests = Requests0} =
- Connection} = State) ->
- Requests = [{ChannelId, From} | Requests0],
- State#state{connection_state = Connection#connection{requests = Requests}}.
-
-new_channel_id(#state{connection_state = #connection{channel_id_seed = Id} =
- Connection}
- = State) ->
- {Id, State#state{connection_state =
- Connection#connection{channel_id_seed = Id + 1}}}.
-
-handle_global_request({global_request, ChannelPid,
- "tcpip-forward" = Type, WantReply,
- <<?UINT32(IPLen),
- IP:IPLen/binary, ?UINT32(Port)>> = Data},
- #state{connection = ConnectionPid,
- connection_state =
- #connection{channel_cache = Cache}
- = Connection0} = State) ->
- ssh_channel:cache_update(Cache, #channel{user = ChannelPid,
- type = "forwarded-tcpip",
- sys = none}),
- Connection = ssh_connection:bind(IP, Port, ChannelPid, Connection0),
- Msg = ssh_connection:global_request_msg(Type, WantReply, Data),
- send_msg({connection_reply, ConnectionPid, Msg}),
- State#state{connection_state = Connection};
-
-handle_global_request({global_request, _Pid, "cancel-tcpip-forward" = Type,
- WantReply, <<?UINT32(IPLen),
- IP:IPLen/binary, ?UINT32(Port)>> = Data},
- #state{connection = Pid,
- connection_state = Connection0} = State) ->
- Connection = ssh_connection:unbind(IP, Port, Connection0),
- Msg = ssh_connection:global_request_msg(Type, WantReply, Data),
- send_msg({connection_reply, Pid, Msg}),
- State#state{connection_state = Connection};
-
-handle_global_request({global_request, _Pid, "cancel-tcpip-forward" = Type,
- WantReply, Data}, #state{connection = Pid} = State) ->
- Msg = ssh_connection:global_request_msg(Type, WantReply, Data),
- send_msg({connection_reply, Pid, Msg}),
- State.
-
-cm_message(Msg, State) ->
- {noreply, NewState} = handle_cast(Msg, State),
- NewState.
-
-disconnect_fun(Reason, Opts) ->
- case proplists:get_value(disconnectfun, Opts) of
- undefined ->
- ok;
- Fun ->
- catch Fun(Reason)
- end.
-
-ssh_channel_info_handler(Options, Channel, From) ->
- Info = ssh_channel_info(Options, Channel, []),
- send_msg({channel_requst_reply, From, Info}).
-
-ssh_channel_info([], _, Acc) ->
- Acc;
-
-ssh_channel_info([recv_window | Rest], #channel{recv_window_size = WinSize,
- recv_packet_size = Packsize
- } = Channel, Acc) ->
- ssh_channel_info(Rest, Channel, [{recv_window, {{win_size, WinSize},
- {packet_size, Packsize}}} | Acc]);
-ssh_channel_info([send_window | Rest], #channel{send_window_size = WinSize,
- send_packet_size = Packsize
- } = Channel, Acc) ->
- ssh_channel_info(Rest, Channel, [{send_window, {{win_size, WinSize},
- {packet_size, Packsize}}} | Acc]);
-ssh_channel_info([ _ | Rest], Channel, Acc) ->
- ssh_channel_info(Rest, Channel, Acc).
-
-
-
diff --git a/lib/ssh/src/ssh_connection_sup.erl b/lib/ssh/src/ssh_connection_sup.erl
index b620056310..c5abc8f23b 100644
--- a/lib/ssh/src/ssh_connection_sup.erl
+++ b/lib/ssh/src/ssh_connection_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -25,8 +25,9 @@
-behaviour(supervisor).
--export([start_link/1, start_handler_child/2, start_manager_child/2,
- connection_manager/1]).
+%% API
+-export([start_link/1]).
+-export([start_child/2]).
%% Supervisor callback
-export([init/1]).
@@ -37,83 +38,23 @@
start_link(Args) ->
supervisor:start_link(?MODULE, [Args]).
-%% Will be called from the manager child process
-start_handler_child(Sup, Args) ->
- [Spec] = child_specs(handler, Args),
- supervisor:start_child(Sup, Spec).
-
-%% Will be called from the acceptor process
-start_manager_child(Sup, Args) ->
- [Spec] = child_specs(manager, Args),
- supervisor:start_child(Sup, Spec).
-
-connection_manager(SupPid) ->
- try supervisor:which_children(SupPid) of
- Children ->
- {ok, ssh_connection_manager(Children)}
- catch exit:{noproc,_} ->
- {ok, undefined}
- end.
+start_child(Sup, Args) ->
+ supervisor:start_child(Sup, Args).
%%%=========================================================================
%%% Supervisor callback
%%%=========================================================================
-init([Args]) ->
- RestartStrategy = one_for_all,
+init(_) ->
+ RestartStrategy = simple_one_for_one,
MaxR = 0,
MaxT = 3600,
- Children = child_specs(Args),
- {ok, {{RestartStrategy, MaxR, MaxT}, Children}}.
-
-%%%=========================================================================
-%%% Internal functions
-%%%=========================================================================
-child_specs(Opts) ->
- case proplists:get_value(role, Opts) of
- client ->
- child_specs(manager, [client | Opts]);
- server ->
- %% Children started by acceptor process
- []
- end.
-
-% The manager process starts the handler process
-child_specs(manager, Opts) ->
- [manager_spec(Opts)];
-child_specs(handler, Opts) ->
- [handler_spec(Opts)].
-
-manager_spec([server = Role, Socket, Opts]) ->
- Name = make_ref(),
- StartFunc = {ssh_connection_manager, start_link, [[Role, Socket, Opts]]},
- Restart = temporary,
- Shutdown = 3600,
- Modules = [ssh_connection_manager],
- Type = worker,
- {Name, StartFunc, Restart, Shutdown, Type, Modules};
-
-manager_spec([client = Role | Opts]) ->
- Name = make_ref(),
- StartFunc = {ssh_connection_manager, start_link, [[Role, Opts]]},
- Restart = temporary,
- Shutdown = 3600,
- Modules = [ssh_connection_manager],
- Type = worker,
- {Name, StartFunc, Restart, Shutdown, Type, Modules}.
-handler_spec([Role, Socket, Opts]) ->
- Name = make_ref(),
- StartFunc = {ssh_connection_handler,
- start_link, [Role, self(), Socket, Opts]},
- Restart = temporary,
- Shutdown = 3600,
+ Name = undefined, % As simple_one_for_one is used.
+ StartFunc = {ssh_connection_handler, start_link, []},
+    Restart = temporary, % i.e. the handler should not be restarted
+ Shutdown = 4000,
Modules = [ssh_connection_handler],
Type = worker,
- {Name, StartFunc, Restart, Shutdown, Type, Modules}.
-ssh_connection_manager([]) ->
- undefined;
-ssh_connection_manager([{_, Child, _, [ssh_connection_manager]} | _]) ->
- Child;
-ssh_connection_manager([_ | Rest]) ->
- ssh_connection_manager(Rest).
+ ChildSpec = {Name, StartFunc, Restart, Shutdown, Type, Modules},
+ {ok, {{RestartStrategy, MaxR, MaxT}, [ChildSpec]}}.
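
With the supervisor rewritten as simple_one_for_one, every child is started from the single ssh_connection_handler child spec above, and start_child/2 simply forwards its argument list to supervisor:start_child/2. A minimal caller-side sketch follows; the argument list shown is an assumption, since it has to match whatever ssh_connection_handler:start_link expects in this version.

%% Hypothetical sketch: Args is appended to the empty argument list in the
%% child spec's StartFunc, so its shape (assumed here) must match
%% ssh_connection_handler:start_link/N.
start_handler(SupPid, Role, Socket, Opts) ->
    {ok, HandlerPid} = ssh_connection_sup:start_child(SupPid, [Role, Socket, Opts]),
    HandlerPid.
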
diff --git a/lib/ssh/src/ssh_io.erl b/lib/ssh/src/ssh_io.erl
index 01fc713569..832b144db9 100644
--- a/lib/ssh/src/ssh_io.erl
+++ b/lib/ssh/src/ssh_io.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -24,7 +24,6 @@
-module(ssh_io).
-export([yes_no/2, read_password/2, read_line/2, format/2]).
--import(lists, [reverse/1]).
-include("ssh.hrl").
read_line(Prompt, Ssh) ->
@@ -81,7 +80,7 @@ format(Fmt, Args) ->
trim(Line) when is_list(Line) ->
- reverse(trim1(reverse(trim1(Line))));
+ lists:reverse(trim1(lists:reverse(trim1(Line))));
trim(Other) -> Other.
trim1([$\s|Cs]) -> trim(Cs);
diff --git a/lib/ssh/src/ssh_message.erl b/lib/ssh/src/ssh_message.erl
new file mode 100644
index 0000000000..7bd0375521
--- /dev/null
+++ b/lib/ssh/src/ssh_message.erl
@@ -0,0 +1,529 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2013-2013. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+
+%%------------------------------------------------------------------
+-module(ssh_message).
+
+-include_lib("public_key/include/public_key.hrl").
+
+-include("ssh.hrl").
+-include("ssh_connect.hrl").
+-include("ssh_auth.hrl").
+-include("ssh_transport.hrl").
+
+-export([encode/1, decode/1, encode_host_key/1, decode_keyboard_interactive_prompts/2]).
+
+encode(#ssh_msg_global_request{
+ name = Name,
+ want_reply = Bool,
+ data = Data}) ->
+ ssh_bits:encode([?SSH_MSG_GLOBAL_REQUEST,
+ Name, Bool, Data], [byte, string, boolean, '...']);
+encode(#ssh_msg_request_success{data = Data}) ->
+ <<?BYTE(?SSH_MSG_REQUEST_SUCCESS), Data/binary>>;
+encode(#ssh_msg_request_failure{}) ->
+ <<?BYTE(?SSH_MSG_REQUEST_FAILURE)>>;
+encode(#ssh_msg_channel_open{
+ channel_type = Type,
+ sender_channel = Sender,
+ initial_window_size = Window,
+ maximum_packet_size = Max,
+ data = Data
+ }) ->
+ ssh_bits:encode([?SSH_MSG_CHANNEL_OPEN,
+ Type, Sender, Window, Max, Data], [byte, string, uint32,
+ uint32, uint32, '...']);
+encode(#ssh_msg_channel_open_confirmation{
+ recipient_channel = Recipient,
+ sender_channel = Sender,
+ initial_window_size = InitWindowSize,
+ maximum_packet_size = MaxPacketSize,
+ data = Data
+ }) ->
+ ssh_bits:encode([?SSH_MSG_CHANNEL_OPEN_CONFIRMATION, Recipient,
+ Sender, InitWindowSize, MaxPacketSize, Data],
+ [byte, uint32, uint32, uint32, uint32, '...']);
+encode(#ssh_msg_channel_open_failure{
+ recipient_channel = Recipient,
+ reason = Reason,
+ description = Desc,
+ lang = Lang
+ }) ->
+ ssh_bits:encode([?SSH_MSG_CHANNEL_OPEN_FAILURE, Recipient,
+ Reason, Desc, Lang], [byte, uint32, uint32, string, string]);
+encode(#ssh_msg_channel_window_adjust{
+ recipient_channel = Recipient,
+ bytes_to_add = Bytes
+ }) ->
+ ssh_bits:encode([?SSH_MSG_CHANNEL_WINDOW_ADJUST, Recipient, Bytes],
+ [byte, uint32, uint32]);
+encode(#ssh_msg_channel_data{
+ recipient_channel = Recipient,
+ data = Data
+ }) ->
+ ssh_bits:encode([?SSH_MSG_CHANNEL_DATA, Recipient, Data], [byte, uint32, binary]);
+
+encode(#ssh_msg_channel_extended_data{
+ recipient_channel = Recipient,
+ data_type_code = DataType,
+ data = Data
+ }) ->
+ ssh_bits:encode([?SSH_MSG_CHANNEL_EXTENDED_DATA, Recipient,
+ DataType, Data], [byte, uint32, uint32, binary]);
+
+encode(#ssh_msg_channel_eof{recipient_channel = Recipient
+ }) ->
+ <<?BYTE(?SSH_MSG_CHANNEL_EOF), ?UINT32(Recipient)>>;
+encode(#ssh_msg_channel_close{
+ recipient_channel = Recipient
+ }) ->
+ <<?BYTE(?SSH_MSG_CHANNEL_CLOSE), ?UINT32(Recipient)>>;
+encode(#ssh_msg_channel_request{
+ recipient_channel = Recipient,
+ request_type = Type,
+ want_reply = Bool,
+ data = Data
+ }) ->
+ ssh_bits:encode([?SSH_MSG_CHANNEL_REQUEST, Recipient, Type, Bool, Data],
+ [byte, uint32, string, boolean, '...']);
+encode(#ssh_msg_channel_success{
+ recipient_channel = Recipient
+ }) ->
+ <<?BYTE(?SSH_MSG_CHANNEL_SUCCESS), ?UINT32(Recipient)>>;
+encode(#ssh_msg_channel_failure{
+ recipient_channel = Recipient
+ }) ->
+ <<?BYTE(?SSH_MSG_CHANNEL_FAILURE), ?UINT32(Recipient)>>;
+
+encode(#ssh_msg_userauth_request{
+ user = User,
+ service = Service,
+ method = Method,
+ data = Data
+ }) ->
+ ssh_bits:encode([?SSH_MSG_USERAUTH_REQUEST, User, Service, Method, Data],
+ [byte, string, string, string, '...']);
+encode(#ssh_msg_userauth_failure{
+ authentications = Auths,
+ partial_success = Bool
+ }) ->
+ ssh_bits:encode([?SSH_MSG_USERAUTH_FAILURE, Auths, Bool],
+ [byte, string, boolean]);
+encode(#ssh_msg_userauth_success{}) ->
+ <<?BYTE(?SSH_MSG_USERAUTH_SUCCESS)>>;
+
+encode(#ssh_msg_userauth_banner{
+ message = Banner,
+ language = Lang
+ }) ->
+ ssh_bits:encode([?SSH_MSG_USERAUTH_BANNER, Banner, Lang],
+ [byte, string, string]);
+
+encode(#ssh_msg_userauth_pk_ok{
+ algorithm_name = Alg,
+ key_blob = KeyBlob
+ }) ->
+ ssh_bits:encode([?SSH_MSG_USERAUTH_PK_OK, Alg, KeyBlob],
+ [byte, string, binary]);
+
+encode(#ssh_msg_userauth_passwd_changereq{prompt = Prompt,
+ languge = Lang
+ })->
+ ssh_bits:encode([?SSH_MSG_USERAUTH_PASSWD_CHANGEREQ, Prompt, Lang],
+ [byte, string, string]);
+
+encode(#ssh_msg_userauth_info_request{
+ name = Name,
+ instruction = Inst,
+ language_tag = Lang,
+ num_prompts = NumPromtps,
+ data = Data}) ->
+ ssh_bits:encode([?SSH_MSG_USERAUTH_INFO_REQUEST, Name, Inst, Lang, NumPromtps, Data],
+ [byte, string, string, string, uint32, '...']);
+
+encode(#ssh_msg_userauth_info_response{
+ num_responses = Num,
+ data = Data}) ->
+ ssh_bits:encode([?SSH_MSG_USERAUTH_INFO_RESPONSE, Num, Data],
+ [byte, uint32, '...']);
+encode(#ssh_msg_disconnect{
+ code = Code,
+ description = Desc,
+ language = Lang
+ }) ->
+ ssh_bits:encode([?SSH_MSG_DISCONNECT, Code, Desc, Lang],
+ [byte, uint32, string, string]);
+
+encode(#ssh_msg_service_request{
+ name = Service
+ }) ->
+ ssh_bits:encode([?SSH_MSG_SERVICE_REQUEST, Service], [byte, string]);
+
+encode(#ssh_msg_service_accept{
+ name = Service
+ }) ->
+ ssh_bits:encode([?SSH_MSG_SERVICE_ACCEPT, Service], [byte, string]);
+
+encode(#ssh_msg_newkeys{}) ->
+ <<?BYTE(?SSH_MSG_NEWKEYS)>>;
+
+encode(#ssh_msg_kexinit{
+ cookie = Cookie,
+ kex_algorithms = KeyAlgs,
+ server_host_key_algorithms = HostKeyAlgs,
+ encryption_algorithms_client_to_server = EncAlgC2S,
+ encryption_algorithms_server_to_client = EncAlgS2C,
+ mac_algorithms_client_to_server = MacAlgC2S,
+ mac_algorithms_server_to_client = MacAlgS2C,
+ compression_algorithms_client_to_server = CompAlgS2C,
+ compression_algorithms_server_to_client = CompAlgC2S,
+ languages_client_to_server = LangC2S,
+ languages_server_to_client = LangS2C,
+ first_kex_packet_follows = Bool,
+ reserved = Reserved
+ }) ->
+ ssh_bits:encode([?SSH_MSG_KEXINIT, Cookie, KeyAlgs, HostKeyAlgs, EncAlgC2S, EncAlgS2C,
+ MacAlgC2S, MacAlgS2C, CompAlgS2C, CompAlgC2S, LangC2S, LangS2C, Bool,
+ Reserved],
+ [byte, cookie,
+ name_list, name_list,
+ name_list, name_list,
+ name_list, name_list,
+ name_list, name_list,
+ name_list, name_list,
+ boolean, uint32]);
+
+encode(#ssh_msg_kexdh_init{e = E}) ->
+ ssh_bits:encode([?SSH_MSG_KEXDH_INIT, E], [byte, mpint]);
+
+encode(#ssh_msg_kexdh_reply{
+ public_host_key = Key,
+ f = F,
+ h_sig = Signature
+ }) ->
+ EncKey = encode_host_key(Key),
+ EncSign = encode_sign(Key, Signature),
+ ssh_bits:encode([?SSH_MSG_KEXDH_REPLY, EncKey, F, EncSign], [byte, binary, mpint, binary]);
+
+encode(#ssh_msg_kex_dh_gex_request{
+ min = Min,
+ n = N,
+ max = Max
+ }) ->
+ ssh_bits:encode([?SSH_MSG_KEX_DH_GEX_REQUEST, Min, N, Max],
+		     [byte, uint32, uint32, uint32]);
+encode(#ssh_msg_kex_dh_gex_request_old{n = N}) ->
+ ssh_bits:encode([?SSH_MSG_KEX_DH_GEX_REQUEST_OLD, N],
+ [byte, uint32]);
+
+encode(#ssh_msg_kex_dh_gex_group{p = Prime, g = Generator}) ->
+ ssh_bits:encode([?SSH_MSG_KEX_DH_GEX_GROUP, Prime, Generator],
+ [byte, mpint, mpint]);
+
+encode(#ssh_msg_kex_dh_gex_init{e = Public}) ->
+ ssh_bits:encode([?SSH_MSG_KEX_DH_GEX_INIT, Public], [byte, mpint]);
+
+encode(#ssh_msg_kex_dh_gex_reply{
+		  %% Will be a private key; encode_host_key extracts only the public part!
+ public_host_key = Key,
+ f = F,
+ h_sig = Signature
+ }) ->
+ EncKey = encode_host_key(Key),
+ EncSign = encode_sign(Key, Signature),
+ ssh_bits:encode([?SSH_MSG_KEXDH_REPLY, EncKey, F, EncSign], [byte, binary, mpint, binary]);
+
+encode(#ssh_msg_ignore{data = Data}) ->
+ ssh_bits:encode([?SSH_MSG_IGNORE, Data], [byte, string]);
+
+encode(#ssh_msg_unimplemented{sequence = Seq}) ->
+    ssh_bits:encode([?SSH_MSG_UNIMPLEMENTED, Seq], [byte, uint32]);
+
+encode(#ssh_msg_debug{always_display = Bool,
+ message = Msg,
+ language = Lang}) ->
+ ssh_bits:encode([?SSH_MSG_DEBUG, Bool, Msg, Lang], [byte, boolean, string, string]).
+
+
+%% Connection Messages
+decode(<<?BYTE(?SSH_MSG_GLOBAL_REQUEST), ?UINT32(Len), Name:Len/binary,
+ ?BYTE(Bool), Data/binary>>) ->
+ #ssh_msg_global_request{
+ name = Name,
+ want_reply = erl_boolean(Bool),
+ data = Data
+ };
+decode(<<?BYTE(?SSH_MSG_REQUEST_SUCCESS), Data/binary>>) ->
+ #ssh_msg_request_success{data = Data};
+decode(<<?BYTE(?SSH_MSG_REQUEST_FAILURE)>>) ->
+ #ssh_msg_request_failure{};
+decode(<<?BYTE(?SSH_MSG_CHANNEL_OPEN),
+ ?UINT32(Len), Type:Len/binary,
+ ?UINT32(Sender), ?UINT32(Window), ?UINT32(Max),
+ Data/binary>>) ->
+ #ssh_msg_channel_open{
+ channel_type = binary_to_list(Type),
+ sender_channel = Sender,
+ initial_window_size = Window,
+ maximum_packet_size = Max,
+ data = Data
+ };
+decode(<<?BYTE(?SSH_MSG_CHANNEL_OPEN_CONFIRMATION), ?UINT32(Recipient), ?UINT32(Sender),
+ ?UINT32(InitWindowSize), ?UINT32(MaxPacketSize),
+ Data/binary>>) ->
+ #ssh_msg_channel_open_confirmation{
+ recipient_channel = Recipient,
+ sender_channel = Sender,
+ initial_window_size = InitWindowSize,
+ maximum_packet_size = MaxPacketSize,
+ data = Data
+ };
+decode(<<?BYTE(?SSH_MSG_CHANNEL_OPEN_FAILURE), ?UINT32(Recipient), ?UINT32(Reason),
+ ?UINT32(Len0), Desc:Len0/binary, ?UINT32(Len1), Lang:Len1/binary >>) ->
+ #ssh_msg_channel_open_failure{
+ recipient_channel = Recipient,
+ reason = Reason,
+ description = unicode:characters_to_list(Desc),
+ lang = Lang
+ };
+decode(<<?BYTE(?SSH_MSG_CHANNEL_WINDOW_ADJUST), ?UINT32(Recipient), ?UINT32(Bytes)>>) ->
+ #ssh_msg_channel_window_adjust{
+ recipient_channel = Recipient,
+ bytes_to_add = Bytes
+ };
+
+decode(<<?BYTE(?SSH_MSG_CHANNEL_DATA), ?UINT32(Recipient), ?UINT32(Len), Data:Len/binary>>) ->
+ #ssh_msg_channel_data{
+ recipient_channel = Recipient,
+ data = Data
+ };
+decode(<<?BYTE(?SSH_MSG_CHANNEL_EXTENDED_DATA), ?UINT32(Recipient),
+ ?UINT32(DataType), Data/binary>>) ->
+ #ssh_msg_channel_extended_data{
+ recipient_channel = Recipient,
+ data_type_code = DataType,
+ data = Data
+ };
+decode(<<?BYTE(?SSH_MSG_CHANNEL_EOF), ?UINT32(Recipient)>>) ->
+ #ssh_msg_channel_eof{
+ recipient_channel = Recipient
+ };
+decode(<<?BYTE(?SSH_MSG_CHANNEL_CLOSE), ?UINT32(Recipient)>>) ->
+ #ssh_msg_channel_close{
+ recipient_channel = Recipient
+ };
+decode(<<?BYTE(?SSH_MSG_CHANNEL_REQUEST), ?UINT32(Recipient),
+ ?UINT32(Len), RequestType:Len/binary,
+ ?BYTE(Bool), Data/binary>>) ->
+ #ssh_msg_channel_request{
+ recipient_channel = Recipient,
+ request_type = unicode:characters_to_list(RequestType),
+ want_reply = erl_boolean(Bool),
+ data = Data
+ };
+decode(<<?BYTE(?SSH_MSG_CHANNEL_SUCCESS), ?UINT32(Recipient)>>) ->
+ #ssh_msg_channel_success{
+ recipient_channel = Recipient
+ };
+decode(<<?BYTE(?SSH_MSG_CHANNEL_FAILURE), ?UINT32(Recipient)>>) ->
+ #ssh_msg_channel_failure{
+ recipient_channel = Recipient
+ };
+
+%%% Auth Messages
+decode(<<?BYTE(?SSH_MSG_USERAUTH_REQUEST),
+ ?UINT32(Len0), User:Len0/binary,
+ ?UINT32(Len1), Service:Len1/binary,
+ ?UINT32(Len2), Method:Len2/binary,
+ Data/binary>>) ->
+ #ssh_msg_userauth_request{
+ user = unicode:characters_to_list(User),
+ service = unicode:characters_to_list(Service),
+ method = unicode:characters_to_list(Method),
+ data = Data
+ };
+
+decode(<<?BYTE(?SSH_MSG_USERAUTH_FAILURE),
+ ?UINT32(Len0), Auths:Len0/binary,
+ ?BYTE(Bool)>>) ->
+ #ssh_msg_userauth_failure {
+ authentications = unicode:characters_to_list(Auths),
+ partial_success = erl_boolean(Bool)
+ };
+
+decode(<<?BYTE(?SSH_MSG_USERAUTH_SUCCESS)>>) ->
+ #ssh_msg_userauth_success{};
+
+decode(<<?BYTE(?SSH_MSG_USERAUTH_BANNER),
+ ?UINT32(Len0), Banner:Len0/binary,
+ ?UINT32(Len1), Lang:Len1/binary>>) ->
+ #ssh_msg_userauth_banner{
+ message = Banner,
+ language = Lang
+ };
+
+decode(<<?BYTE(?SSH_MSG_USERAUTH_PK_OK), ?UINT32(Len), Alg:Len/binary, KeyBlob/binary>>) ->
+ #ssh_msg_userauth_pk_ok{
+ algorithm_name = Alg,
+ key_blob = KeyBlob
+ };
+
+decode(<<?BYTE(?SSH_MSG_USERAUTH_PASSWD_CHANGEREQ), ?UINT32(Len0), Prompt:Len0/binary,
+ ?UINT32(Len1), Lang:Len1/binary>>) ->
+ #ssh_msg_userauth_passwd_changereq{
+ prompt = Prompt,
+ languge = Lang
+ };
+decode(<<?BYTE(?SSH_MSG_USERAUTH_INFO_REQUEST), ?UINT32(Len0), Name:Len0/binary,
+ ?UINT32(Len1), Inst:Len1/binary, ?UINT32(Len2), Lang:Len2/binary,
+ ?UINT32(NumPromtps), Data/binary>>) ->
+ #ssh_msg_userauth_info_request{
+ name = Name,
+ instruction = Inst,
+ language_tag = Lang,
+ num_prompts = NumPromtps,
+ data = Data};
+
+decode(<<?BYTE(?SSH_MSG_USERAUTH_INFO_RESPONSE), ?UINT32(Num), Data/binary>>) ->
+ #ssh_msg_userauth_info_response{
+ num_responses = Num,
+ data = Data};
+
+%%% Keyexchange messages
+decode(<<?BYTE(?SSH_MSG_KEXINIT), Cookie:128, Data/binary>>) ->
+ decode_kex_init(Data, [Cookie, ssh_msg_kexinit], 10);
+
+decode(<<?BYTE(?SSH_MSG_KEXDH_INIT), ?UINT32(Len), E:Len/binary>>) ->
+ #ssh_msg_kexdh_init{e = erlint(Len, E)
+ };
+decode(<<?BYTE(?SSH_MSG_KEX_DH_GEX_REQUEST), ?UINT32(Min), ?UINT32(N), ?UINT32(Max)>>) ->
+ #ssh_msg_kex_dh_gex_request{
+ min = Min,
+ n = N,
+ max = Max
+ };
+decode(<<?BYTE(?SSH_MSG_KEX_DH_GEX_REQUEST_OLD), ?UINT32(N)>>) ->
+ #ssh_msg_kex_dh_gex_request_old{
+ n = N
+ };
+decode(<<?BYTE(?SSH_MSG_KEX_DH_GEX_GROUP), ?UINT32(Len0), Prime:Len0/big-signed-integer,
+ ?UINT32(Len1), Generator:Len1/big-signed-integer>>) ->
+ #ssh_msg_kex_dh_gex_group{
+ p = Prime,
+ g = Generator
+ };
+decode(<<?BYTE(?SSH_MSG_KEXDH_REPLY), ?UINT32(Len0), Key:Len0/binary,
+ ?UINT32(Len1), F:Len1/binary,
+ ?UINT32(Len2), Hashsign:Len2/binary>>) ->
+ #ssh_msg_kexdh_reply{
+ public_host_key = decode_host_key(Key),
+ f = erlint(Len1, F),
+ h_sig = decode_sign(Hashsign)
+ };
+
+decode(<<?SSH_MSG_SERVICE_REQUEST, ?UINT32(Len0), Service:Len0/binary>>) ->
+ #ssh_msg_service_request{
+ name = unicode:characters_to_list(Service)
+ };
+
+decode(<<?SSH_MSG_SERVICE_ACCEPT, ?UINT32(Len0), Service:Len0/binary>>) ->
+ #ssh_msg_service_accept{
+ name = unicode:characters_to_list(Service)
+ };
+
+decode(<<?BYTE(?SSH_MSG_DISCONNECT), ?UINT32(Code),
+ ?UINT32(Len0), Desc:Len0/binary, ?UINT32(Len1), Lang:Len1/binary>>) ->
+ #ssh_msg_disconnect{
+ code = Code,
+ description = unicode:characters_to_list(Desc),
+ language = Lang
+ };
+
+decode(<<?SSH_MSG_NEWKEYS>>) ->
+ #ssh_msg_newkeys{};
+
+decode(<<?BYTE(?SSH_MSG_IGNORE), Data/binary>>) ->
+ #ssh_msg_ignore{data = Data};
+
+decode(<<?BYTE(?SSH_MSG_UNIMPLEMENTED), ?UINT32(Seq)>>) ->
+ #ssh_msg_unimplemented{sequence = Seq};
+
+decode(<<?BYTE(?SSH_MSG_DEBUG), ?BYTE(Bool), ?UINT32(Len0), Msg:Len0/binary,
+ ?UINT32(Len1), Lang:Len1/binary>>) ->
+ #ssh_msg_debug{always_display = erl_boolean(Bool),
+ message = Msg,
+ language = Lang}.
+
+decode_keyboard_interactive_prompts(<<>>, Acc) ->
+ lists:reverse(Acc);
+decode_keyboard_interactive_prompts(<<?UINT32(Len), Prompt:Len/binary, ?BYTE(Bool), Bin/binary>>,
+ Acc) ->
+ decode_keyboard_interactive_prompts(Bin, [{Prompt, erl_boolean(Bool)} | Acc]).
+
+erl_boolean(0) ->
+ false;
+erl_boolean(1) ->
+ true.
+
+decode_kex_init(<<?BYTE(Bool), ?UINT32(X)>>, Acc, 0) ->
+ list_to_tuple(lists:reverse([X, erl_boolean(Bool) | Acc]));
+decode_kex_init(<<?UINT32(Len), Data:Len/binary, Rest/binary>>, Acc, N) ->
+ Names = string:tokens(unicode:characters_to_list(Data), ","),
+ decode_kex_init(Rest, [Names | Acc], N -1).
+
+erlint(MPIntSize, MPIntValue) ->
+ Bits = MPIntSize * 8,
+ <<Integer:Bits/integer>> = MPIntValue,
+ Integer.
+
+decode_sign(<<?UINT32(Len), _Alg:Len/binary, ?UINT32(_), Signature/binary>>) ->
+ Signature.
+
+decode_host_key(<<?UINT32(Len), Alg:Len/binary, Rest/binary>>) ->
+ decode_host_key(Alg, Rest).
+
+decode_host_key(<<"ssh-rsa">>, <<?UINT32(Len0), E:Len0/binary,
+ ?UINT32(Len1), N:Len1/binary>>) ->
+ #'RSAPublicKey'{publicExponent = erlint(Len0, E),
+ modulus = erlint(Len1, N)};
+
+decode_host_key(<<"ssh-dss">>,
+ <<?UINT32(Len0), P:Len0/binary,
+ ?UINT32(Len1), Q:Len1/binary,
+ ?UINT32(Len2), G:Len2/binary,
+ ?UINT32(Len3), Y:Len3/binary>>) ->
+ {erlint(Len3, Y), #'Dss-Parms'{p = erlint(Len0, P), q = erlint(Len1, Q),
+ g = erlint(Len2, G)}}.
+
+encode_host_key(#'RSAPublicKey'{modulus = N, publicExponent = E}) ->
+ ssh_bits:encode(["ssh-rsa", E, N], [string, mpint, mpint]);
+encode_host_key({Y, #'Dss-Parms'{p = P, q = Q, g = G}}) ->
+ ssh_bits:encode(["ssh-dss", P, Q, G, Y],
+ [string, mpint, mpint, mpint, mpint]);
+encode_host_key(#'RSAPrivateKey'{modulus = N, publicExponent = E}) ->
+ ssh_bits:encode(["ssh-rsa", E, N], [string, mpint, mpint]);
+encode_host_key(#'DSAPrivateKey'{y = Y, p = P, q = Q, g = G}) ->
+ ssh_bits:encode(["ssh-dss", P, Q, G, Y],
+ [string, mpint, mpint, mpint, mpint]).
+encode_sign(#'RSAPrivateKey'{}, Signature) ->
+ ssh_bits:encode(["ssh-rsa", Signature],[string, binary]);
+encode_sign(#'DSAPrivateKey'{}, Signature) ->
+ ssh_bits:encode(["ssh-dss", Signature],[string, binary]).
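
The new ssh_message module pairs encode/1 and decode/1 clauses over the records in ssh_connect.hrl, ssh_auth.hrl and ssh_transport.hrl. For simple messages such as channel close, encode/1 builds the same two-field binary that decode/1 pattern-matches, so a round trip should return the original record. The check below is only a sketch, assuming it runs in a module that includes ssh_connect.hrl.

%% Sketch of a round-trip check (requires -include("ssh_connect.hrl") for the
%% record definition; channel close is handled by the clauses above).
roundtrip_channel_close(ChannelId) ->
    Msg = #ssh_msg_channel_close{recipient_channel = ChannelId},
    Bin = ssh_message:encode(Msg),
    %% The match crashes if decoding does not give back the original record.
    Msg = ssh_message:decode(Bin),
    ok.
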
diff --git a/lib/ssh/src/ssh_no_io.erl b/lib/ssh/src/ssh_no_io.erl
index 2c8dd92ee2..825a0d4af5 100644
--- a/lib/ssh/src/ssh_no_io.erl
+++ b/lib/ssh/src/ssh_no_io.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2010. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -22,18 +22,31 @@
%%% Description: ssh_io replacement that throws on everything
-module(ssh_no_io).
-
--export([yes_no/1, read_password/1, read_line/1, format/2]).
-
-yes_no(_Prompt) ->
- throw({no_io_allowed, yes_no}).
-
-read_password(_Prompt) ->
- throw({no_io_allowed, read_password}).
-
-read_line(_Prompt) ->
- throw({no_io_allowed, read_line}).
-
-format(_Fmt, _Args) ->
- throw({no_io_allowed, format}).
+-include("ssh_transport.hrl").
+
+-export([yes_no/2, read_password/2, read_line/2, format/2]).
+
+yes_no(_, _) ->
+ throw({{no_io_allowed, yes_no},
+ #ssh_msg_disconnect{code = ?SSH_DISCONNECT_SERVICE_NOT_AVAILABLE,
+ description = "User interaction is not allowed",
+ language = "en"}}).
+
+read_password(_, _) ->
+ throw({{no_io_allowed, read_password},
+ #ssh_msg_disconnect{code = ?SSH_DISCONNECT_SERVICE_NOT_AVAILABLE,
+ description = "User interaction is not allowed",
+ language = "en"}}).
+
+read_line(_, _) ->
+ throw({{no_io_allowed, read_line},
+ #ssh_msg_disconnect{code = ?SSH_DISCONNECT_SERVICE_NOT_AVAILABLE,
+ description = "User interaction is not allowed",
+ language = "en"}} ).
+
+format(_, _) ->
+ throw({{no_io_allowed, format},
+ #ssh_msg_disconnect{code = ?SSH_DISCONNECT_SERVICE_NOT_AVAILABLE,
+ description = "User interaction is not allowed",
+ language = "en"}}).
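
After this change each ssh_no_io function throws a two-element tuple: the old {no_io_allowed, Op} tag plus a ready-made #ssh_msg_disconnect{} record. How callers consume it is not shown in this diff; the sketch below only illustrates the shape of the thrown term, with the wrapper name and catch site assumed.

%% Hypothetical catch site -- assumes the ssh_transport.hrl record definition
%% is included; shows only the shape of the thrown term.
ask_yes_no(Prompt, Ssh) ->
    try
        ssh_no_io:yes_no(Prompt, Ssh)
    catch
        throw:{{no_io_allowed, yes_no}, #ssh_msg_disconnect{} = Disconnect} ->
            %% The caller can forward the prepared disconnect message.
            {disconnect, Disconnect}
    end.
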
diff --git a/lib/ssh/src/ssh_sftpd.erl b/lib/ssh/src/ssh_sftpd.erl
index 3d469d3c6e..174ca0126b 100644
--- a/lib/ssh/src/ssh_sftpd.erl
+++ b/lib/ssh/src/ssh_sftpd.erl
@@ -76,7 +76,7 @@ listen(Addr, Port, Options) ->
%% Description: Stops the listener
%%--------------------------------------------------------------------
stop(Pid) ->
- ssh_cli:stop(Pid).
+ ssh:stop_listener(Pid).
%%% DEPRECATED END %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
diff --git a/lib/ssh/src/ssh_subsystem_sup.erl b/lib/ssh/src/ssh_subsystem_sup.erl
index cd6defd535..e8855b09ac 100644
--- a/lib/ssh/src/ssh_subsystem_sup.erl
+++ b/lib/ssh/src/ssh_subsystem_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -25,7 +25,9 @@
-behaviour(supervisor).
--export([start_link/1, connection_supervisor/1, channel_supervisor/1
+-export([start_link/1,
+ connection_supervisor/1,
+ channel_supervisor/1
]).
%% Supervisor callback
@@ -61,9 +63,9 @@ init([Opts]) ->
child_specs(Opts) ->
case proplists:get_value(role, Opts) of
client ->
- [ssh_connectinon_child_spec(Opts)];
+ [];
server ->
- [ssh_connectinon_child_spec(Opts), ssh_channel_child_spec(Opts)]
+ [ssh_channel_child_spec(Opts), ssh_connectinon_child_spec(Opts)]
end.
ssh_connectinon_child_spec(Opts) ->
@@ -72,9 +74,9 @@ ssh_connectinon_child_spec(Opts) ->
Role = proplists:get_value(role, Opts),
Name = id(Role, ssh_connection_sup, Address, Port),
StartFunc = {ssh_connection_sup, start_link, [Opts]},
- Restart = transient,
+ Restart = temporary,
Shutdown = 5000,
- Modules = [ssh_connection_sup],
+ Modules = [ssh_connection_sup],
Type = supervisor,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
@@ -84,7 +86,7 @@ ssh_channel_child_spec(Opts) ->
Role = proplists:get_value(role, Opts),
Name = id(Role, ssh_channel_sup, Address, Port),
StartFunc = {ssh_channel_sup, start_link, [Opts]},
- Restart = transient,
+ Restart = temporary,
Shutdown = infinity,
Modules = [ssh_channel_sup],
Type = supervisor,
diff --git a/lib/ssh/src/ssh_sup.erl b/lib/ssh/src/ssh_sup.erl
index f307d1f833..6d2b9c107d 100644
--- a/lib/ssh/src/ssh_sup.erl
+++ b/lib/ssh/src/ssh_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -51,8 +51,7 @@ children() ->
Clients = [Service || Service <- Services, is_client(Service)],
Servers = [Service || Service <- Services, is_server(Service)],
- [server_child_spec(Servers), client_child_spec(Clients),
- ssh_userauth_reg_spec()].
+ [server_child_spec(Servers), client_child_spec(Clients)].
server_child_spec(Servers) ->
Name = sshd_sup,
@@ -72,16 +71,6 @@ client_child_spec(Clients) ->
Type = supervisor,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
-ssh_userauth_reg_spec() ->
- Name = ssh_userreg,
- StartFunc = {ssh_userreg, start_link, []},
- Restart = transient,
- Shutdown = 5000,
- Modules = [ssh_userreg],
- Type = worker,
- {Name, StartFunc, Restart, Shutdown, Type, Modules}.
-
-
is_server({sftpd, _}) ->
true;
is_server({shelld, _}) ->
diff --git a/lib/ssh/src/ssh_system_sup.erl b/lib/ssh/src/ssh_system_sup.erl
index 36daf3b1ac..848133f838 100644
--- a/lib/ssh/src/ssh_system_sup.erl
+++ b/lib/ssh/src/ssh_system_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -40,7 +40,7 @@
-export([init/1]).
%%%=========================================================================
-%%% API
+%%% Internal API
%%%=========================================================================
start_link(ServerOpts) ->
Address = proplists:get_value(address, ServerOpts),
@@ -54,13 +54,15 @@ stop_listener(SysSup) ->
stop_listener(Address, Port) ->
Name = make_name(Address, Port),
stop_acceptor(whereis(Name)).
-
+
stop_system(SysSup) ->
Name = sshd_sup:system_name(SysSup),
- sshd_sup:stop_child(Name).
-
+ spawn(fun() -> sshd_sup:stop_child(Name) end),
+ ok.
+
stop_system(Address, Port) ->
- sshd_sup:stop_child(Address, Port).
+ spawn(fun() -> sshd_sup:stop_child(Address, Port) end),
+ ok.
system_supervisor(Address, Port) ->
Name = make_name(Address, Port),
@@ -121,7 +123,7 @@ restart_acceptor(Address, Port) ->
%%%=========================================================================
init([ServerOpts]) ->
RestartStrategy = one_for_one,
- MaxR = 10,
+ MaxR = 0,
MaxT = 3600,
Children = child_specs(ServerOpts),
{ok, {{RestartStrategy, MaxR, MaxT}, Children}}.
@@ -137,7 +139,7 @@ ssh_acceptor_child_spec(ServerOpts) ->
Port = proplists:get_value(port, ServerOpts),
Name = id(ssh_acceptor_sup, Address, Port),
StartFunc = {ssh_acceptor_sup, start_link, [ServerOpts]},
- Restart = permanent,
+ Restart = transient,
Shutdown = infinity,
Modules = [ssh_acceptor_sup],
Type = supervisor,
@@ -146,7 +148,7 @@ ssh_acceptor_child_spec(ServerOpts) ->
ssh_subsystem_child_spec(ServerOpts) ->
Name = make_ref(),
StartFunc = {ssh_subsystem_sup, start_link, [ServerOpts]},
- Restart = transient,
+ Restart = temporary,
Shutdown = infinity,
Modules = [ssh_subsystem_sup],
Type = supervisor,
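Several child specs in this patch move from transient to temporary, and ssh_system_sup also drops MaxR to 0. For reference, a sketch of the old-style child spec tuple these supervisors build (values illustrative): temporary children are never restarted by the supervisor, while transient ones are restarted only after an abnormal exit.

%% Illustrative child spec tuple, matching the shape used above.
example_child_spec(Opts) ->
    {make_ref(),                              %% Name
     {ssh_subsystem_sup, start_link, [Opts]}, %% StartFunc (MFA)
     temporary,                               %% Restart policy
     infinity,                                %% Shutdown
     supervisor,                              %% Type
     [ssh_subsystem_sup]}.                    %% Modules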
diff --git a/lib/ssh/src/ssh_transport.erl b/lib/ssh/src/ssh_transport.erl
index 682d766d99..27723dc870 100644
--- a/lib/ssh/src/ssh_transport.erl
+++ b/lib/ssh/src/ssh_transport.erl
@@ -29,12 +29,12 @@
-include("ssh_transport.hrl").
-include("ssh.hrl").
--export([connect/5, accept/4]).
-export([versions/2, hello_version_msg/1]).
-export([next_seqnum/1, decrypt_first_block/2, decrypt_blocks/3,
- is_valid_mac/3, transport_messages/1, kexdh_messages/0,
- kex_dh_gex_messages/0, handle_hello_version/1,
- key_exchange_init_msg/1, key_init/3, new_keys_message/1,
+ is_valid_mac/3,
+ handle_hello_version/1,
+ key_exchange_init_msg/1,
+ key_init/3, new_keys_message/1,
handle_kexinit_msg/3, handle_kexdh_init/2,
handle_kex_dh_gex_group/2, handle_kex_dh_gex_reply/2,
handle_new_keys/2, handle_kex_dh_gex_request/2,
@@ -74,113 +74,9 @@ is_valid_mac(Mac, Data, #ssh{recv_mac = Algorithm,
recv_mac_key = Key, recv_sequence = SeqNum}) ->
Mac == mac(Algorithm, Key, SeqNum, Data).
-transport_messages(_) ->
- [{ssh_msg_disconnect, ?SSH_MSG_DISCONNECT,
- [uint32, string, string]},
-
- {ssh_msg_ignore, ?SSH_MSG_IGNORE,
- [string]},
-
- {ssh_msg_unimplemented, ?SSH_MSG_UNIMPLEMENTED,
- [uint32]},
-
- {ssh_msg_debug, ?SSH_MSG_DEBUG,
- [boolean, string, string]},
-
- {ssh_msg_service_request, ?SSH_MSG_SERVICE_REQUEST,
- [string]},
-
- {ssh_msg_service_accept, ?SSH_MSG_SERVICE_ACCEPT,
- [string]},
-
- {ssh_msg_kexinit, ?SSH_MSG_KEXINIT,
- [cookie,
- name_list, name_list,
- name_list, name_list,
- name_list, name_list,
- name_list, name_list,
- name_list, name_list,
- boolean,
- uint32]},
-
- {ssh_msg_newkeys, ?SSH_MSG_NEWKEYS,
- []}
- ].
-
-kexdh_messages() ->
- [{ssh_msg_kexdh_init, ?SSH_MSG_KEXDH_INIT,
- [mpint]},
-
- {ssh_msg_kexdh_reply, ?SSH_MSG_KEXDH_REPLY,
- [binary, mpint, binary]}
- ].
-
-kex_dh_gex_messages() ->
- [{ssh_msg_kex_dh_gex_request, ?SSH_MSG_KEX_DH_GEX_REQUEST,
- [uint32, uint32, uint32]},
-
- {ssh_msg_kex_dh_gex_request_old, ?SSH_MSG_KEX_DH_GEX_REQUEST_OLD,
- [uint32]},
-
- {ssh_msg_kex_dh_gex_group, ?SSH_MSG_KEX_DH_GEX_GROUP,
- [mpint, mpint]},
-
- {ssh_msg_kex_dh_gex_init, ?SSH_MSG_KEX_DH_GEX_INIT,
- [mpint]},
-
- {ssh_msg_kex_dh_gex_reply, ?SSH_MSG_KEX_DH_GEX_REPLY,
- [binary, mpint, binary]}
- ].
-
yes_no(Ssh, Prompt) ->
(Ssh#ssh.io_cb):yes_no(Prompt, Ssh).
-connect(ConnectionSup, Address, Port, SocketOpts, Opts) ->
- Timeout = proplists:get_value(connect_timeout, Opts, infinity),
- {_, Callback, _} =
- proplists:get_value(transport, Opts, {tcp, gen_tcp, tcp_closed}),
- case do_connect(Callback, Address, Port, SocketOpts, Timeout) of
- {ok, Socket} ->
- {ok, Pid} =
- ssh_connection_sup:start_handler_child(ConnectionSup,
- [client, Socket,
- [{address, Address},
- {port, Port} |
- Opts]]),
- Callback:controlling_process(Socket, Pid),
- ssh_connection_handler:send_event(Pid, socket_control),
- {ok, Pid};
- {error, Reason} ->
- {error, Reason}
- end.
-
-do_connect(Callback, Address, Port, SocketOpts, Timeout) ->
- Opts = [{active, false} | SocketOpts],
- case Callback:connect(Address, Port, Opts, Timeout) of
- {error, nxdomain} ->
- Callback:connect(Address, Port, lists:delete(inet6, Opts), Timeout);
- {error, eafnosupport} ->
- Callback:connect(Address, Port, lists:delete(inet6, Opts), Timeout);
- {error, enetunreach} ->
- Callback:connect(Address, Port, lists:delete(inet6, Opts), Timeout);
- Other ->
- Other
- end.
-
-accept(Address, Port, Socket, Options) ->
- {_, Callback, _} =
- proplists:get_value(transport, Options, {tcp, gen_tcp, tcp_closed}),
- ConnectionSup =
- ssh_system_sup:connection_supervisor(
- ssh_system_sup:system_supervisor(Address, Port)),
- {ok, Pid} =
- ssh_connection_sup:start_handler_child(ConnectionSup,
- [server, Socket,
- [{address, Address},
- {port, Port} | Options]]),
- Callback:controlling_process(Socket, Pid),
- {ok, Pid}.
-
format_version({Major,Minor}) ->
"SSH-" ++ integer_to_list(Major) ++ "." ++
integer_to_list(Minor) ++ "-Erlang".
@@ -257,7 +153,6 @@ handle_kexinit_msg(#ssh_msg_kexinit{} = CounterPart, #ssh_msg_kexinit{} = Own,
{ok, Algoritms} = select_algorithm(client, Own, CounterPart),
case verify_algorithm(Algoritms) of
true ->
- install_messages(Algoritms#alg.kex),
key_exchange_first_msg(Algoritms#alg.kex,
Ssh0#ssh{algorithms = Algoritms});
_ ->
@@ -271,7 +166,6 @@ handle_kexinit_msg(#ssh_msg_kexinit{} = CounterPart, #ssh_msg_kexinit{} = Own,
handle_kexinit_msg(#ssh_msg_kexinit{} = CounterPart, #ssh_msg_kexinit{} = Own,
#ssh{role = server} = Ssh) ->
{ok, Algoritms} = select_algorithm(server, CounterPart, Own),
- install_messages(Algoritms#alg.kex),
{ok, Ssh#ssh{algorithms = Algoritms}}.
@@ -284,11 +178,6 @@ verify_algorithm(#alg{kex = 'diffie-hellman-group-exchange-sha1'}) ->
verify_algorithm(_) ->
false.
-install_messages('diffie-hellman-group1-sha1') ->
- ssh_bits:install_messages(kexdh_messages());
-install_messages('diffie-hellman-group-exchange-sha1') ->
- ssh_bits:install_messages(kex_dh_gex_messages()).
-
key_exchange_first_msg('diffie-hellman-group1-sha1', Ssh0) ->
{G, P} = dh_group1(),
{Private, Public} = dh_gen_key(G, P, 1024),
@@ -312,10 +201,10 @@ handle_kexdh_init(#ssh_msg_kexdh_init{e = E}, Ssh0) ->
{G, P} = dh_group1(),
{Private, Public} = dh_gen_key(G, P, 1024),
K = ssh_math:ipow(E, Private, P),
- {Key, K_S} = get_host_key(Ssh0),
- H = kex_h(Ssh0, K_S, E, Public, K),
+ Key = get_host_key(Ssh0),
+ H = kex_h(Ssh0, Key, E, Public, K),
H_SIG = sign_host_key(Ssh0, Key, H),
- {SshPacket, Ssh1} = ssh_packet(#ssh_msg_kexdh_reply{public_host_key = K_S,
+ {SshPacket, Ssh1} = ssh_packet(#ssh_msg_kexdh_reply{public_host_key = Key,
f = Public,
h_sig = H_SIG
}, Ssh0),
@@ -411,65 +300,33 @@ get_host_key(SSH) ->
#ssh{key_cb = Mod, opts = Opts, algorithms = ALG} = SSH,
case Mod:host_key(ALG#alg.hkey, Opts) of
- {ok, #'RSAPrivateKey'{modulus = N, publicExponent = E} = Key} ->
- {Key,
- ssh_bits:encode(["ssh-rsa",E,N],[string,mpint,mpint])};
- {ok, #'DSAPrivateKey'{y = Y, p = P, q = Q, g = G} = Key} ->
- {Key, ssh_bits:encode(["ssh-dss",P,Q,G,Y],
- [string,mpint,mpint,mpint,mpint])};
+ {ok, #'RSAPrivateKey'{} = Key} ->
+ Key;
+ {ok, #'DSAPrivateKey'{} = Key} ->
+ Key;
Result ->
exit({error, {Result, unsupported_key_type}})
end.
sign_host_key(_Ssh, #'RSAPrivateKey'{} = Private, H) ->
Hash = sha, %% Option ?!
- Signature = sign(H, Hash, Private),
- ssh_bits:encode(["ssh-rsa", Signature],[string, binary]);
+ _Signature = sign(H, Hash, Private);
sign_host_key(_Ssh, #'DSAPrivateKey'{} = Private, H) ->
Hash = sha, %% Option ?!
- RawSignature = sign(H, Hash, Private),
- ssh_bits:encode(["ssh-dss", RawSignature],[string, binary]).
+ _RawSignature = sign(H, Hash, Private).
-verify_host_key(SSH, K_S, H, H_SIG) ->
- ALG = SSH#ssh.algorithms,
- case ALG#alg.hkey of
- 'ssh-rsa' ->
- verify_host_key_rsa(SSH, K_S, H, H_SIG);
- 'ssh-dss' ->
- verify_host_key_dss(SSH, K_S, H, H_SIG);
- _ ->
- {error, bad_host_key_algorithm}
- end.
-
-verify_host_key_rsa(SSH, K_S, H, H_SIG) ->
- case ssh_bits:decode(K_S,[string,mpint,mpint]) of
- ["ssh-rsa", E, N] ->
- ["ssh-rsa",SIG] = ssh_bits:decode(H_SIG,[string,binary]),
- Public = #'RSAPublicKey'{publicExponent = E, modulus = N},
- case verify(H, sha, SIG, Public) of
- false ->
- {error, bad_signature};
- true ->
- known_host_key(SSH, Public, 'ssh-rsa')
- end;
- _ ->
- {error, bad_format}
+verify_host_key(SSH, PublicKey, Digest, Signature) ->
+ case verify(Digest, sha, Signature, PublicKey) of
+ false ->
+ {error, bad_signature};
+ true ->
+ known_host_key(SSH, PublicKey, public_algo(PublicKey))
end.
-verify_host_key_dss(SSH, K_S, H, H_SIG) ->
- case ssh_bits:decode(K_S,[string,mpint,mpint,mpint,mpint]) of
- ["ssh-dss",P,Q,G,Y] ->
- ["ssh-dss",SIG] = ssh_bits:decode(H_SIG,[string,binary]),
- Public = {Y, #'Dss-Parms'{p = P, q = Q, g = G}},
- case verify(H, sha, SIG, Public) of
- false ->
- {error, bad_signature};
- true ->
- known_host_key(SSH, Public, 'ssh-dss')
- end;
- _ ->
- {error, bad_host_key_format}
- end.
+public_algo(#'RSAPublicKey'{}) ->
+ 'ssh-rsa';
+public_algo({_, #'Dss-Parms'{}}) ->
+ 'ssh-dss'.
accepted_host(Ssh, PeerName, Opts) ->
case proplists:get_value(silently_accept_hosts, Opts, false) of
@@ -636,12 +493,12 @@ select(CL, SL) ->
C.
ssh_packet(#ssh_msg_kexinit{} = Msg, Ssh0) ->
- BinMsg = ssh_bits:encode(Msg),
+ BinMsg = ssh_message:encode(Msg),
Ssh = key_init(Ssh0#ssh.role, Ssh0, BinMsg),
pack(BinMsg, Ssh);
ssh_packet(Msg, Ssh) ->
- BinMsg = ssh_bits:encode(Msg),
+ BinMsg = ssh_message:encode(Msg),
pack(BinMsg, Ssh).
pack(Data0, #ssh{encrypt_block_size = BlockSize,
@@ -1021,23 +878,23 @@ hash(K, H, Ki, N, HASH) ->
Kj = HASH([K, H, Ki]),
hash(K, H, <<Ki/binary, Kj/binary>>, N-128, HASH).
-kex_h(SSH, K_S, E, F, K) ->
+kex_h(SSH, Key, E, F, K) ->
L = ssh_bits:encode([SSH#ssh.c_version, SSH#ssh.s_version,
SSH#ssh.c_keyinit, SSH#ssh.s_keyinit,
- K_S, E,F,K],
+ ssh_message:encode_host_key(Key), E,F,K],
[string,string,binary,binary,binary,
mpint,mpint,mpint]),
crypto:hash(sha,L).
-kex_h(SSH, K_S, Min, NBits, Max, Prime, Gen, E, F, K) ->
+kex_h(SSH, Key, Min, NBits, Max, Prime, Gen, E, F, K) ->
L = if Min==-1; Max==-1 ->
Ts = [string,string,binary,binary,binary,
uint32,
mpint,mpint,mpint,mpint,mpint],
ssh_bits:encode([SSH#ssh.c_version,SSH#ssh.s_version,
SSH#ssh.c_keyinit,SSH#ssh.s_keyinit,
- K_S, NBits, Prime, Gen, E,F,K],
+ ssh_message:encode_host_key(Key), NBits, Prime, Gen, E,F,K],
Ts);
true ->
Ts = [string,string,binary,binary,binary,
@@ -1045,7 +902,7 @@ kex_h(SSH, K_S, Min, NBits, Max, Prime, Gen, E, F, K) ->
mpint,mpint,mpint,mpint,mpint],
ssh_bits:encode([SSH#ssh.c_version,SSH#ssh.s_version,
SSH#ssh.c_keyinit,SSH#ssh.s_keyinit,
- K_S, Min, NBits, Max,
+ ssh_message:encode_host_key(Key), Min, NBits, Max,
Prime, Gen, E,F,K], Ts)
end,
crypto:hash(sha,L).
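verify_host_key/4 now receives the already decoded public key instead of re-parsing the K_S blob per algorithm. A hedged sketch of the signature check using the documented public_key API (the module's internal verify/4 helper may differ in detail):

%% Message is the data that was signed (the exchange hash in SSH);
%% PublicKey is an #'RSAPublicKey'{} or {Y, #'Dss-Parms'{}} term.
verify_sig(Message, Signature, PublicKey) ->
    case public_key:verify(Message, sha, Signature, PublicKey) of
        true  -> ok;
        false -> {error, bad_signature}
    end.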
diff --git a/lib/ssh/src/ssh_userreg.erl b/lib/ssh/src/ssh_userreg.erl
deleted file mode 100644
index f901461aea..0000000000
--- a/lib/ssh/src/ssh_userreg.erl
+++ /dev/null
@@ -1,141 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2008-2011. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
-
-%%
-%% Description: User register for ssh_cli
-
--module(ssh_userreg).
-
--behaviour(gen_server).
-
-%% API
--export([start_link/0,
- register_user/2,
- lookup_user/1,
- delete_user/1]).
-
-%% gen_server callbacks
--export([init/1,
- handle_call/3,
- handle_cast/2,
- handle_info/2,
- terminate/2,
- code_change/3]).
-
--record(state, {user_db = []}).
-
-%%====================================================================
-%% API
-%%====================================================================
-%%--------------------------------------------------------------------
-%% Function: start_link() -> {ok,Pid} | ignore | {error,Error}
-%% Description: Starts the server
-%%--------------------------------------------------------------------
-start_link() ->
- gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
-
-register_user(User, Cm) ->
- gen_server:cast(?MODULE, {register, {User, Cm}}).
-
-delete_user(Cm) ->
- gen_server:cast(?MODULE, {delete, Cm}).
-
-lookup_user(Cm) ->
- gen_server:call(?MODULE, {get_user, Cm}, infinity).
-
-%%====================================================================
-%% gen_server callbacks
-%%====================================================================
-
-%%--------------------------------------------------------------------
-%% Function: init(Args) -> {ok, State} |
-%% {ok, State, Timeout} |
-%% ignore |
-%% {stop, Reason}
-%% Description: Initiates the server
-%%--------------------------------------------------------------------
-init([]) ->
- {ok, #state{}}.
-
-%%--------------------------------------------------------------------
-%% Function: %% handle_call(Request, From, State) -> {reply, Reply, State} |
-%% {reply, Reply, State, Timeout} |
-%% {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, Reply, State} |
-%% {stop, Reason, State}
-%% Description: Handling call messages
-%%--------------------------------------------------------------------
-handle_call({get_user, Cm}, _From, #state{user_db = Db} = State) ->
- User = lookup(Cm, Db),
- {reply, {ok, User}, State}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_cast(Msg, State) -> {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, State}
-%% Description: Handling cast messages
-%%--------------------------------------------------------------------
-handle_cast({register, UserCm}, State) ->
- {noreply, insert(UserCm, State)};
-handle_cast({delete, UserCm}, State) ->
- {noreply, delete(UserCm, State)}.
-
-%%--------------------------------------------------------------------
-%% Function: handle_info(Info, State) -> {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, State}
-%% Description: Handling all non call/cast messages
-%%--------------------------------------------------------------------
-handle_info(_Info, State) ->
- {noreply, State}.
-
-%%--------------------------------------------------------------------
-%% Function: terminate(Reason, State) -> void()
-%% Description: This function is called by a gen_server when it is about to
-%% terminate. It should be the opposite of Module:init/1 and do any necessary
-%% cleaning up. When it returns, the gen_server terminates with Reason.
-%% The return value is ignored.
-%%--------------------------------------------------------------------
-terminate(_Reason, _State) ->
- ok.
-
-%%--------------------------------------------------------------------
-%% Func: code_change(OldVsn, State, Extra) -> {ok, NewState}
-%% Description: Convert process state when code is changed
-%%--------------------------------------------------------------------
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%--------------------------------------------------------------------
-%%% Internal functions
-%%--------------------------------------------------------------------
-insert({User, Cm}, #state{user_db = Db} = State) ->
- State#state{user_db = [{User, Cm} | Db]}.
-
-delete(Cm, #state{user_db = Db} = State) ->
- State#state{user_db = lists:keydelete(Cm, 2, Db)}.
-
-lookup(_, []) ->
- undefined;
-lookup(Cm, [{User, Cm} | _Rest]) ->
- User;
-lookup(Cm, [_ | Rest]) ->
- lookup(Cm, Rest).
-
diff --git a/lib/ssh/src/ssh_xfer.erl b/lib/ssh/src/ssh_xfer.erl
index b299868d41..e18e18a9a9 100644
--- a/lib/ssh/src/ssh_xfer.erl
+++ b/lib/ssh/src/ssh_xfer.erl
@@ -267,7 +267,7 @@ xf_request(XF, Op, Arg) ->
list_to_binary(Arg)
end,
Size = 1+size(Data),
- ssh_connection:send(CM, Channel, <<?UINT32(Size), Op, Data/binary>>).
+ ssh_connection:send(CM, Channel, [<<?UINT32(Size), Op, Data/binary>>]).
xf_send_reply(#ssh_xfer{cm = CM, channel = Channel}, Op, Arg) ->
Data = if
@@ -277,7 +277,7 @@ xf_send_reply(#ssh_xfer{cm = CM, channel = Channel}, Op, Arg) ->
list_to_binary(Arg)
end,
Size = 1 + size(Data),
- ssh_connection:send(CM, Channel, <<?UINT32(Size), Op, Data/binary>>).
+ ssh_connection:send(CM, Channel, [<<?UINT32(Size), Op, Data/binary>>]).
xf_send_name(XF, ReqId, Name, Attr) ->
xf_send_names(XF, ReqId, [{Name, Attr}]).
diff --git a/lib/ssh/src/sshc_sup.erl b/lib/ssh/src/sshc_sup.erl
index 1d2779de23..e6b4b681a4 100644
--- a/lib/ssh/src/sshc_sup.erl
+++ b/lib/ssh/src/sshc_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2012. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -61,9 +61,9 @@ init(Args) ->
%%%=========================================================================
child_spec(_) ->
Name = undefined, % As simple_one_for_one is used.
- StartFunc = {ssh_connection_sup, start_link, []},
+ StartFunc = {ssh_connection_handler, start_link, []},
Restart = temporary,
Shutdown = infinity,
- Modules = [ssh_connection_sup],
+ Modules = [ssh_connection_handler],
Type = supervisor,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
diff --git a/lib/ssh/src/sshd_sup.erl b/lib/ssh/src/sshd_sup.erl
index 747906b2cf..60222f5172 100644
--- a/lib/ssh/src/sshd_sup.erl
+++ b/lib/ssh/src/sshd_sup.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2010. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -58,12 +58,7 @@ start_child(ServerOpts) ->
end.
stop_child(Name) ->
- case supervisor:terminate_child(?MODULE, Name) of
- ok ->
- supervisor:delete_child(?MODULE, Name);
- Error ->
- Error
- end.
+ supervisor:terminate_child(?MODULE, Name).
stop_child(Address, Port) ->
Name = id(Address, Port),
@@ -94,7 +89,7 @@ init([Servers]) ->
child_spec(Address, Port, ServerOpts) ->
Name = id(Address, Port),
StartFunc = {ssh_system_sup, start_link, [ServerOpts]},
- Restart = transient,
+ Restart = temporary,
Shutdown = infinity,
Modules = [ssh_system_sup],
Type = supervisor,
diff --git a/lib/ssh/test/Makefile b/lib/ssh/test/Makefile
index f5db31baee..740dbd0235 100644
--- a/lib/ssh/test/Makefile
+++ b/lib/ssh/test/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2004-2012. All Rights Reserved.
+# Copyright Ericsson AB 2004-2013. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -38,7 +38,9 @@ MODULES= \
ssh_sftpd_SUITE \
ssh_sftpd_erlclient_SUITE \
ssh_connection_SUITE \
- ssh_echo_server
+ ssh_echo_server \
+ ssh_peername_sockname_server \
+ ssh_test_cli
HRL_FILES_NEEDED_IN_TEST= \
$(ERL_TOP)/lib/ssh/src/ssh.hrl \
diff --git a/lib/ssh/test/ssh_basic_SUITE.erl b/lib/ssh/test/ssh_basic_SUITE.erl
index 0aa60624bf..b4e3871efd 100644
--- a/lib/ssh/test/ssh_basic_SUITE.erl
+++ b/lib/ssh/test/ssh_basic_SUITE.erl
@@ -22,6 +22,7 @@
-module(ssh_basic_SUITE).
-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/inet.hrl").
%% Note: This directive should only be used in test suites.
-compile(export_all).
@@ -45,15 +46,21 @@ all() ->
daemon_already_started,
server_password_option,
server_userpassword_option,
- close].
+ double_close].
groups() ->
- [{dsa_key, [], [send, exec, exec_compressed, shell, known_hosts, idle_time, rekey, openssh_zlib_basic_test]},
- {rsa_key, [], [send, exec, exec_compressed, shell, known_hosts, idle_time, rekey, openssh_zlib_basic_test]},
+ [{dsa_key, [], basic_tests()},
+ {rsa_key, [], basic_tests()},
{dsa_pass_key, [], [pass_phrase]},
{rsa_pass_key, [], [pass_phrase]},
{internal_error, [], [internal_error]}
].
+
+basic_tests() ->
+ [send, close, peername_sockname,
+ exec, exec_compressed, shell, cli, known_hosts,
+ idle_time, rekey, openssh_zlib_basic_test].
+
%%--------------------------------------------------------------------
init_per_suite(Config) ->
case catch crypto:start() of
@@ -252,7 +259,7 @@ idle_time(Config) ->
ssh_connection:close(ConnectionRef, Id),
receive
after 10000 ->
- {error,channel_closed} = ssh_connection:session_channel(ConnectionRef, 1000)
+ {error, closed} = ssh_connection:session_channel(ConnectionRef, 1000)
end,
ssh:stop_daemon(Pid).
%%--------------------------------------------------------------------
@@ -300,6 +307,41 @@ shell(Config) when is_list(Config) ->
end.
%%--------------------------------------------------------------------
+cli() ->
+ [{doc, ""}].
+cli(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ SystemDir = filename:join(?config(priv_dir, Config), system),
+ UserDir = ?config(priv_dir, Config),
+
+ {_Pid, Host, Port} = ssh_test_lib:daemon([{system_dir, SystemDir},{user_dir, UserDir},
+ {password, "morot"},
+ {ssh_cli, {ssh_test_cli, [cli]}},
+ {subsystems, []},
+ {failfun, fun ssh_test_lib:failfun/2}]),
+ ct:sleep(500),
+
+ ConnectionRef = ssh_test_lib:connect(Host, Port, [{silently_accept_hosts, true},
+ {user, "foo"},
+ {password, "morot"},
+ {user_interaction, false},
+ {user_dir, UserDir}]),
+
+ {ok, ChannelId} = ssh_connection:session_channel(ConnectionRef, infinity),
+ ssh_connection:shell(ConnectionRef, ChannelId),
+ ok = ssh_connection:send(ConnectionRef, ChannelId, <<"q">>),
+ receive
+ {ssh_cm, ConnectionRef,
+ {data,0,0, <<"\r\nYou are accessing a dummy, type \"q\" to exit\r\n\n">>}} ->
+ ok = ssh_connection:send(ConnectionRef, ChannelId, <<"q">>)
+ end,
+
+ receive
+ {ssh_cm, ConnectionRef,{closed, ChannelId}} ->
+ ok
+ end.
+
+%%--------------------------------------------------------------------
daemon_already_started() ->
[{doc, "Test that get correct error message if you try to start a daemon",
"on an adress that already runs a daemon see also seq10667"}].
@@ -445,10 +487,11 @@ internal_error(Config) when is_list(Config) ->
{Pid, Host, Port} = ssh_test_lib:daemon([{system_dir, SystemDir},
{user_dir, UserDir},
{failfun, fun ssh_test_lib:failfun/2}]),
- {error,"Internal error"} =
+ {error, Error} =
ssh:connect(Host, Port, [{silently_accept_hosts, true},
{user_dir, UserDir},
{user_interaction, false}]),
+ check_error(Error),
ssh:stop_daemon(Pid).
%%--------------------------------------------------------------------
@@ -473,9 +516,85 @@ send(Config) when is_list(Config) ->
%%--------------------------------------------------------------------
+peername_sockname() ->
+ [{doc, "Test ssh:connection_info([peername, sockname])"}].
+peername_sockname(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ SystemDir = filename:join(?config(priv_dir, Config), system),
+ UserDir = ?config(priv_dir, Config),
+
+ {_Pid, Host, Port} = ssh_test_lib:daemon([{system_dir, SystemDir},
+ {user_dir, UserDir},
+ {subsystems, [{"peername_sockname",
+ {ssh_peername_sockname_server, []}}
+ ]}
+ ]),
+ ConnectionRef =
+ ssh_test_lib:connect(Host, Port, [{silently_accept_hosts, true},
+ {user_dir, UserDir},
+ {user_interaction, false}]),
+ {ok, ChannelId} = ssh_connection:session_channel(ConnectionRef, infinity),
+ success = ssh_connection:subsystem(ConnectionRef, ChannelId, "peername_sockname", infinity),
+ [{peer, {_Name, {HostPeerClient,PortPeerClient} = ClientPeer}}] =
+ ssh:connection_info(ConnectionRef, [peer]),
+ [{sockname, {HostSockClient,PortSockClient} = ClientSock}] =
+ ssh:connection_info(ConnectionRef, [sockname]),
+ ct:pal("Client: ~p ~p", [ClientPeer, ClientSock]),
+ receive
+ {ssh_cm, ConnectionRef, {data, ChannelId, _, Response}} ->
+ {PeerNameSrv,SockNameSrv} = binary_to_term(Response),
+ {HostPeerSrv,PortPeerSrv} = PeerNameSrv,
+ {HostSockSrv,PortSockSrv} = SockNameSrv,
+ ct:pal("Server: ~p ~p", [PeerNameSrv, SockNameSrv]),
+ host_equal(HostPeerSrv, HostSockClient),
+ PortPeerSrv = PortSockClient,
+ host_equal(HostSockSrv, HostPeerClient),
+ PortSockSrv = PortPeerClient,
+ host_equal(HostSockSrv, Host),
+ PortSockSrv = Port
+ after 10000 ->
+ throw(timeout)
+ end.
+
+host_equal(H1, H2) ->
+ not ordsets:is_disjoint(ips(H1), ips(H2)).
+
+ips(IP) when is_tuple(IP) -> ordsets:from_list([IP]);
+ips(Name) when is_list(Name) ->
+ {ok,#hostent{h_addr_list=IPs4}} = inet:gethostbyname(Name,inet),
+ {ok,#hostent{h_addr_list=IPs6}} = inet:gethostbyname(Name,inet6),
+ ordsets:from_list(IPs4++IPs6).
+
+%%--------------------------------------------------------------------
+
close() ->
- [{doc, "Simulate that we try to close an already closed connection"}].
+ [{doc, "Client receives close when server closes"}].
close(Config) when is_list(Config) ->
+ process_flag(trap_exit, true),
+ SystemDir = filename:join(?config(priv_dir, Config), system),
+ UserDir = ?config(priv_dir, Config),
+
+ {Server, Host, Port} = ssh_test_lib:daemon([{system_dir, SystemDir},
+ {user_dir, UserDir},
+ {failfun, fun ssh_test_lib:failfun/2}]),
+ Client =
+ ssh_test_lib:connect(Host, Port, [{silently_accept_hosts, true},
+ {user_dir, UserDir},
+ {user_interaction, false}]),
+ {ok, ChannelId} = ssh_connection:session_channel(Client, infinity),
+
+ ssh:stop_daemon(Server),
+ receive
+ {ssh_cm, Client,{closed, ChannelId}} ->
+ ok
+ after 5000 ->
+ ct:fail(timeout)
+ end.
+
+%%--------------------------------------------------------------------
+double_close() ->
+ [{doc, "Simulate that we try to close an already closed connection"}].
+double_close(Config) when is_list(Config) ->
SystemDir = ?config(data_dir, Config),
PrivDir = ?config(priv_dir, Config),
UserDir = filename:join(PrivDir, nopubkey), % to make sure we don't use public-key-auth
@@ -494,6 +613,8 @@ close(Config) when is_list(Config) ->
exit(CM, {shutdown, normal}),
ok = ssh:close(CM).
+%%--------------------------------------------------------------------
+
openssh_zlib_basic_test() ->
[{doc, "Test basic connection with openssh_zlib"}].
openssh_zlib_basic_test(Config) ->
@@ -515,6 +636,15 @@ openssh_zlib_basic_test(Config) ->
%% Internal functions ------------------------------------------------
%%--------------------------------------------------------------------
+%% Due to timing, the error message may or may not be delivered to
+%% the "tcp-application" before the socket closed message is received
+check_error("Internal error") ->
+ ok;
+check_error("Connection closed") ->
+ ok;
+check_error(Error) ->
+ ct:fail(Error).
+
basic_test(Config) ->
ClientOpts = ?config(client_opts, Config),
ServerOpts = ?config(server_opts, Config),
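The host_equal/2 and ips/1 helpers added above compare a hostname and an address by resolving both sides and checking for overlapping address sets. A standalone sketch of the same technique, IPv4 only for brevity (function names are illustrative):

same_host(H1, H2) ->
    not ordsets:is_disjoint(addrs(H1), addrs(H2)).

addrs(IP) when is_tuple(IP) ->
    ordsets:from_list([IP]);
addrs(Name) when is_list(Name) ->
    {ok, IPs} = inet:getaddrs(Name, inet),
    ordsets:from_list(IPs).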
diff --git a/lib/ssh/test/ssh_connection_SUITE.erl b/lib/ssh/test/ssh_connection_SUITE.erl
index 6c781e0e91..f4f0682b40 100644
--- a/lib/ssh/test/ssh_connection_SUITE.erl
+++ b/lib/ssh/test/ssh_connection_SUITE.erl
@@ -73,6 +73,9 @@ end_per_group(_, Config) ->
%%--------------------------------------------------------------------
init_per_testcase(_TestCase, Config) ->
+ %% To make sure we start clean as it is not certain that
+ %% end_per_testcase will be run!
+ ssh:stop(),
ssh:start(),
Config.
@@ -91,7 +94,6 @@ simple_exec(Config) when is_list(Config) ->
{ok, ChannelId0} = ssh_connection:session_channel(ConnectionRef, infinity),
success = ssh_connection:exec(ConnectionRef, ChannelId0,
"echo testing", infinity),
-
%% receive response to input
receive
{ssh_cm, ConnectionRef, {data, ChannelId0, 0, <<"testing\n">>}} ->
@@ -146,7 +148,6 @@ small_cat(Config) when is_list(Config) ->
{ssh_cm, ConnectionRef,{closed, ChannelId0}} ->
ok
end.
-
%%--------------------------------------------------------------------
big_cat() ->
[{doc,"Use 'cat' to echo large data block back to us."}].
@@ -204,37 +205,33 @@ send_after_exit(Config) when is_list(Config) ->
ConnectionRef = ssh_test_lib:connect(?SSH_DEFAULT_PORT, [{silently_accept_hosts, true},
{user_interaction, false}]),
{ok, ChannelId0} = ssh_connection:session_channel(ConnectionRef, infinity),
+ Data = <<"I like spaghetti squash">>,
%% Shell command "false" will exit immediately
success = ssh_connection:exec(ConnectionRef, ChannelId0,
"false", infinity),
-
- timer:sleep(2000), %% Allow incoming eof/close/exit_status ssh messages to be processed
-
- Data = <<"I like spaghetti squash">>,
- case ssh_connection:send(ConnectionRef, ChannelId0, Data, 2000) of
- {error, closed} -> ok;
- ok ->
- ct:fail({expected,{error,closed}});
- {error, timeout} ->
- ct:fail({expected,{error,closed}});
- Else ->
- ct:fail(Else)
- end,
-
- %% receive close messages
receive
{ssh_cm, ConnectionRef, {eof, ChannelId0}} ->
ok
end,
receive
- {ssh_cm, ConnectionRef, {exit_status, ChannelId0, _}} ->
+ {ssh_cm, ConnectionRef, {exit_status, ChannelId0, _ExitStatus}} ->
ok
end,
receive
{ssh_cm, ConnectionRef,{closed, ChannelId0}} ->
ok
+ end,
+ case ssh_connection:send(ConnectionRef, ChannelId0, Data, 2000) of
+ {error, closed} -> ok;
+ ok ->
+ ct:fail({expected,{error,closed}, {got, ok}});
+ {error, timeout} ->
+ ct:fail({expected,{error,closed}, {got, {error, timeout}}});
+ Else ->
+ ct:fail(Else)
end.
+
%%--------------------------------------------------------------------
interrupted_send() ->
[{doc, "Use a subsystem that echos n char and then sends eof to cause a channel exit partway through a large send."}].
diff --git a/lib/ssh/test/ssh_peername_sockname_server.erl b/lib/ssh/test/ssh_peername_sockname_server.erl
new file mode 100644
index 0000000000..bc505695d3
--- /dev/null
+++ b/lib/ssh/test/ssh_peername_sockname_server.erl
@@ -0,0 +1,54 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2008-2013. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+
+-module(ssh_peername_sockname_server).
+
+%% The purpose of this module is to perform tests on the server side of an
+%% ssh connection.
+
+
+-behaviour(ssh_daemon_channel).
+-record(state, {}).
+
+-export([init/1, handle_msg/2, handle_ssh_msg/2, terminate/2]).
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_msg({ssh_channel_up, ChannelId, ConnectionManager}, State) ->
+ [{peer, {_Name, Peer}}] = ssh:connection_info(ConnectionManager, [peer]),
+ [{sockname, Sock}] = ssh:connection_info(ConnectionManager, [sockname]),
+ ssh_connection:send(ConnectionManager, ChannelId,
+ term_to_binary({Peer, Sock})),
+ {ok, State}.
+
+handle_ssh_msg({ssh_cm, _, {exit_signal, ChannelId, _, _Error, _}},
+ State) ->
+ {stop, ChannelId, State};
+
+handle_ssh_msg({ssh_cm, _, {exit_status, ChannelId, _Status}}, State) ->
+ {stop, ChannelId, State};
+
+handle_ssh_msg({ssh_cm, _CM, _}, State) ->
+ {ok, State}.
+
+terminate(_Reason, _State) ->
+ ok.
diff --git a/lib/ssh/test/ssh_test_cli.erl b/lib/ssh/test/ssh_test_cli.erl
new file mode 100644
index 0000000000..cd9ad5f2ff
--- /dev/null
+++ b/lib/ssh/test/ssh_test_cli.erl
@@ -0,0 +1,81 @@
+-module(ssh_test_cli).
+
+-export([init/1, terminate/2, handle_ssh_msg/2, handle_msg/2]).
+
+-record(state, {
+ type,
+ id,
+ ref,
+ port
+ }).
+
+init([Type]) ->
+ {ok, #state{type = Type}}.
+
+handle_msg({ssh_channel_up, Id, Ref}, S) ->
+ User = get_ssh_user(Ref),
+ ok = ssh_connection:send(Ref,
+ Id,
+ << "\r\nYou are accessing a dummy, type \"q\" to exit\r\n\n" >>),
+ Port = run_portprog(User, S#state.type),
+ {ok, S#state{port = Port, id = Id, ref = Ref}};
+
+handle_msg({Port, {data, Data}}, S = #state{port = Port}) ->
+ ok = ssh_connection:send(S#state.ref, S#state.id, Data),
+ {ok, S};
+handle_msg({Port, {exit_status, Exit}}, S = #state{port = Port}) ->
+ if
+ S#state.type =:= cli ->
+ ok = ssh_connection:send(S#state.ref, S#state.id, << "\r\n" >>);
+ true ->
+ ok
+ end,
+ ok = ssh_connection:exit_status(S#state.ref, S#state.id, Exit),
+ {stop, S#state.id, S#state{port = undefined}};
+handle_msg({'EXIT', Port, _}, S = #state{port = Port}) ->
+ ok = ssh_connection:exit_status(S#state.ref, S#state.id, 0),
+ {stop, S#state.id, S#state{port = undefined}};
+handle_msg(_Msg, S) ->
+ {ok, S}.
+
+handle_ssh_msg({ssh_cm, Ref, {data, Id, _Type, <<"q">>}}, S) ->
+ ssh_connection:send_eof(Ref, Id),
+ {stop, Id, S};
+handle_ssh_msg({ssh_cm, _Ref, {data, _Id, _Type, Data}}, S) ->
+ true = port_command(S#state.port, Data),
+ {ok, S};
+handle_ssh_msg({ssh_cm, _, {eof, _}}, S) ->
+ {ok, S};
+handle_ssh_msg({ssh_cm, Ref, {env, Id, WantReply, _Var, _Value}}, S) ->
+ ok = ssh_connection:reply_request(Ref, WantReply, success, Id),
+ {ok, S};
+handle_ssh_msg({ssh_cm, Ref, {pty, Id, WantReply, _Terminal_jox}}, S) ->
+ ok = ssh_connection:reply_request(Ref, WantReply, success, Id),
+ {ok, S};
+handle_ssh_msg({ssh_cm, Ref, {shell, Id, WantReply}}, S) ->
+ ok = ssh_connection:reply_request(Ref, WantReply, success, Id),
+ {ok, S};
+handle_ssh_msg({ssh_cm, _, {signal, _, _}}, S) ->
+ %% Ignore signals according to RFC 4254 section 6.9.
+ {ok, S};
+handle_ssh_msg({ssh_cm, _,
+ {window_change, _Id, _Width, _Height, _Pixw, _PixH}}, S) ->
+ {ok, S};
+handle_ssh_msg({ssh_cm, _, {exit_signal, Id, _, _, _}},
+ S) ->
+ {stop, Id, S}.
+
+terminate(_Why, _S) ->
+ nop.
+
+run_portprog(User, cli) ->
+ Pty_bin = os:find_executable("cat"),
+ open_port({spawn_executable, Pty_bin},
+ [stream, {cd, "/tmp"}, {env, [{"USER", User}]},
+ {args, []}, binary,
+ exit_status, use_stdio, stderr_to_stdout]).
+
+get_ssh_user(Ref) ->
+ [{user, User}] = ssh:connection_info(Ref, [user]),
+ User.
+
diff --git a/lib/ssl/doc/src/ssl.xml b/lib/ssl/doc/src/ssl.xml
index 6029a09730..1d74faf1b3 100644
--- a/lib/ssl/doc/src/ssl.xml
+++ b/lib/ssl/doc/src/ssl.xml
@@ -52,6 +52,8 @@
<item>CRL and policy certificate extensions are not supported
yet. However CRL verification is supported by public_key, only not integrated
in ssl yet. </item>
+ <item>Support for the 'Server Name Indication' extension on the client side
+ (RFC 6066, section 3).</item>
</list>
</section>
@@ -74,7 +76,7 @@
<seealso marker="kernel:gen_tcp">gen_tcp(3)</seealso>.
</p>
- <p> <c>ssloption() = {verify, verify_type()} |
+ <p><marker id="type-ssloption"></marker><c>ssloption() = {verify, verify_type()} |
{verify_fun, {fun(), term()}} |
{fail_if_no_peer_cert, boolean()}
{depth, integer()} |
@@ -86,8 +88,8 @@
{user_lookup_fun, {fun(), term()}}, {psk_identity, string()}, {srp_identity, {string(), string()}} |
{ssl_imp, ssl_imp()} | {reuse_sessions, boolean()} | {reuse_session, fun()}
{next_protocols_advertised, [binary()]} |
- {client_preferred_next_protocols, client | server, [binary()]} |
- {log_alert, boolean()}
+ {client_preferred_next_protocols, {client | server, [binary()]} | {client | server, [binary()], binary()}} |
+ {log_alert, boolean()} | {server_name_indication, hostname() | disable}
</c></p>
<p><c>transportoption() = {cb_info, {CallbackModule::atom(), DataTag::atom(), ClosedTag::atom(), ErrTag:atom()}}
@@ -353,8 +355,8 @@ fun(srp, Username :: string(), UserState :: term()) ->
when possible.
</item>
- <tag>{client_preferred_next_protocols, Precedence :: server | client, ClientPrefs :: [binary()]}</tag>
- <tag>{client_preferred_next_protocols, Precedence :: server | client, ClientPrefs :: [binary()], Default :: binary()}</tag>
+ <tag>{client_preferred_next_protocols, {Precedence :: server | client, ClientPrefs :: [binary()]}}</tag>
+ <tag>{client_preferred_next_protocols, {Precedence :: server | client, ClientPrefs :: [binary()], Default :: binary()}}</tag>
<item>
<p>Indicates the client will try to perform Next Protocol
Negotiation.</p>
@@ -382,6 +384,15 @@ fun(srp, Username :: string(), UserState :: term()) ->
<tag>{srp_identity, {Username :: string(), Password :: string()}</tag>
<item>Specifies the Username and Password to use to authenticate to the server.
</item>
+ <tag>{server_name_indication, hostname()}</tag>
+ <tag>{server_name_indication, disable}</tag>
+ <item>
+ <p>This option can be specified when upgrading a TCP socket to a TLS
+ socket to use the TLS Server Name Indication extension.</p>
+ <p>When starting a TLS connection without upgrade, the Server Name
+ Indication extension will be sent if possible; this option may also be
+ used to disable that behavior.</p>
+ </item>
</taglist>
</section>
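A hedged usage sketch of the new option documented above; the host name is illustrative and verify_none is used only to keep the example short. When connecting by hostname the extension is sent by default, so the explicit option mainly matters for socket upgrades or for overriding/disabling the name.

sni_connect_example() ->
    ok = ssl:start(),
    {ok, Socket} = ssl:connect("tls.example.com", 443,
                               [{server_name_indication, "tls.example.com"},
                                {verify, verify_none}]),
    ok = ssl:close(Socket).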
diff --git a/lib/ssl/src/dtls_handshake.erl b/lib/ssl/src/dtls_handshake.erl
index 26e8ce7503..d0f9649f9f 100644
--- a/lib/ssl/src/dtls_handshake.erl
+++ b/lib/ssl/src/dtls_handshake.erl
@@ -46,7 +46,7 @@ client_hello(Host, Port, Cookie, ConnectionStates,
SecParams = Pending#connection_state.security_parameters,
CipherSuites = ssl_handshake:available_suites(UserSuites, Version),
- Extensions = ssl_handshake:client_hello_extensions(Version, CipherSuites,
+ Extensions = ssl_handshake:client_hello_extensions(Host, Version, CipherSuites,
SslOpts, ConnectionStates, Renegotiation),
Id = ssl_session:client_id({Host, Port, SslOpts}, Cache, CacheCb, OwnCert),
diff --git a/lib/ssl/src/inet_tls_dist.erl b/lib/ssl/src/inet_tls_dist.erl
index 57c859bf24..7367b5c224 100644
--- a/lib/ssl/src/inet_tls_dist.erl
+++ b/lib/ssl/src/inet_tls_dist.erl
@@ -95,11 +95,6 @@ do_setup(Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
end.
close(Socket) ->
- try
- erlang:error(foo)
- catch _:_ ->
- io:format("close called ~p ~p~n",[Socket, erlang:get_stacktrace()])
- end,
gen_tcp:close(Socket),
ok.
diff --git a/lib/ssl/src/ssl_cipher.erl b/lib/ssl/src/ssl_cipher.erl
index 6513042e98..e6ed0d8626 100644
--- a/lib/ssl/src/ssl_cipher.erl
+++ b/lib/ssl/src/ssl_cipher.erl
@@ -34,7 +34,7 @@
-export([security_parameters/2, security_parameters/3, suite_definition/1,
decipher/5, cipher/5,
- suite/1, suites/1, anonymous_suites/0, psk_suites/1, srp_suites/0,
+ suite/1, suites/1, ec_keyed_suites/0, anonymous_suites/0, psk_suites/1, srp_suites/0,
openssl_suite/1, openssl_suite_name/1, filter/2, filter_suites/1,
hash_algorithm/1, sign_algorithm/1, is_acceptable_hash/2]).
diff --git a/lib/ssl/src/ssl_handshake.erl b/lib/ssl/src/ssl_handshake.erl
index 29a8996bd6..9142a260b1 100644
--- a/lib/ssl/src/ssl_handshake.erl
+++ b/lib/ssl/src/ssl_handshake.erl
@@ -49,13 +49,13 @@
]).
%% Cipher suites handling
--export([available_suites/2, available_suites/3, cipher_suites/2,
- select_session/10]).
+-export([available_suites/2, cipher_suites/2,
+ select_session/10, supported_ecc/1]).
%% Extensions handling
--export([client_hello_extensions/5,
+-export([client_hello_extensions/6,
handle_client_hello_extensions/8, %% Returns server hello extensions
- handle_server_hello_extensions/9
+ handle_server_hello_extensions/9, select_curve/2
]).
%% MISC
@@ -85,11 +85,11 @@ hello_request() ->
server_hello_done() ->
#server_hello_done{}.
-client_hello_extensions(Version, CipherSuites, SslOpts, ConnectionStates, Renegotiation) ->
+client_hello_extensions(Host, Version, CipherSuites, SslOpts, ConnectionStates, Renegotiation) ->
{EcPointFormats, EllipticCurves} =
case advertises_ec_ciphers(lists:map(fun ssl_cipher:suite_definition/1, CipherSuites)) of
true ->
- ecc_extensions(tls_v1, Version);
+ client_ecc_extensions(tls_v1, Version);
false ->
{undefined, undefined}
end,
@@ -104,7 +104,8 @@ client_hello_extensions(Version, CipherSuites, SslOpts, ConnectionStates, Renego
elliptic_curves = EllipticCurves,
next_protocol_negotiation =
encode_client_protocol_negotiation(SslOpts#ssl_options.next_protocol_selector,
- Renegotiation)}.
+ Renegotiation),
+ sni = sni(Host, SslOpts#ssl_options.server_name_indication)}.
%%--------------------------------------------------------------------
-spec certificate(der_cert(), db_handle(), certdb_ref(), client | server) -> #certificate{} | #alert{}.
@@ -641,7 +642,19 @@ encode_hello_extensions([#hash_sign_algos{hash_sign_algos = HashSignAlgos} | Res
ListLen = byte_size(SignAlgoList),
Len = ListLen + 2,
encode_hello_extensions(Rest, <<?UINT16(?SIGNATURE_ALGORITHMS_EXT),
- ?UINT16(Len), ?UINT16(ListLen), SignAlgoList/binary, Acc/binary>>).
+ ?UINT16(Len), ?UINT16(ListLen), SignAlgoList/binary, Acc/binary>>);
+encode_hello_extensions([#sni{hostname = Hostname} | Rest], Acc) ->
+ HostLen = length(Hostname),
+ HostnameBin = list_to_binary(Hostname),
+ % Hostname type (1 byte) + Hostname length (2 bytes) + Hostname (HostLen bytes)
+ ServerNameLength = 1 + 2 + HostLen,
+ % ServerNameListSize (2 bytes) + ServerNameLength
+ ExtLength = 2 + ServerNameLength,
+ encode_hello_extensions(Rest, <<?UINT16(?SNI_EXT), ?UINT16(ExtLength),
+ ?UINT16(ServerNameLength),
+ ?BYTE(?SNI_NAMETYPE_HOST_NAME),
+ ?UINT16(HostLen), HostnameBin/binary,
+ Acc/binary>>).
enc_server_key_exchange(Version, Params, {HashAlgo, SignAlgo},
ClientRandom, ServerRandom, PrivateKey) ->
@@ -861,22 +874,29 @@ available_suites(UserSuites, Version) ->
UserSuites
end.
-available_suites(ServerCert, UserSuites, Version) ->
- ssl_cipher:filter(ServerCert, available_suites(UserSuites, Version)).
+available_suites(ServerCert, UserSuites, Version, Curve) ->
+ ssl_cipher:filter(ServerCert, available_suites(UserSuites, Version))
+ -- unavailable_ecc_suites(Curve).
+
+unavailable_ecc_suites(no_curve) ->
+ ssl_cipher:ec_keyed_suites();
+unavailable_ecc_suites(_) ->
+ [].
cipher_suites(Suites, false) ->
[?TLS_EMPTY_RENEGOTIATION_INFO_SCSV | Suites];
cipher_suites(Suites, true) ->
Suites.
-select_session(SuggestedSessionId, CipherSuites, Compressions, Port, Session, Version,
+select_session(SuggestedSessionId, CipherSuites, Compressions, Port, #session{ecc = ECCCurve} =
+ Session, Version,
#ssl_options{ciphers = UserSuites} = SslOpts, Cache, CacheCb, Cert) ->
{SessionId, Resumed} = ssl_session:server_id(Port, SuggestedSessionId,
SslOpts, Cert,
Cache, CacheCb),
- Suites = ssl_handshake:available_suites(Cert, UserSuites, Version),
case Resumed of
undefined ->
+ Suites = available_suites(Cert, UserSuites, Version, ECCCurve),
CipherSuite = select_cipher_suite(CipherSuites, Suites),
Compression = select_compression(Compressions),
{new, Session#session{session_id = SessionId,
@@ -886,6 +906,13 @@ select_session(SuggestedSessionId, CipherSuites, Compressions, Port, Session, Ve
{resumed, Resumed}
end.
+supported_ecc(Version) ->
+ case tls_v1:ecc_curves(Version) of
+ [] ->
+ undefined;
+ Curves ->
+ #elliptic_curves{elliptic_curve_list = Curves}
+ end.
%%-------------certificate handling --------------------------------
certificate_types({KeyExchange, _, _, _})
@@ -926,9 +953,8 @@ certificate_authorities_from_db(CertDbHandle, CertDbRef) ->
handle_client_hello_extensions(RecordCB, Random,
#hello_extensions{renegotiation_info = Info,
srp = SRP,
- next_protocol_negotiation = NextProtocolNegotiation,
- ec_point_formats = EcPointFormats0,
- elliptic_curves = EllipticCurves0}, Version,
+ ec_point_formats = ECCFormat,
+ next_protocol_negotiation = NextProtocolNegotiation}, Version,
#ssl_options{secure_renegotiate = SecureRenegotation} = Opts,
#session{cipher_suite = CipherSuite, compression_method = Compression} = Session0,
ConnectionStates0, Renegotiation) ->
@@ -937,12 +963,11 @@ handle_client_hello_extensions(RecordCB, Random,
Random, CipherSuite, Compression,
ConnectionStates0, Renegotiation, SecureRenegotation),
ProtocolsToAdvertise = handle_next_protocol_extension(NextProtocolNegotiation, Renegotiation, Opts),
- {EcPointFormats, EllipticCurves} = handle_ecc_extensions(Version, EcPointFormats0, EllipticCurves0),
+
ServerHelloExtensions = #hello_extensions{
renegotiation_info = renegotiation_info(RecordCB, server,
ConnectionStates, Renegotiation),
- ec_point_formats = EcPointFormats,
- elliptic_curves = EllipticCurves,
+ ec_point_formats = server_ecc_extension(Version, ECCFormat),
next_protocol_negotiation =
encode_protocols_advertised_on_server(ProtocolsToAdvertise)
},
@@ -1069,16 +1094,17 @@ hello_extensions_list(#hello_extensions{renegotiation_info = RenegotiationInfo,
hash_signs = HashSigns,
ec_point_formats = EcPointFormats,
elliptic_curves = EllipticCurves,
- next_protocol_negotiation = NextProtocolNegotiation}) ->
+ next_protocol_negotiation = NextProtocolNegotiation,
+ sni = Sni}) ->
[Ext || Ext <- [RenegotiationInfo, SRP, HashSigns,
- EcPointFormats,EllipticCurves, NextProtocolNegotiation], Ext =/= undefined].
+ EcPointFormats, EllipticCurves, NextProtocolNegotiation, Sni], Ext =/= undefined].
srp_user(#ssl_options{srp_identity = {UserName, _}}) ->
#srp{username = UserName};
srp_user(_) ->
undefined.
-ecc_extensions(Module, Version) ->
+client_ecc_extensions(Module, Version) ->
CryptoSupport = proplists:get_value(public_keys, crypto:supports()),
case proplists:get_bool(ecdh, CryptoSupport) of
true ->
@@ -1089,15 +1115,13 @@ ecc_extensions(Module, Version) ->
{undefined, undefined}
end.
-handle_ecc_extensions(Version, EcPointFormats0, EllipticCurves0) ->
+server_ecc_extension(_Version, EcPointFormats) ->
CryptoSupport = proplists:get_value(public_keys, crypto:supports()),
case proplists:get_bool(ecdh, CryptoSupport) of
true ->
- EcPointFormats1 = handle_ecc_point_fmt_extension(EcPointFormats0),
- EllipticCurves1 = handle_ecc_curves_extension(Version, EllipticCurves0),
- {EcPointFormats1, EllipticCurves1};
- _ ->
- {undefined, undefined}
+ handle_ecc_point_fmt_extension(EcPointFormats);
+ false ->
+ undefined
end.
handle_ecc_point_fmt_extension(undefined) ->
@@ -1105,11 +1129,6 @@ handle_ecc_point_fmt_extension(undefined) ->
handle_ecc_point_fmt_extension(_) ->
#ec_point_formats{ec_point_format_list = [?ECPOINT_UNCOMPRESSED]}.
-handle_ecc_curves_extension(_Version, undefined) ->
- undefined;
-handle_ecc_curves_extension(Version, _) ->
- #elliptic_curves{elliptic_curve_list = tls_v1:ecc_curves(Version)}.
-
advertises_ec_ciphers([]) ->
false;
advertises_ec_ciphers([{ecdh_ecdsa, _,_,_} | _]) ->
@@ -1124,7 +1143,36 @@ advertises_ec_ciphers([{ecdh_anon, _,_,_} | _]) ->
true;
advertises_ec_ciphers([_| Rest]) ->
advertises_ec_ciphers(Rest).
-
+select_curve(#elliptic_curves{elliptic_curve_list = ClientCurves},
+ #elliptic_curves{elliptic_curve_list = ServerCurves}) ->
+ select_curve(ClientCurves, ServerCurves);
+select_curve(undefined, _) ->
+ %% Client did not send the ECC extension; use the default curve if an
+ %% ECC cipher is negotiated
+ {namedCurve, ?secp256k1};
+select_curve(_, []) ->
+ no_curve;
+select_curve(Curves, [Curve| Rest]) ->
+ case lists:member(Curve, Curves) of
+ true ->
+ {namedCurve, Curve};
+ false ->
+ select_curve(Curves, Rest)
+ end.
+%% RFC 6066, Section 3: Currently, the only server names supported are
+%% DNS hostnames
+sni(_, disable) ->
+ undefined;
+sni(Host, undefined) ->
+ sni1(Host);
+sni(_Host, SNIOption) ->
+ sni1(SNIOption).
+
+sni1(Hostname) ->
+ case inet_parse:domain(Hostname) of
+ false -> undefined;
+ true -> #sni{hostname = Hostname}
+ end.
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
@@ -1648,3 +1696,4 @@ advertised_hash_signs({Major, Minor}) when Major >= 3 andalso Minor >= 3 ->
({Hash, _}) -> proplists:get_bool(Hash, Hashs) end, HashSigns)};
advertised_hash_signs(_) ->
undefined.
+
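The SNI branch added to encode_hello_extensions/2 lays the extension out as extension type, extension length, server_name_list length, name type, host name length, host name. An illustrative standalone sketch of the same layout; for "test.com" it reproduces the SNI binary used in the ssl_handshake_SUITE test vector below (the test then prepends the overall extensions length separately).

sni_extension(Hostname) ->
    HostBin = list_to_binary(Hostname),
    HostLen = byte_size(HostBin),
    ServerName = <<0,                              %% NameType host_name(0)
                   HostLen:16, HostBin/binary>>,
    ServerNameList = <<(byte_size(ServerName)):16, ServerName/binary>>,
    <<0:16,                                        %% extension type server_name(0)
      (byte_size(ServerNameList)):16, ServerNameList/binary>>.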
diff --git a/lib/ssl/src/ssl_handshake.hrl b/lib/ssl/src/ssl_handshake.hrl
index 3a3ad8cf35..75160526b9 100644
--- a/lib/ssl/src/ssl_handshake.hrl
+++ b/lib/ssl/src/ssl_handshake.hrl
@@ -45,7 +45,8 @@
master_secret,
srp_username,
is_resumable,
- time_stamp
+ time_stamp,
+ ecc
}).
-define(NUM_OF_SESSION_ID_BYTES, 32). % TSL 1.1 & SSL 3
@@ -97,7 +98,8 @@
next_protocol_negotiation = undefined, % [binary()]
srp,
ec_point_formats,
- elliptic_curves
+ elliptic_curves,
+ sni
}).
-record(server_hello, {
@@ -338,6 +340,19 @@
-define(NAMED_CURVE, 3).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Server name indication RFC 6066 section 3
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+-define(SNI_EXT, 16#0000).
+
+%% enum { host_name(0), (255) } NameType;
+-define(SNI_NAMETYPE_HOST_NAME, 0).
+
+-record(sni, {
+ hostname = undefined
+ }).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Dialyzer types
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -352,6 +367,3 @@
-endif. % -ifdef(ssl_handshake).
-
-
-
diff --git a/lib/ssl/src/ssl_internal.hrl b/lib/ssl/src/ssl_internal.hrl
index 96e3280fb5..a582b8c290 100644
--- a/lib/ssl/src/ssl_internal.hrl
+++ b/lib/ssl/src/ssl_internal.hrl
@@ -115,7 +115,8 @@
erl_dist = false,
next_protocols_advertised = undefined, %% [binary()],
next_protocol_selector = undefined, %% fun([binary()]) -> binary())
- log_alert
+ log_alert,
+ server_name_indication = undefined
}).
-record(socket_options,
diff --git a/lib/ssl/src/tls.erl b/lib/ssl/src/tls.erl
index b220a48f73..f1747dc69e 100644
--- a/lib/ssl/src/tls.erl
+++ b/lib/ssl/src/tls.erl
@@ -664,7 +664,8 @@ handle_options(Opts0, _Role) ->
next_protocol_selector =
make_next_protocol_selector(
handle_option(client_preferred_next_protocols, Opts, undefined)),
- log_alert = handle_option(log_alert, Opts, true)
+ log_alert = handle_option(log_alert, Opts, true),
+ server_name_indication = handle_option(server_name_indication, Opts, undefined)
},
CbInfo = proplists:get_value(cb_info, Opts, {gen_tcp, tcp, tcp_closed, tcp_error}),
@@ -855,6 +856,12 @@ validate_option(next_protocols_advertised = Opt, Value) when is_list(Value) ->
validate_option(next_protocols_advertised, undefined) ->
undefined;
+validate_option(server_name_indication, Value) when is_list(Value) ->
+ Value;
+validate_option(server_name_indication, disable) ->
+ disable;
+validate_option(server_name_indication, undefined) ->
+ undefined;
validate_option(Opt, Value) ->
throw({error, {options, {Opt, Value}}}).
diff --git a/lib/ssl/src/tls_connection.erl b/lib/ssl/src/tls_connection.erl
index 5618837506..39595b4f95 100644
--- a/lib/ssl/src/tls_connection.erl
+++ b/lib/ssl/src/tls_connection.erl
@@ -97,8 +97,7 @@
terminated = false, %
allow_renegotiate = true,
expecting_next_protocol_negotiation = false :: boolean(),
- next_protocol = undefined :: undefined | binary(),
- client_ecc % {Curves, PointFmt}
+ next_protocol = undefined :: undefined | binary()
}).
-define(DEFAULT_DIFFIE_HELLMAN_PARAMS,
@@ -405,26 +404,24 @@ hello(#server_hello{cipher_suite = CipherSuite,
hello(Hello = #client_hello{client_version = ClientVersion,
extensions = #hello_extensions{hash_signs = HashSigns}},
State = #state{connection_states = ConnectionStates0,
- port = Port, session = #session{own_certificate = Cert} = Session0,
+ port = Port,
+ session = #session{own_certificate = Cert} = Session0,
renegotiation = {Renegotiation, _},
session_cache = Cache,
session_cache_cb = CacheCb,
ssl_options = SslOpts}) ->
HashSign = ssl_handshake:select_hashsign(HashSigns, Cert),
case tls_handshake:hello(Hello, SslOpts, {Port, Session0, Cache, CacheCb,
- ConnectionStates0, Cert}, Renegotiation) of
+ ConnectionStates0, Cert}, Renegotiation) of
{Version, {Type, #session{cipher_suite = CipherSuite} = Session},
- ConnectionStates,
- #hello_extensions{ec_point_formats = EcPointFormats,
- elliptic_curves = EllipticCurves} = ServerHelloExt} ->
+ ConnectionStates, ServerHelloExt} ->
{KeyAlg, _, _, _} = ssl_cipher:suite_definition(CipherSuite),
- NegotiatedHashSign = negotiated_hashsign(HashSign, KeyAlg, Version),
- do_server_hello(Type, ServerHelloExt,
+ NegotiatedHashSign = negotiated_hashsign(HashSign, KeyAlg, Version),
+ do_server_hello(Type, ServerHelloExt,
State#state{connection_states = ConnectionStates,
negotiated_version = Version,
session = Session,
- hashsign_algorithm = NegotiatedHashSign,
- client_ecc = {EllipticCurves, EcPointFormats}});
+ hashsign_algorithm = NegotiatedHashSign});
#alert{} = Alert ->
handle_own_alert(Alert, ClientVersion, hello, State)
end;
@@ -1647,12 +1644,13 @@ key_exchange(#state{role = server, key_algorithm = Algo,
negotiated_version = Version,
tls_handshake_history = Handshake0,
socket = Socket,
- transport_cb = Transport
+ transport_cb = Transport,
+ session = #session{ecc = Curve}
} = State)
when Algo == ecdhe_ecdsa; Algo == ecdhe_rsa;
Algo == ecdh_anon ->
- ECDHKeys = public_key:generate_key(select_curve(State)),
+ ECDHKeys = public_key:generate_key(Curve),
ConnectionState =
ssl_record:pending_connection_state(ConnectionStates0, read),
SecParams = ConnectionState#connection_state.security_parameters,
@@ -3086,12 +3084,7 @@ default_hashsign(_Version, KeyExchange)
KeyExchange == rsa_psk;
KeyExchange == srp_anon ->
{null, anon}.
-
-select_curve(#state{client_ecc = {[Curve|_], _}}) ->
- {namedCurve, Curve};
-select_curve(_) ->
- {namedCurve, ?secp256k1}.
-
+
is_anonymous(Algo) when Algo == dh_anon;
Algo == ecdh_anon;
Algo == psk;
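key_exchange/1 for the ECDHE and ECDH-anon cases now takes the curve from the session record (filled in by select_curve/2 via tls_handshake:hello/4) instead of the removed per-connection client_ecc field. A hedged sketch of the key-generation step; Curve is a {namedCurve, Oid} tuple as produced by select_curve/2:

ephemeral_ecdh_keys(Curve = {namedCurve, _Oid}) ->
    %% Returns an EC private-key record holding both the private scalar
    %% and the public point that becomes the ECDHE share.
    public_key:generate_key(Curve).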
diff --git a/lib/ssl/src/tls_handshake.erl b/lib/ssl/src/tls_handshake.erl
index 02bfa69fc5..262f2d929f 100644
--- a/lib/ssl/src/tls_handshake.erl
+++ b/lib/ssl/src/tls_handshake.erl
@@ -56,7 +56,7 @@ client_hello(Host, Port, ConnectionStates,
SecParams = Pending#connection_state.security_parameters,
CipherSuites = ssl_handshake:available_suites(UserSuites, Version),
- Extensions = ssl_handshake:client_hello_extensions(Version, CipherSuites,
+ Extensions = ssl_handshake:client_hello_extensions(Host, Version, CipherSuites,
SslOpts, ConnectionStates, Renegotiation),
Id = ssl_session:client_id({Host, Port, SslOpts}, Cache, CacheCb, OwnCert),
@@ -70,7 +70,7 @@ client_hello(Host, Port, ConnectionStates,
}.
%%--------------------------------------------------------------------
--spec server_hello(#session{}, tls_version(), #connection_states{},
+-spec server_hello(binary(), tls_version(), #connection_states{},
#hello_extensions{}) -> #server_hello{}.
%%
%% Description: Creates a server hello message.
@@ -120,17 +120,16 @@ hello(#client_hello{client_version = ClientVersion,
cipher_suites = CipherSuites,
compression_methods = Compressions,
random = Random,
- extensions = HelloExt},
+ extensions = #hello_extensions{elliptic_curves = Curves} = HelloExt},
#ssl_options{versions = Versions} = SslOpts,
{Port, Session0, Cache, CacheCb, ConnectionStates0, Cert}, Renegotiation) ->
Version = ssl_handshake:select_version(tls_record, ClientVersion, Versions),
case tls_record:is_acceptable_version(Version, Versions) of
true ->
- %% TODO: need to take supported Curves into Account when selecting the CipherSuite....
- %% if whe have an ECDSA cert with an unsupported curve, we need to drop ECDSA ciphers
+ ECCCurve = ssl_handshake:select_curve(Curves, ssl_handshake:supported_ecc(Version)),
{Type, #session{cipher_suite = CipherSuite} = Session1}
= ssl_handshake:select_session(SugesstedId, CipherSuites, Compressions,
- Port, Session0, Version,
+ Port, Session0#session{ecc = ECCCurve}, Version,
SslOpts, Cache, CacheCb, Cert),
case CipherSuite of
no_suite ->
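
ssl_handshake:select_curve/2 and supported_ecc/1 are not shown in this diff; a plausible reading is that the server picks the first client-offered curve it also supports and falls back when there is no overlap. An illustrative sketch only, simplified to plain lists of curve identifiers (the real implementation may differ):

    %% Sketch, not the actual ssl_handshake code.
    select_curve(ClientCurves, SupportedCurves) ->
        case [C || C <- ClientCurves, lists:member(C, SupportedCurves)] of
            [Curve | _] -> {namedCurve, Curve};
            []          -> no_curve
        end.
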
diff --git a/lib/ssl/test/ssl_handshake_SUITE.erl b/lib/ssl/test/ssl_handshake_SUITE.erl
index 9695710230..7e8e8d2611 100644
--- a/lib/ssl/test/ssl_handshake_SUITE.erl
+++ b/lib/ssl/test/ssl_handshake_SUITE.erl
@@ -34,7 +34,8 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() -> [decode_hello_handshake,
decode_single_hello_extension_correctly,
- decode_unknown_hello_extension_correctly].
+ decode_unknown_hello_extension_correctly,
+ encode_single_hello_sni_extension_correctly].
%%--------------------------------------------------------------------
%% Test Cases --------------------------------------------------------
@@ -73,3 +74,12 @@ decode_unknown_hello_extension_correctly(_Config) ->
Extensions = ssl_handshake:decode_hello_extensions(<<FourByteUnknown/binary, Renegotiation/binary>>),
#renegotiation_info{renegotiated_connection = <<0>>}
= Extensions#hello_extensions.renegotiation_info.
+
+encode_single_hello_sni_extension_correctly(_Config) ->
+ Exts = #hello_extensions{sni = #sni{hostname = "test.com"}},
+ SNI = <<16#00, 16#00, 16#00, 16#0d, 16#00, 16#0b, 16#00, 16#00, 16#08,
+ $t, $e, $s, $t, $., $c, $o, $m>>,
+ ExtSize = byte_size(SNI),
+ HelloExt = <<ExtSize:16/unsigned-big-integer, SNI/binary>>,
+ Encoded = ssl_handshake:encode_hello_extensions(Exts),
+ HelloExt = Encoded.
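
The expected bytes in the new test case follow the RFC 6066 server_name extension layout; the same value can be written with the bit syntax, which makes the structure explicit:

    %% Byte layout of the SNI extension checked above (RFC 6066).
    SNI = <<0:16,          %% extension type: server_name (0)
            13:16,         %% extension_data length
            11:16,         %% ServerNameList length
            0:8,           %% name_type: host_name (0)
            8:16,          %% HostName length
            "test.com">>,  %% the name itself (8 bytes)
    HelloExt = <<17:16, SNI/binary>>.  %% size-prefixed extensions blob, as in the test
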
diff --git a/lib/stdlib/doc/src/re.xml b/lib/stdlib/doc/src/re.xml
index 71a6e34513..7a9f37ca90 100644
--- a/lib/stdlib/doc/src/re.xml
+++ b/lib/stdlib/doc/src/re.xml
@@ -40,8 +40,8 @@
<p>This module contains regular expression matching functions for
strings and binaries.</p>
- <p>The regular expression syntax and semantics resemble that of
- Perl.</p>
+ <p>The <seealso marker="#regexp_syntax">regular expression</seealso>
+ syntax and semantics resemble that of Perl.</p>
<p>The library's matching algorithms are currently based on the
PCRE library, but not all of the PCRE library is interfaced and
@@ -702,7 +702,7 @@ This option makes it possible to include comments inside complicated patterns. N
</func>
</funcs>
-
+ <marker id="regexp_syntax"></marker>
<section>
<title>PERL LIKE REGULAR EXPRESSIONS SYNTAX</title>
<p>The following sections contain reference material for the
diff --git a/lib/stdlib/src/dict.erl b/lib/stdlib/src/dict.erl
index 4f8d45dc8d..4b42f64609 100644
--- a/lib/stdlib/src/dict.erl
+++ b/lib/stdlib/src/dict.erl
@@ -387,7 +387,7 @@ merge(F, D1, D2) ->
update(K, fun (V1) -> F(K, V1, V2) end, V2, D)
end, D1, D2).
-
+
%% get_slot(Hashdb, Key) -> Slot.
%% Get the slot. First hash on the new range, if we hit a bucket
%% which has not been split use the unsplit buddy bucket.
diff --git a/lib/stdlib/src/erl_eval.erl b/lib/stdlib/src/erl_eval.erl
index 73b8da335a..ca6a4b5c58 100644
--- a/lib/stdlib/src/erl_eval.erl
+++ b/lib/stdlib/src/erl_eval.erl
@@ -912,7 +912,7 @@ type_test(binary) -> is_binary;
type_test(record) -> is_record;
type_test(Test) -> Test.
-
+
%% match(Pattern, Term, Bindings) ->
%% {match,NewBindings} | nomatch
%% or erlang:error({illegal_pattern, Pattern}).
@@ -1051,7 +1051,7 @@ match_list([], [], Bs, _BBs) ->
{match,Bs};
match_list(_, _, _Bs, _BBs) ->
nomatch.
-
+
%% new_bindings()
%% bindings(Bindings)
%% binding(Name, Bindings)
diff --git a/lib/stdlib/src/erl_tar.erl b/lib/stdlib/src/erl_tar.erl
index 4b654833ed..40ef6c8998 100644
--- a/lib/stdlib/src/erl_tar.erl
+++ b/lib/stdlib/src/erl_tar.erl
@@ -222,7 +222,7 @@ format_error(Atom) when is_atom(Atom) ->
format_error(Term) ->
lists:flatten(io_lib:format("~tp", [Term])).
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
%%% Useful definitions (also start of implementation).
@@ -412,7 +412,7 @@ split_filename([Comp|Rest], Prefix, Suffix, Len) ->
split_filename([], Prefix, Suffix, _) ->
{filename:join(Prefix),filename:join(Suffix)}.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%
%%% Retrieving files from a tape archive.
diff --git a/lib/stdlib/src/filelib.erl b/lib/stdlib/src/filelib.erl
index 9ef4954194..b8c0576e56 100644
--- a/lib/stdlib/src/filelib.erl
+++ b/lib/stdlib/src/filelib.erl
@@ -248,7 +248,7 @@ ensure_dir(F) ->
end
end.
-
+
%%%
%%% Pattern matching using a compiled wildcard.
%%%
@@ -360,7 +360,7 @@ do_alt([], _File) ->
do_list_dir(Dir, Mod) -> eval_list_dir(Dir, Mod).
-
+
%%% Compiling a wildcard.
%% Only for debugging.
diff --git a/lib/stdlib/src/gen_server.erl b/lib/stdlib/src/gen_server.erl
index 7f65131f67..df68a37c06 100644
--- a/lib/stdlib/src/gen_server.erl
+++ b/lib/stdlib/src/gen_server.erl
@@ -393,7 +393,7 @@ decode_msg(Msg, Parent, Name, State, Mod, Time, Debug, Hib) ->
end.
%%% ---------------------------------------------------
-%%% Send/recive functions
+%%% Send/receive functions
%%% ---------------------------------------------------
do_send(Dest, Msg) ->
case catch erlang:send(Dest, Msg, [noconnect]) of
diff --git a/lib/stdlib/src/io_lib.erl b/lib/stdlib/src/io_lib.erl
index 92a086b077..9e69601770 100644
--- a/lib/stdlib/src/io_lib.erl
+++ b/lib/stdlib/src/io_lib.erl
@@ -583,7 +583,7 @@ printable_unicode_list(_) -> false. %Everything else is false
nl() ->
"\n".
-
+
%%
%% Utilities for collecting characters in input files
%%
diff --git a/lib/stdlib/src/lists.erl b/lib/stdlib/src/lists.erl
index b5577165f4..d6a9f4645d 100644
--- a/lib/stdlib/src/lists.erl
+++ b/lib/stdlib/src/lists.erl
@@ -630,7 +630,7 @@ flatlength([H|T], L) when is_list(H) ->
flatlength([_|T], L) ->
flatlength(T, L + 1);
flatlength([], L) -> L.
-
+
%% keymember(Key, Index, [Tuple]) Now a BIF!
%% keyfind(Key, Index, [Tuple]) A BIF!
%% keysearch(Key, Index, [Tuple]) Now a BIF!
@@ -1163,7 +1163,7 @@ rumerge(T1, []) ->
T1;
rumerge(T1, [H2 | T2]) ->
lists:reverse(rumerge2_1(T1, T2, [], H2), []).
-
+
%% all(Predicate, List)
%% any(Predicate, List)
%% map(Function, List)
diff --git a/lib/stdlib/src/math.erl b/lib/stdlib/src/math.erl
index c3fb684ec3..98a70b1644 100644
--- a/lib/stdlib/src/math.erl
+++ b/lib/stdlib/src/math.erl
@@ -51,9 +51,9 @@ asinh(_) ->
atan(_) ->
erlang:nif_error(undef).
--spec atan2(X, Y) -> float() when
- X :: number(),
- Y :: number().
+-spec atan2(Y, X) -> float() when
+ Y :: number(),
+ X :: number().
atan2(_, _) ->
erlang:nif_error(undef).
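
The corrected spec reflects the actual argument order, atan2(Y, X), which gives the angle of the point (X, Y). A quick shell check:

    1> math:atan2(1.0, 0.0).   %% point (0, 1) -> pi/2
    1.5707963267948966
    2> math:atan2(0.0, -1.0).  %% point (-1, 0) -> pi
    3.141592653589793
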
diff --git a/lib/stdlib/src/string.erl b/lib/stdlib/src/string.erl
index 4ed27ff4eb..d0bd0cb26e 100644
--- a/lib/stdlib/src/string.erl
+++ b/lib/stdlib/src/string.erl
@@ -257,7 +257,7 @@ chars(C, N, Tail) when N > 0 ->
chars(C, N-1, [C|Tail]);
chars(C, 0, Tail) when is_integer(C) ->
Tail.
-
+
%% Torbjörn's bit.
%%% COPIES %%%
@@ -461,7 +461,7 @@ sub_string(String, Start) -> substr(String, Start).
Stop :: pos_integer().
sub_string(String, Start, Stop) -> substr(String, Start, Stop - Start + 1).
-
+
%% ISO/IEC 8859-1 (latin1) letters are converted, others are ignored
%%
diff --git a/lib/stdlib/test/slave_SUITE.erl b/lib/stdlib/test/slave_SUITE.erl
index 37fc694083..1d6a3ac90d 100644
--- a/lib/stdlib/test/slave_SUITE.erl
+++ b/lib/stdlib/test/slave_SUITE.erl
@@ -230,7 +230,7 @@ rsh_test(ResultTo) ->
link(ResultTo),
?line {error, no_rsh} = slave:start(super, slave3).
-
+
%%% Utilities.
diff --git a/lib/syntax_tools/src/erl_syntax.erl b/lib/syntax_tools/src/erl_syntax.erl
index bdb2b5bcd7..409805e95f 100644
--- a/lib/syntax_tools/src/erl_syntax.erl
+++ b/lib/syntax_tools/src/erl_syntax.erl
@@ -5485,22 +5485,15 @@ revert_implicit_fun(Node) ->
arity_qualifier ->
F = arity_qualifier_body(Name),
A = arity_qualifier_argument(Name),
- case {type(F), type(A)} of
- {atom, integer} ->
- {'fun', Pos,
- {function, concrete(F), concrete(A)}};
- _ ->
- Node
- end;
+ {'fun', Pos, {function, F, A}};
module_qualifier ->
M = module_qualifier_argument(Name),
Name1 = module_qualifier_body(Name),
- F = arity_qualifier_body(Name1),
- A = arity_qualifier_argument(Name1),
- case {type(M), type(F), type(A)} of
- {atom, atom, integer} ->
- {'fun', Pos,
- {function, concrete(M), concrete(F), concrete(A)}};
+ case type(Name1) of
+ arity_qualifier ->
+ F = arity_qualifier_body(Name1),
+ A = arity_qualifier_argument(Name1),
+ {'fun', Pos, {function, M, F, A}};
_ ->
Node
end;
diff --git a/lib/syntax_tools/src/erl_syntax_lib.erl b/lib/syntax_tools/src/erl_syntax_lib.erl
index 2c94ac776d..e4665b99fc 100644
--- a/lib/syntax_tools/src/erl_syntax_lib.erl
+++ b/lib/syntax_tools/src/erl_syntax_lib.erl
@@ -1357,8 +1357,6 @@ analyze_attribute(file, Node) ->
analyze_file_attribute(Node);
analyze_attribute(record, Node) ->
analyze_record_attribute(Node);
-analyze_attribute(define, _Node) ->
- define;
analyze_attribute(spec, _Node) ->
spec;
analyze_attribute(_, Node) ->
diff --git a/lib/syntax_tools/src/igor.erl b/lib/syntax_tools/src/igor.erl
index d385c2b690..19b1cd592f 100644
--- a/lib/syntax_tools/src/igor.erl
+++ b/lib/syntax_tools/src/igor.erl
@@ -1803,20 +1803,25 @@ transform_rule(T, Env, St) ->
transform_implicit_fun(T, Env, St) ->
{T1, St1} = default_transform(T, Env, St),
- F = erl_syntax_lib:analyze_implicit_fun(T1),
- {V, Text} = case (Env#code.map)(F) of
- F ->
- %% Not renamed
- {none, []};
- {Atom, Arity} ->
- %% Renamed
- N = rewrite(
- erl_syntax:implicit_fun_name(T1),
- erl_syntax:arity_qualifier(
- erl_syntax:atom(Atom),
- erl_syntax:integer(Arity))),
- T2 = erl_syntax:implicit_fun(N),
- {{value, T2}, ["function was renamed"]}
+ {V, Text} = case erl_syntax:type(erl_syntax:implicit_fun_name(T1)) of
+ arity_qualifier ->
+ F = erl_syntax_lib:analyze_implicit_fun(T1),
+ case (Env#code.map)(F) of
+ F ->
+ %% Not renamed
+ {none, []};
+ {Atom, Arity} ->
+ %% Renamed
+ N = rewrite(
+ erl_syntax:implicit_fun_name(T1),
+ erl_syntax:arity_qualifier(
+ erl_syntax:atom(Atom),
+ erl_syntax:integer(Arity))),
+ T2 = erl_syntax:implicit_fun(N),
+ {{value, T2}, ["function was renamed"]}
+ end;
+ module_qualifier ->
+ {none, []}
end,
{maybe_modified_quiet(V, T1, 2, Text, Env), St1}.
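
The two implicit fun forms involved here differ in the type of their name subtree: fun f/1 has an arity_qualifier name, while fun m:f/1 has a module_qualifier name, and igor now only attempts renaming for the former. A small sketch of probing the distinction from plain erl_parse trees, illustrative only:

    %% Illustrative only: distinguishing the two implicit fun forms.
    {ok, Toks1, _} = erl_scan:string("fun foo/1."),
    {ok, [Fun1]} = erl_parse:parse_exprs(Toks1),
    arity_qualifier = erl_syntax:type(erl_syntax:implicit_fun_name(Fun1)),
    {ok, Toks2, _} = erl_scan:string("fun lists:map/2."),
    {ok, [Fun2]} = erl_parse:parse_exprs(Toks2),
    module_qualifier = erl_syntax:type(erl_syntax:implicit_fun_name(Fun2)).
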
diff --git a/lib/test_server/src/test_server.erl b/lib/test_server/src/test_server.erl
index 6ddb2b615f..54be6d4c72 100644
--- a/lib/test_server/src/test_server.erl
+++ b/lib/test_server/src/test_server.erl
@@ -915,6 +915,7 @@ run_test_case_eval(Mod, Func, Args0, Name, Ref, RunInit,
put(test_server_logopts, LogOpts),
Where = [{Mod,Func}],
put(test_server_loc, Where),
+
FWInitResult = test_server_sup:framework_call(init_tc,[Mod,Func,Args0],
{ok,Args0}),
set_tc_state(running),
@@ -924,7 +925,7 @@ run_test_case_eval(Mod, Func, Args0, Name, Ref, RunInit,
run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback);
Error = {error,_Reason} ->
NewResult = do_end_tc_call(Mod,Func, {Error,Args0},
- {skip,{failed,Error}}),
+ {auto_skip,{failed,Error}}),
{{0,NewResult},Where,[]};
{fail,Reason} ->
Conf = [{tc_status,{failed,Reason}} | hd(Args0)],
@@ -935,9 +936,9 @@ run_test_case_eval(Mod, Func, Args0, Name, Ref, RunInit,
Skip = {skip,_Reason} ->
NewResult = do_end_tc_call(Mod,Func, {Skip,Args0}, Skip),
{{0,NewResult},Where,[]};
- {auto_skip,Reason} ->
- NewResult = do_end_tc_call(Mod,Func, {{skip,Reason},Args0},
- {skip,Reason}),
+ AutoSkip = {auto_skip,_Reason} ->
+ %% special case where a conf case "pretends" to be skipped
+ NewResult = do_end_tc_call(Mod,Func, {AutoSkip,Args0}, AutoSkip),
{{0,NewResult},Where,[]}
end,
exit({Ref,Time,Value,Loc,Opts}).
@@ -955,7 +956,8 @@ run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) ->
{{0,NewRes},Line,[]};
{skip_and_save,Reason,SaveCfg} ->
Line = get_loc(),
- Conf = [{tc_status,{skipped,Reason}},{save_config,SaveCfg}|hd(Args)],
+ Conf = [{tc_status,{skipped,Reason}},
+ {save_config,SaveCfg}|hd(Args)],
NewRes = do_end_tc_call(Mod,Func, {{skip,Reason},[Conf]},
{skip,Reason}),
{{0,NewRes},Line,[]};
diff --git a/lib/test_server/src/test_server_ctrl.erl b/lib/test_server/src/test_server_ctrl.erl
index d0f31af198..352e58f91c 100644
--- a/lib/test_server/src/test_server_ctrl.erl
+++ b/lib/test_server/src/test_server_ctrl.erl
@@ -1161,8 +1161,11 @@ init_tester(Mod, Func, Args, Dir, Name, {_,_,MinLev}=Levels,
end,
{SkippedN,SkipStr} =
case get(test_server_skipped) of
- {0,_} -> {0,""};
- {Skipped,_} -> {Skipped,io_lib:format(", ~w Skipped", [Skipped])}
+ {0,0} ->
+ {0,""};
+ {USkipped,ASkipped} ->
+ Skipped = USkipped+ASkipped,
+ {Skipped,io_lib:format(", ~w Skipped", [Skipped])}
end,
OkN = get(test_server_ok),
FailedN = get(test_server_failed),
@@ -1402,10 +1405,23 @@ remove_conf([{conf, _Ref, Props, _MF}|Cases], NoConf, Repeats) ->
end;
remove_conf([{make,_Ref,_MF}|Cases], NoConf, Repeats) ->
remove_conf(Cases, NoConf, Repeats);
+remove_conf([{skip_case,{{_M,all},_Cmt}}|Cases], NoConf, Repeats) ->
+ remove_conf(Cases, NoConf, Repeats);
remove_conf([{skip_case,{Type,_Ref,_MF,_Cmt}}|Cases],
NoConf, Repeats) when Type==conf;
Type==make ->
remove_conf(Cases, NoConf, Repeats);
+remove_conf([{skip_case,{Type,_Ref,_MF,_Cmt},_Mode}|Cases],
+ NoConf, Repeats) when Type==conf;
+ Type==make ->
+ remove_conf(Cases, NoConf, Repeats);
+remove_conf([C={Mod,error_in_suite,_}|Cases], NoConf, Repeats) ->
+ FwMod = get_fw_mod(?MODULE),
+ if Mod == FwMod ->
+ remove_conf(Cases, NoConf, Repeats);
+ true ->
+ remove_conf(Cases, [C|NoConf], Repeats)
+ end;
remove_conf([C|Cases], NoConf, Repeats) ->
remove_conf(Cases, [C|NoConf], Repeats);
remove_conf([], NoConf, true) ->
@@ -1413,6 +1429,11 @@ remove_conf([], NoConf, true) ->
remove_conf([], NoConf, false) ->
lists:reverse(NoConf).
+get_suites([{skip_case,{{Mod,_Func},_Cmt}}|Tests], Mods) when is_atom(Mod) ->
+ case add_mod(Mod, Mods) of
+ true -> get_suites(Tests, [Mod|Mods]);
+ false -> get_suites(Tests, Mods)
+ end;
get_suites([{Mod,_Case}|Tests], Mods) when is_atom(Mod) ->
case add_mod(Mod, Mods) of
true -> get_suites(Tests, [Mod|Mods]);
@@ -1478,8 +1499,10 @@ do_test_cases(TopCases, SkipCases,
end,
put(test_server_cases, N),
put(test_server_case_num, 0),
+
TestSpec =
add_init_and_end_per_suite(TestSpec0, undefined, undefined, FwMod),
+
TI = get_target_info(),
print(1, "Starting test~ts",
[print_if_known(N, {", ~w test cases",[N]},
@@ -1795,23 +1818,40 @@ downcase([], Result) ->
%%
%% Errors are silently ignored.
-html_convert_modules(TestSpec, _Config) ->
- Mods = html_isolate_modules(TestSpec),
+html_convert_modules(TestSpec, _Config, FwMod) ->
+ Mods = html_isolate_modules(TestSpec, FwMod),
html_convert_modules(Mods),
copy_html_files(get(test_server_dir), get(test_server_log_dir_base)).
%% Retrieve a list of modules out of the test spec.
-html_isolate_modules(List) -> html_isolate_modules(List, sets:new()).
-
-html_isolate_modules([], Set) -> sets:to_list(Set);
-html_isolate_modules([{skip_case,_}|Cases], Set) ->
- html_isolate_modules(Cases, Set);
-html_isolate_modules([{conf,_Ref,_Props,{Mod,_Func}}|Cases], Set) ->
- html_isolate_modules(Cases, sets:add_element(Mod, Set));
-html_isolate_modules([{Mod,_Case}|Cases], Set) ->
- html_isolate_modules(Cases, sets:add_element(Mod, Set));
-html_isolate_modules([{Mod,_Case,_Args}|Cases], Set) ->
- html_isolate_modules(Cases, sets:add_element(Mod, Set)).
+html_isolate_modules(List, FwMod) ->
+ html_isolate_modules(List, sets:new(), FwMod).
+
+html_isolate_modules([], Set, _) -> sets:to_list(Set);
+html_isolate_modules([{skip_case,_}|Cases], Set, FwMod) ->
+ html_isolate_modules(Cases, Set, FwMod);
+html_isolate_modules([{conf,_Ref,Props,{FwMod,_Func}}|Cases], Set, FwMod) ->
+ Set1 = case proplists:get_value(suite, Props) of
+ undefined -> Set;
+ Mod -> sets:add_element(Mod, Set)
+ end,
+ html_isolate_modules(Cases, Set1, FwMod);
+html_isolate_modules([{conf,_Ref,_Props,{Mod,_Func}}|Cases], Set, FwMod) ->
+ html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod);
+html_isolate_modules([{skip_case,{conf,_Ref,{FwMod,_Func},_Cmt},Mode}|Cases],
+ Set, FwMod) ->
+ Set1 = case proplists:get_value(suite, get_props(Mode)) of
+ undefined -> Set;
+ Mod -> sets:add_element(Mod, Set)
+ end,
+ html_isolate_modules(Cases, Set1, FwMod);
+html_isolate_modules([{skip_case,{conf,_Ref,{Mod,_Func},_Cmt},_Props}|Cases],
+ Set, FwMod) ->
+ html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod);
+html_isolate_modules([{Mod,_Case}|Cases], Set, FwMod) ->
+ html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod);
+html_isolate_modules([{Mod,_Case,_Args}|Cases], Set, FwMod) ->
+ html_isolate_modules(Cases, sets:add_element(Mod, Set), FwMod).
%% Given a list of modules, convert each module's source code to HTML.
html_convert_modules([Mod|Mods]) ->
@@ -1902,13 +1942,16 @@ add_init_and_end_per_suite([{skip_case,{{Mod,_},_}}=Case|Cases], LastMod,
{PreCases, NextMod, NextRef} =
do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod),
PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, NextRef, FwMod)];
+add_init_and_end_per_suite([{skip_case,{conf,_,{Mod,_},_},_}=Case|Cases], LastMod,
+ LastRef, FwMod) when Mod =/= LastMod ->
+ {PreCases, NextMod, NextRef} =
+ do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod),
+ PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, NextRef, FwMod)];
add_init_and_end_per_suite([{skip_case,{conf,_,{Mod,_},_}}=Case|Cases], LastMod,
LastRef, FwMod) when Mod =/= LastMod ->
{PreCases, NextMod, NextRef} =
do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod),
PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, NextRef, FwMod)];
-add_init_and_end_per_suite([{skip_case,_}=Case|Cases], LastMod, LastRef, FwMod) ->
- [Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)];
add_init_and_end_per_suite([{conf,Ref,Props,{FwMod,Func}}=Case|Cases], LastMod,
LastRef, FwMod) ->
%% if Mod == FwMod, this conf test is (probably) a test case group where
@@ -1918,7 +1961,8 @@ add_init_and_end_per_suite([{conf,Ref,Props,{FwMod,Func}}=Case|Cases], LastMod,
Suite when Suite =/= undefined, Suite =/= LastMod ->
{PreCases, NextMod, NextRef} =
do_add_init_and_end_per_suite(LastMod, LastRef, Suite, FwMod),
- Case1 = {conf,Ref,proplists:delete(suite,Props),{FwMod,Func}},
+ Case1 = {conf,Ref,[{suite,NextMod}|proplists:delete(suite,Props)],
+ {FwMod,Func}},
PreCases ++ [Case1|add_init_and_end_per_suite(Cases, NextMod,
NextRef, FwMod)];
_ ->
@@ -1929,6 +1973,9 @@ add_init_and_end_per_suite([{conf,_,_,{Mod,_}}=Case|Cases], LastMod,
{PreCases, NextMod, NextRef} =
do_add_init_and_end_per_suite(LastMod, LastRef, Mod, FwMod),
PreCases ++ [Case|add_init_and_end_per_suite(Cases, NextMod, NextRef, FwMod)];
+add_init_and_end_per_suite([SkipCase|Cases], LastMod, LastRef, FwMod)
+ when element(1,SkipCase) == skip_case ->
+ [SkipCase|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)];
add_init_and_end_per_suite([{conf,_,_,_}=Case|Cases], LastMod, LastRef, FwMod) ->
[Case|add_init_and_end_per_suite(Cases, LastMod, LastRef, FwMod)];
add_init_and_end_per_suite([{Mod,_}=Case|Cases], LastMod, LastRef, FwMod)
@@ -2043,7 +2090,8 @@ run_test_cases(TestSpec, Config, TimetrapData) ->
true ->
ok;
false ->
- html_convert_modules(TestSpec, Config)
+ FwMod = get_fw_mod(?MODULE),
+ html_convert_modules(TestSpec, Config, FwMod)
end,
run_test_cases_loop(TestSpec, [Config], TimetrapData, [], []),
@@ -2209,34 +2257,50 @@ run_test_cases(TestSpec, Config, TimetrapData) ->
%% group1_end | --->
%%
-run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
- Config, TimetrapData, Mode, Status) when Type==conf;
- Type==make ->
+run_test_cases_loop([{SkipTag,CaseData={Type,_Ref,_Case,_Comment}}|Cases],
+ Config, TimetrapData, Mode, Status) when
+ ((SkipTag==auto_skip_case) or (SkipTag==skip_case)) and
+ ((Type==conf) or (Type==make)) ->
+ run_test_cases_loop([{SkipTag,CaseData,Mode}|Cases],
+ Config, TimetrapData, Mode, Status);
+
+run_test_cases_loop([{SkipTag,{Type,Ref,Case,Comment},SkipMode}|Cases],
+ Config, TimetrapData, Mode, Status) when
+ ((SkipTag==auto_skip_case) or (SkipTag==skip_case)) and
+ ((Type==conf) or (Type==make)) ->
file:set_cwd(filename:dirname(get(test_server_dir))),
CurrIOHandler = get(test_server_common_io_handler),
ParentMode = tl(Mode),
+ {AutoOrUser,ReportTag} =
+ if SkipTag == auto_skip_case -> {auto,tc_auto_skip};
+ SkipTag == skip_case -> {user,tc_user_skip}
+ end,
+
%% check and update the mode for test case execution and io msg handling
case {curr_ref(Mode),check_props(parallel, Mode)} of
{Ref,Ref} ->
case check_props(parallel, ParentMode) of
false ->
- %% this is a skipped end conf for a top level parallel group,
- %% buffered io can be flushed
+ %% this is a skipped end conf for a top level parallel
+ %% group, buffered io can be flushed
handle_test_case_io_and_status(),
set_io_buffering(undefined),
- {Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, false, SkipMode),
- test_server_sup:framework_call(report, [tc_auto_skip,
- {Mod,Func,Comment}]),
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ false, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report,
+ [ReportTag,ConfData]),
run_test_cases_loop(Cases, Config, TimetrapData, ParentMode,
delete_status(Ref, Status));
_ ->
- %% this is a skipped end conf for a parallel group nested under a
- %% parallel group (io buffering is active)
+ %% this is a skipped end conf for a parallel group nested
+ %% under a parallel group (io buffering is active)
wait_for_cases(Ref),
- {Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, true, SkipMode),
- test_server_sup:framework_call(report, [tc_auto_skip,
- {Mod,Func,Comment}]),
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ true, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report, [ReportTag,ConfData]),
case CurrIOHandler of
{Ref,_} ->
%% current_io_handler was set by start conf of this
@@ -2246,18 +2310,21 @@ run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
_ ->
ok
end,
- run_test_cases_loop(Cases, Config, TimetrapData, ParentMode,
+ run_test_cases_loop(Cases, Config,
+ TimetrapData, ParentMode,
delete_status(Ref, Status))
end;
{Ref,false} ->
%% this is a skipped end conf for a non-parallel group that's not
%% nested under a parallel group
- {Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, false, SkipMode),
- test_server_sup:framework_call(report, [tc_auto_skip,{Mod,Func,Comment}]),
-
- %% Check if this group is auto skipped because of error in the init conf.
- %% If so, check if the parent group is a sequence, and if it is, skip
- %% all proceeding tests in that group.
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ false, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report, [ReportTag,ConfData]),
+
+ %% Check if this group is auto skipped because of error in the
+ %% init conf. If so, check if the parent group is a sequence,
+			    %% and if it is, skip all succeeding tests in that group.
GrName = get_name(Mode),
Cases1 =
case get_tc_results(Status) of
@@ -2270,7 +2337,8 @@ run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
ParentRef ->
Reason = {group_result,GrName,failed},
skip_cases_upto(ParentRef, Cases,
- Reason, tc, Mode)
+ Reason, tc, Mode,
+ SkipTag)
end;
false ->
Cases
@@ -2283,8 +2351,10 @@ run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
{Ref,_} ->
%% this is a skipped end conf for a non-parallel group nested under
%% a parallel group (io buffering is active)
- {Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, true, SkipMode),
- test_server_sup:framework_call(report, [tc_auto_skip,{Mod,Func,Comment}]),
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ true, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report, [ReportTag,ConfData]),
case CurrIOHandler of
{Ref,_} ->
%% current_io_handler was set by start conf of this
@@ -2299,20 +2369,27 @@ run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
{_,false} ->
%% this is a skipped start conf for a group which is not nested
%% under a parallel group
- {Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, false, SkipMode),
- test_server_sup:framework_call(report, [tc_auto_skip,{Mod,Func,Comment}]),
- run_test_cases_loop(Cases, Config, TimetrapData, [conf(Ref,[])|Mode], Status);
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ false, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report, [ReportTag,ConfData]),
+ run_test_cases_loop(Cases, Config, TimetrapData,
+ [conf(Ref,[])|Mode], Status);
{_,Ref0} when is_reference(Ref0) ->
- %% this is a skipped start conf for a group nested under a parallel group
- %% and if this is the first nested group, io buffering must be activated
+ %% this is a skipped start conf for a group nested under a parallel
+ %% group and if this is the first nested group, io buffering must
+ %% be activated
if CurrIOHandler == undefined ->
set_io_buffering({Ref,self()});
true ->
ok
end,
- {Mod,Func} = skip_case(auto, Ref, 0, Case, Comment, true, SkipMode),
- test_server_sup:framework_call(report, [tc_auto_skip,{Mod,Func,Comment}]),
- run_test_cases_loop(Cases, Config, TimetrapData, [conf(Ref,[])|Mode], Status)
+ {Mod,Func} = skip_case(AutoOrUser, Ref, 0, Case, Comment,
+ true, SkipMode),
+ ConfData = {Mod,{Func,get_name(SkipMode)},Comment},
+ test_server_sup:framework_call(report, [ReportTag,ConfData]),
+ run_test_cases_loop(Cases, Config, TimetrapData,
+ [conf(Ref,[])|Mode], Status)
end;
run_test_cases_loop([{auto_skip_case,{Case,Comment},SkipMode}|Cases],
@@ -2323,21 +2400,12 @@ run_test_cases_loop([{auto_skip_case,{Case,Comment},SkipMode}|Cases],
run_test_cases_loop(Cases, Config, TimetrapData, Mode,
update_status(skipped, Mod, Func, Status));
-run_test_cases_loop([{skip_case,{conf,Ref,Case,Comment}}|Cases0],
+run_test_cases_loop([{skip_case,{{Mod,all}=Case,Comment}}|Cases],
Config, TimetrapData, Mode, Status) ->
- {Mod,Func} = skip_case(user, Ref, 0, Case, Comment, is_io_buffered()),
- {Cases,Config1} =
- case curr_ref(Mode) of
- Ref ->
- %% skipped end conf
- {Cases0,tl(Config)};
- _ ->
- %% skipped start conf
- {skip_cases_upto(Ref, Cases0, Comment, conf, Mode),Config}
- end,
- test_server_sup:framework_call(report, [tc_user_skip,{Mod,Func,Comment}]),
- run_test_cases_loop(Cases, Config1, TimetrapData, Mode,
- update_status(skipped, Mod, Func, Status));
+ skip_case(user, undefined, 0, Case, Comment, false, Mode),
+ test_server_sup:framework_call(report, [tc_user_skip,
+ {Mod,all,Comment}]),
+ run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status);
run_test_cases_loop([{skip_case,{Case,Comment}}|Cases],
Config, TimetrapData, Mode, Status) ->
@@ -2597,7 +2665,8 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
"~n*** ~w returned bad elements in Config: ~p.~n",
[Func,Bad]),
Reason = {failed,{Mod,init_per_suite,bad_return}},
- Cases2 = skip_cases_upto(Ref, Cases, Reason, conf, CurrMode),
+ Cases2 = skip_cases_upto(Ref, Cases, Reason, conf, CurrMode,
+ auto_skip_case),
set_io_buffering(IOHandler),
stop_minor_log_file(),
run_test_cases_loop(Cases2, Config, TimetrapData, Mode,
@@ -2623,7 +2692,8 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
print(minor, "~n*** ~w failed.~n"
" Skipping all cases.", [Func]),
Reason = {failed,{Mod,Func,Fail}},
- {skip_cases_upto(Ref, Cases, Reason, conf, CurrMode),
+ {skip_cases_upto(Ref, Cases, Reason, conf, CurrMode,
+ auto_skip_case),
Config,
update_status(failed, group_result, get_name(Mode),
delete_status(Ref, Status2))};
@@ -2635,14 +2705,37 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
set_io_buffering(IOHandler),
stop_minor_log_file(),
run_test_cases_loop(Cases2, Config1, TimetrapData, Mode, Status3);
+
+ {_,{auto_skip,SkipReason},_} ->
+ %% this case can only happen if the framework (not the user)
+ %% decides to skip execution of a conf function
+ {Cases2,Config1,Status3} =
+ if StartConf ->
+ ReportAbortRepeat(auto_skipped),
+ print(minor, "~n*** ~w auto skipped.~n"
+ " Skipping all cases.", [Func]),
+ {skip_cases_upto(Ref, Cases, SkipReason, conf, CurrMode,
+ auto_skip_case),
+ Config,
+ delete_status(Ref, Status2)};
+ not StartConf ->
+ ReportRepeatStop(),
+ print_conf_time(ConfTime),
+ {Cases,tl(Config),delete_status(Ref, Status2)}
+ end,
+ set_io_buffering(IOHandler),
+ stop_minor_log_file(),
+ run_test_cases_loop(Cases2, Config1, TimetrapData, Mode, Status3);
+
{_,{Skip,Reason},_} when StartConf and ((Skip==skip) or (Skip==skipped)) ->
ReportAbortRepeat(skipped),
print(minor, "~n*** ~w skipped.~n"
" Skipping all cases.", [Func]),
set_io_buffering(IOHandler),
stop_minor_log_file(),
- run_test_cases_loop(skip_cases_upto(Ref, Cases, Reason, conf, CurrMode),
- Config, TimetrapData, Mode,
+ run_test_cases_loop(skip_cases_upto(Ref, Cases, Reason, conf,
+ CurrMode, skip_case),
+ [hd(Config)|Config], TimetrapData, Mode,
delete_status(Ref, Status2));
{_,{skip_and_save,Reason,_SavedConfig},_} when StartConf ->
ReportAbortRepeat(skipped),
@@ -2650,13 +2743,15 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
" Skipping all cases.", [Func]),
set_io_buffering(IOHandler),
stop_minor_log_file(),
- run_test_cases_loop(skip_cases_upto(Ref, Cases, Reason, conf, CurrMode),
- Config, TimetrapData, Mode,
+ run_test_cases_loop(skip_cases_upto(Ref, Cases, Reason, conf,
+ CurrMode, skip_case),
+ [hd(Config)|Config], TimetrapData, Mode,
delete_status(Ref, Status2));
{_,_Other,_} when Func == init_per_suite ->
print(minor, "~n*** init_per_suite failed to return a Config list.~n", []),
Reason = {failed,{Mod,init_per_suite,bad_return}},
- Cases2 = skip_cases_upto(Ref, Cases, Reason, conf, CurrMode),
+ Cases2 = skip_cases_upto(Ref, Cases, Reason, conf, CurrMode,
+ auto_skip_case),
set_io_buffering(IOHandler),
stop_minor_log_file(),
run_test_cases_loop(Cases2, Config, TimetrapData, Mode,
@@ -2668,7 +2763,6 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
stop_minor_log_file(),
run_test_cases_loop(Cases, [hd(Config)|Config], TimetrapData,
Mode, Status2);
-
{_,_EndConfRetVal,Opts} ->
%% Check if return_group_result is set (ok, skipped or failed) and
%% if so:
@@ -2683,7 +2777,8 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
case {curr_ref(Mode),check_prop(sequence, Mode)} of
{ParentRef,ParentRef} ->
Reason = {group_result,GrName,failed},
- {skip_cases_upto(ParentRef, Cases, Reason, tc, Mode),
+ {skip_cases_upto(ParentRef, Cases, Reason, tc,
+ Mode, auto_skip_case),
update_status(failed, group_result, GrName,
delete_status(Ref, Status2))};
_ ->
@@ -2701,16 +2796,19 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
ReportRepeatStop(),
set_io_buffering(IOHandler),
stop_minor_log_file(),
- run_test_cases_loop(Cases2, tl(Config), TimetrapData, Mode, Status3)
+ run_test_cases_loop(Cases2, tl(Config), TimetrapData,
+ Mode, Status3)
end;
-run_test_cases_loop([{make,Ref,{Mod,Func,Args}}|Cases0], Config, TimetrapData, Mode, Status) ->
+run_test_cases_loop([{make,Ref,{Mod,Func,Args}}|Cases0], Config, TimetrapData,
+ Mode, Status) ->
case run_test_case(Ref, 0, Mod, Func, Args, skip_init, TimetrapData) of
{_,Why={'EXIT',_},_} ->
print(minor, "~n*** ~w failed.~n"
" Skipping all cases.", [Func]),
Reason = {failed,{Mod,Func,Why}},
- Cases = skip_cases_upto(Ref, Cases0, Reason, conf, Mode),
+ Cases = skip_cases_upto(Ref, Cases0, Reason, conf, Mode,
+ auto_skip_case),
stop_minor_log_file(),
run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status);
{_,_Whatever,_} ->
@@ -2735,7 +2833,14 @@ run_test_cases_loop([{Mod,Case}|Cases], Config, TimetrapData, Mode, Status) ->
TimetrapData, Mode, Status);
run_test_cases_loop([{Mod,Func,Args}|Cases], Config, TimetrapData, Mode, Status) ->
- Num = put(test_server_case_num, get(test_server_case_num)+1),
+ {Num,RunInit} =
+ case FwMod = get_fw_mod(?MODULE) of
+ Mod when Func == error_in_suite ->
+ {-1,skip_init};
+ _ ->
+ {put(test_server_case_num, get(test_server_case_num)+1),
+ run_init}
+ end,
%% check the current execution mode and save info about the case if
%% detected that printouts to common log files is handled later
@@ -2750,7 +2855,7 @@ run_test_cases_loop([{Mod,Func,Args}|Cases], Config, TimetrapData, Mode, Status)
end,
case run_test_case(undefined, Num+1, Mod, Func, Args,
- run_init, TimetrapData, Mode) of
+ RunInit, TimetrapData, Mode) of
%% callback to framework module failed, exit immediately
{_,{framework_error,{FwMod,FwFunc},Reason},_} ->
print(minor, "~n*** ~w failed in ~w. Reason: ~p~n",
@@ -2791,7 +2896,8 @@ run_test_cases_loop([{Mod,Func,Args}|Cases], Config, TimetrapData, Mode, Status)
" Skipping all other cases in sequence.",
[Func]),
Reason = {failed,{Mod,Func}},
- Cases2 = skip_cases_upto(Ref, Cases, Reason, tc, Mode),
+ Cases2 = skip_cases_upto(Ref, Cases, Reason, tc,
+ Mode, auto_skip_case),
stop_minor_log_file(),
run_test_cases_loop(Cases2, Config, TimetrapData, Mode, Status1)
end
@@ -3012,13 +3118,13 @@ cases_to_shuffle(Ref, Cases) ->
cases_to_shuffle(Ref, [{conf,Ref,_,_} | _]=Cs, N, Ix) -> % end
{N-1,Ix,Cs};
-cases_to_shuffle(Ref, [{skip_case,{_,Ref,_,_}} | _]=Cs, N, Ix) -> % end
+cases_to_shuffle(Ref, [{skip_case,{_,Ref,_,_},_} | _]=Cs, N, Ix) -> % end
{N-1,Ix,Cs};
cases_to_shuffle(Ref, [{conf,Ref1,_,_}=C | Cs], N, Ix) -> % nested group
{Cs1,Rest} = get_subcases(Ref1, Cs, []),
cases_to_shuffle(Ref, Rest, N+1, [{N,[C|Cs1]} | Ix]);
-cases_to_shuffle(Ref, [{skip_case,{_,Ref1,_,_}}=C | Cs], N, Ix) -> % nested group
+cases_to_shuffle(Ref, [{skip_case,{_,Ref1,_,_},_}=C | Cs], N, Ix) -> % nested group
{Cs1,Rest} = get_subcases(Ref1, Cs, []),
cases_to_shuffle(Ref, Rest, N+1, [{N,[C|Cs1]} | Ix]);
@@ -3027,7 +3133,7 @@ cases_to_shuffle(Ref, [C | Cs], N, Ix) ->
get_subcases(SubRef, [{conf,SubRef,_,_}=C | Cs], SubCs) ->
{lists:reverse([C|SubCs]),Cs};
-get_subcases(SubRef, [{skip_case,{_,SubRef,_,_}}=C | Cs], SubCs) ->
+get_subcases(SubRef, [{skip_case,{_,SubRef,_,_},_}=C | Cs], SubCs) ->
{lists:reverse([C|SubCs]),Cs};
get_subcases(SubRef, [C|Cs], SubCs) ->
get_subcases(SubRef, Cs, [C|SubCs]).
@@ -3075,13 +3181,27 @@ skip_case1(Type, CaseNum, Mod, Func, Comment, Mode) ->
ResultCol = if Type == auto -> ?auto_skip_color;
Type == user -> ?user_skip_color
end,
-
- Comment1 = reason_to_string(Comment),
-
print(major, "~n=case ~w:~w", [Mod,Func]),
- print(major, "=started ~s", [lists:flatten(timestamp_get(""))]),
- print(major, "=result skipped: ~ts", [Comment1]),
- print(2,"*** Skipping test case #~w ~w ***", [CaseNum,{Mod,Func}]),
+ GroupName = case get_name(Mode) of
+ undefined ->
+ "";
+ GrName ->
+ GrName1 = cast_to_list(GrName),
+ print(major, "=group_props ~p", [[{name,GrName1}]]),
+ GrName1
+ end,
+ print(major, "=started ~s", [lists:flatten(timestamp_get(""))]),
+ Comment1 = reason_to_string(Comment),
+ if Type == auto ->
+ print(major, "=result auto_skipped: ~ts", [Comment1]);
+ Type == user ->
+ print(major, "=result skipped: ~ts", [Comment1])
+ end,
+ if CaseNum == 0 ->
+ print(2,"*** Skipping ~w ***", [{Mod,Func}]);
+ true ->
+ print(2,"*** Skipping test case #~w ~w ***", [CaseNum,{Mod,Func}])
+ end,
TR = xhtml("<tr valign=\"top\">", ["<tr class=\"",odd_or_even(),"\">"]),
GroupName = case get_name(Mode) of
undefined -> "";
@@ -3097,6 +3217,7 @@ skip_case1(Type, CaseNum, Mod, Func, Comment, Mode) ->
"<td><font color=\"~ts\">SKIPPED</font></td>"
"<td>~ts</td></tr>\n",
[num2str(CaseNum),fw_name(Mod),GroupName,Func,ResultCol,Comment1]),
+
if CaseNum > 0 ->
{US,AS} = get(test_server_skipped),
case Type of
@@ -3110,12 +3231,14 @@ skip_case1(Type, CaseNum, Mod, Func, Comment, Mode) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% skip_cases_upto(Ref, Cases, Reason, Origin, Mode) -> Cases1
+%% skip_cases_upto(Ref, Cases, Reason, Origin, Mode, SkipType) -> Cases1
%%
+%% SkipType = skip_case | auto_skip_case
%% Mark all cases tagged with Ref as skipped.
-skip_cases_upto(Ref, Cases, Reason, Origin, Mode) ->
- {_,Modified,Rest} = modify_cases_upto(Ref, {skip,Reason,Origin,Mode}, Cases),
+skip_cases_upto(Ref, Cases, Reason, Origin, Mode, SkipType) ->
+ {_,Modified,Rest} =
+ modify_cases_upto(Ref, {skip,Reason,Origin,Mode,SkipType}, Cases),
Modified++Rest.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -3151,6 +3274,7 @@ modify_cases_upto(Ref, ModOp, Cases, Orig, Alt) ->
%% same ref in the list, if not, this *is* an end conf case
case lists:any(fun({_,R,_,_}) when R == Ref -> true;
({_,R,_}) when R == Ref -> true;
+ ({skip_case,{_,R,_,_},_}) when R == Ref -> true;
({skip_case,{_,R,_,_}}) when R == Ref -> true;
(_) -> false
end, Cases) of
@@ -3161,25 +3285,39 @@ modify_cases_upto(Ref, ModOp, Cases, Orig, Alt) ->
end.
%% next case is a conf with same ref, must be end conf = we're done
-modify_cases_upto1(Ref, {skip,Reason,conf,Mode}, [{conf,Ref,_Props,MF}|T], Orig, Alt) ->
+modify_cases_upto1(Ref, {skip,Reason,conf,Mode,skip_case},
+ [{conf,Ref,_Props,MF}|T], Orig, Alt) ->
+ {Orig,[{skip_case,{conf,Ref,MF,Reason},Mode}|Alt],T};
+modify_cases_upto1(Ref, {skip,Reason,conf,Mode,auto_skip_case},
+ [{conf,Ref,_Props,MF}|T], Orig, Alt) ->
{Orig,[{auto_skip_case,{conf,Ref,MF,Reason},Mode}|Alt],T};
modify_cases_upto1(Ref, {copy,NewRef}, [{conf,Ref,Props,MF}=C|T], Orig, Alt) ->
{[C|Orig],[{conf,NewRef,update_repeat(Props),MF}|Alt],T};
%% we've skipped all remaining cases in a sequence
-modify_cases_upto1(Ref, {skip,_,tc,_}, [{conf,Ref,_Props,_MF}|_]=Cs, Orig, Alt) ->
+modify_cases_upto1(Ref, {skip,_,tc,_,_},
+ [{conf,Ref,_Props,_MF}|_]=Cs, Orig, Alt) ->
{Orig,Alt,Cs};
%% next is a make case
-modify_cases_upto1(Ref, {skip,Reason,_,Mode}, [{make,Ref,MF}|T], Orig, Alt) ->
- {Orig,[{auto_skip_case,{make,Ref,MF,Reason},Mode}|Alt],T};
+modify_cases_upto1(Ref, {skip,Reason,_,Mode,SkipType},
+ [{make,Ref,MF}|T], Orig, Alt) ->
+ {Orig,[{SkipType,{make,Ref,MF,Reason},Mode}|Alt],T};
modify_cases_upto1(Ref, {copy,NewRef}, [{make,Ref,MF}=M|T], Orig, Alt) ->
{[M|Orig],[{make,NewRef,MF}|Alt],T};
%% next case is a user skipped end conf with the same ref = we're done
-modify_cases_upto1(Ref, {skip,Reason,_,Mode}, [{skip_case,{Type,Ref,MF,_Cmt}}|T], Orig, Alt) ->
- {Orig,[{auto_skip_case,{Type,Ref,MF,Reason},Mode}|Alt],T};
-modify_cases_upto1(Ref, {copy,NewRef}, [{skip_case,{Type,Ref,MF,Cmt}}=C|T], Orig, Alt) ->
+modify_cases_upto1(Ref, {skip,Reason,_,Mode,SkipType},
+ [{skip_case,{Type,Ref,MF,_Cmt},_}|T], Orig, Alt) ->
+ {Orig,[{SkipType,{Type,Ref,MF,Reason},Mode}|Alt],T};
+modify_cases_upto1(Ref, {skip,Reason,_,Mode,SkipType},
+ [{skip_case,{Type,Ref,MF,_Cmt}}|T], Orig, Alt) ->
+ {Orig,[{SkipType,{Type,Ref,MF,Reason},Mode}|Alt],T};
+modify_cases_upto1(Ref, {copy,NewRef},
+ [{skip_case,{Type,Ref,MF,Cmt},Mode}=C|T], Orig, Alt) ->
+ {[C|Orig],[{skip_case,{Type,NewRef,MF,Cmt},Mode}|Alt],T};
+modify_cases_upto1(Ref, {copy,NewRef},
+ [{skip_case,{Type,Ref,MF,Cmt}}=C|T], Orig, Alt) ->
{[C|Orig],[{skip_case,{Type,NewRef,MF,Cmt}}|Alt],T};
%% next is a skip_case, could be one test case or 'all' in suite, we must proceed
@@ -3187,13 +3325,17 @@ modify_cases_upto1(Ref, ModOp, [{skip_case,{_F,_Cmt}}=MF|T], Orig, Alt) ->
modify_cases_upto1(Ref, ModOp, T, [MF|Orig], [MF|Alt]);
%% next is a normal case (possibly in a sequence), mark as skipped, or copy, and proceed
-modify_cases_upto1(Ref, {skip,Reason,_,Mode}=Op, [{_M,_F}=MF|T], Orig, Alt) ->
+modify_cases_upto1(Ref, {skip,Reason,_,_,skip_case}=Op,
+ [{_M,_F}=MF|T], Orig, Alt) ->
+ modify_cases_upto1(Ref, Op, T, Orig, [{skip_case,{MF,Reason}}|Alt]);
+modify_cases_upto1(Ref, {skip,Reason,_,Mode,auto_skip_case}=Op,
+ [{_M,_F}=MF|T], Orig, Alt) ->
modify_cases_upto1(Ref, Op, T, Orig, [{auto_skip_case,{MF,Reason},Mode}|Alt]);
modify_cases_upto1(Ref, CopyOp, [{_M,_F}=MF|T], Orig, Alt) ->
modify_cases_upto1(Ref, CopyOp, T, [MF|Orig], [MF|Alt]);
%% next is some other case, ignore or copy
-modify_cases_upto1(Ref, {skip,_,_,_}=Op, [_|T], Orig, Alt) ->
+modify_cases_upto1(Ref, {skip,_,_,_,_}=Op, [_|T], Orig, Alt) ->
modify_cases_upto1(Ref, Op, T, Orig, Alt);
modify_cases_upto1(Ref, CopyOp, [C|T], Orig, Alt) ->
modify_cases_upto1(Ref, CopyOp, T, [C|Orig], [C|Alt]).
@@ -3567,7 +3709,8 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
{died,Reason} ->
progress(failed, Num, Mod, Func, Loc, Reason,
Time, Comment, Style);
- {_,{'EXIT',{Skip,Reason}}} when Skip==skip; Skip==skipped ->
+ {_,{'EXIT',{Skip,Reason}}} when Skip==skip; Skip==skipped;
+ Skip==auto_skip ->
progress(skip, Num, Mod, Func, Loc, Reason,
Time, Comment, Style);
{_,{'EXIT',_Pid,{Skip,Reason}}} when Skip==skip; Skip==skipped ->
@@ -3579,10 +3722,13 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
{_,{'EXIT',Reason}} ->
progress(failed, Num, Mod, Func, Loc, Reason,
Time, Comment, Style);
- {_, {Fail, Reason}} when Fail =:= fail; Fail =:= failed ->
+ {_,{Fail,Reason}} when Fail =:= fail; Fail =:= failed ->
progress(failed, Num, Mod, Func, Loc, Reason,
Time, Comment, Style);
- {_, {Skip, Reason}} when Skip==skip; Skip==skipped ->
+ {_,Reason={auto_skip,_Why}} ->
+ progress(skip, Num, Mod, Func, Loc, Reason,
+ Time, Comment, Style);
+ {_,{Skip,Reason}} when Skip==skip; Skip==skipped ->
progress(skip, Num, Mod, Func, Loc, Reason,
Time, Comment, Style);
{Time,RetVal} ->
@@ -3699,15 +3845,15 @@ num2str(N) -> integer_to_list(N).
progress(skip, CaseNum, Mod, Func, Loc, Reason, Time,
Comment, {St0,St1}) ->
- {Reason1,{Color,Ret}} =
+ {Reason1,{Color,Ret,ReportTag}} =
if_auto_skip(Reason,
- fun() -> {?auto_skip_color,auto_skip} end,
- fun() -> {?user_skip_color,skip} end),
- print(major, "=result skipped", []),
- print(1, "*** SKIPPED *** ~ts",
- [get_info_str(Func, CaseNum, get(test_server_cases))]),
+ fun() -> {?auto_skip_color,auto_skip,auto_skipped} end,
+ fun() -> {?user_skip_color,skip,skipped} end),
+ print(major, "=result ~w: ~p", [ReportTag,Reason1]),
+ print(1, "*** SKIPPED ~ts ***",
+ [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]),
test_server_sup:framework_call(report, [tc_done,{Mod,Func,
- {skipped,Reason1}}]),
+ {ReportTag,Reason1}}]),
ReasonStr = reason_to_string(Reason1),
ReasonStr1 = lists:flatten([string:strip(S,left) ||
S <- string:tokens(ReasonStr,[$\n])]),
@@ -3734,8 +3880,8 @@ progress(skip, CaseNum, Mod, Func, Loc, Reason, Time,
progress(failed, CaseNum, Mod, Func, Loc, timetrap_timeout, T,
Comment0, {St0,St1}) ->
print(major, "=result failed: timeout, ~p", [Loc]),
- print(1, "*** FAILED *** ~ts",
- [get_info_str(Func, CaseNum, get(test_server_cases))]),
+ print(1, "*** FAILED ~ts ***",
+ [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]),
test_server_sup:framework_call(report,
[tc_done,{Mod,Func,
{failed,timetrap_timeout}}]),
@@ -3760,8 +3906,8 @@ progress(failed, CaseNum, Mod, Func, Loc, timetrap_timeout, T,
progress(failed, CaseNum, Mod, Func, Loc, {testcase_aborted,Reason}, _T,
Comment0, {St0,St1}) ->
print(major, "=result failed: testcase_aborted, ~p", [Loc]),
- print(1, "*** FAILED *** ~ts",
- [get_info_str(Func, CaseNum, get(test_server_cases))]),
+ print(1, "*** FAILED ~ts ***",
+ [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]),
test_server_sup:framework_call(report,
[tc_done,{Mod,Func,
{failed,testcase_aborted}}]),
@@ -3786,8 +3932,8 @@ progress(failed, CaseNum, Mod, Func, Loc, {testcase_aborted,Reason}, _T,
progress(failed, CaseNum, Mod, Func, unknown, Reason, Time,
Comment0, {St0,St1}) ->
print(major, "=result failed: ~p, ~w", [Reason,unknown]),
- print(1, "*** FAILED *** ~ts",
- [get_info_str(Func, CaseNum, get(test_server_cases))]),
+ print(1, "*** FAILED ~ts ***",
+ [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]),
test_server_sup:framework_call(report, [tc_done,{Mod,Func,
{failed,Reason}}]),
TimeStr = io_lib:format(if is_float(Time) -> "~.3fs";
@@ -3822,8 +3968,8 @@ progress(failed, CaseNum, Mod, Func, unknown, Reason, Time,
progress(failed, CaseNum, Mod, Func, Loc, Reason, Time,
Comment0, {St0,St1}) ->
print(major, "=result failed: ~p, ~p", [Reason,Loc]),
- print(1, "*** FAILED *** ~ts",
- [get_info_str(Func, CaseNum, get(test_server_cases))]),
+ print(1, "*** FAILED ~ts ***",
+ [get_info_str(Mod,Func, CaseNum, get(test_server_cases))]),
test_server_sup:framework_call(report, [tc_done,{Mod,Func,
{failed,Reason}}]),
TimeStr = io_lib:format(if is_float(Time) -> "~.3fs";
@@ -3920,24 +4066,25 @@ fw_name(Mod) ->
if_auto_skip(Reason={failed,{_,init_per_testcase,_}}, True, _False) ->
{Reason,True()};
-if_auto_skip({_T,{skip,Reason={failed,{_,init_per_testcase,_}}},_Opts}, True, _False) ->
+if_auto_skip({skip,Reason={failed,{_,init_per_testcase,_}}}, True, _False) ->
{Reason,True()};
-if_auto_skip({fw_auto_skip,Reason}, True, _False) ->
- {Reason,True()};
-if_auto_skip({_T,{skip,{fw_auto_skip,Reason}},_Opts}, True, _False) ->
+if_auto_skip({auto_skip,Reason}, True, _False) ->
{Reason,True()};
if_auto_skip(Reason, _True, False) ->
{Reason,False()}.
-update_skip_counters(RetVal, {US,AS}) ->
- {_,Result} = if_auto_skip(RetVal, fun() -> {US,AS+1} end, fun() -> {US+1,AS} end),
+update_skip_counters({_T,Pat,_Opts}, {US,AS}) ->
+ {_,Result} = if_auto_skip(Pat, fun() -> {US,AS+1} end, fun() -> {US+1,AS} end),
+ Result;
+update_skip_counters(Pat, {US,AS}) ->
+ {_,Result} = if_auto_skip(Pat, fun() -> {US,AS+1} end, fun() -> {US+1,AS} end),
Result.
-get_info_str(Func, 0, _Cases) ->
- atom_to_list(Func);
-get_info_str(_Func, CaseNum, unknown) ->
+get_info_str(Mod,Func, 0, _Cases) ->
+ io_lib:format("~w", [{Mod,Func}]);
+get_info_str(_Mod,_Func, CaseNum, unknown) ->
"test case " ++ integer_to_list(CaseNum);
-get_info_str(_Func, CaseNum, Cases) ->
+get_info_str(_Mod,_Func, CaseNum, Cases) ->
"test case " ++ integer_to_list(CaseNum) ++
" of " ++ integer_to_list(Cases).
@@ -4396,12 +4543,27 @@ collect_cases({conf,Props,InitMF,CaseList,FinMF} = Conf, St) ->
Props1 ->
Ref = make_ref(),
Skips = St#cc.skip,
+ Props2 = [{suite,St#cc.mod} | lists:delete(suite,Props1)],
+ Mode = [{Ref,Props2,undefined}],
case in_skip_list({St#cc.mod,Conf}, Skips) of
{true,Comment} -> % conf init skipped
- {ok,[{skip_case,{conf,Ref,InitMF,Comment}} |
+ {ok,[{skip_case,{conf,Ref,InitMF,Comment},Mode} |
[] ++ [{conf,Ref,[],FinMF}]],St};
{true,Name,Comment} when is_atom(Name) -> % all cases skipped
- {ok,[{skip_case,{{St#cc.mod,{group,Name}},Comment}}],St};
+ case collect_cases(CaseList, St) of
+ {ok,[],_St} = Empty ->
+ Empty;
+ {ok,FlatCases,St1} ->
+ Cases2Skip = FlatCases ++ [{conf,Ref,
+ keep_name(Props1),
+ FinMF}],
+ Skipped = skip_cases_upto(Ref, Cases2Skip, Comment,
+ conf, Mode, skip_case),
+ {ok,[{skip_case,{conf,Ref,InitMF,Comment},Mode} |
+ Skipped],St1};
+ {error,_Reason} = Error ->
+ Error
+ end;
{true,ToSkip,_} when is_list(ToSkip) -> % some cases skipped
case collect_cases(CaseList,
St#cc{skip=ToSkip++Skips}) of
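
skip_cases_upto/6 now takes an explicit SkipType (skip_case or auto_skip_case), so user-requested skips and framework auto-skips end up with different report tags. In the patch the mapping is done inline with an if expression; written as a standalone helper (hypothetical name) it amounts to:

    %% Hypothetical helper; the patch does this mapping inline.
    report_tag(skip_case)      -> tc_user_skip;
    report_tag(auto_skip_case) -> tc_auto_skip.
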
diff --git a/lib/test_server/src/ts.erl b/lib/test_server/src/ts.erl
index 8e71c69d35..189a71a8ce 100644
--- a/lib/test_server/src/ts.erl
+++ b/lib/test_server/src/ts.erl
@@ -622,7 +622,7 @@ run_test(File, Args, Options) ->
run_test(File, Args, Options, Vars) ->
ts_run:run(File, Args, Options, Vars).
-
+
%% This module provides some convenient shortcuts to running
%% the test server from within a started Erlang shell.
%% (This are here for backwards compatibility.)
diff --git a/lib/test_server/test/test_server_SUITE.erl b/lib/test_server/test/test_server_SUITE.erl
index 8ad5fcfb5c..c4faac036b 100644
--- a/lib/test_server/test/test_server_SUITE.erl
+++ b/lib/test_server/test/test_server_SUITE.erl
@@ -117,7 +117,7 @@ test_server_shuffle01_SUITE(Config) ->
test_server_skip_SUITE(Config) ->
run_test_server_tests("test_server_skip_SUITE", [],
- 3, 0, 1, 0, 0, 1, 3, 0, 0, Config).
+ 3, 0, 1, 0, 1, 0, 3, 0, 0, Config).
test_server_conf01_SUITE(Config) ->
run_test_server_tests("test_server_conf01_SUITE", [],
@@ -248,11 +248,13 @@ run_test_server_tests(SuiteName, Skip, NCases, NFail, NExpected, NSucc,
{NActualSkip,NActualFail,NActualSucc} =
lists:foldl(fun(#tc{ result = skip },{S,F,Su}) ->
{S+1,F,Su};
- (#tc{ result = ok },{S,F,Su}) ->
- {S,F,Su+1};
- (#tc{ result = failed },{S,F,Su}) ->
- {S,F+1,Su}
- end,{0,0,0},Data#suite.cases),
+ (#tc{ result = auto_skip },{S,F,Su}) ->
+ {S+1,F,Su};
+ (#tc{ result = ok },{S,F,Su}) ->
+ {S,F,Su+1};
+ (#tc{ result = failed },{S,F,Su}) ->
+ {S,F+1,Su}
+ end,{0,0,0},Data#suite.cases),
Data.
translate_filename(Filename,EncodingOnTestNode) ->
diff --git a/lib/test_server/test/test_server_test_lib.erl b/lib/test_server/test/test_server_test_lib.erl
index cd6804f7ad..82a702d59f 100644
--- a/lib/test_server/test/test_server_test_lib.erl
+++ b/lib/test_server/test/test_server_test_lib.erl
@@ -17,6 +17,7 @@
%% %CopyrightEnd%
%%
-module(test_server_test_lib).
+
-export([parse_suite/1]).
-export([init/2, pre_init_per_testcase/3, post_end_per_testcase/4]).
@@ -185,7 +186,9 @@ parse_case("=result" ++ Result, _, Tc) ->
"failed" ++ _ ->
{ok, Tc#tc{ result = failed } };
"skipped" ++ _ ->
- {ok, Tc#tc{ result = skip } }
+ {ok, Tc#tc{ result = skip } };
+ "auto_skipped" ++ _ ->
+ {ok, Tc#tc{ result = auto_skip } }
end;
parse_case("=finished" ++ _ , _Fd, #tc{ name = undefined }) ->
finished;
diff --git a/lib/tools/emacs/erlang-eunit.el b/lib/tools/emacs/erlang-eunit.el
index f2c0db67dd..0adeff1a02 100644
--- a/lib/tools/emacs/erlang-eunit.el
+++ b/lib/tools/emacs/erlang-eunit.el
@@ -40,6 +40,10 @@ This is useful, reducing the save-compile-load-test cycle to one keychord.")
(defvar erlang-eunit-recent-info '((mode . nil) (module . nil) (test . nil) (cover . nil))
"Info about the most recent running of an EUnit test representation.")
+(defvar erlang-error-regexp-alist
+ '(("^\\([^:( \t\n]+\\)[:(][ \t]*\\([0-9]+\\)[:) \t]" . (1 2)))
+ "*Patterns for matching Erlang errors.")
+
;;;
;;; Switch between src/EUnit test buffers
;;;
diff --git a/lib/tools/emacs/erlang-start.el b/lib/tools/emacs/erlang-start.el
index e1dc86621e..76e0575e68 100644
--- a/lib/tools/emacs/erlang-start.el
+++ b/lib/tools/emacs/erlang-start.el
@@ -52,7 +52,7 @@
;;
;; To set the variable you can use the following command:
;; M-x set-variable RET debug-on-error RET t RET
-
+
;;; Code:
;;
diff --git a/lib/tools/emacs/erlang.el b/lib/tools/emacs/erlang.el
index 624042204c..c395d22356 100644
--- a/lib/tools/emacs/erlang.el
+++ b/lib/tools/emacs/erlang.el
@@ -697,6 +697,7 @@ resulting regexp is surrounded by \\_< and \\_>."
"char"
"cons"
"deep_string"
+ "iodata"
"iolist"
"maybe_improper_list"
"module"
@@ -708,6 +709,7 @@ resulting regexp is surrounded by \\_< and \\_>."
"nonempty_list"
"nonempty_improper_list"
"nonempty_maybe_improper_list"
+ "nonempty_string"
"no_return"
"pos_integer"
"string"
@@ -1025,7 +1027,7 @@ behaviour.")
(defvar erlang-mode-syntax-table nil
"Syntax table in use in Erlang-mode buffers.")
-
+
(defvar erlang-skel-file "erlang-skels"
"The type of erlang-skeletons that should be used, default
@@ -1272,7 +1274,7 @@ Unfortunately, XEmacs hasn't got support for a special Font
Lock syntax table. The effect is that `apply' in the atom
`foo_apply' will be highlighted as a bif.")
-
+
;;; Avoid errors while compiling this file.
;; `eval-when-compile' is not defined in Emacs 18. We define it as a
@@ -1321,7 +1323,7 @@ Lock syntax table. The effect is that `apply' in the atom
(require 'tempo)
(require 'compile))))
-
+
(defun erlang-version ()
"Return the current version of Erlang mode."
(interactive)
@@ -1516,7 +1518,7 @@ Other commands:
(set (make-local-variable 'outline-level) (lambda () 1))
(set (make-local-variable 'add-log-current-defun-function)
'erlang-current-defun))
-
+
(defun erlang-font-lock-init ()
"Initialize Font Lock for Erlang mode."
(or erlang-font-lock-syntax-table
@@ -1686,7 +1688,7 @@ plus variables, macros and records."
(font-lock-mode 1)
(funcall (symbol-function 'font-lock-fontify-buffer)))
-
+
(defun erlang-menu-init ()
"Init menus for Erlang mode.
@@ -1905,7 +1907,7 @@ Example:
The new menu is returned. No guarantee is given that the original
menu is left unchanged."
(delq entry items))
-
+
;; Man code:
(defun erlang-man-init ()
@@ -2228,7 +2230,7 @@ For example:
After installing the line, kill and restart Emacs, or restart Erlang
mode with the command `M-x erlang-mode RET'.")))
-
+
;; Skeleton code:
;; This code is based on the package `tempo' which is part of modern
@@ -2349,7 +2351,7 @@ The first character of DD is space if the value is less than 10."
(erlang-string-to-int (substring date 8 10))
(substring date 4 7)
(substring date -4))))
-
+
;; Indentation code:
(defun erlang-indent-command (&optional whole-exp)
@@ -3132,7 +3134,7 @@ commands."
(skip-chars-backward " \t")
(max (if (bolp) 0 (1+ (current-column)))
comment-column)))))
-
+
;;; Erlang movement commands
;; All commands below work as movement commands. I.e. if the point is
@@ -3336,7 +3338,7 @@ With negative argument go towards the beginning of the buffer."
(forward-sexp 1)
(buffer-substring start (point)))))
-
+
;;; Miscellaneous
(defun erlang-fill-paragraph (&optional justify)
@@ -3445,7 +3447,7 @@ at the end."
(error "Can't clone argument list"))
(insert args)
(set-mark p)))
-
+
;;; Information retrieval functions.
(defun erlang-buffer-substring (beg end)
@@ -3772,7 +3774,7 @@ exported function."
(store-match-data old-match-data)
(member (cons name arity) exports))))
-
+
;;; Check module name
;; The function `write-file', bound to C-x C-w, calls
@@ -3835,7 +3837,7 @@ This function is normally placed in the hook `local-write-file-hooks'."
;; Must return nil since it is added to `local-write-file-hook'.
nil)
-
+
;;; Electric functions.
(defun erlang-electric-semicolon (&optional arg)
@@ -4229,7 +4231,7 @@ This function is designed to be a member of a criteria list."
(erlang-skip-blank)
(looking-at "end[^_a-zA-Z0-9]")))
-
+
;; Erlang tags support which is aware of erlang modules.
;;
;; Not yet implemented under XEmacs. (Hint: The Emacs 19 etags
@@ -4539,7 +4541,7 @@ Tags can be given on the forms `tag', `module:', `module:tag'."
(or default (error "There is no default tag"))
spec)))))
-
+
;; Search tag functions which are aware of Erlang modules. The tactic
;; is to store new search functions into the local variables of the
;; TAGS buffers. The variables are restored directly after the
@@ -4715,7 +4717,7 @@ for a tag on the form `module:tag'."
(string= mod (erlang-get-module-from-file-name
(file-of-tag)))))))
-
+
;;; Tags completion, Emacs 19 `etags' specific.
;;;
;;; The basic idea is to create a second completion table `erlang-tags-
@@ -4834,7 +4836,7 @@ about Erlang modules."
;; Only the first one will be stored in the table.
(intern (concat module ":") table))))))
table))
-
+
;;;
;;; Prepare for other methods to run an Erlang slave process.
;;;
@@ -4916,7 +4918,7 @@ future, a new shell on an already running host will be started."
(call-interactively erlang-next-error-function))
-
+
;;;
;;; Erlang Shell Mode -- Major mode used for Erlang shells.
;;;
@@ -5052,7 +5054,7 @@ Selects Comint or Compilation mode command as appropriate."
(define-key map "\M-\C-m" 'compile-goto-error)
(unless inferior-erlang-use-cmm
(define-key map "\C-x`" 'erlang-next-error)))
-
+
;;;
;;; Inferior Erlang -- Run an Erlang shell as a subprocess.
;;;
diff --git a/lib/tools/src/cover.erl b/lib/tools/src/cover.erl
index bf21aa6b48..13d9aefb0c 100644
--- a/lib/tools/src/cover.erl
+++ b/lib/tools/src/cover.erl
@@ -255,16 +255,7 @@ compile_directory(Dir, Options) when is_list(Dir), is_list(Options) ->
end.
compile_modules(Files,Options) ->
- Options2 = lists:filter(fun(Option) ->
- case Option of
- {i, Dir} when is_list(Dir) -> true;
- {d, _Macro} -> true;
- {d, _Macro, _Value} -> true;
- export_all -> true;
- _ -> false
- end
- end,
- Options),
+ Options2 = filter_options(Options),
compile_modules(Files,Options2,[]).
compile_modules([File|Files], Options, Result) ->
@@ -273,6 +264,17 @@ compile_modules([File|Files], Options, Result) ->
compile_modules([],_Opts,Result) ->
reverse(Result).
+filter_options(Options) ->
+ lists:filter(fun(Option) ->
+ case Option of
+ {i, Dir} when is_list(Dir) -> true;
+ {d, _Macro} -> true;
+ {d, _Macro, _Value} -> true;
+ export_all -> true;
+ _ -> false
+ end
+ end,
+ Options).
%% compile_beam(ModFile) -> Result | {error,Reason}
%% ModFile - see compile/1
@@ -622,8 +624,9 @@ main_process_loop(State) ->
Compiled0 = State#main_state.compiled,
case get_beam_file(Module,BeamFile0,Compiled0) of
{ok,BeamFile} ->
+ UserOptions = get_compile_options(Module,BeamFile),
{Reply,Compiled} =
- case do_compile_beam(Module,BeamFile,[]) of
+ case do_compile_beam(Module,BeamFile,UserOptions) of
{ok, Module} ->
remote_load_compiled(State#main_state.nodes,
[{Module,BeamFile}]),
@@ -1421,12 +1424,23 @@ get_abstract_code(Module, Beam) ->
end.
get_source_info(Module, Beam) ->
+ Compile = get_compile_info(Module, Beam),
+ case lists:keyfind(source, 1, Compile) of
+ { source, _ } = Tuple -> [Tuple];
+ false -> []
+ end.
+
+get_compile_options(Module, Beam) ->
+ Compile = get_compile_info(Module, Beam),
+ case lists:keyfind(options, 1, Compile) of
+ {options, Options } -> filter_options(Options);
+ false -> []
+ end.
+
+get_compile_info(Module, Beam) ->
case beam_lib:chunks(Beam, [compile_info]) of
{ok, {Module, [{compile_info, Compile}]}} ->
- case lists:keyfind(source, 1, Compile) of
- { source, _ } = Tuple -> [Tuple];
- false -> []
- end;
+ Compile;
_ ->
[]
end.
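The cover.erl hunks above make cover:compile_beam/1 reuse the compiler options (include paths, macros, export_all) that the compiler recorded in the .beam file's compile_info chunk, instead of always recompiling with an empty option list. A minimal sketch of that lookup, assuming the illustrative function name beam_options/1 and a Beam argument that is a file name or binary:

    %% Illustrative only; mirrors get_compile_options/2 in the patch above.
    beam_options(Beam) ->
        case beam_lib:chunks(Beam, [compile_info]) of
            {ok, {_Module, [{compile_info, Info}]}} ->
                %% Info is a proplist; 'options' holds the original compiler options.
                case lists:keyfind(options, 1, Info) of
                    {options, Options} -> Options;
                    false -> []
                end;
            {error, beam_lib, _Reason} ->
                []
        end.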
diff --git a/lib/tools/src/tags.erl b/lib/tools/src/tags.erl
index 1c72ef8db5..e3cc51cdb2 100644
--- a/lib/tools/src/tags.erl
+++ b/lib/tools/src/tags.erl
@@ -292,7 +292,7 @@ word_char(C) when C >= $0, C =< $9 -> true;
word_char($_) -> true;
word_char(_) -> false.
-
+
%%% Output routines
%% Check the options `outfile' and `outdir'.
@@ -323,7 +323,7 @@ genout(Os, Name, Entries) ->
io:put_chars(Os, lists:reverse(Entries)).
-
+
%%% help routines
%% Flatten and reverse a nested list.
diff --git a/lib/tools/test/cover_SUITE.erl b/lib/tools/test/cover_SUITE.erl
index c033be98a3..29b26c7a76 100644
--- a/lib/tools/test/cover_SUITE.erl
+++ b/lib/tools/test/cover_SUITE.erl
@@ -28,7 +28,7 @@
export_import/1,
otp_5031/1, eif/1, otp_5305/1, otp_5418/1, otp_6115/1, otp_7095/1,
otp_8188/1, otp_8270/1, otp_8273/1, otp_8340/1,
- otp_10979_hanging_node/1]).
+ otp_10979_hanging_node/1, compile_beam_opts/1]).
-include_lib("test_server/include/test_server.hrl").
@@ -53,7 +53,7 @@ all() ->
dont_reconnect_after_stop, stop_node_after_disconnect,
export_import, otp_5031, eif, otp_5305, otp_5418,
otp_6115, otp_7095, otp_8188, otp_8270, otp_8273,
- otp_8340, otp_10979_hanging_node];
+ otp_8340, otp_10979_hanging_node, compile_beam_opts];
_pid ->
{skip,
"It looks like the test server is running "
@@ -1401,6 +1401,39 @@ otp_10979_hanging_node(_Config) ->
ok.
+compile_beam_opts(doc) ->
+ ["Take compiler options from beam in cover:compile_beam"];
+compile_beam_opts(suite) -> [];
+compile_beam_opts(Config) when is_list(Config) ->
+ {ok, Cwd} = file:get_cwd(),
+ ok = file:set_cwd(?config(priv_dir, Config)),
+ IncDir = filename:join(?config(data_dir, Config),
+ "included_functions"),
+ File = filename:join([?config(data_dir, Config), "otp_11439", "t.erl"]),
+ %% use all compiler options allowed by cover:filter_options
+ %% i and d don't make sense when compiling from beam though
+ {ok, t} =
+ compile:file(File, [{i, IncDir},
+ {d, 'BOOL'},
+ {d, 'MACRO', macro_defined},
+ export_all,
+ debug_info,
+ return_errors]),
+ Exports =
+ [{func1,0},
+ {macro, 0},
+ {exported,0},
+ {nonexported,0},
+ {module_info,0},
+ {module_info,1}],
+ Exports = t:module_info(exports),
+ {ok, t} = cover:compile_beam("t"),
+ Exports = t:module_info(exports),
+ cover:stop(),
+ ok = file:delete("t.beam"),
+ ok = file:set_cwd(Cwd),
+ ok.
+
%%--Auxiliary------------------------------------------------------------
analyse_expr(Expr, Config) ->
diff --git a/lib/tools/test/cover_SUITE_data/otp_11439/t.erl b/lib/tools/test/cover_SUITE_data/otp_11439/t.erl
new file mode 100644
index 0000000000..d1eb9f16ee
--- /dev/null
+++ b/lib/tools/test/cover_SUITE_data/otp_11439/t.erl
@@ -0,0 +1,11 @@
+-module(t).
+-export([exported/0]).
+-include("cover_inc.hrl").
+-ifdef(BOOL).
+macro() ->
+ ?MACRO.
+-endif.
+exported() ->
+ ok.
+nonexported() ->
+ ok.
diff --git a/lib/tools/test/eprof_SUITE_data/eed.erl b/lib/tools/test/eprof_SUITE_data/eed.erl
index 520c5f3dd1..5f2a21aa60 100644
--- a/lib/tools/test/eprof_SUITE_data/eed.erl
+++ b/lib/tools/test/eprof_SUITE_data/eed.erl
@@ -146,7 +146,7 @@ format_error({'EXIT', {Code, {Mod, Func, Args}}}) ->
[{Code, {Mod, Func, length(Args)}}]));
format_error(A) -> atom_to_list(A).
-
+
%%% Parsing commands.
@@ -327,7 +327,7 @@ when 0 =< Num1, Num1 =< Num2, Num2 =< State#state.lines ->
check_lines(_, _, _, _) ->
error(bad_linenum).
-
+
%%% Executing commands.
%% ($)= - print line number
@@ -657,7 +657,7 @@ undo_command(_, _, _) ->
write_command(_Cmd, [_First, _Last], _St) ->
error(not_implemented).
-
+
%%% Primitive buffer operations.
print_current(St) ->
@@ -717,7 +717,7 @@ wrap_next_line(State) when State#state.dot == State#state.lines ->
wrap_next_line(State) ->
next_line(State).
-
+
%%% Utilities.
get_pattern(End, Cmd, State) ->
diff --git a/lib/wx/doc/overview.edoc b/lib/wx/doc/overview.edoc
index 8bff6c34e1..054016f515 100644
--- a/lib/wx/doc/overview.edoc
+++ b/lib/wx/doc/overview.edoc
@@ -12,11 +12,11 @@ of <em>wxWidgets</em>. This document describes the erlang mapping to
wxWidgets and it's implementation. It is not a complete
users guide to wxWidgets. If you need that, you will have to read the wxWidgets
documentation instead. <em>wx</em> tries to keep a one-to-one mapping with
-the original api so that the original documentation and examples shall be
+the original API so that the original documentation and examples shall be
as easy as possible to use.
wxErlang examples and test suite can be found in the erlang src release.
-They can also provide some help on how to use the api.
+They can also provide some help on how to use the API.
This is currently a very brief introduction to <em>wx</em>. The
application is still under development, which means the interface may change,
@@ -33,17 +33,17 @@ and the test suite currently have a poor coverage ratio.
== Introduction ==
-The original <em>wxWidgets</em> is an object-oriented (C++) api and
+The original <em>wxWidgets</em> is an object-oriented (C++) API and
that is reflected in the erlang mapping. In most cases each class in
wxWidgets is represented as a module in erlang. This gives
the <em>wx</em> application a huge interface, spread over several
modules, and it all starts with the <em>wx</em>
module. The <em>wx</em> module contains functions to create and
-destroy the gui, i.e. <code>wx:new/0</code>,<code>wx:destroy/0</code>, and
+destroy the GUI, i.e. <code>wx:new/0</code>, <code>wx:destroy/0</code>, and
some other useful functions.
Objects or object references in <em>wx</em> should be seen as erlang
-processes rather then erlang terms. When you operate on them they can
+processes rather than erlang terms. When you operate on them they can
change state, e.g. they are not functional objects as erlang terms are.
Each object has a type or rather a class, which is manipulated with
the corresponding module or by sub-classes of that object. Type
@@ -62,7 +62,7 @@ For example the <em>wxWindow</em> C++ class is implemented in the
member <em>wxWindow::CenterOnParent</em> is
thus <em>wxWindow:centerOnParent</em>. The following C++ code:
<pre>
- wxWindow MyWin = new wxWindo();
+ wxWindow MyWin = new wxWindow();
MyWin.CenterOnParent(wxVERTICAL);
...
delete MyWin;
@@ -91,7 +91,7 @@ they are directly mapped to corresponding erlang terms:
<dt><em>wxGridCellCoords</em> is represented by {Row,Column}</dt>
</dl>
-In the places where the erlang api differs from the original one it should
+In the places where the erlang API differs from the original one it should
be obvious from the erlang documentation which representation has
been used. E.g. the C++ arrays and/or lists are sometimes represented
as erlang lists and sometimes as tuples.
@@ -112,7 +112,7 @@ Additionally some global functions, i.e. non-class functions, exist in
the <c>wx_misc</c> module.
<em>wxErlang</em> is implemented as a (threaded) driver and a rather direct
-interface to the C++ api, with the drawback that if the erlang
+interface to the C++ API, with the drawback that if the erlang
programmer does an error, it might crash the emulator.
Since the driver is threaded it requires a <em>smp</em> enabled emulator,
@@ -121,7 +121,7 @@ that provides a thread safe interface to the driver.
== Multiple processes and memory handling ==
The intention is that each erlang application calls wx:new() once to
-setup it's gui which creates an environment and a memory mapping. To
+set up its GUI which creates an environment and a memory mapping. To
be able to use <em>wx</em> from several processes in your application,
you must share the environment. You can get the active environment with
<code>wx:get_env/0</code> and set it in the new processes
@@ -153,26 +153,26 @@ increasing memory usage. This is especially important when
<em>wxWidgets</em> assumes or recommends that you (or rather the C++
programmer) have allocated the object on the stack since that will
never be done in the erlang binding. For example <code>wxDC</code> class
-or its sub-classes or <code> wxSizerFlags</code>.
+or its sub-classes or <code>wxSizerFlags</code>.
Currently the dialogs show modal function freezes wxWidgets
until the dialog is closed. That is intended but in erlang where you
-can have several gui applications running at the same time it causes
+can have several GUI applications running at the same time it causes
trouble. This will hopefully be fixed in future <em>wxWidgets</em>
releases.
== Event Handling ==
-Event handling in <em>wx</em> differs most the from the original api.
+Event handling in <em>wx</em> differs most from the original API.
You must specify every event you want to handle in <em>wxWidgets</em>,
-that is the same in the erlang binding but can you choose to receive
-the events as messages or handle them with callback funs.
+that is the same in the erlang binding but you can choose to receive
+the events as messages or handle them with callback <em>funs</em>.
Otherwise the event subscription is handled as <em>wxWidgets</em>
dynamic event-handler connection. You subscribe to events of a certain
-type from objects with an <em>ID</em> or within a range of ID:s. The
-callback fun is optional, if not supplied the event will be sent to the
-process that called <em>connect/2</em>. Thus, a handler is a callback fun
+type from objects with an <em>ID</em> or within a range of <em>ID</em>s. The
+callback <em>fun</em> is optional, if not supplied the event will be sent to the
+process that called <em>connect/2</em>. Thus, a handler is a callback <em>fun</em>
or a process which will receive an event message.
Events are handled in order from bottom to top, in the widgets
@@ -195,7 +195,7 @@ subscribed to
<em>wxKey</em> event record where <code>Event#wxKey.type =
key_up</code>.
-In <em>wxWidgets</em> the developer have to call
+In <em>wxWidgets</em> the developer has to call
<code>wxEvent:skip()</code> if he wants the event to be processed by
other handlers. You can do the same in <em>wx</em> if you use
callbacks. If you want the event as messages you just don't supply a
@@ -217,11 +217,11 @@ following handlers. The actual event objects are deleted after
the <em>fun</em> returns.
The callbacks are always invoked by another process and have
-exclusive usage of the gui when invoked. This means that a callback fun
+exclusive usage of the GUI when invoked. This means that a callback <em>fun</em>
can not use the process dictionary and should not make calls to other
-processes. Calls to another process inside a callback fun may cause a
+processes. Calls to another process inside a callback <em>fun</em> may cause a
deadlock if the other process is waiting on completion of his call to
-the gui.
+the GUI.
== Acknowledgments ==
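The overview.edoc changes above are wording and capitalization fixes, but the workflow they describe is concrete: wx:new/0 creates the GUI environment, wx:get_env/0 and wx:set_env/1 share it with other processes, and wxEvtHandler:connect/2 subscribes to events that are then delivered as messages to the process that called connect/2 (or handled by a callback fun, if one is supplied). A minimal sketch under those assumptions, with the module name hello_wx and the printed frame size being illustrative only:

    -module(hello_wx).
    -export([start/0]).
    -include_lib("wx/include/wx.hrl").

    start() ->
        Wx = wx:new(),
        Frame = wxFrame:new(Wx, ?wxID_ANY, "hello"),
        %% No callback fun supplied, so close_window events arrive as
        %% #wx{} messages in the process that called connect/2.
        wxEvtHandler:connect(Frame, close_window),
        wxFrame:show(Frame),
        %% Share the environment with a second process.
        Env = wx:get_env(),
        spawn(fun() ->
                      wx:set_env(Env),
                      io:format("frame size: ~p~n", [wxWindow:getSize(Frame)])
              end),
        receive
            #wx{event = #wxClose{}} -> wx:destroy()
        end.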
diff --git a/lib/xmerl/src/xmerl.erl b/lib/xmerl/src/xmerl.erl
index 3249094e78..01af183eef 100644
--- a/lib/xmerl/src/xmerl.erl
+++ b/lib/xmerl/src/xmerl.erl
@@ -303,18 +303,17 @@ apply_tag_cb(Ms, F, Args) ->
apply_cb(Ms, F, '#element#', Args).
apply_cb(Ms, F, Df, Args) ->
- apply_cb(Ms, F, Df, Args, Ms).
-
-apply_cb([M|Ms], F, Df, Args, Ms0) ->
- case catch apply(M, F, Args) of
- {'EXIT', {undef,[{M,F,_,_}|_]}} ->
- apply_cb(Ms, F, Df, Args, Ms0);
- {'EXIT', Reason} ->
- exit(Reason);
- Res ->
- Res
+ apply_cb(Ms, F, Df, Args, length(Args)).
+
+apply_cb(Ms, F, Df, Args, A) ->
+ apply_cb(Ms, F, Df, Args, A, Ms).
+
+apply_cb([M|Ms], F, Df, Args, A, Ms0) ->
+ case erlang:function_exported(M, F, A) of
+ true -> apply(M, F, Args);
+ false -> apply_cb(Ms, F, Df, Args, A, Ms0)
end;
-apply_cb([], Df, Df, Args, _Ms0) ->
+apply_cb([], Df, Df, Args, A, _Ms0) ->
exit({unknown_tag, {Df, Args}});
-apply_cb([], F, Df, Args, Ms0) ->
- apply_cb(Ms0, Df, Df, [F|Args]).
+apply_cb([], F, Df, Args, A, Ms0) ->
+ apply_cb(Ms0, Df, Df, [F|Args], A+1).
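The apply_cb rewrite above stops wrapping apply/3 in a catch to detect a missing callback; it asks erlang:function_exported(M, F, Arity) before calling, so a genuine crash inside a callback is no longer mistaken for an undefined function. The same dispatch pattern in isolation, with dispatch/3 and the not_handled fallback being illustrative names only (note that function_exported/3 only reports true for modules that are already loaded):

    dispatch([M | Ms], F, Args) ->
        Arity = length(Args),
        case erlang:function_exported(M, F, Arity) of
            true  -> apply(M, F, Args);
            false -> dispatch(Ms, F, Args)
        end;
    dispatch([], _F, _Args) ->
        not_handled.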
diff --git a/lib/xmerl/src/xmerl_regexp.erl b/lib/xmerl/src/xmerl_regexp.erl
index 0c53e6f34a..9303bdb125 100644
--- a/lib/xmerl/src/xmerl_regexp.erl
+++ b/lib/xmerl/src/xmerl_regexp.erl
@@ -593,7 +593,7 @@ sub_first_match(S, {regexp,RE}) ->
nomatch -> nomatch
end.
-
+
%% This is the regular expression grammar used. It is equivalent to the
%% one used in AWK, except that we allow ^ $ to be used anywhere and fail
%% in the matching.
@@ -961,7 +961,7 @@ re_apply_or(never_match, R2) -> R2;
re_apply_or(R1, never_match) -> R1;
re_apply_or(nomatch, R2) -> R2;
re_apply_or(R1, nomatch) -> R1.
-
+
%% Record definitions for the NFA, DFA and compiler.
-record(nfa_state, {no,edges=[],accept=no}).
@@ -1026,7 +1026,7 @@ parse_reas([{RegExp,A}|REAs], S) ->
{error,E} -> {error,E}
end;
parse_reas([], Stack) -> {ok,reverse(Stack)}.
-
+
%% build_combined_nfa(RegExpActionList) -> {NFA,StartState}.
%% Build the combined NFA using Thompson's construction straight out
%% of the book. Build the separate NFAs in the same order as the
@@ -1147,7 +1147,7 @@ nfa_comp_class(Cc) ->
comp_crs([{C1,C2}|Crs], Last) ->
[{Last,C1-1}|comp_crs(Crs, C2+1)];
comp_crs([], Last) -> [{Last,maxchar}].
-
+
%% build_dfa(NFA, NfaStartState) -> {DFA,DfaStartState}.
%% Build a DFA from an NFA using "subset construction". The major
%% difference from the book is that we keep the marked and unmarked
@@ -1282,7 +1282,7 @@ accept([St|Sts], NFA) ->
#nfa_state{accept=no} -> accept(Sts, NFA)
end;
accept([], _NFA) -> no.
-
+
%% minimise_dfa(DFA, StartState, FirstState) -> {DFA,StartState}.
%% Minimise the DFA by removing equivalent states. We consider a
%% state if both the transitions and the their accept state is the
@@ -1331,7 +1331,7 @@ pack_dfa([D|DFA], NewN, Rs, PDFA) ->
pack_dfa(DFA, NewN+1, [{D#dfa_state.no,NewN}|Rs],
[D#dfa_state{no=NewN}|PDFA]);
pack_dfa([], _NewN, Rs, PDFA) -> {PDFA,Rs}.
-
+
%% comp_apply(String, StartPos, DFAReg) -> {match,RestPos,Rest} | nomatch.
%% Apply the DFA of a regular expression to a string. If
%% there is a match return the position of the remaining string and
diff --git a/lib/xmerl/src/xmerl_xpath.erl b/lib/xmerl/src/xmerl_xpath.erl
index b3301f2faf..be0e863ce4 100644
--- a/lib/xmerl/src/xmerl_xpath.erl
+++ b/lib/xmerl/src/xmerl_xpath.erl
@@ -713,10 +713,26 @@ node_test(_Test,
node_test({wildcard, _}, #xmlNode{type=ElAt}, _Context)
when ElAt==element; ElAt==attribute; ElAt==namespace ->
true;
-node_test({prefix_test, Prefix}, #xmlNode{node = N}, _Context) ->
+node_test({prefix_test, Prefix}, #xmlNode{node = N}, Context) ->
case N of
- #xmlElement{nsinfo = {Prefix, _}} -> true;
- #xmlAttribute{nsinfo = {Prefix, _}} -> true;
+ #xmlElement{nsinfo = {Prefix, _}} ->
+ true;
+ #xmlElement{expanded_name = {Uri, _}} ->
+ case expanded_name(Prefix, "_", Context) of
+ {Uri, _} ->
+ true;
+ _ ->
+ false
+ end;
+ #xmlAttribute{nsinfo = {Prefix, _}} ->
+ true;
+ #xmlAttribute{expanded_name = {Uri, _}} ->
+ case expanded_name(Prefix, "_", Context) of
+ {Uri, _} ->
+ true;
+ _ ->
+ false
+ end;
_ ->
false
end;
@@ -760,20 +776,21 @@ node_test({name, {_Tag, Prefix, Local}},
node_test({name, {Tag,_Prefix,_Local}},
#xmlNode{node = #xmlAttribute{name = Tag}}, _Context) ->
true;
-node_test({name, {_Tag, Prefix, Local}},
- #xmlNode{node = #xmlAttribute{expanded_name = {URI, Local},
- nsinfo = {_Prefix1, _},
- namespace = NS}}, _Context) ->
- NSNodes = NS#xmlNamespace.nodes,
- case lists:keysearch(Prefix, 1, NSNodes) of
- {value, {_, URI}} ->
- ?dbg("node_test(~, ~p) -> true.~n",
- [{_Tag, Prefix, Local}, write_node(NSNodes)]),
- true;
- false ->
- ?dbg("node_test(~, ~p) -> false.~n",
- [{_Tag, Prefix, Local}, write_node(NSNodes)]),
- false
+node_test({name, {Tag, Prefix, Local}},
+ #xmlNode{node = #xmlAttribute{name = Name,
+ expanded_name = EExpName
+ }}, Context) ->
+ case expanded_name(Prefix, Local, Context) of
+ [] ->
+ Res = (Tag == Name),
+ ?dbg("node_test(~p, ~p) -> ~p.~n",
+ [{Tag, Prefix, Local}, write_node(Name), Res]),
+ Res;
+ ExpName ->
+ Res = (ExpName == EExpName),
+ ?dbg("node_test(~p, ~p) -> ~p.~n",
+ [{Tag, Prefix, Local}, write_node(Name), Res]),
+ Res
end;
node_test({name, {_Tag, [], Local}},
#xmlNode{node = #xmlNsNode{prefix = Local}}, _Context) ->
diff --git a/lib/xmerl/test/xmerl_SUITE.erl b/lib/xmerl/test/xmerl_SUITE.erl
index e21355f877..8432e66a97 100644
--- a/lib/xmerl/test/xmerl_SUITE.erl
+++ b/lib/xmerl/test/xmerl_SUITE.erl
@@ -42,7 +42,7 @@
%%----------------------------------------------------------------------
all() ->
[{group, cpd_tests}, xpath_text1, xpath_main,
- xpath_abbreviated_syntax, xpath_functions,
+ xpath_abbreviated_syntax, xpath_functions, xpath_namespaces,
{group, misc}, {group, eventp_tests},
{group, ticket_tests}, {group, app_test},
{group, appup_test}].
@@ -205,6 +205,11 @@ xpath_functions(Config) ->
?line file:set_cwd(filename:join(?config(data_dir,Config),xpath)),
?line ok = xpath_abbrev:functions().
+xpath_namespaces(suite) -> [];
+xpath_namespaces(Config) ->
+ ?line file:set_cwd(filename:join(?config(data_dir,Config),xpath)),
+ ?line ok = xpath_abbrev:namespaces().
+
%%----------------------------------------------------------------------
latin1_alias(suite) -> [];
diff --git a/lib/xmerl/test/xmerl_SUITE_data/xpath/purchaseOrder.xml b/lib/xmerl/test/xmerl_SUITE_data/xpath/purchaseOrder.xml
index a5ae223d65..16090c3590 100644
--- a/lib/xmerl/test/xmerl_SUITE_data/xpath/purchaseOrder.xml
+++ b/lib/xmerl/test/xmerl_SUITE_data/xpath/purchaseOrder.xml
@@ -1,7 +1,8 @@
<?xml version="1.0"?>
<apo:purchaseOrder xmlns:apo="http://www.example.com/PO1"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
orderDate="1999-10-20">
- <billTo country="US">
+ <billTo country="US" xsi:type="apo:USAddress">
<name>Robert Smith</name>
<street>8 Oak Avenue</street>
<!-- etc. -->
@@ -10,7 +11,7 @@
<zip>95819</zip>
</billTo>
<apo:comment>Hurry, my lawn is going wild!</apo:comment>
- <shipTo country="US">
+ <shipTo country="US" xsi:type="apo:USAddress">
<name>Alice Smith</name>
<street>123 Maple Street</street>
<!-- etc. -->
diff --git a/lib/xmerl/test/xmerl_SUITE_data/xpath/xpath_abbrev.erl b/lib/xmerl/test/xmerl_SUITE_data/xpath/xpath_abbrev.erl
index 7b6f1e95b3..afd39b6598 100644
--- a/lib/xmerl/test/xmerl_SUITE_data/xpath/xpath_abbrev.erl
+++ b/lib/xmerl/test/xmerl_SUITE_data/xpath/xpath_abbrev.erl
@@ -8,6 +8,7 @@
-module(xpath_abbrev).
-export([test/0, check_node_set/2, ticket_6873/0, ticket_7496/0, functions/0]).
+-export([namespaces/0]).
-include("test_server.hrl").
-include_lib("xmerl/include/xmerl.hrl").
@@ -264,3 +265,33 @@ functions() ->
[city,city,comment]),
?line ok = Test(Doc2,"//*[starts-with(name(),'{http://www.example.com/PO1')]",
['apo:purchaseOrder','apo:comment']).
+
+
+namespaces() ->
+ {Doc,_} = xmerl_scan:file("purchaseOrder.xml", [{namespace_conformant, true}]),
+
+ %% Element name using regular namespace and context namespace declaration.
+ ?line [#xmlElement{nsinfo = {_, "purchaseOrder"}}] =
+ xmerl_xpath:string("/apo:purchaseOrder", Doc),
+ ?line [#xmlElement{nsinfo = {_, "purchaseOrder"}}] =
+ xmerl_xpath:string("/t:purchaseOrder", Doc, [{namespace, [{"t", "http://www.example.com/PO1"}]}]),
+
+ %% Wildcard element name using regular namespace and context namespace declaration.
+ ?line [#xmlElement{nsinfo = {_, "comment"}}] =
+ xmerl_xpath:string("./apo:*", Doc),
+ ?line [#xmlElement{nsinfo = {_, "comment"}}] =
+ xmerl_xpath:string("./t:*", Doc, [{namespace, [{"t", "http://www.example.com/PO1"}]}]),
+
+ %% Attribute name using regular namespace and context namespace declaration.
+ ?line [#xmlAttribute{nsinfo = {_, "type"}}, #xmlAttribute{nsinfo = {_, "type"}}] =
+ xmerl_xpath:string("//@xsi:type", Doc),
+ ?line [#xmlAttribute{nsinfo = {_, "type"}}, #xmlAttribute{nsinfo = {_, "type"}}] =
+ xmerl_xpath:string("//@t:type", Doc, [{namespace, [{"t", "http://www.w3.org/2001/XMLSchema-instance"}]}]),
+
+ %% Wildcard attribute name using regular namespace and context namespace declaration.
+ ?line [#xmlAttribute{nsinfo = {_, "type"}}, #xmlAttribute{nsinfo = {_, "type"}}] =
+ xmerl_xpath:string("//@xsi:*", Doc),
+ ?line [#xmlAttribute{nsinfo = {_, "type"}}, #xmlAttribute{nsinfo = {_, "type"}}] =
+ xmerl_xpath:string("//@t:*", Doc, [{namespace, [{"t", "http://www.w3.org/2001/XMLSchema-instance"}]}]),
+
+ ok.