-rw-r--r--.travis.yml4
-rw-r--r--Makefile.in2
-rw-r--r--OTP_VERSION2
-rw-r--r--bootstrap/bin/no_dot_erlang.bootbin6292 -> 6407 bytes
-rw-r--r--bootstrap/bin/start.bootbin6292 -> 6407 bytes
-rw-r--r--bootstrap/bin/start_clean.bootbin6292 -> 6407 bytes
-rw-r--r--bootstrap/lib/compiler/ebin/beam_dead.beambin13052 -> 13804 bytes
-rw-r--r--bootstrap/lib/compiler/ebin/beam_peep.beambin2872 -> 3004 bytes
-rw-r--r--bootstrap/lib/compiler/ebin/beam_type.beambin18632 -> 18692 bytes
-rw-r--r--bootstrap/lib/compiler/ebin/compile.beambin41364 -> 41372 bytes
-rw-r--r--bootstrap/lib/compiler/ebin/erl_bifs.beambin2088 -> 2104 bytes
-rw-r--r--bootstrap/lib/compiler/ebin/v3_codegen.beambin65172 -> 65360 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/application_controller.beambin31272 -> 31212 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/code_server.beambin24140 -> 24136 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/erl_epmd.beambin7176 -> 7248 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/erl_signal_handler.beambin956 -> 1112 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/error_logger.beambin5412 -> 6160 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/inet_tcp_dist.beambin7288 -> 7700 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/kernel.app7
-rw-r--r--bootstrap/lib/kernel/ebin/kernel.beambin3672 -> 3596 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/logger.beambin11484 -> 12428 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/logger_config.beambin2940 -> 2960 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/logger_disk_log_h.beambin8508 -> 9108 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/logger_filters.beambin1760 -> 1800 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/logger_formatter.beambin5736 -> 7024 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/logger_h_common.beambin5116 -> 5296 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/logger_server.beambin7208 -> 9928 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/logger_simple.beambin4524 -> 0 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/logger_simple_h.beambin0 -> 4512 bytes
-rw-r--r--bootstrap/lib/kernel/ebin/logger_std_h.beambin10132 -> 10644 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/epp.beambin27716 -> 29720 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/erl_error.beambin0 -> 8368 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/erl_eval.beambin30260 -> 35216 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/erl_internal.beambin6984 -> 7012 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/escript.beambin16824 -> 16832 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/ets.beambin22280 -> 22460 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/gen_server.beambin15008 -> 15020 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/gen_statem.beambin20528 -> 20540 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/lib.beambin14936 -> 0 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/ms_transform.beambin19452 -> 19468 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/otp_internal.beambin10284 -> 10368 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/proc_lib.beambin12436 -> 12456 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/qlc.beambin68808 -> 68816 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/shell.beambin29804 -> 29812 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/slave.beambin4740 -> 4816 bytes
-rw-r--r--bootstrap/lib/stdlib/ebin/stdlib.app4
-rw-r--r--bootstrap/lib/stdlib/ebin/string.beambin35068 -> 36520 bytes
-rw-r--r--erts/aclocal.m48
-rw-r--r--erts/configure.in570
-rw-r--r--erts/doc/src/Makefile1
-rw-r--r--erts/doc/src/alt_disco.xml93
-rw-r--r--erts/doc/src/erl.xml6
-rw-r--r--erts/doc/src/erl_driver.xml6
-rw-r--r--erts/doc/src/erlang.xml86
-rw-r--r--erts/doc/src/match_spec.xml23
-rw-r--r--erts/doc/src/notes.xml40
-rw-r--r--erts/doc/src/part.xml1
-rw-r--r--erts/emulator/Makefile.in4
-rw-r--r--erts/emulator/beam/atom.names3
-rw-r--r--erts/emulator/beam/beam_bif_load.c32
-rw-r--r--erts/emulator/beam/beam_emu.c3
-rw-r--r--erts/emulator/beam/bif.c181
-rw-r--r--erts/emulator/beam/bif.h37
-rw-r--r--erts/emulator/beam/bif.tab14
-rw-r--r--erts/emulator/beam/dist.c406
-rw-r--r--erts/emulator/beam/erl_alloc.c32
-rw-r--r--erts/emulator/beam/erl_alloc.types30
-rw-r--r--erts/emulator/beam/erl_alloc_util.h4
-rw-r--r--erts/emulator/beam/erl_ao_firstfit_alloc.c32
-rw-r--r--erts/emulator/beam/erl_ao_firstfit_alloc.h6
-rw-r--r--erts/emulator/beam/erl_bif_chksum.c2
-rw-r--r--erts/emulator/beam/erl_bif_info.c214
-rw-r--r--erts/emulator/beam/erl_bif_trace.c194
-rw-r--r--erts/emulator/beam/erl_binary.h4
-rw-r--r--erts/emulator/beam/erl_db.c630
-rw-r--r--erts/emulator/beam/erl_db.h9
-rw-r--r--erts/emulator/beam/erl_db_hash.c956
-rw-r--r--erts/emulator/beam/erl_db_hash.h20
-rw-r--r--erts/emulator/beam/erl_db_tree.c60
-rw-r--r--erts/emulator/beam/erl_db_util.c16
-rw-r--r--erts/emulator/beam/erl_db_util.h16
-rw-r--r--erts/emulator/beam/erl_gc.c15
-rw-r--r--erts/emulator/beam/erl_map.c49
-rw-r--r--erts/emulator/beam/erl_message.c5
-rw-r--r--erts/emulator/beam/erl_monitor_link.c97
-rw-r--r--erts/emulator/beam/erl_monitor_link.h57
-rw-r--r--erts/emulator/beam/erl_nif.c6
-rw-r--r--erts/emulator/beam/erl_port_task.c5
-rw-r--r--erts/emulator/beam/erl_proc_sig_queue.c554
-rw-r--r--erts/emulator/beam/erl_proc_sig_queue.h171
-rw-r--r--erts/emulator/beam/erl_process.c931
-rw-r--r--erts/emulator/beam/erl_process.h57
-rw-r--r--erts/emulator/beam/erl_process_lock.h18
-rw-r--r--erts/emulator/beam/erl_time_sup.c2
-rw-r--r--erts/emulator/beam/erl_trace.c46
-rw-r--r--erts/emulator/beam/erl_trace.h2
-rw-r--r--erts/emulator/beam/erl_unicode.c10
-rw-r--r--erts/emulator/beam/utils.c7
-rw-r--r--erts/emulator/hipe/hipe_mode_switch.c13
-rw-r--r--erts/emulator/hipe/hipe_native_bif.c23
-rw-r--r--erts/emulator/hipe/hipe_process.h9
-rw-r--r--erts/emulator/nifs/unix/unix_prim_file.c2
-rw-r--r--erts/emulator/nifs/win32/win_prim_file.c50
-rw-r--r--erts/emulator/sys/common/erl_mmap.c4
-rw-r--r--erts/emulator/sys/unix/erl_unix_sys.h12
-rw-r--r--erts/emulator/test/code_SUITE.erl2
-rw-r--r--erts/emulator/test/distribution_SUITE.erl13
-rw-r--r--erts/emulator/test/map_SUITE.erl43
-rw-r--r--erts/emulator/test/match_spec_SUITE.erl7
-rw-r--r--erts/emulator/test/nif_SUITE.erl17
-rw-r--r--erts/emulator/test/nif_SUITE_data/nif_SUITE.c10
-rw-r--r--erts/emulator/test/port_SUITE.erl8
-rw-r--r--erts/emulator/test/sensitive_SUITE.erl2
-rw-r--r--erts/emulator/test/system_info_SUITE.erl17
-rw-r--r--erts/emulator/test/system_profile_SUITE.erl86
-rw-r--r--erts/emulator/test/trace_SUITE.erl62
-rw-r--r--erts/emulator/test/tracer_SUITE.erl6
-rw-r--r--erts/etc/common/heart.c5
-rw-r--r--erts/etc/unix/etp-commands.in161
-rw-r--r--erts/lib_src/Makefile.in2
-rw-r--r--erts/preloaded/ebin/erl_prim_loader.beambin54476 -> 54432 bytes
-rw-r--r--erts/preloaded/ebin/erlang.beambin101656 -> 103296 bytes
-rw-r--r--erts/preloaded/ebin/erts_dirty_process_signal_handler.beambin2720 -> 2760 bytes
-rw-r--r--erts/preloaded/ebin/erts_internal.beambin15584 -> 16672 bytes
-rw-r--r--erts/preloaded/ebin/init.beambin51452 -> 51408 bytes
-rw-r--r--erts/preloaded/ebin/prim_file.beambin27768 -> 27760 bytes
-rw-r--r--erts/preloaded/src/erlang.erl106
-rw-r--r--erts/preloaded/src/erts_code_purger.erl4
-rw-r--r--erts/preloaded/src/erts_dirty_process_signal_handler.erl34
-rw-r--r--erts/preloaded/src/erts_internal.erl49
-rw-r--r--erts/preloaded/src/prim_file.erl2
-rw-r--r--erts/preloaded/src/zlib.erl50
-rw-r--r--erts/test/erlc_SUITE.erl4
-rw-r--r--erts/vsn.mk2
-rw-r--r--lib/common_test/doc/src/ct.xml10
-rw-r--r--lib/common_test/doc/src/ct_hooks_chapter.xml9
-rw-r--r--lib/common_test/src/Makefile2
-rw-r--r--lib/common_test/src/ct.erl17
-rw-r--r--lib/common_test/src/test_server_ctrl.erl2
-rw-r--r--lib/common_test/src/test_server_node.erl8
-rw-r--r--lib/common_test/test_server/ts_erl_config.erl10
-rw-r--r--lib/common_test/test_server/ts_run.erl2
-rw-r--r--lib/compiler/src/beam_asm.erl4
-rw-r--r--lib/compiler/src/beam_dead.erl20
-rw-r--r--lib/compiler/src/beam_peep.erl6
-rw-r--r--lib/compiler/src/beam_type.erl3
-rw-r--r--lib/compiler/src/cerl_trees.erl7
-rw-r--r--lib/compiler/src/compile.erl2
-rw-r--r--lib/compiler/src/erl_bifs.erl1
-rw-r--r--lib/compiler/src/sys_core_fold.erl37
-rw-r--r--lib/compiler/src/sys_core_inline.erl4
-rw-r--r--lib/compiler/src/v3_codegen.erl6
-rw-r--r--lib/compiler/test/bs_match_SUITE.erl10
-rw-r--r--lib/compiler/test/core_SUITE.erl5
-rw-r--r--lib/compiler/test/core_SUITE_data/name_capture.core110
-rw-r--r--lib/compiler/test/map_SUITE.erl18
-rw-r--r--lib/crypto/c_src/crypto.c59
-rw-r--r--lib/crypto/c_src/otp_test_engine.c19
-rw-r--r--lib/crypto/doc/src/notes.xml22
-rw-r--r--lib/crypto/vsn.mk2
-rw-r--r--lib/debugger/src/dbg_icmd.erl2
-rw-r--r--lib/debugger/src/dbg_wx_win.erl2
-rw-r--r--lib/dialyzer/src/dialyzer.erl4
-rw-r--r--lib/dialyzer/src/dialyzer_contracts.erl99
-rw-r--r--lib/dialyzer/src/dialyzer_dataflow.erl44
-rw-r--r--lib/dialyzer/src/typer.erl2
-rw-r--r--lib/dialyzer/test/options1_SUITE_data/results/compiler2
-rw-r--r--lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl6
-rw-r--r--lib/dialyzer/test/overspecs_SUITE_data/dialyzer_options1
-rw-r--r--lib/dialyzer/test/overspecs_SUITE_data/results/iodata2
-rw-r--r--lib/dialyzer/test/overspecs_SUITE_data/results/iolist2
-rw-r--r--lib/dialyzer/test/overspecs_SUITE_data/src/iodata.erl41
-rw-r--r--lib/dialyzer/test/overspecs_SUITE_data/src/iolist.erl41
-rw-r--r--lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_esi.erl44
-rw-r--r--lib/dialyzer/test/small_SUITE_data/results/unused_funs5
-rw-r--r--lib/dialyzer/test/small_SUITE_data/src/unused_funs.erl21
-rw-r--r--lib/dialyzer/test/specdiffs_SUITE_data/dialyzer_options1
-rw-r--r--lib/dialyzer/test/specdiffs_SUITE_data/results/iodata3
-rw-r--r--lib/dialyzer/test/specdiffs_SUITE_data/results/iolist2
-rw-r--r--lib/dialyzer/test/specdiffs_SUITE_data/src/iodata.erl41
-rw-r--r--lib/dialyzer/test/specdiffs_SUITE_data/src/iolist.erl41
-rw-r--r--lib/edoc/src/edoc_doclet.erl2
-rw-r--r--lib/erl_docgen/doc/src/notes.xml17
-rw-r--r--lib/erl_docgen/priv/xsl/Makefile5
-rw-r--r--lib/erl_docgen/vsn.mk2
-rw-r--r--lib/erl_interface/src/prog/erl_call.c11
-rw-r--r--lib/hipe/cerl/erl_bif_types.erl6
-rw-r--r--lib/hipe/cerl/erl_types.erl192
-rw-r--r--lib/hipe/main/hipe.erl6
-rw-r--r--lib/hipe/opt/hipe_schedule.erl1483
-rw-r--r--lib/hipe/opt/hipe_schedule_prio.erl53
-rw-r--r--lib/hipe/opt/hipe_target_machine.erl87
-rw-r--r--lib/hipe/opt/hipe_ultra_mod2.erl233
-rw-r--r--lib/hipe/opt/hipe_ultra_prio.erl298
-rw-r--r--lib/hipe/test/Makefile3
-rw-r--r--lib/hipe/test/erl_types_SUITE.erl197
-rw-r--r--lib/inets/doc/src/notes.xml21
-rw-r--r--lib/inets/src/http_lib/http_request.erl6
-rw-r--r--lib/inets/src/http_server/httpd_request.erl6
-rw-r--r--lib/inets/src/http_server/mod_esi.erl44
-rw-r--r--lib/inets/vsn.mk2
-rw-r--r--lib/kernel/doc/src/Makefile1
-rw-r--r--lib/kernel/doc/src/config.xml4
-rw-r--r--lib/kernel/doc/src/erl_epmd.xml104
-rw-r--r--lib/kernel/doc/src/error_logger.xml289
-rw-r--r--lib/kernel/doc/src/inet.xml2
-rw-r--r--lib/kernel/doc/src/introduction_chapter.xml1
-rw-r--r--lib/kernel/doc/src/kernel_app.xml198
-rw-r--r--lib/kernel/doc/src/logger.xml720
-rw-r--r--lib/kernel/doc/src/logger_arch.pngbin31459 -> 32377 bytes
-rw-r--r--lib/kernel/doc/src/logger_chapter.xml1123
-rw-r--r--lib/kernel/doc/src/logger_disk_log_h.xml28
-rw-r--r--lib/kernel/doc/src/logger_filters.xml155
-rw-r--r--lib/kernel/doc/src/logger_formatter.xml353
-rw-r--r--lib/kernel/doc/src/logger_std_h.xml28
-rw-r--r--lib/kernel/doc/src/ref_man.xml5
-rw-r--r--lib/kernel/doc/src/specs.xml1
-rw-r--r--lib/kernel/src/Makefile6
-rw-r--r--lib/kernel/src/application_controller.erl12
-rw-r--r--lib/kernel/src/code.erl4
-rw-r--r--lib/kernel/src/code_server.erl4
-rw-r--r--lib/kernel/src/erl_epmd.erl64
-rw-r--r--lib/kernel/src/erl_signal_handler.erl11
-rw-r--r--lib/kernel/src/error_logger.erl55
-rw-r--r--lib/kernel/src/file.erl2
-rw-r--r--lib/kernel/src/file_server.erl6
-rw-r--r--lib/kernel/src/gen_sctp.erl2
-rw-r--r--lib/kernel/src/gen_tcp.erl4
-rw-r--r--lib/kernel/src/gen_udp.erl2
-rw-r--r--lib/kernel/src/hipe_unified_loader.erl2
-rw-r--r--lib/kernel/src/inet_tcp_dist.erl148
-rw-r--r--lib/kernel/src/kernel.app.src7
-rw-r--r--lib/kernel/src/kernel.erl22
-rw-r--r--lib/kernel/src/kernel_config.erl7
-rw-r--r--lib/kernel/src/kernel_refc.erl4
-rw-r--r--lib/kernel/src/logger.erl475
-rw-r--r--lib/kernel/src/logger_backend.erl4
-rw-r--r--lib/kernel/src/logger_config.erl6
-rw-r--r--lib/kernel/src/logger_disk_log_h.erl97
-rw-r--r--lib/kernel/src/logger_filters.erl74
-rw-r--r--lib/kernel/src/logger_formatter.erl210
-rw-r--r--lib/kernel/src/logger_h_common.erl26
-rw-r--r--lib/kernel/src/logger_h_common.hrl2
-rw-r--r--lib/kernel/src/logger_internal.hrl8
-rw-r--r--lib/kernel/src/logger_server.erl382
-rw-r--r--lib/kernel/src/logger_simple_h.erl (renamed from lib/kernel/src/logger_simple.erl)84
-rw-r--r--lib/kernel/src/logger_std_h.erl55
-rw-r--r--lib/kernel/test/Makefile3
-rw-r--r--lib/kernel/test/application_SUITE.erl12
-rw-r--r--lib/kernel/test/erl_distribution_SUITE.erl2
-rw-r--r--lib/kernel/test/error_logger_warn_SUITE.erl14
-rw-r--r--lib/kernel/test/heart_SUITE.erl21
-rw-r--r--lib/kernel/test/kernel_config_SUITE.erl2
-rw-r--r--lib/kernel/test/logger.cover2
-rw-r--r--lib/kernel/test/logger.spec2
-rw-r--r--lib/kernel/test/logger_SUITE.erl246
-rw-r--r--lib/kernel/test/logger_disk_log_h_SUITE.erl289
-rw-r--r--lib/kernel/test/logger_env_var_SUITE.erl865
-rw-r--r--lib/kernel/test/logger_filters_SUITE.erl123
-rw-r--r--lib/kernel/test/logger_formatter_SUITE.erl252
-rw-r--r--lib/kernel/test/logger_legacy_SUITE.erl4
-rw-r--r--lib/kernel/test/logger_simple_SUITE.erl247
-rw-r--r--lib/kernel/test/logger_simple_h_SUITE.erl210
-rw-r--r--lib/kernel/test/logger_std_h_SUITE.erl503
-rw-r--r--lib/kernel/test/logger_test_lib.erl82
-rw-r--r--lib/kernel/test/os_SUITE.erl6
-rw-r--r--lib/observer/src/observer_lib.erl2
-rw-r--r--lib/parsetools/src/yecc.erl8
-rw-r--r--lib/public_key/doc/src/public_key.xml28
-rw-r--r--lib/public_key/src/pubkey_pem.erl2
-rw-r--r--lib/public_key/src/public_key.erl75
-rw-r--r--lib/public_key/test/pbe_SUITE.erl10
-rw-r--r--lib/public_key/test/public_key_SUITE.erl58
-rw-r--r--lib/public_key/test/public_key_SUITE_data/dsa_key_pkcs8.pem9
-rw-r--r--lib/public_key/test/public_key_SUITE_data/ec_key_pkcs8.pem5
-rw-r--r--lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_subjAltName.pem24
-rw-r--r--lib/public_key/test/public_key_SUITE_data/rsa_key_pkcs8.pem10
-rw-r--r--lib/public_key/test/public_key_SUITE_data/verify_hostname.conf3
-rw-r--r--lib/runtime_tools/doc/src/LTTng.xml2
-rw-r--r--lib/sasl/doc/src/error_logging.xml52
-rw-r--r--lib/sasl/doc/src/sasl_app.xml43
-rw-r--r--lib/sasl/src/sasl.erl20
-rw-r--r--lib/sasl/src/systools_make.erl2
-rw-r--r--lib/sasl/test/sasl_report_SUITE.erl6
-rw-r--r--lib/ssh/doc/src/notes.xml50
-rw-r--r--lib/ssh/doc/src/ssh.xml18
-rw-r--r--lib/ssh/doc/src/ssh_app.xml55
-rw-r--r--lib/ssh/src/ssh.hrl8
-rw-r--r--lib/ssh/src/ssh_client_channel.erl6
-rw-r--r--lib/ssh/src/ssh_connection.erl42
-rw-r--r--lib/ssh/src/ssh_connection_handler.erl432
-rw-r--r--lib/ssh/src/ssh_options.erl21
-rw-r--r--lib/ssh/src/ssh_sftp.erl25
-rw-r--r--lib/ssh/src/ssh_transport.erl8
-rw-r--r--lib/ssh/test/Makefile3
-rw-r--r--lib/ssh/test/ssh_algorithms_SUITE.erl13
-rw-r--r--lib/ssh/test/ssh_basic_SUITE.erl242
-rw-r--r--lib/ssh/test/ssh_chan_behaviours_SUITE.erl152
-rw-r--r--lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key13
-rw-r--r--lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key.pub11
-rw-r--r--lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key6
-rw-r--r--lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key.pub1
-rw-r--r--lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key16
-rw-r--r--lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key.pub5
-rw-r--r--lib/ssh/test/ssh_chan_behaviours_client.erl143
-rw-r--r--lib/ssh/test/ssh_chan_behaviours_server.erl96
-rw-r--r--lib/ssh/test/ssh_test_lib.erl2
-rw-r--r--lib/ssh/vsn.mk3
-rw-r--r--lib/ssl/doc/src/notes.xml16
-rw-r--r--lib/ssl/src/Makefile4
-rw-r--r--lib/ssl/src/dtls_connection.erl13
-rw-r--r--lib/ssl/src/dtls_listener_sup.erl (renamed from lib/ssl/src/dtls_udp_sup.erl)6
-rw-r--r--lib/ssl/src/dtls_packet_demux.erl (renamed from lib/ssl/src/dtls_udp_listener.erl)56
-rw-r--r--lib/ssl/src/dtls_socket.erl34
-rw-r--r--lib/ssl/src/inet_tls_dist.erl100
-rw-r--r--lib/ssl/src/ssl.app.src4
-rw-r--r--lib/ssl/src/ssl.appup.src2
-rw-r--r--lib/ssl/src/ssl.erl69
-rw-r--r--lib/ssl/src/ssl_cipher.erl353
-rw-r--r--lib/ssl/src/ssl_config.erl8
-rw-r--r--lib/ssl/src/ssl_connection.erl11
-rw-r--r--lib/ssl/src/ssl_connection_sup.erl10
-rw-r--r--lib/ssl/src/ssl_handshake.erl109
-rw-r--r--lib/ssl/src/ssl_internal.hrl2
-rw-r--r--lib/ssl/test/ssl_ECC.erl50
-rw-r--r--lib/ssl/test/ssl_ECC_openssl_SUITE.erl86
-rw-r--r--lib/ssl/test/ssl_basic_SUITE.erl34
-rw-r--r--lib/ssl/test/ssl_test_lib.erl96
-rw-r--r--lib/ssl/test/ssl_to_openssl_SUITE.erl108
-rw-r--r--lib/ssl/vsn.mk2
-rw-r--r--lib/stdlib/doc/src/Makefile1
-rw-r--r--lib/stdlib/doc/src/calendar.xml9
-rw-r--r--lib/stdlib/doc/src/ets.xml6
-rw-r--r--lib/stdlib/doc/src/gen_event.xml5
-rw-r--r--lib/stdlib/doc/src/gen_server.xml6
-rw-r--r--lib/stdlib/doc/src/gen_statem.xml4
-rw-r--r--lib/stdlib/doc/src/io_lib.xml34
-rw-r--r--lib/stdlib/doc/src/lib.xml103
-rw-r--r--lib/stdlib/doc/src/proc_lib.xml47
-rw-r--r--lib/stdlib/doc/src/ref_man.xml1
-rw-r--r--lib/stdlib/doc/src/specs.xml1
-rw-r--r--lib/stdlib/doc/src/string.xml2
-rw-r--r--lib/stdlib/src/Makefile3
-rw-r--r--lib/stdlib/src/array.erl2
-rw-r--r--lib/stdlib/src/epp.erl127
-rw-r--r--lib/stdlib/src/erl_error.erl (renamed from lib/stdlib/src/lib.erl)327
-rw-r--r--lib/stdlib/src/erl_eval.erl221
-rw-r--r--lib/stdlib/src/erl_internal.erl2
-rw-r--r--lib/stdlib/src/escript.erl2
-rw-r--r--lib/stdlib/src/ets.erl26
-rw-r--r--lib/stdlib/src/gen_event.erl6
-rw-r--r--lib/stdlib/src/gen_fsm.erl38
-rw-r--r--lib/stdlib/src/gen_server.erl6
-rw-r--r--lib/stdlib/src/gen_statem.erl2
-rw-r--r--lib/stdlib/src/io.erl11
-rw-r--r--lib/stdlib/src/ms_transform.erl1
-rw-r--r--lib/stdlib/src/otp_internal.erl9
-rw-r--r--lib/stdlib/src/proc_lib.erl12
-rw-r--r--lib/stdlib/src/qlc.erl8
-rw-r--r--lib/stdlib/src/shell.erl6
-rw-r--r--lib/stdlib/src/slave.erl14
-rw-r--r--lib/stdlib/src/stdlib.app.src4
-rw-r--r--lib/stdlib/src/string.erl180
-rw-r--r--lib/stdlib/test/epp_SUITE.erl171
-rw-r--r--lib/stdlib/test/ets_SUITE.erl71
-rw-r--r--lib/stdlib/test/io_SUITE.erl2
-rw-r--r--lib/stdlib/test/qlc_SUITE.erl6
-rw-r--r--lib/stdlib/test/shell_SUITE.erl6
-rw-r--r--lib/stdlib/test/string_SUITE.erl24
-rw-r--r--lib/syntax_tools/src/epp_dodger.erl36
-rw-r--r--lib/syntax_tools/src/erl_prettypr.erl7
-rw-r--r--lib/syntax_tools/src/erl_syntax.erl4
-rw-r--r--lib/syntax_tools/src/erl_syntax_lib.erl2
-rw-r--r--lib/syntax_tools/test/syntax_tools_SUITE.erl24
-rw-r--r--lib/tools/doc/src/fprof.xml10
-rw-r--r--lib/tools/emacs/erlang.el1
-rw-r--r--lib/tools/src/lcnt.erl5
-rw-r--r--lib/tools/src/xref.erl6
-rw-r--r--lib/tools/test/eprof_SUITE_data/eed.erl6
-rw-r--r--make/otp_release_targets.mk2
-rw-r--r--otp_versions.table4
-rwxr-xr-xscripts/build-otp4
-rwxr-xr-xscripts/diffable5
-rw-r--r--system/doc/design_principles/gen_server_concepts.xml2
-rw-r--r--system/doc/design_principles/release_structure.xml2
-rw-r--r--system/doc/design_principles/spec_proc.xml2
-rw-r--r--system/doc/efficiency_guide/Makefile2
-rw-r--r--system/doc/efficiency_guide/advanced.xml7
-rw-r--r--system/doc/efficiency_guide/binaryhandling.xml84
-rw-r--r--system/doc/efficiency_guide/efficiency_guide.erl24
-rw-r--r--system/doc/efficiency_guide/profiling.xml2
-rw-r--r--system/doc/efficiency_guide/xmlfiles.mk2
-rw-r--r--system/doc/embedded/Makefile2
-rw-r--r--system/doc/getting_started/Makefile2
-rw-r--r--system/doc/getting_started/conc_prog.xml2
-rw-r--r--system/doc/installation_guide/Makefile2
-rw-r--r--system/doc/installation_guide/xmlfiles.mk2
-rw-r--r--system/doc/oam/Makefile2
-rw-r--r--system/doc/programming_examples/Makefile2
-rw-r--r--system/doc/programming_examples/xmlfiles.mk2
-rw-r--r--system/doc/reference_manual/Makefile2
-rw-r--r--system/doc/reference_manual/introduction.xml2
-rw-r--r--system/doc/reference_manual/macros.xml37
-rw-r--r--system/doc/reference_manual/modules.xml2
-rw-r--r--system/doc/reference_manual/xmlfiles.mk2
-rw-r--r--system/doc/system_architecture_intro/Makefile2
-rw-r--r--system/doc/system_architecture_intro/sys_arch_intro.xml2
-rw-r--r--system/doc/system_principles/Makefile2
-rw-r--r--system/doc/system_principles/create_target.xmlsrc2
-rw-r--r--system/doc/system_principles/error_logging.xml113
-rw-r--r--system/doc/system_principles/system_principles.xml2
-rw-r--r--system/doc/system_principles/xmlfiles.mk2
-rw-r--r--system/doc/top/Makefile2
-rw-r--r--system/doc/top/book.xml2
-rw-r--r--system/doc/tutorial/Makefile2
-rw-r--r--system/doc/tutorial/xmlfiles.mk2
416 files changed, 12715 insertions, 9883 deletions
diff --git a/.travis.yml b/.travis.yml
index 88a4c46f77..1438ea865a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -19,6 +19,7 @@ addons:
- default-jdk
- g++
- xsltproc
+ - libxml2-utils
matrix:
include:
@@ -27,7 +28,7 @@ matrix:
services:
- docker
script:
- - ./scripts/build-docker-otp 32 sh -c "scripts/build-otp && ./otp_build tests && scripts/run-smoke-tests && bin/dialyzer --build_plt --apps erts kernel stdlib"
+ - ./scripts/build-docker-otp 32 sh -c "scripts/build-otp release && ./otp_build tests && scripts/run-smoke-tests && bin/dialyzer --build_plt --apps erts kernel stdlib"
- env: Linux64Dialyzer
os: linux
script:
@@ -39,6 +40,7 @@ matrix:
- ./scripts/build-otp
- ./otp_build tests
- make release_docs
+ - make xmllint
- ./scripts/run-smoke-tests
before_script:
diff --git a/Makefile.in b/Makefile.in
index 6803e4b77e..e75bcf7f10 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -125,7 +125,7 @@ BINDIR = $(DESTDIR)$(EXTRA_PREFIX)$(bindir)
#
# Erlang base public files
#
-ERL_BASE_PUB_FILES=erl erlc epmd run_erl to_erl dialyzer escript ct_run
+ERL_BASE_PUB_FILES=erl erlc epmd run_erl to_erl dialyzer typer escript ct_run
# ERLANG_INST_LIBDIR is the top directory where the Erlang installation
# will be located when running.
diff --git a/OTP_VERSION b/OTP_VERSION
index 06d4ac2bfd..a28399bab8 100644
--- a/OTP_VERSION
+++ b/OTP_VERSION
@@ -1 +1 @@
-21.0-rc0
+21.0-rc2
diff --git a/bootstrap/bin/no_dot_erlang.boot b/bootstrap/bin/no_dot_erlang.boot
index fe11c1d256..60f70c0f97 100644
--- a/bootstrap/bin/no_dot_erlang.boot
+++ b/bootstrap/bin/no_dot_erlang.boot
Binary files differ
diff --git a/bootstrap/bin/start.boot b/bootstrap/bin/start.boot
index fe11c1d256..60f70c0f97 100644
--- a/bootstrap/bin/start.boot
+++ b/bootstrap/bin/start.boot
Binary files differ
diff --git a/bootstrap/bin/start_clean.boot b/bootstrap/bin/start_clean.boot
index fe11c1d256..60f70c0f97 100644
--- a/bootstrap/bin/start_clean.boot
+++ b/bootstrap/bin/start_clean.boot
Binary files differ
diff --git a/bootstrap/lib/compiler/ebin/beam_dead.beam b/bootstrap/lib/compiler/ebin/beam_dead.beam
index af2f76c6ee..8934cb5d11 100644
--- a/bootstrap/lib/compiler/ebin/beam_dead.beam
+++ b/bootstrap/lib/compiler/ebin/beam_dead.beam
Binary files differ
diff --git a/bootstrap/lib/compiler/ebin/beam_peep.beam b/bootstrap/lib/compiler/ebin/beam_peep.beam
index 67d1f808b6..1bf1e5a79f 100644
--- a/bootstrap/lib/compiler/ebin/beam_peep.beam
+++ b/bootstrap/lib/compiler/ebin/beam_peep.beam
Binary files differ
diff --git a/bootstrap/lib/compiler/ebin/beam_type.beam b/bootstrap/lib/compiler/ebin/beam_type.beam
index dab30b21eb..f21ede5043 100644
--- a/bootstrap/lib/compiler/ebin/beam_type.beam
+++ b/bootstrap/lib/compiler/ebin/beam_type.beam
Binary files differ
diff --git a/bootstrap/lib/compiler/ebin/compile.beam b/bootstrap/lib/compiler/ebin/compile.beam
index 99a6c5d7f0..5772dd173b 100644
--- a/bootstrap/lib/compiler/ebin/compile.beam
+++ b/bootstrap/lib/compiler/ebin/compile.beam
Binary files differ
diff --git a/bootstrap/lib/compiler/ebin/erl_bifs.beam b/bootstrap/lib/compiler/ebin/erl_bifs.beam
index feb60041bf..be3dc425f4 100644
--- a/bootstrap/lib/compiler/ebin/erl_bifs.beam
+++ b/bootstrap/lib/compiler/ebin/erl_bifs.beam
Binary files differ
diff --git a/bootstrap/lib/compiler/ebin/v3_codegen.beam b/bootstrap/lib/compiler/ebin/v3_codegen.beam
index 833ddf86bb..5aeca65888 100644
--- a/bootstrap/lib/compiler/ebin/v3_codegen.beam
+++ b/bootstrap/lib/compiler/ebin/v3_codegen.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/application_controller.beam b/bootstrap/lib/kernel/ebin/application_controller.beam
index 869c46939b..e57be279d5 100644
--- a/bootstrap/lib/kernel/ebin/application_controller.beam
+++ b/bootstrap/lib/kernel/ebin/application_controller.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/code_server.beam b/bootstrap/lib/kernel/ebin/code_server.beam
index 736190c08c..93c354bf40 100644
--- a/bootstrap/lib/kernel/ebin/code_server.beam
+++ b/bootstrap/lib/kernel/ebin/code_server.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/erl_epmd.beam b/bootstrap/lib/kernel/ebin/erl_epmd.beam
index d06aa1add3..449fd8dff1 100644
--- a/bootstrap/lib/kernel/ebin/erl_epmd.beam
+++ b/bootstrap/lib/kernel/ebin/erl_epmd.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/erl_signal_handler.beam b/bootstrap/lib/kernel/ebin/erl_signal_handler.beam
index 881e36e6fb..1a1d9d28ee 100644
--- a/bootstrap/lib/kernel/ebin/erl_signal_handler.beam
+++ b/bootstrap/lib/kernel/ebin/erl_signal_handler.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/error_logger.beam b/bootstrap/lib/kernel/ebin/error_logger.beam
index 752c0f2bb1..bb250d71f3 100644
--- a/bootstrap/lib/kernel/ebin/error_logger.beam
+++ b/bootstrap/lib/kernel/ebin/error_logger.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/inet_tcp_dist.beam b/bootstrap/lib/kernel/ebin/inet_tcp_dist.beam
index 9a7e36791e..c33a9e7f3a 100644
--- a/bootstrap/lib/kernel/ebin/inet_tcp_dist.beam
+++ b/bootstrap/lib/kernel/ebin/inet_tcp_dist.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/kernel.app b/bootstrap/lib/kernel/ebin/kernel.app
index b93a65e8fb..0e1cf7cbb7 100644
--- a/bootstrap/lib/kernel/ebin/kernel.app
+++ b/bootstrap/lib/kernel/ebin/kernel.app
@@ -68,7 +68,7 @@
logger_formatter,
logger_h_common,
logger_server,
- logger_simple,
+ logger_simple_h,
logger_std_h,
logger_sup,
net,
@@ -140,7 +140,10 @@
inet_db,
pg2]},
{applications, []},
- {env, []},
+ {env, [{logger_level, info},
+ {logger_sasl_compatible, false},
+ {logger_progress_reports, stop}
+ ]},
{mod, {kernel, []}},
{runtime_dependencies, ["erts-10.0", "stdlib-3.5", "sasl-3.0"]}
]
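
The three Kernel environment defaults added in the hunk above can be overridden per release through sys.config. A minimal, illustrative sketch (the keys mirror the defaults above; the values are examples only, not part of this change):

    %% sys.config -- hypothetical override of the Kernel defaults added above.
    [{kernel,
      [{logger_level, debug},             %% default in kernel.app above: info
       {logger_sasl_compatible, false},   %% same as the default above
       {logger_progress_reports, stop}    %% same as the default above
      ]}].
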
diff --git a/bootstrap/lib/kernel/ebin/kernel.beam b/bootstrap/lib/kernel/ebin/kernel.beam
index 15dfd19ff8..b3f0783d25 100644
--- a/bootstrap/lib/kernel/ebin/kernel.beam
+++ b/bootstrap/lib/kernel/ebin/kernel.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger.beam b/bootstrap/lib/kernel/ebin/logger.beam
index 8b57370f82..a2e87793ff 100644
--- a/bootstrap/lib/kernel/ebin/logger.beam
+++ b/bootstrap/lib/kernel/ebin/logger.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_config.beam b/bootstrap/lib/kernel/ebin/logger_config.beam
index 0fc49960b4..1033fc7c3d 100644
--- a/bootstrap/lib/kernel/ebin/logger_config.beam
+++ b/bootstrap/lib/kernel/ebin/logger_config.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_disk_log_h.beam b/bootstrap/lib/kernel/ebin/logger_disk_log_h.beam
index 7a21b07c2d..fc601ddaa6 100644
--- a/bootstrap/lib/kernel/ebin/logger_disk_log_h.beam
+++ b/bootstrap/lib/kernel/ebin/logger_disk_log_h.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_filters.beam b/bootstrap/lib/kernel/ebin/logger_filters.beam
index 0e8c97c98e..57197e137c 100644
--- a/bootstrap/lib/kernel/ebin/logger_filters.beam
+++ b/bootstrap/lib/kernel/ebin/logger_filters.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_formatter.beam b/bootstrap/lib/kernel/ebin/logger_formatter.beam
index 2f225b0295..6216a8095e 100644
--- a/bootstrap/lib/kernel/ebin/logger_formatter.beam
+++ b/bootstrap/lib/kernel/ebin/logger_formatter.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_h_common.beam b/bootstrap/lib/kernel/ebin/logger_h_common.beam
index 528261f4e3..b1fb2c1191 100644
--- a/bootstrap/lib/kernel/ebin/logger_h_common.beam
+++ b/bootstrap/lib/kernel/ebin/logger_h_common.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_server.beam b/bootstrap/lib/kernel/ebin/logger_server.beam
index 2bf304e044..381c9292b4 100644
--- a/bootstrap/lib/kernel/ebin/logger_server.beam
+++ b/bootstrap/lib/kernel/ebin/logger_server.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_simple.beam b/bootstrap/lib/kernel/ebin/logger_simple.beam
deleted file mode 100644
index be8017391f..0000000000
--- a/bootstrap/lib/kernel/ebin/logger_simple.beam
+++ /dev/null
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_simple_h.beam b/bootstrap/lib/kernel/ebin/logger_simple_h.beam
new file mode 100644
index 0000000000..64db774f26
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger_simple_h.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_std_h.beam b/bootstrap/lib/kernel/ebin/logger_std_h.beam
index 7483dee399..e883c81366 100644
--- a/bootstrap/lib/kernel/ebin/logger_std_h.beam
+++ b/bootstrap/lib/kernel/ebin/logger_std_h.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/epp.beam b/bootstrap/lib/stdlib/ebin/epp.beam
index 9695be2c1a..c87665ff86 100644
--- a/bootstrap/lib/stdlib/ebin/epp.beam
+++ b/bootstrap/lib/stdlib/ebin/epp.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/erl_error.beam b/bootstrap/lib/stdlib/ebin/erl_error.beam
new file mode 100644
index 0000000000..dc9d0a8d39
--- /dev/null
+++ b/bootstrap/lib/stdlib/ebin/erl_error.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/erl_eval.beam b/bootstrap/lib/stdlib/ebin/erl_eval.beam
index f3fc64ee32..a6dc4e0d68 100644
--- a/bootstrap/lib/stdlib/ebin/erl_eval.beam
+++ b/bootstrap/lib/stdlib/ebin/erl_eval.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/erl_internal.beam b/bootstrap/lib/stdlib/ebin/erl_internal.beam
index 522230cf51..c635408fde 100644
--- a/bootstrap/lib/stdlib/ebin/erl_internal.beam
+++ b/bootstrap/lib/stdlib/ebin/erl_internal.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/escript.beam b/bootstrap/lib/stdlib/ebin/escript.beam
index 79ed0a3876..781484fe0b 100644
--- a/bootstrap/lib/stdlib/ebin/escript.beam
+++ b/bootstrap/lib/stdlib/ebin/escript.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/ets.beam b/bootstrap/lib/stdlib/ebin/ets.beam
index 3d103b1624..ab4996ef4e 100644
--- a/bootstrap/lib/stdlib/ebin/ets.beam
+++ b/bootstrap/lib/stdlib/ebin/ets.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/gen_server.beam b/bootstrap/lib/stdlib/ebin/gen_server.beam
index 7828b04453..68e6390cbe 100644
--- a/bootstrap/lib/stdlib/ebin/gen_server.beam
+++ b/bootstrap/lib/stdlib/ebin/gen_server.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/gen_statem.beam b/bootstrap/lib/stdlib/ebin/gen_statem.beam
index e89eb4a65a..f47d71163d 100644
--- a/bootstrap/lib/stdlib/ebin/gen_statem.beam
+++ b/bootstrap/lib/stdlib/ebin/gen_statem.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/lib.beam b/bootstrap/lib/stdlib/ebin/lib.beam
deleted file mode 100644
index 2cc777b388..0000000000
--- a/bootstrap/lib/stdlib/ebin/lib.beam
+++ /dev/null
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/ms_transform.beam b/bootstrap/lib/stdlib/ebin/ms_transform.beam
index ace2e0c2f4..03b4b75bb6 100644
--- a/bootstrap/lib/stdlib/ebin/ms_transform.beam
+++ b/bootstrap/lib/stdlib/ebin/ms_transform.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/otp_internal.beam b/bootstrap/lib/stdlib/ebin/otp_internal.beam
index a64abac273..00fef1da84 100644
--- a/bootstrap/lib/stdlib/ebin/otp_internal.beam
+++ b/bootstrap/lib/stdlib/ebin/otp_internal.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/proc_lib.beam b/bootstrap/lib/stdlib/ebin/proc_lib.beam
index 9025f68b68..b8602a6490 100644
--- a/bootstrap/lib/stdlib/ebin/proc_lib.beam
+++ b/bootstrap/lib/stdlib/ebin/proc_lib.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/qlc.beam b/bootstrap/lib/stdlib/ebin/qlc.beam
index 523f93a848..5d4375adca 100644
--- a/bootstrap/lib/stdlib/ebin/qlc.beam
+++ b/bootstrap/lib/stdlib/ebin/qlc.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/shell.beam b/bootstrap/lib/stdlib/ebin/shell.beam
index abbe513b39..36a9c27c35 100644
--- a/bootstrap/lib/stdlib/ebin/shell.beam
+++ b/bootstrap/lib/stdlib/ebin/shell.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/slave.beam b/bootstrap/lib/stdlib/ebin/slave.beam
index 596dda4ed5..e832637c7c 100644
--- a/bootstrap/lib/stdlib/ebin/slave.beam
+++ b/bootstrap/lib/stdlib/ebin/slave.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/stdlib.app b/bootstrap/lib/stdlib/ebin/stdlib.app
index c24ca46516..20c978670e 100644
--- a/bootstrap/lib/stdlib/ebin/stdlib.app
+++ b/bootstrap/lib/stdlib/ebin/stdlib.app
@@ -2,7 +2,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -43,6 +43,7 @@
erl_anno,
erl_bits,
erl_compile,
+ erl_error,
erl_eval,
erl_expand_records,
erl_internal,
@@ -71,7 +72,6 @@
io_lib_format,
io_lib_fread,
io_lib_pretty,
- lib,
lists,
log_mf_h,
maps,
diff --git a/bootstrap/lib/stdlib/ebin/string.beam b/bootstrap/lib/stdlib/ebin/string.beam
index 39ec49672a..678fead549 100644
--- a/bootstrap/lib/stdlib/ebin/string.beam
+++ b/bootstrap/lib/stdlib/ebin/string.beam
Binary files differ
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index a4d09810bd..99b96eb5bc 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -2645,7 +2645,7 @@ case $erl_gethrvtime in
dnl Check if clock_gettime (linux) is working
dnl
- AC_MSG_CHECKING([if clock_gettime can be used to get process CPU time])
+ AC_MSG_CHECKING([if clock_gettime can be used to get thread CPU time])
save_libs=$LIBS
LIBS="-lrt"
AC_TRY_RUN([
@@ -2659,11 +2659,11 @@ case $erl_gethrvtime in
int i;
struct timespec tp;
- if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp) < 0)
+ if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tp) < 0)
exit(1);
start = ((long long)tp.tv_sec * 1000000000LL) + (long long)tp.tv_nsec;
for (i = 0; i < 100; i++)
- clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp);
+ clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tp);
stop = ((long long)tp.tv_sec * 1000000000LL) + (long long)tp.tv_nsec;
if (start == 0)
exit(4);
@@ -2686,7 +2686,7 @@ case $erl_gethrvtime in
case $erl_clock_gettime_cpu_time in
yes)
AC_DEFINE(HAVE_CLOCK_GETTIME_CPU_TIME,[],
- [define if clock_gettime() works for getting process time])
+ [define if clock_gettime() works for getting thread time])
LIBRT=-lrt
;;
cross)
diff --git a/erts/configure.in b/erts/configure.in
index d1c5fe324b..10ea0b5e4b 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -2701,570 +2701,14 @@ if test X${enable_hipe} = Xyes; then
fi
AC_SUBST(HIPEBEAMLDFLAGS)
-if test X${enable_fp_exceptions} = Xauto ; then
- case $host_os in
- *linux*)
- enable_fp_exceptions=no
- AC_MSG_NOTICE([Floating point exceptions disabled by default on Linux]) ;;
- darwin*)
- enable_fp_exceptions=no
- AC_MSG_NOTICE([Floating point exceptions disabled by default on MacOS X]) ;;
- *)
- ;;
- esac
-fi
-
-if test X${enable_fp_exceptions} = Xauto ; then
- if test X${enable_hipe} = Xyes; then
- enable_fp_exceptions=yes
- else
- enable_fp_exceptions=no
- AC_MSG_NOTICE([Floating point exceptions disabled by default in this configuration])
- fi
-fi
-
-if test X${enable_fp_exceptions} != Xyes ; then
- AC_DEFINE(NO_FPE_SIGNALS,[],[Define if floating points exceptions are non-existing/not reliable])
- FPE=unreliable
-else
-
- AC_MSG_CHECKING([for unreliable floating point exceptions])
-
-
- AC_TRY_RUN([
-/* fpe-test.c */
-#include <stdio.h>
-#include <signal.h>
-#include <stdlib.h>
-
-#if defined(__clang__) || defined(__llvm__)
-#error "Clang/LLVM generates broken code for FP exceptions"
-#endif
-
-volatile int erl_fp_exception;
-
-/*
- * We expect a single SIGFPE in this test program.
- * Getting many more indicates an inadequate SIGFPE handler,
- * e.g. using the generic handler on x86.
- */
-static void new_fp_exception(void)
-{
- if (++erl_fp_exception > 50) {
- fprintf(stderr, "SIGFPE loop detected, bailing out\n");
- exit(1);
- }
-}
-
-/* Is there no standard identifier for Darwin/MacOSX ? */
-#if defined(__APPLE__) && defined(__MACH__) && !defined(__DARWIN__)
-#define __DARWIN__ 1
-#endif
-
-/*
- * Implement unmask_fpe() and check_fpe() based on CPU/OS combination
- */
-
-#if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)
-
-static void unmask_x87(void)
-{
- unsigned short cw;
- __asm__ __volatile__("fstcw %0" : "=m"(cw));
- cw &= ~(0x01|0x04|0x08); /* unmask IM, ZM, OM */
- __asm__ __volatile__("fldcw %0" : : "m"(cw));
-}
-
-static void unmask_sse2(void)
-{
- unsigned int mxcsr;
- __asm__ __volatile__("stmxcsr %0" : "=m"(mxcsr));
- mxcsr &= ~(0x003F|0x0680); /* clear exn flags, unmask OM, ZM, IM (not PM, UM, DM) */
- __asm__ __volatile__("ldmxcsr %0" : : "m"(mxcsr));
-}
-
-#if defined(__x86_64__)
-
-static inline int cpu_has_sse2(void) { return 1; }
-
-#else /* !__x86_64__ */
-
-/*
- * Check if an x86-32 processor has SSE2.
- */
-static unsigned int xor_eflags(unsigned int mask)
-{
- unsigned int eax, edx;
-
- eax = mask; /* eax = mask */
- __asm__("pushfl\n\t"
- "popl %0\n\t" /* edx = original EFLAGS */
- "xorl %0, %1\n\t" /* eax = mask ^ EFLAGS */
- "pushl %1\n\t"
- "popfl\n\t" /* new EFLAGS = mask ^ original EFLAGS */
- "pushfl\n\t"
- "popl %1\n\t" /* eax = new EFLAGS */
- "xorl %0, %1\n\t" /* eax = new EFLAGS ^ old EFLAGS */
- "pushl %0\n\t"
- "popfl" /* restore original EFLAGS */
- : "=d"(edx), "=a"(eax)
- : "1"(eax));
- return eax;
-}
-
-static __inline__ unsigned int cpuid_eax(unsigned int op)
-{
- unsigned int eax, save_ebx;
-
- /* In PIC mode i386 reserves EBX. So we must save
- and restore it ourselves to not upset gcc. */
- __asm__(
- "movl %%ebx, %1\n\t"
- "cpuid\n\t"
- "movl %1, %%ebx"
- : "=a"(eax), "=m"(save_ebx)
- : "0"(op)
- : "cx", "dx");
- return eax;
-}
-
-static __inline__ unsigned int cpuid_edx(unsigned int op)
-{
- unsigned int eax, edx, save_ebx;
-
- /* In PIC mode i386 reserves EBX. So we must save
- and restore it ourselves to not upset gcc. */
- __asm__(
- "movl %%ebx, %2\n\t"
- "cpuid\n\t"
- "movl %2, %%ebx"
- : "=a"(eax), "=d"(edx), "=m"(save_ebx)
- : "0"(op)
- : "cx");
- return edx;
-}
-
-/* The AC bit, bit #18, is a new bit introduced in the EFLAGS
- * register on the Intel486 processor to generate alignment
- * faults. This bit cannot be set on the Intel386 processor.
- */
-static __inline__ int is_386(void)
-{
- return ((xor_eflags(1<<18) >> 18) & 1) == 0;
-}
-
-/* Newer x86 processors have a CPUID instruction, as indicated by
- * the ID bit (#21) in EFLAGS being modifiable.
- */
-static __inline__ int has_CPUID(void)
-{
- return (xor_eflags(1<<21) >> 21) & 1;
-}
-
-static int cpu_has_sse2(void)
-{
- unsigned int maxlev, features;
- static int has_sse2 = -1;
-
- if (has_sse2 >= 0)
- return has_sse2;
- has_sse2 = 0;
-
- if (is_386())
- return 0;
- if (!has_CPUID())
- return 0;
- maxlev = cpuid_eax(0);
- /* Intel A-step Pentium had a preliminary version of CPUID.
- It also didn't have SSE2. */
- if ((maxlev & 0xFFFFFF00) == 0x0500)
- return 0;
- /* If max level is zero then CPUID cannot report any features. */
- if (maxlev == 0)
- return 0;
- features = cpuid_edx(1);
- has_sse2 = (features & (1 << 26)) != 0;
-
- return has_sse2;
-}
-#endif /* !__x86_64__ */
-
-static void unmask_fpe(void)
-{
- unmask_x87();
- if (cpu_has_sse2())
- unmask_sse2();
-}
-
-static __inline__ int check_fpe(double f)
-{
- __asm__ __volatile__("fwait" : "=m"(erl_fp_exception) : "m"(f));
- if (!erl_fp_exception)
- return 0;
- __asm__ __volatile__("fninit");
- unmask_fpe();
- return 1;
-}
-
-#elif defined(__sparc__) && defined(__linux__)
-
-#if defined(__arch64__)
-#define LDX "ldx"
-#define STX "stx"
-#else
-#define LDX "ld"
-#define STX "st"
-#endif
-
-static void unmask_fpe(void)
-{
- unsigned long fsr;
-
- __asm__(STX " %%fsr, %0" : "=m"(fsr));
- fsr &= ~(0x1FUL << 23); /* clear FSR[TEM] field */
- fsr |= (0x1AUL << 23); /* enable NV, OF, DZ exceptions */
- __asm__ __volatile__(LDX " %0, %%fsr" : : "m"(fsr));
-}
-
-static __inline__ int check_fpe(double f)
-{
- __asm__ __volatile__("" : "=m"(erl_fp_exception) : "em"(f));
- return erl_fp_exception;
-}
-
-#elif (defined(__powerpc__) && defined(__linux__)) || (defined(__ppc__) && defined(__DARWIN__))
-
-#if defined(__linux__)
-
-#include <sys/prctl.h>
-
-static void set_fpexc_precise(void)
-{
- if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) < 0) {
- perror("PR_SET_FPEXC");
- exit(1);
- }
-}
-
-#elif defined(__DARWIN__)
-
-#include <mach/mach.h>
-#include <pthread.h>
-
-/*
- * FE0 FE1 MSR bits
- * 0 0 floating-point exceptions disabled
- * 0 1 floating-point imprecise nonrecoverable
- * 1 0 floating-point imprecise recoverable
- * 1 1 floating-point precise mode
- *
- * Apparently:
- * - Darwin 5.5 (MacOS X <= 10.1) starts with FE0 == FE1 == 0,
- * and resets FE0 and FE1 to 0 after each SIGFPE.
- * - Darwin 6.0 (MacOS X 10.2) starts with FE0 == FE1 == 1,
- * and does not reset FE0 or FE1 after a SIGFPE.
- */
-#define FE0_MASK (1<<11)
-#define FE1_MASK (1<<8)
-
-/* a thread cannot get or set its own MSR bits */
-static void *fpu_fpe_enable(void *arg)
-{
- thread_t t = *(thread_t*)arg;
- struct ppc_thread_state state;
- unsigned int state_size = PPC_THREAD_STATE_COUNT;
-
- if (thread_get_state(t, PPC_THREAD_STATE, (natural_t*)&state, &state_size) != KERN_SUCCESS) {
- perror("thread_get_state");
- exit(1);
- }
- if ((state.srr1 & (FE1_MASK|FE0_MASK)) != (FE1_MASK|FE0_MASK)) {
-#if 0
- /* This would also have to be performed in the SIGFPE handler
- to work around the MSR reset older Darwin releases do. */
- state.srr1 |= (FE1_MASK|FE0_MASK);
- thread_set_state(t, PPC_THREAD_STATE, (natural_t*)&state, state_size);
-#else
- fprintf(stderr, "srr1 == 0x%08x, your Darwin is too old\n", state.srr1);
- exit(1);
-#endif
- }
- return NULL; /* Ok, we appear to be on Darwin 6.0 or later */
-}
-
-static void set_fpexc_precise(void)
-{
- thread_t self = mach_thread_self();
- pthread_t enabler;
-
- if (pthread_create(&enabler, NULL, fpu_fpe_enable, &self)) {
- perror("pthread_create");
- } else if (pthread_join(enabler, NULL)) {
- perror("pthread_join");
- }
-}
-
-#endif
-
-static void set_fpscr(unsigned int fpscr)
-{
- union {
- double d;
- unsigned int fpscr[2];
- } u;
- u.fpscr[0] = 0xFFF80000;
- u.fpscr[1] = fpscr;
- __asm__ __volatile__("mtfsf 255,%0" : : "f"(u.d));
-}
-
-static void unmask_fpe(void)
-{
- set_fpexc_precise();
- set_fpscr(0x80|0x40|0x10); /* VE, OE, ZE; not UE or XE */
-}
-
-static __inline__ int check_fpe(double f)
-{
- __asm__ __volatile__("" : "=m"(erl_fp_exception) : "fm"(f));
- return erl_fp_exception;
-}
-
-#else
-
-#include <ieeefp.h>
-
-#define unmask_fpe() fpsetmask(FP_X_INV | FP_X_OFL | FP_X_DZ)
-
-static __inline__ int check_fpe(double f)
-{
- __asm__ __volatile__("" : "=m"(erl_fp_exception) : "g"(f));
- return erl_fp_exception;
-}
-
-#endif
-
-/*
- * Implement SIGFPE handler based on CPU/OS combination
- */
-
-#if (defined(__linux__) && (defined(__i386__) || defined(__x86_64__) || defined(__sparc__) || defined(__powerpc__))) || (defined(__DARWIN__) && (defined(__i386__) || defined(__x86_64__) || defined(__ppc__))) || (defined(__FreeBSD__) && (defined(__i386__) || defined(__x86_64__))) || ((defined(__OpenBSD__) || defined(__NetBSD__)) && defined(__x86_64__)) || (defined(__sun__) && defined(__x86_64__))
-
-#if defined(__linux__) && defined(__i386__)
-#if !defined(X86_FXSR_MAGIC)
-#define X86_FXSR_MAGIC 0x0000
-#endif
-#elif defined(__FreeBSD__) && defined(__i386__)
-#include <sys/types.h>
-#include <machine/npx.h>
-#elif defined(__FreeBSD__) && defined(__x86_64__)
-#include <sys/types.h>
-#include <machine/fpu.h>
-#elif defined(__DARWIN__)
-#include <machine/signal.h>
-#elif defined(__OpenBSD__) && defined(__x86_64__)
-#include <sys/types.h>
-#include <machine/fpu.h>
-#endif
-#if !(defined(__OpenBSD__) && defined(__x86_64__))
-#include <ucontext.h>
-#endif
-#include <string.h>
-
-static void fpe_sig_action(int sig, siginfo_t *si, void *puc)
-{
- ucontext_t *uc = puc;
-#if defined(__linux__)
-#if defined(__x86_64__)
- mcontext_t *mc = &uc->uc_mcontext;
- fpregset_t fpstate = mc->fpregs;
- fpstate->mxcsr = 0x1F80;
- fpstate->swd &= ~0xFF;
-#elif defined(__i386__)
- mcontext_t *mc = &uc->uc_mcontext;
- fpregset_t fpstate = mc->fpregs;
- if ((fpstate->status >> 16) == X86_FXSR_MAGIC)
- ((struct _fpstate*)fpstate)->mxcsr = 0x1F80;
- fpstate->sw &= ~0xFF;
-#elif defined(__sparc__) && defined(__arch64__)
- /* on SPARC the 3rd parameter points to a sigcontext not a ucontext */
- struct sigcontext *sc = (struct sigcontext*)puc;
- sc->sigc_regs.tpc = sc->sigc_regs.tnpc;
- sc->sigc_regs.tnpc += 4;
-#elif defined(__sparc__)
- /* on SPARC the 3rd parameter points to a sigcontext not a ucontext */
- struct sigcontext *sc = (struct sigcontext*)puc;
- sc->si_regs.pc = sc->si_regs.npc;
- sc->si_regs.npc = (unsigned long)sc->si_regs.npc + 4;
-#elif defined(__powerpc__)
-#if defined(__powerpc64__)
- mcontext_t *mc = &uc->uc_mcontext;
- unsigned long *regs = &mc->gp_regs[0];
-#else
- mcontext_t *mc = uc->uc_mcontext.uc_regs;
- unsigned long *regs = &mc->gregs[0];
-#endif
- regs[PT_NIP] += 4;
- regs[PT_FPSCR] = 0x80|0x40|0x10; /* VE, OE, ZE; not UE or XE */
-#endif
-#elif defined(__DARWIN__)
-#if defined(DARWIN_MODERN_MCONTEXT)
-#if defined(__x86_64__)
- mcontext_t mc = uc->uc_mcontext;
- struct __darwin_x86_float_state64 *fpstate = &mc->__fs;
- fpstate->__fpu_mxcsr = 0x1F80;
- *(unsigned short *)&fpstate->__fpu_fsw &= ~0xFF;
-#elif defined(__i386__)
- mcontext_t mc = uc->uc_mcontext;
- struct __darwin_i386_float_state *fpstate = &mc->__fs;
- fpstate->__fpu_mxcsr = 0x1F80;
- *(unsigned short *)&fpstate->__fpu_fsw &= ~0xFF;
-#elif defined(__ppc__)
- mcontext_t mc = uc->uc_mcontext;
- mc->ss.srr0 += 4;
- mc->fs.fpscr = 0x80|0x40|0x10;
-#endif
-#else
-#if defined(__x86_64__)
- mcontext_t mc = uc->uc_mcontext;
- struct x86_float_state64_t *fpstate = &mc->fs;
- fpstate->fpu_mxcsr = 0x1F80;
- *(unsigned short *)&fpstate->fpu_fsw &= ~0xFF;
-#elif defined(__i386__)
- mcontext_t mc = uc->uc_mcontext;
- x86_float_state32_t *fpstate = &mc->fs;
- fpstate->fpu_mxcsr = 0x1F80;
- *(unsigned short *)&fpstate->fpu_fsw &= ~0xFF;
-#elif defined(__ppc__)
- mcontext_t mc = uc->uc_mcontext;
- mc->ss.srr0 += 4;
- mc->fs.fpscr = 0x80|0x40|0x10;
-#endif
-#endif
-#elif defined(__FreeBSD__) && defined(__x86_64__)
- mcontext_t *mc = &uc->uc_mcontext;
- struct savefpu *savefpu = (struct savefpu*)&mc->mc_fpstate;
- struct envxmm *envxmm = &savefpu->sv_env;
- envxmm->en_mxcsr = 0x1F80;
- envxmm->en_sw &= ~0xFF;
-#elif defined(__FreeBSD__) && defined(__i386__)
- mcontext_t *mc = &uc->uc_mcontext;
- union savefpu *savefpu = (union savefpu*)&mc->mc_fpstate;
- if (mc->mc_fpformat == _MC_FPFMT_XMM) {
- struct envxmm *envxmm = &savefpu->sv_xmm.sv_env;
- envxmm->en_mxcsr = 0x1F80;
- envxmm->en_sw &= ~0xFF;
- } else {
- struct env87 *env87 = &savefpu->sv_87.sv_env;
- env87->en_sw &= ~0xFF;
- }
-#elif defined(__OpenBSD__) && defined(__x86_64__)
- struct fxsave64 *fxsave = uc->sc_fpstate;
- fxsave->fx_mxcsr = 0x1F80;
- fxsave->fx_fsw &= ~0xFF;
-#elif defined(__NetBSD__) && defined(__x86_64__)
- mcontext_t *mc = &uc->uc_mcontext;
- struct fxsave64 *fxsave = (struct fxsave64 *)&mc->__fpregs;
- fxsave->fx_mxcsr = 0x1F80;
- fxsave->fx_fsw &= ~0xFF;
-#elif defined(__sun__) && defined(__x86_64__)
- mcontext_t *mc = &uc->uc_mcontext;
- struct fpchip_state *fpstate = &mc->fpregs.fp_reg_set.fpchip_state;
- fpstate->mxcsr = 0x1F80;
- fpstate->sw &= ~0xFF;
-#endif
- new_fp_exception();
-}
-
-static void catch_sigfpe(void)
-{
- struct sigaction act;
-
- memset(&act, 0, sizeof act);
- act.sa_sigaction = fpe_sig_action;
- act.sa_flags = SA_SIGINFO;
- sigaction(SIGFPE, &act, NULL);
-}
-
-#else
-
-static void fpe_sig_handler(int sig)
-{
- new_fp_exception();
-}
-
-static void catch_sigfpe(void)
-{
- signal(SIGFPE, fpe_sig_handler);
-}
-
-#endif
-
-/*
- * Generic test code
- */
-
-static void do_init(void)
-{
- catch_sigfpe();
- unmask_fpe();
-}
-
-double a = 3.23e133;
-double b = 3.57e257;
-double res;
-
-void do_fmul(void)
-{
- res = a * b;
-}
-
-int do_check(void)
-{
- if (check_fpe(res)) {
- fprintf(stderr, "res = %g, FPE worked\n", res);
- return 0;
- } else {
- fprintf(stderr, "res = %g, FPE failed\n", res);
- return 1;
- }
-}
-
-int main(int argc, const char **argv)
-{
- if (argc == 3) {
- a = atof(argv[1]);
- b = atof(argv[2]);
- }
- do_init();
- do_fmul();
- return do_check();
-}
-],
-erl_ok=yes,
-erl_ok=no,
-[
-case X$erl_xcomp_reliable_fpe in
- X) erl_ok=cross;;
- Xyes|Xno) erl_ok=$erl_xcomp_reliable_fpe;;
- *) AC_MSG_ERROR([Bad erl_xcomp_reliable_fpe value: $erl_xcomp_reliable_fpe]);;
-esac
-])
-
- if test $erl_ok = yes; then
- FPE=reliable
- AC_MSG_RESULT(reliable)
- else
- FPE=unreliable
- AC_MSG_RESULT([unreliable; testing in software instead])
- AC_DEFINE(NO_FPE_SIGNALS,[],[Define if floating points exceptions are non-existing/not reliable])
- if test $erl_ok = cross; then
- AC_MSG_WARN([result unreliable guessed because of cross compilation])
- fi
- fi
-fi
-
+dnl Permanently disable floating point exceptions.
+dnl On x86/amd64, floating point exceptions have
+dnl unresolved stability issues.
+AC_MSG_CHECKING([for unreliable floating point exceptions])
+FPE=unreliable
AC_SUBST(FPE)
-
+AC_MSG_RESULT([unreliable])
+AC_DEFINE(NO_FPE_SIGNALS,[],[Define if floating points exceptions are non-existing/not reliable])
dnl
dnl Some operating systems allow you to redefine FD_SETSIZE to be able
diff --git a/erts/doc/src/Makefile b/erts/doc/src/Makefile
index 5fa8b0673a..96cc4413a9 100644
--- a/erts/doc/src/Makefile
+++ b/erts/doc/src/Makefile
@@ -74,6 +74,7 @@ XML_CHAPTER_FILES = \
match_spec.xml \
crash_dump.xml \
alt_dist.xml \
+ alt_disco.xml \
driver.xml \
absform.xml \
inet_cfg.xml \
diff --git a/erts/doc/src/alt_disco.xml b/erts/doc/src/alt_disco.xml
new file mode 100644
index 0000000000..d04221b9b3
--- /dev/null
+++ b/erts/doc/src/alt_disco.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2018</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>How to Implement an Alternative Service Discovery for Erlang Distribution
+ </title>
+ <prepared>Timmo Verlaan</prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date>2018-04-25</date>
+ <rev>PA1</rev>
+ <file>alt_disco.xml</file>
+ </header>
+ <p>
+ This section describes how to implement an alternative discovery mechanism
+ for Erlang distribution. Discovery is normally done using DNS and the
+ Erlang Port Mapper Daemon (EPMD) for port discovery.
+ </p>
+
+ <note><p>
+ Support for alternative service discovery mechanisms was added in Erlang/OTP
+ 21.
+ </p></note>
+
+
+ <section>
+ <title>Introduction</title>
+ <p>To implement your own service discovery module you have to write your own
+ EPMD module. The <seealso marker="kernel:erl_epmd">EPMD module</seealso> is
+ responsible for providing the location of another node. The distribution
+ modules (<c>inet_tcp_dist</c>/<c>inet_tls_dist</c>) call the EPMD module to
+ get the IP address and port of the other node. The EPMD module that is part
+ of Erlang/OTP will resolve the hostname using DNS and uses the EPMD unix
+ process to get the port of another node. This is done by connecting to the
+ EPMD unix process on the host of the other node, on the well-known port 4369.</p>
+ </section>
+
+ <section>
+ <title>Discovery module</title>
+ <p>The discovery module needs to implement the same API as the regular
+ <seealso marker="kernel:erl_epmd">EPMD module</seealso>. However, instead of
+ communicating with EPMD you can connect to any service to find out
+ connection details of other nodes. A discovery module is enabled
+ by setting <seealso marker="erts:erl#epmd_module">-epmd_module</seealso>
+ when starting erlang. The discovery module must implement the following
+ callbacks:</p>
+
+ <taglist>
+ <tag><seealso marker="kernel:erl_epmd#start_link/0">start_link/0</seealso></tag>
+ <item>Start any processes needed by the discovery module.</item>
+ <tag><seealso marker="kernel:erl_epmd#names/1">names/1</seealso></tag>
+ <item>Return node names held by the registrar for the given host.</item>
+ <tag><seealso marker="kernel:erl_epmd#register_node/2">register_node/2</seealso></tag>
+ <item>Register the given node name with the registrar.</item>
+ <tag><seealso marker="kernel:erl_epmd#port_please/3">port_please/3</seealso></tag>
+ <item>Return the distribution port used by the given node.</item>
+ </taglist>
+
+ <p>The discovery module may implement the following callback:</p>
+
+ <taglist>
+ <tag><seealso marker="kernel:erl_epmd#address_please/3">address_please/3</seealso></tag>
+ <item><p>Return the address of the given node.
+ If not implemented, <seealso marker="kernel:inet#gethostbyname/1">
+ inet:gethostbyname/1</seealso> will be used instead.</p>
+ <p>This callback may also return the port of the given node. In that case
+ <seealso marker="kernel:erl_epmd#port_please/3">port_please/3</seealso>
+ may be omitted.</p></item>
+ </taglist>
+ </section>
+</chapter>
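
To make the callback list above concrete, the following is a minimal sketch of a discovery module. The module name my_epmd, the fixed port 4370, and the assumption that every node listens on that port are illustrative only and not part of this change; the callback names and return values follow the erl_epmd documentation referenced above.

    %% my_epmd.erl -- hypothetical discovery module, selected at startup with
    %%   erl -epmd_module my_epmd ...
    -module(my_epmd).

    -export([start_link/0, names/1, register_node/2, register_node/3,
             port_please/2, port_please/3, address_please/3]).

    -define(DIST_PORT, 4370). %% assumption: every node listens on this port

    %% This sketch needs no helper processes.
    start_link() ->
        ignore.

    %% There is no external registrar; just hand back a "creation" number.
    register_node(Name, Port) ->
        register_node(Name, Port, inet).
    register_node(_Name, _Port, _Family) ->
        {ok, rand:uniform(3)}.

    %% Listing registered names is not supported in this sketch.
    names(_Host) ->
        {error, address}.

    %% Every node is assumed to listen on the same fixed distribution port.
    port_please(Name, Host) ->
        port_please(Name, Host, infinity).
    port_please(_Name, _Host, _Timeout) ->
        {port, ?DIST_PORT, 5}. %% 5 = distribution protocol version

    %% Optional callback: a plain DNS lookup, like the default behaviour.
    address_please(_Name, Host, AddressFamily) ->
        inet:getaddr(Host, AddressFamily).

With a scheme like this the EPMD daemon never needs to run, since every port is known in advance; port_please/3 could equally well query an external directory service.
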
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index 74654a295d..4cf0066999 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -631,14 +631,16 @@
of process heaps is destroyed by the crash dump generation.</p>
<p>Option <c>+d</c> instructs the emulator to produce only a
core dump and no crash dump if an internal error is detected.</p>
- <p>Calling <seealso marker="erlang:halt/1">
+ <p>Calling <seealso marker="erlang#halt/1">
<c>erlang:halt/1</c></seealso> with a string argument still
produces a crash dump. On Unix systems, sending an emulator process
a <c>SIGUSR1</c> signal also forces a crash dump.</p>
</item>
<tag><marker id="+e"/><c><![CDATA[+e Number]]></c></tag>
<item>
- <p>Sets the maximum number of ETS tables.</p>
+ <p>Sets the maximum number of ETS tables. This limit is
+ <seealso marker="stdlib:ets#max_ets_tables">partially obsolete</seealso>.
+ </p>
</item>
<tag><c><![CDATA[+ec]]></c></tag>
<item>
diff --git a/erts/doc/src/erl_driver.xml b/erts/doc/src/erl_driver.xml
index c790872fe4..e6c9905039 100644
--- a/erts/doc/src/erl_driver.xml
+++ b/erts/doc/src/erl_driver.xml
@@ -429,7 +429,7 @@
<taglist>
<tag>Return types for driver callbacks</tag>
<item>
- <p>Rrewrite driver callback
+ <p>Rewrite driver callback
<seealso marker="driver_entry#control"><c>control</c></seealso>
to use return type <c>ErlDrvSSizeT</c> instead of <c>int</c>.</p>
<p>Rewrite driver callback
@@ -841,7 +841,7 @@ int suggested_stack_size;</code>
<p>Thread options structure passed to
<seealso marker="#erl_drv_thread_create">
<c>erl_drv_thread_create</c></seealso>.
- The following fields exists:</p>
+ The following field exists:</p>
<taglist>
<tag><c>suggested_stack_size</c></tag>
<item>A suggestion, in kilowords, on how large a stack to use.
@@ -3220,6 +3220,6 @@ erl_drv_output_term(driver_mk_port(drvport), spec, sizeof(spec) / sizeof(spec[0]
<seealso marker="erlang"><c>erlang(3)</c></seealso>,
<seealso marker="kernel:erl_ddll"><c>erl_ddll(3)</c></seealso>,
section <seealso marker="alt_dist">How to Implement an Alternative
- Carrier for the Erlang Distribution></seealso> in the User's Guide</p>
+ Carrier for the Erlang Distribution</seealso> in the User's Guide</p>
</section>
</cref>
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index f561413fab..15bd80e72f 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -36,8 +36,8 @@
in this module. Some of the BIFs are viewed more
or less as part of the Erlang programming language and are
<em>auto-imported</em>. Thus, it is not necessary to specify the
- module name. For example, the calls <c>atom_to_list(Erlang)</c>
- and <c>erlang:atom_to_list(Erlang)</c> are identical.</p>
+ module name. For example, the calls <c>atom_to_list(erlang)</c>
+ and <c>erlang:atom_to_list(erlang)</c> are identical.</p>
<p>Auto-imported BIFs are listed without module prefix.
BIFs listed with module prefix are not auto-imported.</p>
@@ -2432,6 +2432,26 @@ os_prompt%</pre>
</func>
<func>
+ <name name="is_map_key" arity="2"/>
+      <fsummary>Check whether a map contains a given key.</fsummary>
+      <desc>
+        <p>Returns <c>true</c> if map <c><anno>Map</anno></c> contains
+          <c><anno>Key</anno></c>, and <c>false</c> if it does not.</p>
+ <p>The call fails with a <c>{badmap,Map}</c> exception if
+ <c><anno>Map</anno></c> is not a map.</p>
+ <p><em>Example:</em></p>
+ <code type="none">
+> Map = #{"42" => value}.
+#{"42" => value}
+> is_map_key("42",Map).
+true
+> is_map_key(value,Map).
+false</code>
+ </desc>
+ </func>
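The bif.tab change later in this diff registers is_map_key/2 as a ubif, so it can presumably also be used in guard tests; a minimal sketch (map_key_demo is a hypothetical example module):

    %% Sketch: is_map_key/2 used in a guard and as a plain call.
    -module(map_key_demo).
    -export([greeting/1]).

    greeting(Config) when is_map_key(language, Config) ->
        maps:get(language, Config);
    greeting(_Config) ->
        default.

For example, map_key_demo:greeting(#{language => swedish}) returns swedish, while map_key_demo:greeting(#{}) returns default.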
+
+ <func>
<name name="is_number" arity="1"/>
<fsummary>Check whether a term is a number.</fsummary>
<desc>
@@ -6985,10 +7005,47 @@ ok
from other events in the system. It is only guaranteed that
<c><anno>Suspendee</anno></c> <em>eventually</em> suspends
(unless it
- is resumed). If option <c>asynchronous</c> has <em>not</em>
+          is resumed). If no <c>asynchronous</c> option has
been passed, the caller of <c>erlang:suspend_process/2</c> is
blocked until <c><anno>Suspendee</anno></c> has suspended.</p>
</item>
+ <tag><c>{asynchronous, ReplyTag}</c></tag>
+ <item>
+ <p>A suspend request is sent to the process identified by
+ <c><anno>Suspendee</anno></c>. When the suspend request
+ has been processed, a reply message is sent to the caller
+              of this function. The reply is of the form <c>{ReplyTag,
+              State}</c>, where <c>State</c> is one of:</p>
+ <taglist>
+ <tag><c>exited</c></tag>
+ <item>
+ <p>
+ <c><anno>Suspendee</anno></c> has exited.
+ </p>
+ </item>
+ <tag><c>suspended</c></tag>
+ <item>
+ <p>
+ <c><anno>Suspendee</anno></c> is now suspended.
+ </p>
+ </item>
+ <tag><c>not_suspended</c></tag>
+ <item>
+ <p>
+ <c><anno>Suspendee</anno></c> is not suspended.
+                This can only happen when the process that
+                issued this request has called
+                <c>resume_process(<anno>Suspendee</anno>)</c>
+                before receiving the reply.
+ </p>
+ </item>
+ </taglist>
+ <p>
+            Apart from the reply message, the <c>{asynchronous,
+            ReplyTag}</c> option behaves exactly the same as the
+            <c>asynchronous</c> option without a reply tag.
+ </p>
+ </item>
<tag><c>unless_suspending</c></tag>
<item>
<p>The process identified by <c><anno>Suspendee</anno></c> is
@@ -7012,6 +7069,13 @@ ok
<warning>
<p>This BIF is intended for debugging only.</p>
</warning>
+ <warning>
+            <p>You can easily create deadlocks if processes suspend
+            each other (directly or in a circle). In ERTS versions prior
+            to 10.0, the runtime system prevented such deadlocks, but
+            this prevention has now been removed for performance
+            reasons.</p>
+ </warning>
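A usage sketch of the {asynchronous, ReplyTag} option described above; the receive patterns follow the documented reply states, while Pid is assumed bound to the target process and the helper do_work/1 and the timeout are illustrative:

    %% Suspend Pid without blocking, then wait for the reply message.
    Tag = make_ref(),
    true = erlang:suspend_process(Pid, [{asynchronous, Tag}]),
    receive
        {Tag, suspended}     -> do_work(Pid);      %% safe to inspect Pid now
        {Tag, exited}        -> exited;
        {Tag, not_suspended} -> not_suspended      %% we resumed it before the reply
    after 5000 ->
        timeout
    end.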
<p>Failures:</p>
<taglist>
<tag><c>badarg</c></tag>
@@ -8102,16 +8166,18 @@ ok
<c>erl(1)</c>.
</p>
</item>
+ <tag><marker id="system_info_ets_count"/>
+ <c>ets_count</c></tag>
+ <item>
+ <p>Returns the number of ETS tables currently existing at the
+ local node.</p>
+ </item>
<tag><marker id="system_info_ets_limit"/>
<c>ets_limit</c></tag>
<item>
- <p>Returns the maximum number of ETS tables allowed. This
- limit can be increased at startup by passing
- command-line flag
- <seealso marker="erts:erl#+e"><c>+e</c></seealso> to
- <c>erl(1)</c> or by setting environment variable
- <c>ERL_MAX_ETS_TABLES</c> before starting the Erlang
- runtime system.</p>
+          <p>Returns the limit on the number of ETS tables. This limit is
+           <seealso marker="stdlib:ets#max_ets_tables">partially obsolete</seealso>,
+           and the number of tables is now limited only by available memory.</p>
</item>
<tag><marker id="system_info_port_count"/><c>port_count</c></tag>
<item>
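The new ets_count item and the relaxed meaning of ets_limit can be compared from the shell (values shown are illustrative):

    1> erlang:system_info(ets_count).
    21
    2> erlang:system_info(ets_limit).
    8192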
diff --git a/erts/doc/src/match_spec.xml b/erts/doc/src/match_spec.xml
index 888366b239..46a3daebe8 100644
--- a/erts/doc/src/match_spec.xml
+++ b/erts/doc/src/match_spec.xml
@@ -86,12 +86,12 @@
<c><![CDATA[is_list]]></c> | <c><![CDATA[is_number]]></c> |
<c><![CDATA[is_pid]]></c> | <c><![CDATA[is_port]]></c> |
<c><![CDATA[is_reference]]></c> | <c><![CDATA[is_tuple]]></c> |
- <c><![CDATA[is_map]]></c> | <c><![CDATA[is_binary]]></c> |
- <c><![CDATA[is_function]]></c> | <c><![CDATA[is_record]]></c> |
- <c><![CDATA[is_seq_trace]]></c> | <c><![CDATA['and']]></c> |
- <c><![CDATA['or']]></c> | <c><![CDATA['not']]></c> |
- <c><![CDATA['xor']]></c> | <c><![CDATA['andalso']]></c> |
- <c><![CDATA['orelse']]></c>
+ <c><![CDATA[is_map]]></c> | <c><![CDATA[is_map_key]]></c> |
+ <c><![CDATA[is_binary]]></c> | <c><![CDATA[is_function]]></c> |
+ <c><![CDATA[is_record]]></c> | <c><![CDATA[is_seq_trace]]></c> |
+ <c><![CDATA['and']]></c> | <c><![CDATA['or']]></c> |
+ <c><![CDATA['not']]></c> | <c><![CDATA['xor']]></c> |
+ <c><![CDATA['andalso']]></c> | <c><![CDATA['orelse']]></c>
</item>
<item>ConditionExpression ::= ExprMatchVariable | { GuardFunction } |
{ GuardFunction, ConditionExpression, ... } | TermConstruct
@@ -168,11 +168,12 @@
<c><![CDATA[is_list]]></c> | <c><![CDATA[is_number]]></c> |
<c><![CDATA[is_pid]]></c> | <c><![CDATA[is_port]]></c> |
<c><![CDATA[is_reference]]></c> | <c><![CDATA[is_tuple]]></c> |
- <c><![CDATA[is_map]]></c> | <c><![CDATA[is_binary]]></c> |
- <c><![CDATA[is_function]]></c> | <c><![CDATA[is_record]]></c> |
- <c><![CDATA['and']]></c> | <c><![CDATA['or']]></c> |
- <c><![CDATA['not']]></c> | <c><![CDATA['xor']]></c> |
- <c><![CDATA['andalso']]></c> | <c><![CDATA['orelse']]></c>
+      <c><![CDATA[is_map]]></c> | <c><![CDATA[is_map_key]]></c> |
+ <c><![CDATA[is_binary]]></c> | <c><![CDATA[is_function]]></c> |
+ <c><![CDATA[is_record]]></c> | <c><![CDATA['and']]></c> |
+ <c><![CDATA['or']]></c> | <c><![CDATA['not']]></c> |
+ <c><![CDATA['xor']]></c> | <c><![CDATA['andalso']]></c> |
+ <c><![CDATA['orelse']]></c>
</item>
<item>ConditionExpression ::= ExprMatchVariable | { GuardFunction } |
{ GuardFunction, ConditionExpression, ... } | TermConstruct
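With is_map_key added to the guard functions above, a match specification can test map keys directly; a sketch against a hypothetical ETS table holding {Id, Map} objects:

    %% Return the Id of every object whose map contains the key owner.
    MS = [{{'$1', '$2'},
           [{is_map_key, owner, '$2'}],
           ['$1']}],
    ets:select(Table, MS).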
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index fcc7ec5b66..f7f86084a9 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -31,6 +31,46 @@
</header>
<p>This document describes the changes made to the ERTS application.</p>
+<section><title>Erts 9.3.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fixed bug in <c>enif_binary_to_term</c> which could cause
+ memory corruption for immediate terms (atoms, small
+ integers, pids, ports, empty lists).</p>
+ <p>
+ Own Id: OTP-15080</p>
+ </item>
+ <item>
+ <p>
+ Fixed bug in <c>erlang:system_profile/2</c> that could
+ cause superfluous <c>{profile,_,active,_,_}</c> messages
+ for terminating processes.</p>
+ <p>
+ Own Id: OTP-15085</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 9.3.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Fixed a crash in <c>heart:get_cmd/0</c> when the
+ stored command was too long.</p>
+ <p>
+ Own Id: OTP-15034</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 9.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/erts/doc/src/part.xml b/erts/doc/src/part.xml
index d583b873a0..fc39cb30e6 100644
--- a/erts/doc/src/part.xml
+++ b/erts/doc/src/part.xml
@@ -37,6 +37,7 @@
<xi:include href="match_spec.xml"/>
<xi:include href="crash_dump.xml"/>
<xi:include href="alt_dist.xml"/>
+ <xi:include href="alt_disco.xml"/>
<xi:include href="absform.xml"/>
<xi:include href="tty.xml"/>
<xi:include href="driver.xml"/>
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 5dfa60ee74..054692819e 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -517,7 +517,9 @@ release_docs_spec:
# Generated source code. Put in $(TARGET) directory
#
+ifneq ($(strip $(CREATE_DIRS)),)
_create_dirs := $(shell mkdir -p $(CREATE_DIRS))
+endif
# has to be run after _create_dirs
@@ -570,7 +572,7 @@ $(TTF_DIR)/OPCODES-GENERATED: $(OPCODE_TABLES) utils/beam_makeops
-code-model @CODE_MODEL@ \
-outdir $(TTF_DIR) \
-DUSE_VM_PROBES=$(if $(USE_VM_PROBES),1,0) \
- -DNO_FPE_SIGNALS=$(if $filter(unreliable,$(FPE)),1,0) \
+ -DNO_FPE_SIGNALS=$(if $(filter unreliable,$(FPE)),1,0) \
-emulator $(OPCODE_TABLES) && echo $? >$(TTF_DIR)/OPCODES-GENERATED
GENERATE += $(TTF_DIR)/OPCODES-GENERATED
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index fba0611042..45b7540aeb 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -142,6 +142,7 @@ atom bsr
atom bsr_anycrlf
atom bsr_unicode
atom build_type
+atom busy
atom busy_dist_port
atom busy_port
atom call
@@ -252,6 +253,7 @@ atom exception_from
atom exception_trace
atom exclusive
atom exit_status
+atom exited
atom existing
atom existing_processes
atom existing_ports
@@ -445,6 +447,7 @@ atom no_float
atom no_integer
atom no_network
atom no_start_optimize
+atom not_suspended
atom not
atom not_a_list
atom not_loaded
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index d9312f4df8..a0dbd9ec7b 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -603,8 +603,9 @@ badarg:
BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
{
+ erts_aint32_t state;
Process *rp;
- int reds = 0;
+ int dirty, busy, reds = 0;
Eterm res;
if (BIF_P != erts_dirty_process_signal_handler
@@ -618,20 +619,29 @@ BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
if (is_not_atom(BIF_ARG_2))
BIF_ERROR(BIF_P, BADARG);
- rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_check_dirty_process_code_2],
- BIF_P, BIF_ARG_1, BIF_ARG_2);
+ if (BIF_ARG_1 == BIF_P->common.id)
+ BIF_RET(am_normal);
+
+ rp = erts_proc_lookup_raw(BIF_ARG_1);
if (!rp)
- BIF_RET(am_false);
-
+ BIF_RET(am_false);
+
+ state = erts_atomic32_read_nob(&rp->state);
+ dirty = (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
+ if (!dirty)
+ BIF_RET(am_normal);
+
+ busy = erts_proc_trylock(rp, ERTS_PROC_LOCK_MAIN) == EBUSY;
+
+ if (busy)
+ BIF_RET(am_busy);
+
res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls);
- if (BIF_P != rp)
- erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
- ASSERT(is_value(res));
+ ASSERT(res == am_true || res == am_false);
BIF_RET2(res, reds);
}
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index ee287243a4..ab5920a67e 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1166,6 +1166,9 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
reds_used = treds > INT_MAX ? INT_MAX : (int) treds;
}
+ if (c_p && ERTS_PROC_GET_PENDING_SUSPEND(c_p))
+ erts_proc_sig_handle_pending_suspend(c_p);
+
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 79244b8544..97e1ee1286 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1364,13 +1364,14 @@ BIF_RETTYPE exit_signal_2(BIF_ALIST_2)
/* Handle flags common to both process_flag_2 and process_flag_3. */
-static BIF_RETTYPE process_flag_aux(Process *BIF_P,
- Process *rp,
- Eterm flag,
- Eterm val)
+static Eterm process_flag_aux(Process *c_p, int *redsp, Eterm flag, Eterm val)
{
Eterm old_value = NIL; /* shut up warning about use before set */
Sint i;
+
+ if (redsp)
+ *redsp = 1;
+
if (flag == am_save_calls) {
struct saved_calls *scb;
if (!is_small(val))
@@ -1390,30 +1391,89 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
}
#ifdef HIPE
- if (rp->flags & F_HIPE_MODE) {
- ASSERT(!ERTS_PROC_GET_SAVED_CALLS_BUF(rp));
- scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(rp, scb);
+ if (c_p->flags & F_HIPE_MODE) {
+ ASSERT(!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p));
+ scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(c_p, scb);
}
else
#endif
{
#ifdef HIPE
- ASSERT(!ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(rp));
+ ASSERT(!ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(c_p));
#endif
- scb = ERTS_PROC_SET_SAVED_CALLS_BUF(rp, scb);
- if (rp == BIF_P && ((scb && i == 0) || (!scb && i != 0))) {
- /* Adjust fcalls to match save calls setting... */
- if (i == 0)
- BIF_P->fcalls += CONTEXT_REDS; /* disabled it */
- else
- BIF_P->fcalls -= CONTEXT_REDS; /* enabled it */
-
- /*
- * Make sure we reschedule immediately so the
- * change take effect at once.
- */
- ERTS_VBUMP_ALL_REDS(BIF_P);
- }
+ scb = ERTS_PROC_SET_SAVED_CALLS_BUF(c_p, scb);
+
+ if (((scb && i == 0) || (!scb && i != 0))) {
+
+ /*
+ * Make sure we reschedule immediately so the
+             * change takes effect at once.
+ */
+ if (!redsp) {
+ /* Executed via BIF call.. */
+ via_bif:
+
+ /* Adjust fcalls to match save calls setting... */
+ if (i == 0)
+ c_p->fcalls += CONTEXT_REDS; /* disabled it */
+ else
+ c_p->fcalls -= CONTEXT_REDS; /* enabled it */
+
+ ERTS_VBUMP_ALL_REDS(c_p);
+ }
+ else {
+ erts_aint32_t state;
+ /*
+ * Executed via signal handler. Try to figure
+ * out in what context we are executing...
+ */
+
+ state = erts_atomic32_read_nob(&c_p->state);
+ if (state & (ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING)) {
+ /*
+ * We are either processing signals before
+ * being executed or executing dirty. That
+ * is, no need to adjust anything...
+ */
+ *redsp = 1;
+ }
+ else {
+ ErtsSchedulerData *esdp;
+ ASSERT(state & ERTS_PSFLG_RUNNING);
+
+ /*
+ * F_DELAY_GC is currently only set when
+ * we handle signals in state running via
+ * receive helper...
+ */
+
+ if (!(c_p->flags & F_DELAY_GC)) {
+ *redsp = 1;
+ goto via_bif;
+ }
+
+ /*
+ * Executing via receive helper...
+ *
+ * We utilize the virtual reds counter
+ * in order to get correct calculation
+ * of reductions consumed when scheduling
+ * out the process...
+ */
+
+ esdp = erts_get_scheduler_data();
+
+ if (i == 0)
+ esdp->virtual_reds += CONTEXT_REDS; /* disabled it */
+ else
+ esdp->virtual_reds -= CONTEXT_REDS; /* enabled it */
+
+ *redsp = -1;
+ }
+ }
+ }
}
if (!scb)
@@ -1423,11 +1483,12 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
}
- BIF_RET(old_value);
+ ASSERT(is_immed(old_value));
+ return old_value;
}
error:
- BIF_ERROR(BIF_P, BADARG);
+ return am_badarg;
}
BIF_RETTYPE process_flag_2(BIF_ALIST_2)
@@ -1596,29 +1657,73 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
/* Fall through and try process_flag_aux() ... */
}
- BIF_RET(process_flag_aux(BIF_P, BIF_P, BIF_ARG_1, BIF_ARG_2));
+ old_value = process_flag_aux(BIF_P, NULL, BIF_ARG_1, BIF_ARG_2);
+ if (old_value != am_badarg)
+ BIF_RET(old_value);
error:
BIF_ERROR(BIF_P, BADARG);
}
-BIF_RETTYPE process_flag_3(BIF_ALIST_3)
+typedef struct {
+ Eterm flag;
+ Eterm value;
+ ErlOffHeap oh;
+ Eterm heap[1];
+} ErtsProcessFlag3Args;
+
+static Eterm
+exec_process_flag_3(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
{
- Process *rp;
- Eterm res;
+ ErtsProcessFlag3Args *pf3a = arg;
+ Eterm res;
+
+ if (ERTS_PROC_IS_EXITING(c_p))
+ res = am_badarg;
+ else
+ res = process_flag_aux(c_p, redsp, pf3a->flag, pf3a->value);
+ erts_cleanup_offheap(&pf3a->oh);
+ erts_free(ERTS_ALC_T_PF3_ARGS, arg);
+ return res;
+}
+
+
+BIF_RETTYPE erts_internal_process_flag_3(BIF_ALIST_3)
+{
+ Eterm res, *hp;
+ ErlOffHeap *ohp;
+ ErtsProcessFlag3Args *pf3a;
+ Uint flag_sz, value_sz;
+
+ if (BIF_P->common.id == BIF_ARG_1) {
+ res = process_flag_aux(BIF_P, NULL, BIF_ARG_2, BIF_ARG_3);
+ BIF_RET(res);
+ }
+
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_RET(am_badarg);
+
+ flag_sz = is_immed(BIF_ARG_2) ? 0 : size_object(BIF_ARG_2);
+ value_sz = is_immed(BIF_ARG_3) ? 0 : size_object(BIF_ARG_3);
+
+ pf3a = erts_alloc(ERTS_ALC_T_PF3_ARGS,
+ sizeof(ErtsProcessFlag3Args)
+ + sizeof(Eterm)*(flag_sz+value_sz-1));
+
+ ohp = &pf3a->oh;
+ ERTS_INIT_OFF_HEAP(&pf3a->oh);
- rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD3(bif_export[BIF_process_flag_3], BIF_P,
- BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ hp = &pf3a->heap[0];
- if (!rp)
- BIF_ERROR(BIF_P, BADARG);
+ pf3a->flag = copy_struct(BIF_ARG_2, flag_sz, &hp, ohp);
+ pf3a->value = copy_struct(BIF_ARG_3, value_sz, &hp, ohp);
- res = process_flag_aux(BIF_P, rp, BIF_ARG_2, BIF_ARG_3);
+ res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
+ !0,
+ exec_process_flag_3,
+ (void *) pf3a);
- if (rp != BIF_P)
- erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ if (is_non_value(res))
+ BIF_RET(am_badarg);
return res;
}
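At the Erlang level the process_flag/3 interface is unchanged by routing it through erts_internal; as before it only accepts the save_calls flag for another process. A usage sketch (Pid is assumed to be a local, alive process):

    %% Enable call saving in Pid and read the saved calls later.
    OldValue = erlang:process_flag(Pid, save_calls, 100),
    {last_calls, Calls} = erlang:process_info(Pid, last_calls).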
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index a47339253e..cf9f61c0b8 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -295,6 +295,19 @@ do { \
(Ret) = THE_NON_VALUE; \
} while (0)
+#define ERTS_BIF_PREP_TRAP4(Ret, Trap, Proc, A0, A1, A2, A3) \
+do { \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
+ (Proc)->arity = 4; \
+ reg[0] = (Eterm) (A0); \
+ reg[1] = (Eterm) (A1); \
+ reg[2] = (Eterm) (A2); \
+ reg[3] = (Eterm) (A3); \
+ (Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
+ (Proc)->freason = TRAP; \
+ (Ret) = THE_NON_VALUE; \
+} while (0)
+
#define ERTS_BIF_PREP_TRAP3_NO_RET(Trap, Proc, A0, A1, A2)\
do { \
Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
@@ -343,6 +356,18 @@ do { \
return THE_NON_VALUE; \
} while(0)
+#define BIF_TRAP4(Trap_, p, A0, A1, A2, A3) do { \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
+ (p)->arity = 4; \
+ reg[0] = (A0); \
+ reg[1] = (A1); \
+ reg[2] = (A2); \
+ reg[3] = (A3); \
+ (p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
+ (p)->freason = TRAP; \
+ return THE_NON_VALUE; \
+ } while(0)
+
#define BIF_TRAP_CODE_PTR_0(p, Code_) do { \
(p)->arity = 0; \
(p)->i = (BeamInstr*) (Code_); \
@@ -401,6 +426,12 @@ do { \
ERTS_BIF_PREP_TRAP3(RET, (TRP), (P), (A0), (A1), (A2)); \
} while (0)
+#define ERTS_BIF_PREP_YIELD4(RET, TRP, P, A0, A1, A2, A3) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ ERTS_BIF_PREP_TRAP4(RET, (TRP), (P), (A0), (A1), (A2), (A3)); \
+} while (0)
+
#define ERTS_BIF_YIELD0(TRP, P) \
do { \
ERTS_VBUMP_ALL_REDS((P)); \
@@ -425,6 +456,12 @@ do { \
BIF_TRAP3((TRP), (P), (A0), (A1), (A2)); \
} while (0)
+#define ERTS_BIF_YIELD4(TRP, P, A0, A1, A2, A3) \
+do { \
+ ERTS_VBUMP_ALL_REDS((P)); \
+ BIF_TRAP4((TRP), (P), (A0), (A1), (A2), (A3)); \
+} while (0)
+
#define ERTS_BIF_PREP_EXITED(RET, PROC) \
do { \
KILL_CATCHES((PROC)); \
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 276bef2bbb..7548924178 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -125,7 +125,7 @@ bif erlang:pid_to_list/1
bif erlang:ports/0
bif erlang:pre_loaded/0
bif erlang:process_flag/2
-bif erlang:process_flag/3
+bif erts_internal:process_flag/3
bif erlang:process_info/1
bif erlang:process_info/2
bif erlang:processes/0
@@ -154,7 +154,6 @@ bif erlang:unregister/1
bif erlang:whereis/1
bif erlang:spawn_opt/1
bif erlang:setnode/2
-bif erlang:setnode/3
bif erlang:dist_get_stat/1
bif erlang:dist_ctrl_input_handler/2
bif erlang:dist_ctrl_put_data/2
@@ -191,6 +190,8 @@ bif erts_internal:scheduler_wall_time/1
bif erts_internal:dirty_process_handle_signals/1
+bif erts_internal:create_dist_channel/4
+
# inet_db support
bif erlang:port_set_data/2
bif erlang:port_get_data/1
@@ -204,9 +205,9 @@ bif erlang:seq_trace/2
bif erlang:seq_trace_info/1
bif erlang:seq_trace_print/1
bif erlang:seq_trace_print/2
-bif erlang:suspend_process/2
+bif erts_internal:suspend_process/2
bif erlang:resume_process/1
-bif erlang:process_display/2
+bif erts_internal:process_display/2
bif erlang:bump_reductions/1
@@ -341,7 +342,6 @@ bif ets:internal_request_all/0
bif ets:new/2
bif ets:delete/1
bif ets:delete/2
-bif ets:delete_all_objects/1
bif ets:delete_object/2
bif ets:first/1
bif ets:is_compiled_ms/1
@@ -372,7 +372,6 @@ bif ets:select_count/2
bif ets:select_reverse/1
bif ets:select_reverse/2
bif ets:select_reverse/3
-bif ets:select_delete/2
bif ets:select_replace/2
bif ets:match_spec_compile/1
bif ets:match_spec_run_r/3
@@ -696,3 +695,6 @@ bif ets:whereis/1
bif erts_internal:gather_alloc_histograms/1
bif erts_internal:gather_carrier_info/1
ubif erlang:map_get/2
+ubif erlang:is_map_key/2
+bif ets:internal_delete_all/2
+bif ets:internal_select_delete/2
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 026f0a62d4..70474898b2 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -3138,60 +3138,60 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
-/**********************************************************************
- ** Allocate a dist entry, set node name install the connection handler
- ** setnode_3({name@host, Creation}, Cid, {Type, Version, Initial, IC, OC})
- ** Type = flag field, where the flags are specified in dist.h
- ** Version = distribution version, >= 1
- ** IC = in_cookie (ignored)
- ** OC = out_cookie (ignored)
- **
- ** Note that in distribution protocols above 1, the Initial parameter
- ** is always NIL and the cookies are always the atom '', cookies are not
- ** sent in the distribution messages but are only used in
- ** the handshake.
- **
- ***********************************************************************/
+/*
+ * erts_internal:create_dist_channel/4 is used by
+ * erlang:setnode/3.
+ */
+
+typedef struct {
+ DistEntry *dep;
+ Uint flags;
+ Uint version;
+} ErtsSetupConnDistCtrl;
+
+static void
+setup_connection_epiloge_rwunlock(Process *c_p, DistEntry *dep,
+ Eterm ctrlr, Uint flags,
+ Uint version);
-BIF_RETTYPE setnode_3(BIF_ALIST_3)
+static Eterm
+setup_connection_distctrl(Process *c_p, void *arg,
+ int *redsp, ErlHeapFragment **bpp);
+
+BIF_RETTYPE erts_internal_create_dist_channel_4(BIF_ALIST_4)
{
BIF_RETTYPE ret;
Uint flags;
- unsigned long version;
- Eterm ic, oc;
- Eterm *tp;
+ Uint version;
+ Eterm *hp, res_tag = THE_NON_VALUE, res = THE_NON_VALUE;
DistEntry *dep = NULL;
- ErtsProcLocks proc_unlock = 0;
- Process *proc;
+ int de_locked = 0;
Port *pp = NULL;
- Eterm notify_proc;
- erts_aint32_t qflgs;
/*
* Check and pick out arguments
*/
- if (!is_node_name_atom(BIF_ARG_1) ||
- !(is_internal_port(BIF_ARG_2)
- || is_internal_pid(BIF_ARG_2))
- || (erts_this_node->sysname == am_Noname)) {
- goto badarg;
- }
+ /* Node name... */
+ if (!is_node_name_atom(BIF_ARG_1))
+ goto badarg;
- if (!is_tuple(BIF_ARG_3))
- goto badarg;
- tp = tuple_val(BIF_ARG_3);
- if (*tp++ != make_arityval(4))
- goto badarg;
- if (!is_small(*tp))
- goto badarg;
- flags = unsigned_val(*tp++);
- if (!is_small(*tp) || (version = unsigned_val(*tp)) == 0)
- goto badarg;
- ic = *(++tp);
- oc = *(++tp);
- if (!is_atom(ic) || !is_atom(oc))
- goto badarg;
+ /* Distribution controller... */
+ if (!is_internal_port(BIF_ARG_2) && !is_internal_pid(BIF_ARG_2))
+ goto badarg;
+
+ /* Dist flags... */
+ if (!is_small(BIF_ARG_3))
+ goto badarg;
+ flags = unsigned_val(BIF_ARG_3);
+
+ /* Version... */
+ if (!is_small(BIF_ARG_4))
+ goto badarg;
+ version = unsigned_val(BIF_ARG_4);
+
+ if (version == 0)
+ goto badarg;
if (~flags & DFLAG_DIST_MANDATORY) {
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
@@ -3222,74 +3222,79 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
else if (!dep)
goto system_limit; /* Should never happen!!! */
+ erts_de_rlock(dep);
+ de_locked = -1;
+
+ if (dep->state == ERTS_DE_STATE_EXITING) {
+ /* Suspend on dist entry waiting for the exit to finish */
+ ErtsProcList *plp = erts_proclist_create(BIF_P);
+ plp->next = NULL;
+ erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
+ erts_mtx_lock(&dep->qlock);
+ erts_proclist_store_last(&dep->suspended, plp);
+ erts_mtx_unlock(&dep->qlock);
+ goto yield;
+ }
+
+ erts_de_runlock(dep);
+ de_locked = 0;
+
if (is_internal_pid(BIF_ARG_2)) {
if (BIF_P->common.id == BIF_ARG_2) {
- proc_unlock = 0;
- proc = BIF_P;
- }
- else {
- proc_unlock = ERTS_PROC_LOCK_MAIN;
- proc = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_2, proc_unlock);
- }
- erts_de_rwlock(dep);
-
- if (!proc)
- goto badarg;
- else if (proc == ERTS_PROC_LOCK_BUSY) {
- proc_unlock = 0;
- goto yield;
- }
+ ErtsSetupConnDistCtrl scdc;
- erts_proc_lock(proc, ERTS_PROC_LOCK_STATUS);
- proc_unlock |= ERTS_PROC_LOCK_STATUS;
+ scdc.dep = dep;
+ scdc.flags = flags;
+ scdc.version = version;
- if (ERTS_PROC_GET_DIST_ENTRY(proc)) {
- if (dep == ERTS_PROC_GET_DIST_ENTRY(proc)
- && (proc->flags & F_DISTRIBUTION)
- && dep->cid == BIF_ARG_2) {
- ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
- goto done;
- }
- goto badarg;
- }
+ res = setup_connection_distctrl(BIF_P, &scdc, NULL, NULL);
+ BUMP_REDS(BIF_P, 5);
+ dep = NULL;
- if (dep->state == ERTS_DE_STATE_EXITING) {
- /* Suspend on dist entry waiting for the exit to finish */
- ErtsProcList *plp = erts_proclist_create(BIF_P);
- plp->next = NULL;
- erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
- erts_mtx_lock(&dep->qlock);
- erts_proclist_store_last(&dep->suspended, plp);
- erts_mtx_unlock(&dep->qlock);
- goto yield;
- }
- if (dep->state != ERTS_DE_STATE_PENDING) {
- if (dep->state == ERTS_DE_STATE_IDLE)
- erts_set_dist_entry_pending(dep);
- else
+ if (res == am_badarg)
goto badarg;
+
+ ASSERT(is_internal_magic_ref(res));
+ res_tag = am_ok; /* Connection up */
}
+ else {
+ ErtsSetupConnDistCtrl *scdcp;
- if (is_not_nil(dep->cid))
- goto badarg;
+ scdcp = erts_alloc(ERTS_ALC_T_SETUP_CONN_ARG,
+ sizeof(ErtsSetupConnDistCtrl));
- proc->flags |= F_DISTRIBUTION;
- ERTS_PROC_SET_DIST_ENTRY(proc, dep);
+ scdcp->dep = dep;
+ scdcp->flags = flags;
+ scdcp->version = version;
- proc_unlock &= ~ERTS_PROC_LOCK_STATUS;
- erts_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
+ res = erts_proc_sig_send_rpc_request(BIF_P,
+ BIF_ARG_2,
+ !0,
+ setup_connection_distctrl,
+ (void *) scdcp);
+ if (is_non_value(res))
+ goto badarg;
- dep->send = NULL; /* Only for distr ports... */
+ dep = NULL;
+ ASSERT(is_internal_ordinary_ref(res));
+
+            res_tag = am_message; /* Caller needs to wait for dhandle in message */
+ }
+ hp = HAlloc(BIF_P, 3);
}
else {
+ int new;
pp = erts_id2port_sflgs(BIF_ARG_2,
BIF_P,
ERTS_PROC_LOCK_MAIN,
ERTS_PORT_SFLGS_INVALID_LOOKUP);
erts_de_rwlock(dep);
+ de_locked = 1;
+
+ if (dep->state == ERTS_DE_STATE_EXITING)
+ goto badarg;
if (!pp || (erts_atomic32_read_nob(&pp->state)
& ERTS_PORT_SFLG_EXITING))
@@ -3298,65 +3303,108 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0)
goto badarg;
- if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep) {
- ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
- goto done; /* Already set */
- }
+ if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep)
+ new = 0;
+ else {
+ if (dep->state != ERTS_DE_STATE_PENDING) {
+ if (dep->state == ERTS_DE_STATE_IDLE)
+ erts_set_dist_entry_pending(dep);
+ else
+ goto badarg;
+ }
- if (dep->state == ERTS_DE_STATE_EXITING) {
- /* Suspend on dist entry waiting for the exit to finish */
- ErtsProcList *plp = erts_proclist_create(BIF_P);
- plp->next = NULL;
- erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
- erts_mtx_lock(&dep->qlock);
- erts_proclist_store_last(&dep->suspended, plp);
- erts_mtx_unlock(&dep->qlock);
- goto yield;
- }
- if (dep->state != ERTS_DE_STATE_PENDING) {
- if (dep->state == ERTS_DE_STATE_IDLE)
- erts_set_dist_entry_pending(dep);
- else
+ if (pp->dist_entry || is_not_nil(dep->cid))
goto badarg;
- }
- if (pp->dist_entry || is_not_nil(dep->cid))
- goto badarg;
+ erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
- erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION);
+ pp->dist_entry = dep;
- pp->dist_entry = dep;
+ ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
- ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output);
+ dep->send = (pp->drv_ptr->outputv
+ ? dist_port_commandv
+ : dist_port_command);
+ ASSERT(dep->send);
- dep->send = (pp->drv_ptr->outputv
- ? dist_port_commandv
- : dist_port_command);
- ASSERT(dep->send);
+ /*
+ * Dist-ports do not use the "busy port message queue" functionality, but
+ * instead use "busy dist entry" functionality.
+ */
+ {
+ ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
+ erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
+ }
- /*
- * Dist-ports do not use the "busy port message queue" functionality, but
- * instead use "busy dist entry" functionality.
- */
- {
- ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED;
- erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL);
+ setup_connection_epiloge_rwunlock(BIF_P, dep, BIF_ARG_2, flags, version);
+ de_locked = 0;
+ new = !0;
}
+ hp = HAlloc(BIF_P, 3 + ERTS_MAGIC_REF_THING_SIZE);
+ res = erts_build_dhandle(&hp, &BIF_P->off_heap, dep);
+ res_tag = am_ok; /* Connection up */
+ if (new)
+ dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+ }
+
+ ASSERT(is_value(res) && is_value(res_tag));
+
+ res = TUPLE2(hp, res_tag, res);
+
+ ERTS_BIF_PREP_RET(ret, res);
+
+ done:
+
+ if (dep && dep != erts_this_dist_entry) {
+ if (de_locked) {
+ if (de_locked > 0)
+ erts_de_rwunlock(dep);
+ else
+ erts_de_runlock(dep);
+ }
+ erts_deref_dist_entry(dep);
}
+ if (pp)
+ erts_port_release(pp);
+
+ return ret;
+
+ yield:
+ ERTS_BIF_PREP_YIELD4(ret,
+ bif_export[BIF_erts_internal_create_dist_channel_4],
+ BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
+ goto done;
+
+ badarg:
+ ERTS_BIF_PREP_RET(ret, am_badarg);
+ goto done;
+
+ system_limit:
+ ERTS_BIF_PREP_RET(ret, am_system_limit);
+ goto done;
+}
+
+static void
+setup_connection_epiloge_rwunlock(Process *c_p, DistEntry *dep,
+ Eterm ctrlr, Uint flags,
+ Uint version)
+{
+ Eterm notify_proc = NIL;
+ erts_aint32_t qflgs;
+
dep->version = version;
dep->creation = 0;
-#ifdef DEBUG
+ ASSERT(is_internal_port(ctrlr) || is_internal_pid(ctrlr));
ASSERT(erts_atomic_read_nob(&dep->qsize) == 0
|| (dep->state == ERTS_DE_STATE_PENDING));
-#endif
if (flags & DFLAG_DIST_HDR_ATOM_CACHE)
create_cache(dep);
- erts_set_dist_entry_connected(dep, BIF_ARG_2, flags);
+ erts_set_dist_entry_connected(dep, ctrlr, flags);
notify_proc = NIL;
if (erts_atomic_read_nob(&dep->qsize)) {
@@ -3375,50 +3423,100 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3)
}
}
}
- erts_de_rwunlock(dep);
- if (is_internal_pid(notify_proc))
- notify_dist_data(BIF_P, notify_proc);
- ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep));
+ erts_de_rwunlock(dep);
- dep = NULL; /* inc of refc transferred to port (dist_entry field) */
+ if (is_internal_pid(notify_proc))
+ notify_dist_data(c_p, notify_proc);
inc_no_nodes();
- send_nodes_mon_msgs(BIF_P,
+ send_nodes_mon_msgs(c_p,
am_nodeup,
- BIF_ARG_1,
+ dep->sysname,
flags & DFLAG_PUBLISHED ? am_visible : am_hidden,
NIL);
- done:
+}
- if (dep && dep != erts_this_dist_entry) {
- erts_de_rwunlock(dep);
- erts_deref_dist_entry(dep);
+static Eterm
+setup_connection_distctrl(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
+{
+ ErtsSetupConnDistCtrl *scdcp = (ErtsSetupConnDistCtrl *) arg;
+ DistEntry *dep = scdcp->dep;
+ int dep_locked = 0;
+ Eterm *hp;
+ erts_aint32_t state;
+
+ if (redsp)
+ *redsp = 1;
+
+ state = erts_atomic32_read_nob(&c_p->state);
+
+ if (state & ERTS_PSFLG_EXITING)
+ goto badarg;
+
+ erts_de_rwlock(dep);
+ dep_locked = !0;
+
+ if (dep->state == ERTS_DE_STATE_EXITING)
+ goto badarg;
+
+ if (ERTS_PROC_GET_DIST_ENTRY(c_p)) {
+ if (dep == ERTS_PROC_GET_DIST_ENTRY(c_p)
+ && (c_p->flags & F_DISTRIBUTION)
+ && dep->cid == c_p->common.id) {
+ goto connected;
+ }
+ goto badarg;
}
- if (pp)
- erts_port_release(pp);
+ if (dep->state != ERTS_DE_STATE_PENDING) {
+ if (dep->state == ERTS_DE_STATE_IDLE)
+ erts_set_dist_entry_pending(dep);
+ else
+ goto badarg;
+ }
- if (proc_unlock)
- erts_proc_unlock(proc, proc_unlock);
+ if (is_not_nil(dep->cid))
+ goto badarg;
- return ret;
+ c_p->flags |= F_DISTRIBUTION;
+ ERTS_PROC_SET_DIST_ENTRY(c_p, dep);
- yield:
- ERTS_BIF_PREP_YIELD3(ret, bif_export[BIF_setnode_3], BIF_P,
- BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
- goto done;
+ dep->send = NULL; /* Only for distr ports... */
- badarg:
- ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG);
- goto done;
+ if (redsp)
+ *redsp = 5;
- system_limit:
- ERTS_BIF_PREP_ERROR(ret, BIF_P, SYSTEM_LIMIT);
- goto done;
+ setup_connection_epiloge_rwunlock(c_p, dep, c_p->common.id,
+ scdcp->flags, scdcp->version);
+connected:
+
+ /* we take over previous inc in refc of dep */
+
+ if (!bpp) /* called directly... */
+ return erts_make_dhandle(c_p, dep);
+
+ erts_free(ERTS_ALC_T_SETUP_CONN_ARG, arg);
+
+ *bpp = new_message_buffer(ERTS_MAGIC_REF_THING_SIZE);
+ hp = (*bpp)->mem;
+ return erts_build_dhandle(&hp, &(*bpp)->off_heap, dep);
+
+badarg:
+
+ if (bpp) /* not called directly */
+ erts_free(ERTS_ALC_T_SETUP_CONN_ARG, arg);
+
+ if (dep_locked)
+ erts_de_rwunlock(dep);
+
+ erts_deref_dist_entry(dep);
+
+ return am_badarg;
}
+
BIF_RETTYPE erts_internal_get_dflags_0(BIF_ALIST_0)
{
return erts_dflags_record;
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index d99d2ea57b..575d6ca867 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -2821,20 +2821,20 @@ erts_allocator_options(void *proc)
Eterm as[4];
Eterm ts[4];
- as[l] = am_atom_put("e", 1);
+ as[l] = ERTS_MAKE_AM("e");
ts[l++] = am_true;
switch (a) {
case ERTS_ALC_A_SYSTEM:
- as[l] = am_atom_put("m", 1);
- ts[l++] = am_atom_put("libc", 4);
+ as[l] = ERTS_MAKE_AM("m");
+ ts[l++] = ERTS_MAKE_AM("libc");
if(sas.trim_threshold >= 0) {
- as[l] = am_atom_put("tt", 2);
+ as[l] = ERTS_MAKE_AM("tt");
ts[l++] = erts_bld_uint(hpp, szp,
(Uint) sas.trim_threshold);
}
if(sas.top_pad >= 0) {
- as[l] = am_atom_put("tp", 2);
+ as[l] = ERTS_MAKE_AM("tp");
ts[l++] = erts_bld_uint(hpp, szp, (Uint) sas.top_pad);
}
break;
@@ -2848,7 +2848,7 @@ erts_allocator_options(void *proc)
}
else {
- Eterm atom = am_atom_put("e", 1);
+ Eterm atom = ERTS_MAKE_AM("e");
Eterm term = am_false;
tmp = erts_bld_2tup_list(hpp, szp, 1, &atom, &term);
}
@@ -2859,12 +2859,12 @@ erts_allocator_options(void *proc)
#if HAVE_ERTS_MSEG
if (use_mseg) {
- atoms[length] = am_atom_put("mseg_alloc", 10);
+ atoms[length] = ERTS_MAKE_AM("mseg_alloc");
terms[length++] = erts_mseg_info_options(0, NULL, NULL, hpp, szp);
}
#endif
- atoms[length] = am_atom_put("alloc_util", 10);
+ atoms[length] = ERTS_MAKE_AM("alloc_util");
terms[length++] = erts_alcu_au_info_options(NULL, NULL, hpp, szp);
#if HAVE_ERTS_MMAP
@@ -2874,17 +2874,15 @@ erts_allocator_options(void *proc)
#endif
{
Eterm o[1], v[1];
- o[0] = am_atom_put("t", 1);
+ o[0] = ERTS_MAKE_AM("t");
v[0] = erts_mtrace_enabled ? am_true : am_false;
- atoms[length] = am_atom_put("instr", 5);
+ atoms[length] = ERTS_MAKE_AM("instr");
terms[length++] = erts_bld_2tup_list(hpp, szp, 1, o, v);
}
- atoms[length] = am_atom_put("lock_physical_memory", 20);
- terms[length++] = (lock_all_physical_memory
- ? am_atom_put("all", 3)
- : am_atom_put("no", 2));
+ atoms[length] = ERTS_MAKE_AM("lock_physical_memory");
+ terms[length++] = (lock_all_physical_memory ? am_all : am_no);
settings = erts_bld_2tup_list(hpp, szp, length, atoms, terms);
@@ -2899,10 +2897,10 @@ erts_allocator_options(void *proc)
#if HAVE_ERTS_MSEG
if (use_mseg)
- terms[length++] = am_atom_put("mseg_alloc", 10);
+ terms[length++] = ERTS_MAKE_AM("mseg_alloc");
#endif
#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
- terms[length++] = am_atom_put("sys_aligned_alloc", 17);
+ terms[length++] = ERTS_MAKE_AM("sys_aligned_alloc");
#endif
#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
terms[length++] = ERTS_MAKE_AM("literal_mmap");
@@ -2911,7 +2909,7 @@ erts_allocator_options(void *proc)
#if defined(__GLIBC__)
{
- Eterm AM_glibc = am_atom_put("glibc", 5);
+ Eterm AM_glibc = ERTS_MAKE_AM("glibc");
Eterm version;
version = erts_bld_cons(hpp,
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 4a6a19b210..5409b89bab 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -118,9 +118,6 @@ type PORT DRIVER SYSTEM port
type ATOM LONG_LIVED ATOM atom_entry
type MODULE LONG_LIVED CODE module_entry
type REG_PROC STANDARD PROCESSES reg_proc
-type LINK_LH STANDARD PROCESSES link_lh
-type SUSPEND_MON STANDARD PROCESSES suspend_monitor
-type PEND_SUSPEND SHORT_LIVED PROCESSES pending_suspend
type PROC_LIST SHORT_LIVED PROCESSES proc_list
type SAVED_ESTACK SHORT_LIVED PROCESSES saved_estack
type FUN_ENTRY LONG_LIVED CODE fun_entry
@@ -133,7 +130,6 @@ type TMP_HEAP TEMPORARY PROCESSES tmp_heap
type MSG_REF FIXED_SIZE PROCESSES msg_ref
type MSG EHEAP PROCESSES message
type MSGQ_CHNG SHORT_LIVED PROCESSES messages_queue_change
-type MSG_ROOTS TEMPORARY PROCESSES msg_roots
type ROOTSET TEMPORARY PROCESSES root_set
type LOADER_TMP TEMPORARY CODE loader_tmp
type PREPARED_CODE SHORT_LIVED CODE prepared_code
@@ -178,7 +174,6 @@ type BPD STANDARD SYSTEM bpd
type LINEBUF STANDARD SYSTEM line_buf
type IOQ STANDARD SYSTEM io_queue
type BITS_BUF STANDARD SYSTEM bits_buf
-type TMP_DIST_BUF TEMPORARY SYSTEM tmp_dist_buf
type ASYNC_DATA LONG_LIVED SYSTEM internal_async_data
type ESTACK TEMPORARY SYSTEM estack
type DB_TABLE ETS ETS db_tab
@@ -191,7 +186,6 @@ type DB_MC_STK TEMPORARY ETS db_mc_stack
type DB_MS_RUN_HEAP SHORT_LIVED ETS db_match_spec_run_heap
type DB_MS_CMPL_HEAP TEMPORARY ETS db_match_spec_cmpl_heap
type DB_SEG ETS ETS db_segment
-type DB_SEG_TAB ETS ETS db_segment_tab
type DB_STK ETS ETS db_stack
type DB_TRANS_TAB ETS ETS db_trans_tab
type DB_SEL_LIST ETS ETS db_select_list
@@ -200,7 +194,6 @@ type DB_DMC_ERR_INFO ETS ETS db_dmc_error_info
type DB_TERM ETS ETS db_term
type DB_PROC_CLEANUP SHORT_LIVED ETS db_proc_cleanup_state
type ETS_ALL_REQ SHORT_LIVED ETS ets_all_request
-type INSTR_INFO LONG_LIVED SYSTEM instr_info
type LOGGER_DSBUF TEMPORARY SYSTEM logger_dsbuf
type TMP_DSBUF TEMPORARY SYSTEM tmp_dsbuf
type INFO_DSBUF SYSTEM SYSTEM info_dsbuf
@@ -215,7 +208,6 @@ type PT_HNDL_LIST SHORT_LIVED SYSTEM port_task_handle_list
type MISC_OP_LIST SHORT_LIVED SYSTEM misc_op_list
type PORT_NAMES SHORT_LIVED SYSTEM port_names
type PORT_DATA_LOCK DRIVER SYSTEM port_data_lock
-type NODES_MON STANDARD PROCESSES nodes_monitor
type PTAB_LIST_DEL SHORT_LIVED PROCESSES ptab_list_deleted_el
type PTAB_LIST_CNKI SHORT_LIVED PROCESSES ptab_list_chunk_info
type PTAB_LIST_PIDS SHORT_LIVED PROCESSES ptab_list_pids
@@ -238,10 +230,8 @@ type CPUDATA LONG_LIVED SYSTEM cpu_data
type TMP_CPU_IDS SHORT_LIVED SYSTEM tmp_cpu_ids
type EXT_TERM_DATA SHORT_LIVED PROCESSES external_term_data
type CPU_GRPS_MAP LONG_LIVED SYSTEM cpu_groups_map
-type AUX_WORK_TMO LONG_LIVED SYSTEM aux_work_timeouts
type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
type CODE_IX_LOCK_Q SHORT_LIVED SYSTEM code_ix_lock_q
-type PROC_INTERVAL LONG_LIVED SYSTEM process_interval
type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table
type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller
type PROC_SYS_TSK SHORT_LIVED PROCESSES proc_sys_task
@@ -250,8 +240,6 @@ type NEW_TIME_OFFSET SHORT_LIVED SYSTEM new_time_offset
type IOB_REQ SHORT_LIVED SYSTEM io_bytes_request
type TRACER_NIF LONG_LIVED SYSTEM tracer_nif
type TRACE_MSG_QUEUE SHORT_LIVED SYSTEM trace_message_queue
-type SCHED_ASYNC_JOB SHORT_LIVED SYSTEM async_calls
-type DIRTY_START STANDARD PROCESSES dirty_start
type DIRTY_SL SHORT_LIVED SYSTEM dirty_short_lived
type MREF_NSCHED_ENT FIXED_SIZE SYSTEM nsched_magic_ref_entry
type MREF_ENT STANDARD SYSTEM magic_ref_entry
@@ -259,7 +247,6 @@ type MREF_TAB_BKTS STANDARD SYSTEM magic_ref_table_buckets
type MREF_TAB LONG_LIVED SYSTEM magic_ref_table
type MINDIRECTION FIXED_SIZE SYSTEM magic_indirection
type BINARY_FIND SHORT_LIVED PROCESSES binary_find
-type OPEN_PORT_ENV TEMPORARY SYSTEM open_port_env
type CRASH_DUMP STANDARD SYSTEM crash_dump
type DIST_TRANSCODE SHORT_LIVED SYSTEM dist_transcode_context
@@ -273,10 +260,8 @@ type THR_Q_LL LONG_LIVED SYSTEM long_lived_thr_queue
type ASYNC SHORT_LIVED SYSTEM async
type ZLIB STANDARD SYSTEM zlib
-type PORT_LOCK STANDARD SYSTEM port_lock
type DRIVER_LOCK STANDARD SYSTEM driver_lock
type XPORTS_LIST SHORT_LIVED SYSTEM extra_port_list
-type PROC_LCK_WTR LONG_LIVED SYSTEM proc_lock_waiter
type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
type THR_PRGR_IDATA LONG_LIVED SYSTEM thr_prgr_internal_data
type THR_PRGR_DATA LONG_LIVED SYSTEM thr_prgr_data
@@ -287,6 +272,8 @@ type DIST_DEMONITOR SHORT_LIVED PROCESSES dist_demonitor
type CML_CLEANUP SHORT_LIVED SYSTEM connection_ml_cleanup
type ML_YIELD_STATE SHORT_LIVED SYSTEM monitor_link_yield_state
type ML_DIST STANDARD SYSTEM monitor_link_dist
+type PF3_ARGS SHORT_LIVED PROCESSES process_flag_3_arguments
+type SETUP_CONN_ARG SHORT_LIVED PROCESSES setup_connection_argument
type ENVIRONMENT SYSTEM SYSTEM environment
@@ -315,12 +302,6 @@ type HIPE_EXEC EXEC CODE hipe_code
+endif
-+if heap_frag_elim_test
-
-type SSB SHORT_LIVED PROCESSES ssb
-
-+endif
-
+if lcnt
type LCNT_CARRIER STANDARD SYSTEM lcnt_lock_info_carrier
@@ -340,12 +321,12 @@ type PURGE_DATA SHORT_LIVED CODE purge_data
type DB_HEIR_DATA STANDARD ETS db_heir_data
type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
-type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
type NIF_TRAP_EXPORT STANDARD PROCESSES nif_trap_export_entry
type NIF_EXP_TRACE FIXED_SIZE PROCESSES nif_export_trace
type EXPORT LONG_LIVED CODE export_entry
type MONITOR FIXED_SIZE PROCESSES monitor
+type MONITOR_SUSPEND STANDARD PROCESSES monitor_suspend
type LINK FIXED_SIZE PROCESSES link
type AINFO_REQ SHORT_LIVED SYSTEM alloc_info_request
type SCHED_WTIME_REQ SHORT_LIVED SYSTEM sched_wall_time_request
@@ -363,21 +344,17 @@ type DRV_TAB LONG_LIVED SYSTEM drv_tab
type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state
type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state
type NIF_SEL_D_STATE FIXED_SIZE SYSTEM enif_select_data_state
-type FD_LIST SHORT_LIVED SYSTEM fd_list
type POLLSET LONG_LIVED SYSTEM pollset
type POLLSET_UPDREQ SHORT_LIVED SYSTEM pollset_update_req
type POLL_FDS LONG_LIVED SYSTEM poll_fds
-type POLL_RES_EVS LONG_LIVED SYSTEM poll_result_events
type FD_STATUS LONG_LIVED SYSTEM fd_status
type SELECT_FDS LONG_LIVED SYSTEM select_fds
+if unix
type SYS_READ_BUF TEMPORARY SYSTEM sys_read_buf
-type FD_TAB LONG_LIVED SYSTEM fd_tab
type FD_ENTRY_BUF STANDARD SYSTEM fd_entry_buf
type CS_PROG_PATH LONG_LIVED SYSTEM cs_prog_path
-type PRT_REP_EXIT STANDARD SYSTEM port_report_exit
type SYS_BLOCKING STANDARD SYSTEM sys_blocking
type SYS_WRITE_BUF TEMPORARY SYSTEM sys_write_buf
@@ -389,7 +366,6 @@ type SYS_WRITE_BUF TEMPORARY SYSTEM sys_write_buf
type DRV_DATA_BUF SYSTEM SYSTEM drv_data_buf
type PRELOADED LONG_LIVED SYSTEM preloaded
type WAITER_OBJ LONG_LIVED SYSTEM waiter_object
-type CON_VPRINTF_BUF TEMPORARY SYSTEM con_vprintf_buf
+endif
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index ff4d10b206..cbae8ce98a 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -414,6 +414,10 @@ struct AOFF_RBTree_t_ {
AOFF_RBTree_t *right;
Uint32 flags;
Uint32 max_sz; /* of all blocks in this sub-tree */
+ union {
+ AOFF_RBTree_t* next; /* for best fit */
+ Sint64 birth_time; /* for age first fit */
+ } u;
};
void aoff_add_pooled_mbc(Allctr_t*, Carrier_t*);
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.c b/erts/emulator/beam/erl_ao_firstfit_alloc.c
index 0c5545401a..ebbe4af53d 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.c
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.c
@@ -100,22 +100,14 @@
#define AOFF_BLK_SZ(B) MBC_FBLK_SZ(&(B)->hdr)
-/* BF block nodes keeps list of all with equal size
- */
-typedef struct {
- AOFF_RBTree_t t;
- AOFF_RBTree_t *next;
-}AOFF_RBTreeList_t;
-
-#define LIST_NEXT(N) (((AOFF_RBTreeList_t*) (N))->next)
-#define LIST_PREV(N) (((AOFF_RBTreeList_t*) (N))->t.parent)
+#define LIST_NEXT(N) (((AOFF_RBTree_t*)(N))->u.next)
+#define LIST_PREV(N) (((AOFF_RBTree_t*)(N))->parent)
typedef struct AOFF_Carrier_t_ AOFF_Carrier_t;
struct AOFF_Carrier_t_ {
Carrier_t crr;
AOFF_RBTree_t rbt_node; /* My node in the carrier tree */
- Sint64 birth_time;
AOFF_RBTree_t* root; /* Root of my block tree */
};
#define RBT_NODE_TO_MBC(PTR) ErtsContainerStruct((PTR), AOFF_Carrier_t, rbt_node)
@@ -199,9 +191,7 @@ static ERTS_INLINE SWord cmp_blocks(enum AOFFSortOrder order,
{
ASSERT(lhs != rhs);
if (order == FF_AGEFF) {
- AOFF_Carrier_t* lc = RBT_NODE_TO_MBC(lhs);
- AOFF_Carrier_t* rc = RBT_NODE_TO_MBC(rhs);
- Sint64 diff = lc->birth_time - rc->birth_time;
+ Sint64 diff = lhs->u.birth_time - rhs->u.birth_time;
#ifdef ARCH_64
if (diff)
return diff;
@@ -296,8 +286,10 @@ erts_aoffalc_start(AOFFAllctr_t *alc,
allctr->mbc_header_size = sizeof(AOFF_Carrier_t);
allctr->min_mbc_size = MIN_MBC_SZ;
allctr->min_mbc_first_free_size = MIN_MBC_FIRST_FREE_SZ;
- allctr->min_block_size = (aoffinit->blk_order == FF_BF ?
- sizeof(AOFF_RBTreeList_t):sizeof(AOFF_RBTree_t));
+ allctr->min_block_size = (aoffinit->blk_order == FF_BF
+ ? (offsetof(AOFF_RBTree_t, u.next)
+ + ErtsSizeofMember(AOFF_RBTree_t, u.next))
+ : offsetof(AOFF_RBTree_t, u));
allctr->vsn_str = ERTS_ALC_AOFF_ALLOC_VSN_STR;
@@ -939,8 +931,11 @@ static void aoff_creating_mbc(Allctr_t *allctr, Carrier_t *carrier)
HARD_CHECK_TREE(NULL, alc->crr_order, *root, 0);
crr->rbt_node.hdr.bhdr = 0;
- if (alc->crr_order == FF_AGEFF || IS_DEBUG)
- crr->birth_time = get_birth_time();
+ if (alc->crr_order == FF_AGEFF || IS_DEBUG) {
+ Sint64 bt = get_birth_time();
+ crr->rbt_node.u.birth_time = bt;
+ crr->crr.cpool.pooled.u.birth_time = bt;
+ }
rbt_insert(alc->crr_order, root, &crr->rbt_node);
/* aoff_link_free_block will add free block later */
@@ -978,6 +973,7 @@ static void aoff_add_mbc(Allctr_t *allctr, Carrier_t *carrier)
void aoff_add_pooled_mbc(Allctr_t *allctr, Carrier_t *crr)
{
+ AOFFAllctr_t *alc = (AOFFAllctr_t *) allctr;
AOFF_RBTree_t **root = &allctr->cpool.pooled_tree;
ASSERT(allctr == crr->cpool.orig_allctr);
@@ -985,7 +981,7 @@ void aoff_add_pooled_mbc(Allctr_t *allctr, Carrier_t *crr)
/* Link carrier in address order tree
*/
- rbt_insert(FF_AOFF, root, &crr->cpool.pooled);
+ rbt_insert(alc->crr_order, root, &crr->cpool.pooled);
HARD_CHECK_TREE(NULL, 0, *root, 0);
}
diff --git a/erts/emulator/beam/erl_ao_firstfit_alloc.h b/erts/emulator/beam/erl_ao_firstfit_alloc.h
index 9cf4fc81a8..dad864801f 100644
--- a/erts/emulator/beam/erl_ao_firstfit_alloc.h
+++ b/erts/emulator/beam/erl_ao_firstfit_alloc.h
@@ -29,10 +29,10 @@
typedef struct AOFFAllctr_t_ AOFFAllctr_t;
enum AOFFSortOrder {
- FF_AGEFF = 0,
+ FF_AGEFF = 0, /* carrier trees only */
FF_AOFF = 1,
- FF_AOBF = 2,
- FF_BF = 3
+ FF_AOBF = 2, /* block trees only */
+ FF_BF = 3 /* block trees only */
};
typedef struct {
diff --git a/erts/emulator/beam/erl_bif_chksum.c b/erts/emulator/beam/erl_bif_chksum.c
index 9095bcd380..cf92687595 100644
--- a/erts/emulator/beam/erl_bif_chksum.c
+++ b/erts/emulator/beam/erl_bif_chksum.c
@@ -44,7 +44,7 @@ void erts_init_bif_chksum(void)
{
/* Non visual BIF to trap to. */
erts_init_trap_export(&chksum_md5_2_exp,
- am_erlang, am_atom_put("md5_trap",8), 2,
+ am_erlang, ERTS_MAKE_AM("md5_trap"), 2,
&md5_2);
}
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 6f9e507228..5789fa8e71 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -617,17 +617,13 @@ static void collect_one_target_monitor(ErtsMonitor *mon, void *vmicp)
}
typedef struct {
- Process *c_p;
- ErtsProcLocks c_p_locks;
ErtsMonitorSuspend **smi;
Uint smi_i;
Uint smi_max;
- int sz;
+ Uint sz;
} ErtsSuspendMonitorInfoCollection;
-#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC, CP, CPL) do { \
- (SMIC).c_p = (CP); \
- (SMIC).c_p_locks = (CPL); \
+#define ERTS_INIT_SUSPEND_MONITOR_INFOS(SMIC) do { \
(SMIC).smi = NULL; \
(SMIC).smi_i = (SMIC).smi_max = 0; \
(SMIC).sz = 0; \
@@ -660,34 +656,26 @@ do { \
static void
collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp)
{
- ErtsMonitorSuspend *smon = erts_monitor_suspend(mon);
- ErtsSuspendMonitorInfoCollection *smicp = vsmicp;
- Process *suspendee = erts_pid2proc(smicp->c_p,
- smicp->c_p_locks,
- mon->other.item,
- 0);
- if (suspendee) { /* suspendee is alive */
- Sint a, p;
- if (smon->active) {
- smon->active += smon->pending;
- smon->pending = 0;
- }
+ if (mon->type == ERTS_MON_TYPE_SUSPEND) {
+ Sint count;
+ erts_aint_t mstate;
+ ErtsMonitorSuspend *msp;
+ ErtsSuspendMonitorInfoCollection *smicp;
- ASSERT((smon->active && !smon->pending)
- || (smon->pending && !smon->active));
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
+ smicp = vsmicp;
ERTS_EXTEND_SUSPEND_MONITOR_INFOS(smicp);
- smicp->smi[smicp->smi_i] = smon;
+ smicp->smi[smicp->smi_i] = msp;
smicp->sz += 2 /* cons */ + 4 /* 3-tuple */;
- a = (Sint) smon->active; /* quiet compiler warnings */
- p = (Sint) smon->pending; /* on 64-bit machines */
+ mstate = erts_atomic_read_nob(&msp->state);
- if (!IS_SSMALL(a))
- smicp->sz += BIG_UINT_HEAP_SIZE;
- if (!IS_SSMALL(p))
+ count = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
+ if (!IS_SSMALL(count))
smicp->sz += BIG_UINT_HEAP_SIZE;
+
smicp->smi_i++;
}
}
@@ -1075,8 +1063,10 @@ process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
if (c_p->common.id == pid) {
int local_only = c_p->flags & F_LOCAL_SIGS_ONLY;
- int sreds = ERTS_BIF_REDS_LEFT(c_p);
- int sres;
+ int sres, sreds, reds_left;
+
+ reds_left = ERTS_BIF_REDS_LEFT(c_p);
+ sreds = reds_left;
if (!local_only) {
erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
@@ -1085,15 +1075,19 @@ process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
}
sres = erts_proc_sig_handle_incoming(c_p, &state, &sreds, sreds, !0);
+
+ BUMP_REDS(c_p, (int) sreds);
+ reds_left -= sreds;
+
if (state & ERTS_PSFLG_EXITING) {
c_p->flags &= ~F_LOCAL_SIGS_ONLY;
goto exited;
}
- if (!sres) {
+ if (!sres | (reds_left <= 0)) {
/*
- * More signals to handle; need to yield and continue.
- * Prevent fetching of more signals by setting
- * local-sigs-only flag.
+ * More signals to handle or out of reds; need
+ * to yield and continue. Prevent fetching of
+ * more signals by setting local-sigs-only flag.
*/
c_p->flags |= F_LOCAL_SIGS_ONLY;
goto yield;
@@ -1166,6 +1160,7 @@ process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
else {
if (flags & ERTS_PI_FLAG_FORCE_SIG_SEND)
goto send_signal;
+ state = ERTS_PSFLG_RUNNING; /* fail state... */
rp = erts_try_lock_sig_free_proc(pid, locks, &state);
if (!rp)
goto undefined;
@@ -1627,56 +1622,56 @@ process_info_aux(Process *c_p,
case ERTS_PI_IX_SUSPENDING: {
ErtsSuspendMonitorInfoCollection smic;
int i;
- Eterm item;
- erts_proc_lock(rp, ERTS_PROC_LOCK_STATUS);
+ ERTS_INIT_SUSPEND_MONITOR_INFOS(smic);
- ERTS_INIT_SUSPEND_MONITOR_INFOS(smic,
- c_p,
- (c_p == rp
- ? ERTS_PROC_LOCK_MAIN
- : 0) | ERTS_PROC_LOCK_STATUS);
-
- erts_monitor_tree_foreach(rp->suspend_monitors,
- &collect_one_suspend_monitor,
- &smic);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(rp),
+ collect_one_suspend_monitor,
+ (void *) &smic);
reserve_size += smic.sz;
res = NIL;
for (i = 0; i < smic.smi_i; i++) {
- Sint a = (Sint) smic.smi[i]->active; /* quiet compiler warnings */
- Sint p = (Sint) smic.smi[i]->pending; /* on 64-bit machines... */
- Eterm active;
- Eterm pending;
+ ErtsMonitorSuspend *msp;
+ erts_aint_t mstate;
+ Sint ci;
+ Eterm ct, active, pending, item;
Uint sz = 4 + 2;
- if (!IS_SSMALL(a))
- sz += BIG_UINT_HEAP_SIZE;
- if (!IS_SSMALL(p))
- sz += BIG_UINT_HEAP_SIZE;
+
+ msp = smic.smi[i];
+ mstate = erts_atomic_read_nob(&msp->state);
+
+ ci = (Sint) (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK);
+ if (!IS_SSMALL(ci))
+ sz += BIG_UINT_HEAP_SIZE;
ERTS_PI_UNRESERVE(reserve_size, sz);
hp = erts_produce_heap(hfact, sz, reserve_size);
- if (IS_SSMALL(a))
- active = make_small(a);
- else {
- active = small_to_big(a, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- if (IS_SSMALL(p))
- pending = make_small(p);
- else {
- pending = small_to_big(p, hp);
- hp += BIG_UINT_HEAP_SIZE;
- }
- item = TUPLE3(hp, smic.smi[i]->mon.other.item, active, pending);
+ if (IS_SSMALL(ci))
+ ct = make_small(ci);
+ else {
+ ct = small_to_big(ci, hp);
+ hp += BIG_UINT_HEAP_SIZE;
+ }
+
+ if (mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE) {
+ active = ct;
+ pending = make_small(0);
+ }
+ else {
+ active = make_small(0);
+ pending = ct;
+ }
+
+ ASSERT(is_internal_pid(msp->md.origin.other.item));
+
+ item = TUPLE3(hp, msp->md.origin.other.item, active, pending);
hp += 4;
res = CONS(hp, item, res);
}
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
-
*reds += (Uint) smic.smi_i / 4;
ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);
@@ -3134,6 +3129,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) {
BIF_RET(make_small(erts_db_get_max_tabs()));
}
+ else if (ERTS_IS_ATOM_STR("ets_count",BIF_ARG_1)) {
+ BIF_RET(make_small(erts_ets_table_count()));
+ }
else if (ERTS_IS_ATOM_STR("atom_limit",BIF_ARG_1)) {
BIF_RET(make_small(erts_get_atom_limit()));
}
@@ -3637,26 +3635,46 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
-BIF_RETTYPE process_display_2(BIF_ALIST_2)
+static Eterm
+process_display(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
{
- Process *rp;
+ if (redsp)
+ *redsp = 1;
- if (BIF_ARG_2 != am_backtrace)
- BIF_ERROR(BIF_P, BADARG);
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return am_badarg;
- rp = erts_pid2proc_nropt(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCKS_ALL);
- if(!rp) {
- BIF_ERROR(BIF_P, BADARG);
- }
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_process_display_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp);
- erts_proc_unlock(rp, (BIF_P == rp
- ? ERTS_PROC_LOCKS_ALL_MINOR
- : ERTS_PROC_LOCKS_ALL));
- BIF_RET(am_true);
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_stack_dump(ERTS_PRINT_STDERR, NULL, c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+
+ return am_true;
+}
+
+
+BIF_RETTYPE erts_internal_process_display_2(BIF_ALIST_2)
+{
+ Eterm res;
+
+ if (BIF_ARG_2 != am_backtrace)
+ BIF_RET(am_badarg);
+
+ if (BIF_P->common.id == BIF_ARG_1) {
+ res = process_display(BIF_P, NULL, NULL, NULL);
+ BIF_RET(res);
+ }
+
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_RET(am_badarg);
+
+ res = erts_proc_sig_send_rpc_request(BIF_P, BIF_ARG_1,
+ !0,
+ process_display,
+ NULL);
+ if (is_non_value(res))
+ BIF_RET(am_badarg);
+
+ BIF_RET(res);
}
/* this is a general call which return some possibly useful information */
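erlang:process_display/2 likewise keeps its user-level shape while the printout is now produced by the target process itself via a signal; usage sketch:

    %% Print a backtrace of Pid to the emulator's standard error.
    true = erlang:process_display(Pid, backtrace).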
@@ -4597,27 +4615,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(am_true);
}
}
- else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) {
- int old_use_opt, use_opt;
- switch (BIF_ARG_2) {
- case am_true:
- use_opt = 1;
- break;
- case am_false:
- use_opt = 0;
- break;
- default:
- BIF_ERROR(BIF_P, BADARG);
- }
-
- erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
- erts_thr_progress_block();
- old_use_opt = !erts_disable_proc_not_running_opt;
- erts_disable_proc_not_running_opt = !use_opt;
- erts_thr_progress_unblock();
- erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
- BIF_RET(old_use_opt ? am_true : am_false);
- }
else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
int flag = ERTS_DEBUG_WAIT_COMPLETED_DEALLOCATIONS;
@@ -4688,7 +4685,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
refbin));
}
}
-
+ else if (ERTS_IS_ATOM_STR("ets_force_trap", BIF_ARG_1)) {
+#ifdef ETS_DBG_FORCE_TRAP
+ erts_ets_dbg_force_trap = (BIF_ARG_2 == am_true) ? 1 : 0;
+ BIF_RET(am_ok);
+#else
+ BIF_RET(am_notsup);
+#endif
+ }
}
BIF_ERROR(BIF_P, BADARG);
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 1953f79d79..9861483bf0 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -643,12 +643,12 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
SysTimespec tp;
int i;
- if (sys_get_proc_cputime(start, tp) < 0)
+ if (sys_get_cputime(start, tp) < 0)
goto error;
start = ((SysCpuTime)tp.tv_sec * 1000000000LL) +
(SysCpuTime)tp.tv_nsec;
for (i = 0; i < 100; i++)
- sys_get_proc_cputime(stop, tp);
+ sys_get_cputime(stop, tp);
stop = ((SysCpuTime)tp.tv_sec * 1000000000LL) +
(SysCpuTime)tp.tv_nsec;
if (start == 0) goto error;
@@ -809,10 +809,129 @@ Eterm trace_info_2(BIF_ALIST_2)
BIF_ERROR(p, BADARG);
}
erts_release_code_write_permission();
+
+ if (is_internal_ref(res))
+ BIF_TRAP1(erts_await_result, BIF_P, res);
+
BIF_RET(res);
}
static Eterm
+build_trace_flags_term(Eterm **hpp, Uint *szp, Uint trace_flags)
+{
+
+#define ERTS_TFLAG__(F, FN) \
+ if (trace_flags & F) { \
+ if (szp) \
+ sz += 2; \
+ if (hp) { \
+ res = CONS(hp, FN, res); \
+ hp += 2; \
+ } \
+ }
+
+ Eterm res;
+ Uint sz = 0;
+ Eterm *hp;
+
+ if (hpp) {
+ hp = *hpp;
+ res = NIL;
+ }
+ else {
+ hp = NULL;
+ res = THE_NON_VALUE;
+ }
+
+ ERTS_TFLAG__(F_NOW_TS, am_timestamp);
+ ERTS_TFLAG__(F_STRICT_MON_TS, am_strict_monotonic_timestamp);
+ ERTS_TFLAG__(F_MON_TS, am_monotonic_timestamp);
+ ERTS_TFLAG__(F_TRACE_SEND, am_send);
+ ERTS_TFLAG__(F_TRACE_RECEIVE, am_receive);
+ ERTS_TFLAG__(F_TRACE_SOS, am_set_on_spawn);
+ ERTS_TFLAG__(F_TRACE_CALLS, am_call);
+ ERTS_TFLAG__(F_TRACE_PROCS, am_procs);
+ ERTS_TFLAG__(F_TRACE_SOS1, am_set_on_first_spawn);
+ ERTS_TFLAG__(F_TRACE_SOL, am_set_on_link);
+ ERTS_TFLAG__(F_TRACE_SOL1, am_set_on_first_link);
+ ERTS_TFLAG__(F_TRACE_SCHED, am_running);
+ ERTS_TFLAG__(F_TRACE_SCHED_EXIT, am_exiting);
+ ERTS_TFLAG__(F_TRACE_GC, am_garbage_collection);
+ ERTS_TFLAG__(F_TRACE_ARITY_ONLY, am_arity);
+ ERTS_TFLAG__(F_TRACE_RETURN_TO, am_return_to);
+ ERTS_TFLAG__(F_TRACE_SILENT, am_silent);
+ ERTS_TFLAG__(F_TRACE_SCHED_NO, am_scheduler_id);
+ ERTS_TFLAG__(F_TRACE_PORTS, am_ports);
+ ERTS_TFLAG__(F_TRACE_SCHED_PORTS, am_running_ports);
+ ERTS_TFLAG__(F_TRACE_SCHED_PROCS, am_running_procs);
+
+ if (szp)
+ *szp += sz;
+
+ if (hpp)
+ *hpp = hp;
+
+ return res;
+
+#undef ERTS_TFLAG__
+}
+
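
build_trace_flags_term() above follows the usual ERTS two-pass convention: called with only a size pointer it counts the heap words needed, and called with a heap pointer it emits the list into space the caller has already allocated. A rough stand-alone illustration of that idiom, with hypothetical names and a plain array in place of a process heap:

#include <stdio.h>
#include <stdlib.h>

#define F_A 1
#define F_B 2
#define F_C 4

/* Pass szp to measure, pass out to emit; either pointer may be NULL. */
static size_t build_flag_names(const char **out, size_t *szp, unsigned flags)
{
    size_t n = 0;
#define EMIT(F, NAME) \
    if (flags & (F)) { if (out) out[n] = (NAME); n++; }
    EMIT(F_A, "a");
    EMIT(F_B, "b");
    EMIT(F_C, "c");
#undef EMIT
    if (szp) *szp = n;
    return n;
}

int main(void)
{
    unsigned flags = F_A | F_C;
    size_t sz;
    build_flag_names(NULL, &sz, flags);          /* pass 1: size only */
    const char **names = malloc(sz * sizeof *names);
    build_flag_names(names, NULL, flags);        /* pass 2: build */
    for (size_t i = 0; i < sz; i++)
        printf("%s\n", names[i]);
    free(names);
    return 0;
}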
+static Eterm
+trace_info_tracee(Process *c_p, void *arg, int *redsp, ErlHeapFragment **bpp)
+{
+ ErlHeapFragment *bp;
+ Eterm *hp, res, key;
+ Uint sz;
+
+ *redsp = 1;
+
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return am_undefined;
+
+ key = (Eterm) arg;
+ sz = 3;
+
+ if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(c_p)))
+ erts_is_tracer_proc_enabled(c_p, ERTS_PROC_LOCK_MAIN,
+ &c_p->common);
+
+ switch (key) {
+ case am_tracer:
+
+ erts_build_tracer_to_term(NULL, NULL, &sz, ERTS_TRACER(c_p));
+ bp = new_message_buffer(sz);
+ hp = bp->mem;
+ res = erts_build_tracer_to_term(&hp, &bp->off_heap,
+ NULL, ERTS_TRACER(c_p));
+ if (res == am_false)
+ res = NIL;
+ break;
+
+ case am_flags:
+
+ build_trace_flags_term(NULL, &sz, ERTS_TRACE_FLAGS(c_p));
+ bp = new_message_buffer(sz);
+ hp = bp->mem;
+ res = build_trace_flags_term(&hp, NULL, ERTS_TRACE_FLAGS(c_p));
+ break;
+
+ default:
+
+ ERTS_INTERNAL_ERROR("Key not supported");
+ res = NIL;
+ bp = NULL;
+ hp = NULL;
+ break;
+ }
+
+ *redsp += 2;
+
+ res = TUPLE2(hp, key, res);
+ *bpp = bp;
+ return res;
+}
+
+static Eterm
trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
{
Eterm tracer;
@@ -846,24 +965,19 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
erts_port_release(tracee);
} else if (is_internal_pid(pid_spec)) {
- Process *tracee = erts_pid2proc_not_running(p, ERTS_PROC_LOCK_MAIN,
- pid_spec, ERTS_PROC_LOCK_MAIN);
-
- if (tracee == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, pid_spec, key);
+ Eterm ref;
- if (!tracee)
- return am_undefined;
+ if (key != am_flags && key != am_tracer)
+ goto error;
- if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
- erts_is_tracer_proc_enabled(tracee, ERTS_PROC_LOCK_MAIN,
- &tracee->common);
+ ref = erts_proc_sig_send_rpc_request(p, pid_spec, !0,
+ trace_info_tracee,
+ (void *) key);
- tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
- trace_flags = ERTS_TRACE_FLAGS(tracee);
+ if (is_non_value(ref))
+ return am_undefined;
- if (tracee != p)
- erts_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN);
+ return ref;
} else if (is_external_pid(pid_spec)
&& external_pid_dist_entry(pid_spec) == erts_this_dist_entry) {
return am_undefined;
@@ -873,48 +987,16 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
}
if (key == am_flags) {
- int num_flags = 21; /* MAXIMUM number of flags. */
- Uint needed = 3+2*num_flags;
- Eterm flag_list = NIL;
- Eterm* limit;
+ Eterm flag_list;
+ Uint sz = 3;
+ Eterm *hp;
-#define FLAG0(flag_mask,flag) \
- if (trace_flags & (flag_mask)) { flag_list = CONS(hp, flag, flag_list); hp += 2; } else {}
+ build_trace_flags_term(NULL, &sz, trace_flags);
+
+ hp = HAlloc(p, sz);
+
+ flag_list = build_trace_flags_term(&hp, NULL, trace_flags);
-#if defined(DEBUG)
- /*
- * Check num_flags if this assertion fires.
- */
-# define FLAG ASSERT(num_flags-- > 0); FLAG0
-#else
-# define FLAG FLAG0
-#endif
- hp = HAlloc(p, needed);
- limit = hp+needed;
- FLAG(F_NOW_TS, am_timestamp);
- FLAG(F_STRICT_MON_TS, am_strict_monotonic_timestamp);
- FLAG(F_MON_TS, am_monotonic_timestamp);
- FLAG(F_TRACE_SEND, am_send);
- FLAG(F_TRACE_RECEIVE, am_receive);
- FLAG(F_TRACE_SOS, am_set_on_spawn);
- FLAG(F_TRACE_CALLS, am_call);
- FLAG(F_TRACE_PROCS, am_procs);
- FLAG(F_TRACE_SOS1, am_set_on_first_spawn);
- FLAG(F_TRACE_SOL, am_set_on_link);
- FLAG(F_TRACE_SOL1, am_set_on_first_link);
- FLAG(F_TRACE_SCHED, am_running);
- FLAG(F_TRACE_SCHED_EXIT, am_exiting);
- FLAG(F_TRACE_GC, am_garbage_collection);
- FLAG(F_TRACE_ARITY_ONLY, am_arity);
- FLAG(F_TRACE_RETURN_TO, am_return_to);
- FLAG(F_TRACE_SILENT, am_silent);
- FLAG(F_TRACE_SCHED_NO, am_scheduler_id);
- FLAG(F_TRACE_PORTS, am_ports);
- FLAG(F_TRACE_SCHED_PORTS, am_running_ports);
- FLAG(F_TRACE_SCHED_PROCS, am_running_procs);
-#undef FLAG0
-#undef FLAG
- HRelease(p,limit,hp+3);
return TUPLE2(hp, key, flag_list);
} else if (key == am_tracer) {
if (tracer == am_false)
diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h
index 46653a8580..7dfd0c273a 100644
--- a/erts/emulator/beam/erl_binary.h
+++ b/erts/emulator/beam/erl_binary.h
@@ -146,9 +146,7 @@ typedef union {
/* A "magic" binary flag */
#define BIN_FLAG_MAGIC 1
-#define BIN_FLAG_USR1 2 /* Reserved for use by different modules too mark */
-#define BIN_FLAG_USR2 4 /* certain binaries as special (used by ets) */
-#define BIN_FLAG_DRV 8
+#define BIN_FLAG_DRV 2
#endif /* ERL_BINARY_H__TYPES__ */
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index ca2ebb7c27..5bae1730e4 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -50,6 +50,34 @@ erts_atomic_t erts_ets_misc_mem_size;
** Utility macros
*/
+#define DB_BIF_GET_TABLE(TB, WHAT, KIND, BIF_IX) \
+ DB_GET_TABLE(TB, BIF_ARG_1, WHAT, KIND, BIF_IX, NULL, BIF_P)
+
+#define DB_TRAP_GET_TABLE(TB, TID, WHAT, KIND, BIF_EXP) \
+ DB_GET_TABLE(TB, TID, WHAT, KIND, 0, BIF_EXP, BIF_P)
+
+#define DB_GET_TABLE(TB, TID, WHAT, KIND, BIF_IX, BIF_EXP, PROC) \
+do { \
+ Uint freason__; \
+ if (!(TB = db_get_table(PROC, TID, WHAT, KIND, &freason__))) { \
+ return db_bif_fail(PROC, freason__, BIF_IX, BIF_EXP); \
+ } \
+}while(0)
+
+static BIF_RETTYPE db_bif_fail(Process* p, Uint freason,
+ Uint bif_ix, Export* bif_exp)
+{
+ if (freason == TRAP) {
+ if (!bif_exp)
+ bif_exp = bif_export[bif_ix];
+ p->arity = bif_exp->info.mfa.arity;
+ p->i = (BeamInstr*) bif_exp->addressv[erts_active_code_ix()];
+ }
+ p->freason = freason;
+ return THE_NON_VALUE;
+}
+
+
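
The macros and db_bif_fail() above change the table-lookup contract: instead of a bare NULL, db_get_table() now reports why it failed through an out-parameter, and one helper translates that reason into either a badarg error or a yield that re-runs the same BIF. A simplified, self-contained sketch of that out-parameter/fail-helper shape follows; the names are hypothetical and no real trapping or locking is modelled.

#include <stdio.h>

enum freason { FR_NONE = 0, FR_BADARG, FR_TRAP };

struct table { int busy; };

/* Lookup that reports *why* it failed through an out-parameter. */
static struct table *get_table(struct table *tb, enum freason *freason_p)
{
    if (!tb) {                     /* no such table */
        *freason_p = FR_BADARG;
        return NULL;
    }
    if (tb->busy) {                /* long-running delete in progress */
        *freason_p = FR_TRAP;      /* caller should yield and retry */
        return NULL;
    }
    return tb;
}

/* One place that turns a failure reason into "error" or "retry later". */
static int fail(enum freason r)
{
    if (r == FR_TRAP) {
        printf("yield, re-run the same call later\n");
        return -1;
    }
    printf("badarg\n");
    return -2;
}

static int some_table_op(struct table *tb)
{
    enum freason reason;
    struct table *t = get_table(tb, &reason);
    if (!t)
        return fail(reason);
    printf("table acquired, perform the operation\n");
    return 0;
}

int main(void)
{
    struct table busy = { 1 }, idle = { 0 };
    some_table_op(NULL);     /* badarg */
    some_table_op(&busy);    /* would trap */
    some_table_op(&idle);    /* proceeds */
    return 0;
}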
/* Get a key from any table structure and a tagged object */
#define TERM_GETKEY(tb, obj) db_getkey((tb)->common.keypos, (obj))
@@ -294,10 +322,10 @@ erts_db_make_tid(Process *c_p, DbTableCommon *tb)
/*
** The meta hash table of all NAMED ets tables
*/
-# define META_NAME_TAB_LOCK_CNT 16
+# define META_NAME_TAB_LOCK_CNT 256
union {
erts_rwmtx_t lck;
- byte _cache_line_alignment[64];
+ byte align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_rwmtx_t))];
}meta_name_tab_rwlocks[META_NAME_TAB_LOCK_CNT];
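
The lock array above is now padded to ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_rwmtx_t)) instead of a hard-coded 64 bytes, so each rwlock occupies its own cache line(s) whatever the lock's actual size. An illustrative sketch of the padding idiom, with a hypothetical lock type and line size:

#include <stdio.h>

#define CACHE_LINE 64
#define ALIGN_UP(SZ) (((SZ) + CACHE_LINE - 1) / CACHE_LINE * CACHE_LINE)

typedef struct { long word; } my_lock_t;   /* stand-in for erts_rwmtx_t */

/* Each lock padded up to a cache-line multiple: neighbours never share a line. */
union padded_lock {
    my_lock_t lck;
    unsigned char align[ALIGN_UP(sizeof(my_lock_t))];
};

static union padded_lock locks[16];

int main(void)
{
    printf("sizeof(my_lock_t) = %zu, padded to %zu bytes\n",
           sizeof(my_lock_t), sizeof(union padded_lock));
    printf("stride between locks: %zu bytes\n",
           (size_t)((char *)&locks[1] - (char *)&locks[0]));
    return 0;
}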
static struct meta_name_tab_entry {
union {
@@ -326,8 +354,7 @@ struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
typedef enum {
LCK_READ=1, /* read only access */
LCK_WRITE=2, /* exclusive table write access */
- LCK_WRITE_REC=3, /* record write access */
- LCK_NONE=4
+ LCK_WRITE_REC=3 /* record write access */
} db_lock_kind_t;
extern DbTableMethod db_hash;
@@ -337,9 +364,6 @@ int user_requested_db_max_tabs;
int erts_ets_realloc_always_moves;
int erts_ets_always_compress;
static int db_max_tabs;
-static Eterm ms_delete_all;
-static Eterm ms_delete_all_buff[8]; /* To compare with for deletion
- of all objects */
/*
** Forward decls, static functions
@@ -351,18 +375,19 @@ static void set_heir(Process* me, DbTable* tb, Eterm heir, UWord heir_data);
static void free_heir_data(DbTable*);
static SWord free_fixations_locked(Process* p, DbTable *tb);
+static void delete_all_objects_continue(Process* p, DbTable* tb);
static SWord free_table_continue(Process *p, DbTable *tb, SWord reds);
static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb);
-static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1);
+static BIF_RETTYPE ets_select_delete_trap_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1);
static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1);
static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1);
static Eterm table_info(Process* p, DbTable* tb, Eterm What);
-static BIF_RETTYPE ets_select1(Process* p, Eterm arg1);
-static BIF_RETTYPE ets_select2(Process* p, Eterm arg1, Eterm arg2);
-static BIF_RETTYPE ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3);
+static BIF_RETTYPE ets_select1(Process* p, int bif_ix, Eterm arg1);
+static BIF_RETTYPE ets_select2(Process* p, DbTable*, Eterm tid, Eterm ms);
+static BIF_RETTYPE ets_select3(Process* p, DbTable*, Eterm tid, Eterm ms, Sint chunk_size);
/*
@@ -425,7 +450,7 @@ save_sched_table(Process *c_p, DbTable *tb)
DbTable *first;
ASSERT(esdp);
- esdp->ets_tables.count++;
+ erts_atomic_inc_nob(&esdp->ets_tables.count);
erts_refc_inc(&tb->common.refc, 1);
first = esdp->ets_tables.clist;
@@ -449,8 +474,8 @@ remove_sched_table(ErtsSchedulerData *esdp, DbTable *tb)
ASSERT(erts_get_ref_numbers_thr_id(ERTS_MAGIC_BIN_REFN(tb->common.btid))
== (Uint32) esdp->no);
- ASSERT(esdp->ets_tables.count > 0);
- esdp->ets_tables.count--;
+ ASSERT(erts_atomic_read_nob(&esdp->ets_tables.count) > 0);
+ erts_atomic_dec_nob(&esdp->ets_tables.count);
eaydp = ERTS_SCHED_AUX_YIELD_DATA(esdp, ets_all);
if (eaydp->ongoing) {
@@ -636,15 +661,42 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
}
}
+static ERTS_INLINE int db_is_exclusive(DbTable* tb, db_lock_kind_t kind)
+{
+ return kind != LCK_READ && tb->common.is_thread_safe;
+}
+
+static DbTable* handle_lacking_permission(Process* p, DbTable* tb,
+ db_lock_kind_t kind,
+ Uint* freason_p)
+{
+ if (tb->common.status & DB_BUSY) {
+ if (!db_is_exclusive(tb, kind)) {
+ db_unlock(tb, kind);
+ db_lock(tb, LCK_WRITE);
+ }
+ delete_all_objects_continue(p, tb);
+ db_unlock(tb, LCK_WRITE);
+ tb = NULL;
+ *freason_p = TRAP;
+ }
+ else if (p->common.id != tb->common.owner) {
+ db_unlock(tb, kind);
+ tb = NULL;
+ *freason_p = BADARG;
+ }
+ return tb;
+}
+
static ERTS_INLINE
DbTable* db_get_table_aux(Process *p,
Eterm id,
int what,
db_lock_kind_t kind,
- int meta_already_locked)
+ int meta_already_locked,
+ Uint* freason_p)
{
DbTable *tb;
- erts_rwmtx_t *mtl = NULL;
/*
* IMPORTANT: Only scheduler threads are allowed
@@ -654,13 +706,13 @@ DbTable* db_get_table_aux(Process *p,
ASSERT(erts_get_scheduler_data());
if (is_atom(id)) {
+ erts_rwmtx_t *mtl;
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
if (!meta_already_locked)
erts_rwmtx_rlock(mtl);
else{
ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
|| erts_lc_rwmtx_is_rwlocked(mtl));
- mtl = NULL;
}
tb = NULL;
if (bucket->pu.tb != NULL) {
@@ -679,20 +731,29 @@ DbTable* db_get_table_aux(Process *p,
}
}
}
+ if (!meta_already_locked)
+ erts_rwmtx_runlock(mtl);
}
else
tb = tid2tab(id);
if (tb) {
db_lock(tb, kind);
- if ((tb->common.status & what) == 0
- && p->common.id != tb->common.owner) {
- db_unlock(tb, kind);
- tb = NULL;
- }
+#ifdef ETS_DBG_FORCE_TRAP
+ if (erts_atomic_read_nob(&tb->common.dbg_force_trap) &&
+ erts_atomic_add_read_nob(&tb->common.dbg_force_trap, 2) & 2) {
+ db_unlock(tb, kind);
+ tb = NULL;
+ *freason_p = TRAP;
+ }
+ else
+#endif
+ if (ERTS_UNLIKELY(!(tb->common.status & what)))
+ tb = handle_lacking_permission(p, tb, kind, freason_p);
}
- if (mtl)
- erts_rwmtx_runlock(mtl);
+ else
+ *freason_p = BADARG;
+
return tb;
}
@@ -700,9 +761,10 @@ static ERTS_INLINE
DbTable* db_get_table(Process *p,
Eterm id,
int what,
- db_lock_kind_t kind)
+ db_lock_kind_t kind,
+ Uint* freason_p)
{
- return db_get_table_aux(p, id, what, kind, 0);
+ return db_get_table_aux(p, id, what, kind, 0, freason_p);
}
static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock)
@@ -868,9 +930,7 @@ BIF_RETTYPE ets_safe_fixtable_2(BIF_ALIST_2)
#endif
kind = (BIF_ARG_2 == am_true) ? LCK_READ : LCK_WRITE_REC;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, kind)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, kind, BIF_ets_safe_fixtable_2);
if (BIF_ARG_2 == am_true) {
fix_table_locked(BIF_P, tb);
@@ -900,11 +960,7 @@ BIF_RETTYPE ets_first_1(BIF_ALIST_1)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_first_1);
cret = tb->common.meth->db_first(BIF_P, tb, &ret);
@@ -927,11 +983,7 @@ BIF_RETTYPE ets_next_2(BIF_ALIST_2)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_next_2);
cret = tb->common.meth->db_next(BIF_P, tb, BIF_ARG_2, &ret);
@@ -954,11 +1006,7 @@ BIF_RETTYPE ets_last_1(BIF_ALIST_1)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_last_1);
cret = tb->common.meth->db_last(BIF_P, tb, &ret);
@@ -981,11 +1029,7 @@ BIF_RETTYPE ets_prev_2(BIF_ALIST_2)
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ);
-
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_prev_2);
cret = tb->common.meth->db_prev(BIF_P,tb,BIF_ARG_2,&ret);
@@ -1003,21 +1047,15 @@ BIF_RETTYPE ets_prev_2(BIF_ALIST_2)
BIF_RETTYPE ets_take_2(BIF_ALIST_2)
{
DbTable* tb;
-#ifdef DEBUG
int cret;
-#endif
Eterm ret;
CHECK_TABLES();
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC);
- if (!tb) {
- BIF_ERROR(BIF_P, BADARG);
- }
-#ifdef DEBUG
- cret =
-#endif
- tb->common.meth->db_take(BIF_P, tb, BIF_ARG_2, &ret);
- ASSERT(cret == DB_ERROR_NONE);
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_take_2);
+
+ cret = tb->common.meth->db_take(BIF_P, tb, BIF_ARG_2, &ret);
+
+ ASSERT(cret == DB_ERROR_NONE); (void)cret;
db_unlock(tb, LCK_WRITE_REC);
BIF_RET(ret);
}
@@ -1035,9 +1073,8 @@ BIF_RETTYPE ets_update_element_3(BIF_ALIST_3)
DeclareTmpHeap(cell,2,BIF_P);
DbUpdateHandle handle;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_element_3);
+
UseTmpHeap(2,BIF_P);
if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
goto bail_out;
@@ -1108,9 +1145,9 @@ bail_out:
}
static BIF_RETTYPE
-do_update_counter(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Eterm arg4)
+do_update_counter(Process *p, DbTable* tb,
+ Eterm arg2, Eterm arg3, Eterm arg4)
{
- DbTable* tb;
int cret = DB_ERROR_BADITEM;
Eterm upop_list;
int list_size;
@@ -1126,10 +1163,6 @@ do_update_counter(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Eterm arg4)
Eterm* hstart;
Eterm* hend;
- if ((tb = db_get_table(p, arg1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
-
UseTmpHeap(5, p);
if (!(tb->common.status & (DB_SET | DB_ORDERED_SET))) {
@@ -1303,7 +1336,11 @@ bail_out:
*/
BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
{
- return do_update_counter(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, THE_NON_VALUE);
+ DbTable* tb;
+
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_counter_3);
+
+ return do_update_counter(BIF_P, tb, BIF_ARG_2, BIF_ARG_3, THE_NON_VALUE);
}
/*
@@ -1315,10 +1352,14 @@ BIF_RETTYPE ets_update_counter_3(BIF_ALIST_3)
*/
BIF_RETTYPE ets_update_counter_4(BIF_ALIST_4)
{
+ DbTable* tb;
+
if (is_not_tuple(BIF_ARG_4)) {
BIF_ERROR(BIF_P, BADARG);
}
- return do_update_counter(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_update_counter_4);
+
+ return do_update_counter(BIF_P, tb, BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
}
@@ -1339,9 +1380,8 @@ BIF_RETTYPE ets_insert_2(BIF_ALIST_2)
kind = ((is_list(BIF_ARG_2) && CDR(list_val(BIF_ARG_2)) != NIL)
? LCK_WRITE : LCK_WRITE_REC);
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, kind, BIF_ets_insert_2);
+
if (BIF_ARG_2 == NIL) {
db_unlock(tb, kind);
BIF_RET(am_true);
@@ -1407,11 +1447,9 @@ BIF_RETTYPE ets_insert_new_2(BIF_ALIST_2)
/* More than one object, use LCK_WRITE to keep atomicity */
kind = LCK_WRITE;
- tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind);
- if (tb == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
- meth = tb->common.meth;
+ DB_BIF_GET_TABLE(tb, DB_WRITE, kind, BIF_ets_insert_new_2);
+
+ meth = tb->common.meth;
for (lst = BIF_ARG_2; is_list(lst); lst = CDR(list_val(lst))) {
if (is_not_tuple(CAR(list_val(lst)))
|| (arityval(*tuple_val(CAR(list_val(lst))))
@@ -1446,9 +1484,8 @@ BIF_RETTYPE ets_insert_new_2(BIF_ALIST_2)
/* Only one object (or NIL)
*/
kind = LCK_WRITE_REC;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, kind)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, kind, BIF_ets_insert_new_2);
+
if (BIF_ARG_2 == NIL) {
db_unlock(tb, kind);
BIF_RET(am_true);
@@ -1487,6 +1524,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
Eterm ret;
Eterm old_name;
erts_rwmtx_t *lck1, *lck2;
+ Uint freason;
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1531,9 +1569,9 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
if (lck2)
erts_rwmtx_rwlock(lck2);
- tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1);
+ tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1, &freason);
if (!tb)
- goto badarg;
+ goto fail;
if (is_table_named(tb)) {
if (!insert_named_tab(BIF_ARG_2, tb, 1))
@@ -1553,13 +1591,18 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2)
if (lck2)
erts_rwmtx_rwunlock(lck2);
BIF_RET(ret);
- badarg:
+
+badarg:
+ freason = BADARG;
+
+fail:
if (tb)
db_unlock(tb, LCK_WRITE);
erts_rwmtx_rwunlock(lck1);
if (lck2)
erts_rwmtx_rwunlock(lck2);
- BIF_ERROR(BIF_P, BADARG);
+
+ return db_bif_fail(BIF_P, freason, BIF_ets_rename_2, NULL);
}
@@ -1580,9 +1623,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
Sint keypos;
int is_named, is_compressed;
int is_fine_locked, frequent_read;
-#ifdef DEBUG
int cret;
-#endif
DbTableMethod* meth;
if (is_not_atom(BIF_ARG_1)) {
@@ -1708,7 +1749,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.meth = meth;
tb->common.the_name = BIF_ARG_1;
tb->common.status = status;
- tb->common.type = status & ERTS_ETS_TABLE_TYPES;
+ tb->common.type = status;
/* Note, 'type' is *read only* from now on... */
erts_refc_init(&tb->common.fix_count, 0);
db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ));
@@ -1720,12 +1761,12 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.fixing_procs = NULL;
tb->common.compress = is_compressed;
-
-#ifdef DEBUG
- cret =
+#ifdef ETS_DBG_FORCE_TRAP
+ erts_atomic_init_nob(&tb->common.dbg_force_trap, erts_ets_dbg_force_trap);
#endif
- meth->db_create(BIF_P, tb);
- ASSERT(cret == DB_ERROR_NONE);
+
+ cret = meth->db_create(BIF_P, tb);
+ ASSERT(cret == DB_ERROR_NONE); (void)cret;
make_btid(tb);
@@ -1735,20 +1776,21 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
ret = make_tid(BIF_P, tb);
save_sched_table(BIF_P, tb);
+ save_owned_table(BIF_P, tb);
if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
tid_clear(BIF_P, tb);
+ delete_owned_table(BIF_P, tb);
db_lock(tb,LCK_WRITE);
free_heir_data(tb);
- tb->common.meth->db_free_table(tb);
+ tb->common.meth->db_free_empty_table(tb);
db_unlock(tb,LCK_WRITE);
table_dec_refc(tb, 0);
BIF_ERROR(BIF_P, BADARG);
}
BIF_P->flags |= F_USING_DB; /* So we can remove tb if p dies */
- save_owned_table(BIF_P, tb);
#ifdef HARDDEBUG
erts_fprintf(stderr,
@@ -1767,13 +1809,19 @@ BIF_RETTYPE ets_whereis_1(BIF_ALIST_1)
{
DbTable* tb;
Eterm res;
+ Uint freason;
if (is_not_atom(BIF_ARG_1)) {
BIF_ERROR(BIF_P, BADARG);
}
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- BIF_RET(am_undefined);
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ, &freason)) == NULL) {
+ if (freason == BADARG)
+ BIF_RET(am_undefined);
+ else {
+            /* ToDo: Could we avoid this? */

+ return db_bif_fail(BIF_P, freason, BIF_ets_whereis_1, NULL);
+ }
}
res = make_tid(BIF_P, tb);
@@ -1793,9 +1841,7 @@ BIF_RETTYPE ets_lookup_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_lookup_2);
cret = tb->common.meth->db_get(BIF_P, tb, BIF_ARG_2, &ret);
@@ -1823,9 +1869,7 @@ BIF_RETTYPE ets_member_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_member_2);
cret = tb->common.meth->db_member(tb, BIF_ARG_2, &ret);
@@ -1856,9 +1900,7 @@ BIF_RETTYPE ets_lookup_element_3(BIF_ALIST_3)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_lookup_element_3);
if (is_not_small(BIF_ARG_3) || ((index = signed_val(BIF_ARG_3)) < 1)) {
db_unlock(tb, LCK_READ);
@@ -1896,9 +1938,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE, BIF_ets_delete_1);
/*
* Clear all access bits to prevent any ets operation to access the
@@ -1941,7 +1981,8 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1)
reds -= free_fixations_locked(BIF_P, tb);
db_unlock(tb, LCK_WRITE);
- if (free_table_continue(BIF_P, tb, reds) < 0) {
+ reds = free_table_continue(BIF_P, tb, reds);
+ if (reds < 0) {
/*
* Package the DbTable* pointer into a bignum so that it can be safely
* passed through a trap. We used to pass the DbTable* pointer directly
@@ -1970,6 +2011,7 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
Eterm to_pid = BIF_ARG_2;
Eterm from_pid;
DbTable* tb = NULL;
+ Uint freason;
if (!is_internal_pid(to_pid)) {
goto badarg;
@@ -1979,10 +2021,11 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
goto badarg;
}
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
- || tb->common.owner != BIF_P->common.id) {
- goto badarg;
- }
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, &freason)) == NULL)
+ goto fail;
+ if (tb->common.owner != BIF_P->common.id)
+ goto badarg;
+
from_pid = tb->common.owner;
if (to_pid == from_pid) {
goto badarg; /* or should we be idempotent? return false maybe */
@@ -2001,9 +2044,12 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3)
BIF_RET(am_true);
badarg:
+ freason = BADARG;
+fail:
if (to_proc != NULL && to_proc != BIF_P) erts_proc_unlock(to_proc, to_locks);
if (tb != NULL) db_unlock(tb, LCK_WRITE);
- BIF_ERROR(BIF_P, BADARG);
+
+ return db_bif_fail(BIF_P, freason, BIF_ets_give_away_3, NULL);
}
BIF_RETTYPE ets_setopts_2(BIF_ALIST_2)
@@ -2054,11 +2100,13 @@ BIF_RETTYPE ets_setopts_2(BIF_ALIST_2)
}
}
- if (tail != NIL
- || (tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL
- || tb->common.owner != BIF_P->common.id) {
+ if (tail != NIL)
+ goto badarg;
+
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE, BIF_ets_setopts_2);
+
+ if (tb->common.owner != BIF_P->common.id)
goto badarg;
- }
if (heir_data != THE_NON_VALUE) {
free_heir_data(tb);
@@ -2082,23 +2130,84 @@ badarg:
}
/*
-** BIF to erase a whole table and release all memory it holds
-*/
-BIF_RETTYPE ets_delete_all_objects_1(BIF_ALIST_1)
+ * Common for delete_all_objects and select_delete(DeleteAll).
+ */
+BIF_RETTYPE ets_internal_delete_all_2(BIF_ALIST_2)
{
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(BIF_P);
+ SWord reds = initial_reds;
+ Eterm nitems;
DbTable* tb;
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE, BIF_ets_internal_delete_all_2);
+
+ if (BIF_ARG_2 == am_undefined) {
+ nitems = erts_make_integer(erts_atomic_read_nob(&tb->common.nitems),
+ BIF_P);
- tb->common.meth->db_delete_all_objects(BIF_P, tb);
+ reds = tb->common.meth->db_delete_all_objects(BIF_P, tb, reds);
+
+ ASSERT(!(tb->common.status & DB_BUSY));
+
+ if (reds < 0) {
+ /*
+             * Oh boy, we need to trap AND we need to be atomic.
+             * Solved by cooperative trapping, where every process trying to
+             * access this table (including this process) will "fail" to look up
+ * the table and instead pitch in deleting objects
+ * (in delete_all_objects_continue) and then trap to self.
+ */
+ ASSERT((tb->common.status & (DB_PRIVATE|DB_PROTECTED|DB_PUBLIC))
+ ==
+ (tb->common.type & (DB_PRIVATE|DB_PROTECTED|DB_PUBLIC)));
+ tb->common.status &= ~(DB_PRIVATE|DB_PROTECTED|DB_PUBLIC);
+ tb->common.status |= DB_BUSY;
+ db_unlock(tb, LCK_WRITE);
+ BUMP_ALL_REDS(BIF_P);
+ BIF_TRAP2(bif_export[BIF_ets_internal_delete_all_2], BIF_P,
+ BIF_ARG_1, nitems);
+ }
+ else {
+ /* Done, no trapping needed */
+ BUMP_REDS(BIF_P, (initial_reds - reds));
+ }
+
+ }
+ else {
+ /*
+ * The table lookup succeeded and second argument is nitems
+ * and not 'undefined', which means we have trapped at least once
+ * and are now done.
+ */
+ nitems = BIF_ARG_2;
+ }
db_unlock(tb, LCK_WRITE);
+ BIF_RET(nitems);
+}
- BIF_RET(am_true);
+static void delete_all_objects_continue(Process* p, DbTable* tb)
+{
+ SWord initial_reds = ERTS_BIF_REDS_LEFT(p);
+ SWord reds = initial_reds;
+
+ ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
+
+ if ((tb->common.status & (DB_DELETE|DB_BUSY)) != DB_BUSY)
+ return;
+
+ reds = tb->common.meth->db_delete_all_objects(p, tb, reds);
+
+ if (reds < 0) {
+ BUMP_ALL_REDS(p);
+ }
+ else {
+ tb->common.status |= tb->common.type & (DB_PRIVATE|DB_PROTECTED|DB_PUBLIC);
+ tb->common.status &= ~DB_BUSY;
+ BUMP_REDS(p, (initial_reds - reds));
+ }
}
/*
@@ -2114,9 +2223,7 @@ BIF_RETTYPE ets_delete_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_delete_2);
cret = tb->common.meth->db_erase(tb,BIF_ARG_2,&ret);
@@ -2143,9 +2250,8 @@ BIF_RETTYPE ets_delete_object_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_delete_object_2);
+
if (is_not_tuple(BIF_ARG_2) ||
(arityval(*tuple_val(BIF_ARG_2)) < tb->common.keypos)) {
db_unlock(tb, LCK_WRITE_REC);
@@ -2168,7 +2274,7 @@ BIF_RETTYPE ets_delete_object_2(BIF_ALIST_2)
/*
** This is for trapping, cannot be called directly.
*/
-static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1)
+static BIF_RETTYPE ets_select_delete_trap_1(BIF_ALIST_1)
{
Process *p = BIF_P;
Eterm a1 = BIF_ARG_1;
@@ -2178,15 +2284,14 @@ static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1)
Eterm ret;
Eterm *tptr;
db_lock_kind_t kind = LCK_WRITE_REC;
-
+
CHECK_TABLES();
ASSERT(is_tuple(a1));
tptr = tuple_val(a1);
ASSERT(arityval(*tptr) >= 1);
- if ((tb = db_get_table(p, tptr[1], DB_WRITE, kind)) == NULL) {
- BIF_ERROR(p,BADARG);
- }
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_WRITE, kind,
+ &ets_select_delete_continue_exp);
cret = tb->common.meth->db_select_delete_continue(p,tb,a1,&ret);
@@ -2210,7 +2315,10 @@ static BIF_RETTYPE ets_select_delete_1(BIF_ALIST_1)
}
-BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
+/*
+ * ets:select_delete/2 without special case for "delete-all".
+ */
+BIF_RETTYPE ets_internal_select_delete_2(BIF_ALIST_2)
{
BIF_RETTYPE result;
DbTable* tb;
@@ -2220,20 +2328,8 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2)
CHECK_TABLES();
- if(eq(BIF_ARG_2, ms_delete_all)) {
- int nitems;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
- nitems = erts_atomic_read_nob(&tb->common.nitems);
- tb->common.meth->db_delete_all_objects(BIF_P, tb);
- db_unlock(tb, LCK_WRITE);
- BIF_RET(erts_make_integer(nitems,BIF_P));
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_internal_select_delete_2);
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
safety = ITERATION_SAFETY(BIF_P,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
@@ -2350,7 +2446,7 @@ ets_all_reply(ErtsSchedulerData *esdp, ErtsEtsAllReq **reqpp,
ASSERT(!*tablepp);
/* Max heap size needed... */
- sz = esdp->ets_tables.count;
+ sz = erts_atomic_read_nob(&esdp->ets_tables.count);
sz *= ERTS_MAGIC_REF_THING_SIZE + 2;
sz += 3 + ERTS_REF_THING_SIZE;
hfragp = new_message_buffer(sz);
@@ -2434,7 +2530,8 @@ erts_handle_yielded_ets_all_request(ErtsSchedulerData *esdp,
if (!eaydp->queue)
return 0; /* All work completed! */
- if (yc < ERTS_ETS_ALL_TB_YCNT_START && yc > esdp->ets_tables.count)
+ if (yc < ERTS_ETS_ALL_TB_YCNT_START &&
+ yc > erts_atomic_read_nob(&esdp->ets_tables.count))
return 1; /* Yield! */
eaydp->ongoing = ongoing = eaydp->queue;
@@ -2513,7 +2610,6 @@ BIF_RETTYPE ets_internal_request_all_0(BIF_ALIST_0)
BIF_RET(ref);
}
-
/*
** db_slot(Db, Slot) -> [Items].
*/
@@ -2525,9 +2621,8 @@ BIF_RETTYPE ets_slot_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_slot_2);
+
/* The slot number is checked in table specific code. */
cret = tb->common.meth->db_slot(BIF_P, tb, BIF_ARG_2, &ret);
db_unlock(tb, LCK_READ);
@@ -2547,41 +2642,53 @@ BIF_RETTYPE ets_slot_2(BIF_ALIST_2)
BIF_RETTYPE ets_match_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_match_1, BIF_ARG_1);
}
BIF_RETTYPE ets_match_2(BIF_ALIST_2)
{
+ DbTable* tb;
Eterm ms;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_2);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarDollar, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select2(BIF_P, tb, BIF_ARG_1, ms);
UnUseTmpHeap(8,BIF_P);
return res;
}
BIF_RETTYPE ets_match_3(BIF_ALIST_3)
{
+ DbTable* tb;
Eterm ms;
+ Sint chunk_size;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ /* Chunk size strictly greater than 0 */
+ if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_3);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarDollar, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select3(BIF_P, tb, BIF_ARG_1, ms, chunk_size);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -2589,34 +2696,35 @@ BIF_RETTYPE ets_match_3(BIF_ALIST_3)
BIF_RETTYPE ets_select_3(BIF_ALIST_3)
{
- return ets_select3(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+ DbTable* tb;
+ Sint chunk_size;
+
+ /* Chunk size strictly greater than 0 */
+ if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_3);
+
+ return ets_select3(BIF_P, tb, BIF_ARG_1, BIF_ARG_2, chunk_size);
}
static BIF_RETTYPE
-ets_select3(Process* p, Eterm arg1, Eterm arg2, Eterm arg3)
+ets_select3(Process* p, DbTable* tb, Eterm tid, Eterm ms, Sint chunk_size)
{
BIF_RETTYPE result;
- DbTable* tb;
int cret;
Eterm ret;
- Sint chunk_size;
enum DbIterSafety safety;
CHECK_TABLES();
- /* Chunk size strictly greater than 0 */
- if (is_not_small(arg3) || (chunk_size = signed_val(arg3)) <= 0) {
- BIF_ERROR(p, BADARG);
- }
- if ((tb = db_get_table(p, arg1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select_chunk(p, tb, arg1,
- arg2, chunk_size,
+ cret = tb->common.meth->db_select_chunk(p, tb, tid,
+ ms, chunk_size,
0 /* not reversed */,
&ret);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
@@ -2662,9 +2770,8 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
tptr = tuple_val(a1);
ASSERT(arityval(*tptr) >= 1);
- if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_READ, kind,
+ &ets_select_continue_exp);
cret = tb->common.meth->db_select_continue(p, tb, a1,
&ret);
@@ -2694,10 +2801,10 @@ static BIF_RETTYPE ets_select_trap_1(BIF_ALIST_1)
BIF_RETTYPE ets_select_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_select_1, BIF_ARG_1);
}
-static BIF_RETTYPE ets_select1(Process *p, Eterm arg1)
+static BIF_RETTYPE ets_select1(Process *p, int bif_ix, Eterm arg1)
{
BIF_RETTYPE result;
DbTable* tb;
@@ -2719,10 +2826,10 @@ static BIF_RETTYPE ets_select1(Process *p, Eterm arg1)
BIF_ERROR(p, BADARG);
}
tptr = tuple_val(arg1);
- if (arityval(*tptr) < 1 ||
- (tb = db_get_table(p, tptr[1], DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
+ if (arityval(*tptr) < 1)
+ BIF_ERROR(p, BADARG);
+
+ DB_GET_TABLE(tb, tptr[1], DB_READ, LCK_READ, bif_ix, NULL, p);
safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
@@ -2758,33 +2865,27 @@ static BIF_RETTYPE ets_select1(Process *p, Eterm arg1)
BIF_RETTYPE ets_select_2(BIF_ALIST_2)
{
- return ets_select2(BIF_P, BIF_ARG_1, BIF_ARG_2);
+ DbTable* tb;
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_2);
+ return ets_select2(BIF_P, tb, BIF_ARG_1, BIF_ARG_2);
}
static BIF_RETTYPE
-ets_select2(Process* p, Eterm arg1, Eterm arg2)
+ets_select2(Process* p, DbTable* tb, Eterm tid, Eterm ms)
{
BIF_RETTYPE result;
- DbTable* tb;
int cret;
enum DbIterSafety safety;
Eterm ret;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
-
- if ((tb = db_get_table(p, arg1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
safety = ITERATION_SAFETY(p,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
}
- cret = tb->common.meth->db_select(p, tb, arg1, arg2, 0, &ret);
+ cret = tb->common.meth->db_select(p, tb, tid, ms, 0, &ret);
if (DID_TRAP(p,ret) && safety != ITER_SAFE) {
fix_table_locked(p, tb);
@@ -2827,9 +2928,9 @@ static BIF_RETTYPE ets_select_count_1(BIF_ALIST_1)
tptr = tuple_val(a1);
ASSERT(arityval(*tptr) >= 1);
- if ((tb = db_get_table(p, tptr[1], DB_READ, kind)) == NULL) {
- BIF_ERROR(p, BADARG);
- }
+
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_READ, kind,
+ &ets_select_count_continue_exp);
cret = tb->common.meth->db_select_count_continue(p, tb, a1, &ret);
@@ -2864,13 +2965,9 @@ BIF_RETTYPE ets_select_count_2(BIF_ALIST_2)
Eterm ret;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_count_2);
+
safety = ITERATION_SAFETY(BIF_P,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
@@ -2920,9 +3017,8 @@ static BIF_RETTYPE ets_select_replace_1(BIF_ALIST_1)
tptr = tuple_val(a1);
ASSERT(arityval(*tptr) >= 1);
- if ((tb = db_get_table(p, tptr[1], DB_WRITE, kind)) == NULL) {
- BIF_ERROR(p,BADARG);
- }
+ DB_TRAP_GET_TABLE(tb, tptr[1], DB_WRITE, kind,
+ &ets_select_replace_continue_exp);
cret = tb->common.meth->db_select_replace_continue(p,tb,a1,&ret);
@@ -2956,9 +3052,7 @@ BIF_RETTYPE ets_select_replace_2(BIF_ALIST_2)
CHECK_TABLES();
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE_REC)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_WRITE, LCK_WRITE_REC, BIF_ets_select_replace_2);
if (tb->common.status & DB_BAG) {
/* Bag implementation presented both semantic consistency
@@ -3009,13 +3103,8 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
Sint chunk_size;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_reverse_3);
/* Chunk size strictly greater than 0 */
if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
@@ -3053,7 +3142,7 @@ BIF_RETTYPE ets_select_reverse_3(BIF_ALIST_3)
BIF_RETTYPE ets_select_reverse_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_select_reverse_1, BIF_ARG_1);
}
BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
@@ -3065,13 +3154,9 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
Eterm ret;
CHECK_TABLES();
- /*
- * Make sure that the table exists.
- */
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_READ, LCK_READ)) == NULL) {
- BIF_ERROR(BIF_P, BADARG);
- }
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_select_reverse_2);
+
safety = ITERATION_SAFETY(BIF_P,tb);
if (safety == ITER_UNSAFE) {
local_fix_table(tb);
@@ -3103,45 +3188,63 @@ BIF_RETTYPE ets_select_reverse_2(BIF_ALIST_2)
/*
-** ets:match_object(Continuation), ets:match_object(Table, Pattern), ets:match_object(Table,Pattern,ChunkSize)
+** ets:match_object(Continuation)
*/
BIF_RETTYPE ets_match_object_1(BIF_ALIST_1)
{
- return ets_select1(BIF_P, BIF_ARG_1);
+ return ets_select1(BIF_P, BIF_ets_match_object_1, BIF_ARG_1);
}
+/*
+** ets:match_object(Table, Pattern)
+*/
BIF_RETTYPE ets_match_object_2(BIF_ALIST_2)
{
+ DbTable* tb;
Eterm ms;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_object_2);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarUnderscore, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select2(BIF_P, BIF_ARG_1, ms);
+ res = ets_select2(BIF_P, tb, BIF_ARG_1, ms);
UnUseTmpHeap(8,BIF_P);
return res;
}
+/*
+** ets:match_object(Table,Pattern,ChunkSize)
+*/
BIF_RETTYPE ets_match_object_3(BIF_ALIST_3)
{
+ DbTable* tb;
+ Sint chunk_size;
Eterm ms;
DeclareTmpHeap(buff,8,BIF_P);
Eterm *hp = buff;
Eterm res;
+ /* Chunk size strictly greater than 0 */
+ if (is_not_small(BIF_ARG_3) || (chunk_size = signed_val(BIF_ARG_3)) <= 0) {
+ BIF_ERROR(BIF_P, BADARG);
+ }
+
+ DB_BIF_GET_TABLE(tb, DB_READ, LCK_READ, BIF_ets_match_object_3);
+
UseTmpHeap(8,BIF_P);
ms = CONS(hp, am_DollarUnderscore, NIL);
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
hp += 4;
ms = CONS(hp, ms, NIL);
- res = ets_select3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+ res = ets_select3(BIF_P, tb, BIF_ARG_1, ms, chunk_size);
UnUseTmpHeap(8,BIF_P);
return res;
}
@@ -3162,16 +3265,17 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1)
Eterm res;
int i;
Eterm* hp;
+ Uint freason;
/*Process* rp = NULL;*/
/* If/when we implement lockless private tables:
Eterm owner;
*/
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) {
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ, &freason)) == NULL) {
+ if (freason == BADARG && (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)))
BIF_RET(am_undefined);
- }
- BIF_ERROR(BIF_P, BADARG);
+ else
+ return db_bif_fail(BIF_P, freason, BIF_ets_info_1, NULL);
}
/* If/when we implement lockless private tables:
@@ -3228,12 +3332,13 @@ BIF_RETTYPE ets_info_2(BIF_ALIST_2)
{
DbTable* tb;
Eterm ret = THE_NON_VALUE;
+ Uint freason;
- if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL) {
- if (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)) {
+ if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ, &freason)) == NULL) {
+ if (freason == BADARG && (is_atom(BIF_ARG_1) || is_ref(BIF_ARG_1)))
BIF_RET(am_undefined);
- }
- BIF_ERROR(BIF_P, BADARG);
+ else
+ return db_bif_fail(BIF_P, freason, BIF_ets_info_2, NULL);
}
ret = table_info(BIF_P, tb, BIF_ARG_2);
db_unlock(tb, LCK_READ);
@@ -3321,7 +3426,6 @@ int erts_ets_rwmtx_spin_count = -1;
void init_db(ErtsDbSpinCount db_spin_count)
{
int i;
- Eterm *hp;
unsigned bits;
size_t size;
@@ -3387,7 +3491,11 @@ void init_db(ErtsDbSpinCount db_spin_count)
db_max_tabs, ((Uint)1)<<SMALL_BITS);
}
- meta_name_tab_mask = (((Uint) 1)<<(bits-1)) - 1; /* At least half the size of main tab */
+ /*
+     * We no longer have any hard limit on the number of tables,
+     * but we use 'db_max_tabs' to determine the size of the name hash table.
+ */
+ meta_name_tab_mask = (((Uint) 1)<<bits) - 1;
size = sizeof(struct meta_name_tab_entry)*(meta_name_tab_mask+1);
meta_name_tab = erts_db_alloc_nt(ERTS_ALC_T_DB_TABLES, size);
ERTS_ETS_MISC_MEM_ADD(size);
@@ -3402,35 +3510,28 @@ void init_db(ErtsDbSpinCount db_spin_count)
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_delete_continue_exp,
- am_ets, am_atom_put("delete_trap",11), 1,
- &ets_select_delete_1);
+ am_ets, ERTS_MAKE_AM("select_delete_trap"), 1,
+ &ets_select_delete_trap_1);
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_count_continue_exp,
- am_ets, am_atom_put("count_trap",11), 1,
+ am_ets, ERTS_MAKE_AM("count_trap"), 1,
&ets_select_count_1);
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_replace_continue_exp,
- am_ets, am_atom_put("replace_trap",11), 1,
+ am_ets, ERTS_MAKE_AM("replace_trap"), 1,
&ets_select_replace_1);
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_select_continue_exp,
- am_ets, am_atom_put("select_trap",11), 1,
+ am_ets, ERTS_MAKE_AM("select_trap"), 1,
&ets_select_trap_1);
/* Non visual BIF to trap to. */
erts_init_trap_export(&ets_delete_continue_exp,
- am_ets, am_atom_put("delete_trap",11), 1,
+ am_ets, ERTS_MAKE_AM("delete_trap"), 1,
&ets_delete_trap);
-
- hp = ms_delete_all_buff;
- ms_delete_all = CONS(hp, am_true, NIL);
- hp += 2;
- ms_delete_all = TUPLE3(hp,am_Underscore,NIL,ms_delete_all);
- hp +=4;
- ms_delete_all = CONS(hp, ms_delete_all,NIL);
}
void
@@ -3442,7 +3543,7 @@ erts_ets_sched_spec_data_init(ErtsSchedulerData *esdp)
eaydp->tab = NULL;
eaydp->queue = NULL;
esdp->ets_tables.clist = NULL;
- esdp->ets_tables.count = 0;
+ erts_atomic_init_nob(&esdp->ets_tables.count, 0);
}
@@ -3792,7 +3893,8 @@ unlocked:
erts_rwmtx_runlock(&tb->common.rwlock);
erts_rwmtx_rwlock(&tb->common.rwlock);
*kind_p = LCK_WRITE;
- if (tb->common.status & DB_DELETE) return;
+ if (tb->common.status & (DB_DELETE|DB_BUSY))
+ return;
}
db_unfix_table_hash(&(tb->hash));
}
@@ -3940,7 +4042,8 @@ static BIF_RETTYPE ets_delete_trap(BIF_ALIST_1)
ASSERT(*ptr == make_pos_bignum_header(1));
- if (free_table_continue(BIF_P, tb, reds) < 0) {
+ reds = free_table_continue(BIF_P, tb, reds);
+ if (reds < 0) {
BUMP_ALL_REDS(BIF_P);
BIF_TRAP1(&ets_delete_continue_exp, BIF_P, cont);
}
@@ -4056,7 +4159,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
else if (What == am_data) {
print_table(ERTS_PRINT_STDOUT, NULL, 1, tb);
ret = am_true;
- } else if (What == am_atom_put("fixed",5)) {
+ } else if (ERTS_IS_ATOM_STR("fixed",What)) {
if (IS_FIXED(tb))
ret = am_true;
else
@@ -4108,7 +4211,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
ret = am_false;
}
erts_mtx_unlock(&tb->common.fixlock);
- } else if (What == am_atom_put("stats",5)) {
+ } else if (ERTS_IS_ATOM_STR("stats",What)) {
if (IS_HASH_TABLE(tb->common.status)) {
FloatDef f;
DbHashStats stats;
@@ -4248,6 +4351,18 @@ erts_db_get_max_tabs()
return db_max_tabs;
}
+Uint erts_ets_table_count(void)
+{
+ Uint tb_count = 0;
+ Uint six;
+
+ for (six = 0; six < erts_no_schedulers; six++) {
+ ErtsSchedulerData *esdp = &erts_aligned_scheduler_data[six].esd;
+ tb_count += erts_atomic_read_nob(&esdp->ets_tables.count);
+ }
+ return tb_count;
+}
+
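
erts_ets_table_count() above sums one atomic counter per scheduler rather than maintaining a single, contended global counter. A minimal sketch of that aggregation pattern (hypothetical names; the result is a snapshot, not a linearizable count, which is acceptable for statistics):

#include <stdatomic.h>
#include <stdio.h>

#define N_SCHEDULERS 4

static atomic_long tab_count[N_SCHEDULERS];

/* Writers only ever touch their own scheduler's counter. */
static void table_created(int sched) { atomic_fetch_add(&tab_count[sched], 1); }
static void table_deleted(int sched) { atomic_fetch_sub(&tab_count[sched], 1); }

static long total_tables(void)
{
    long sum = 0;
    for (int i = 0; i < N_SCHEDULERS; i++)
        sum += atomic_load(&tab_count[i]);
    return sum;
}

int main(void)
{
    table_created(0);
    table_created(2);
    table_created(2);
    table_deleted(0);
    printf("%ld tables\n", total_tables());
    return 0;
}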
/*
* For testing of meta tables only.
*
@@ -4321,5 +4436,8 @@ void erts_lcnt_update_db_locks(int enable) {
erts_schedule_multi_misc_aux_work(0, erts_no_schedulers,
&lcnt_update_db_locks_per_sched, (void*)(UWord)enable);
}
-
#endif /* ERTS_ENABLE_LOCK_COUNT */
+
+#ifdef ETS_DBG_FORCE_TRAP
+erts_aint_t erts_ets_dbg_force_trap = 0;
+#endif
diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h
index eb6da2c9fb..db1dec015c 100644
--- a/erts/emulator/beam/erl_db.h
+++ b/erts/emulator/beam/erl_db.h
@@ -45,7 +45,7 @@ typedef struct {
} ErtsEtsAllYieldData;
typedef struct {
- Uint count;
+ erts_atomic_t count;
DbTable *clist;
} ErtsEtsTables;
@@ -69,6 +69,7 @@ typedef struct {
/*TT*/
Uint erts_get_ets_misc_mem_size(void);
+Uint erts_ets_table_count(void);
typedef struct {
DbTableCommon common;
@@ -93,7 +94,7 @@ union db_table {
/*TT*/
};
-#define DB_DEF_MAX_TABS 2053 /* Superseeded by environment variable
+#define DB_DEF_MAX_TABS 8192  /* Superseded by environment variable
"ERL_MAX_ETS_TABLES" */
#define ERL_MAX_ETS_TABLES_ENV "ERL_MAX_ETS_TABLES"
@@ -135,6 +136,10 @@ void erts_lcnt_enable_db_lock_count(DbTable *tb, int enable);
void erts_lcnt_update_db_locks(int enable);
#endif
+#ifdef ETS_DBG_FORCE_TRAP
+extern erts_aint_t erts_ets_dbg_force_trap;
+#endif
+
#endif /* ERL_DB_H__ */
#if defined(ERTS_WANT_DB_INTERNAL__) && !defined(ERTS_HAVE_DB_INTERNAL__)
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index cb5c496e90..74d63325e6 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -21,6 +21,7 @@
/*
** Implementation of unordered ETS tables.
** The tables are implemented as linear dynamic hash tables.
+** https://en.wikipedia.org/wiki/Linear_hashing
*/
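
For readers unfamiliar with linear hashing: the table doubles its address space gradually, so a hash can land in a slot that has not been split yet and must then fall back to the lower half of the table. A small illustrative version of that indexing rule (mirroring the shape of hash_to_ix(), but not the ERTS code):

#include <stdio.h>

static unsigned hash_to_ix(unsigned hval, unsigned szm /* 2^k - 1 */,
                           unsigned nactive /* slots in use */)
{
    unsigned ix = hval & szm;          /* index in the doubled table */
    if (ix >= nactive)
        ix &= szm >> 1;                /* slot not split yet: use low half */
    return ix;
}

int main(void)
{
    /* Table grew from 4 towards a capacity of 8; only 6 slots are active. */
    unsigned szm = 7, nactive = 6;
    for (unsigned h = 0; h < 8; h++)
        printf("hash %u -> slot %u\n", h, hash_to_ix(h, szm, nactive));
    return 0;
}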
/* SMP:
@@ -148,20 +149,14 @@ static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
return ix;
}
-/* Remember a slot containing a pseudo-deleted item (INVALID_HASH)
- * Return false if we got raced by unfixing thread
- * and the object should be deleted for real.
- */
-static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix,
- erts_aint_t fixated_by_me)
+
+static ERTS_INLINE int link_fixdel(DbTableHash* tb,
+ FixedDeletion* fixd,
+ erts_aint_t fixated_by_me)
{
erts_aint_t was_next;
erts_aint_t exp_next;
- FixedDeletion* fixd = (FixedDeletion*) erts_db_alloc(ERTS_ALC_T_DB_FIX_DEL,
- (DbTable *) tb,
- sizeof(FixedDeletion));
- ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion));
- fixd->slot = ix;
+
was_next = erts_atomic_read_acqb(&tb->fixdel);
do { /* Lockless atomic insertion in linked list: */
if (NFIXED(tb) <= fixated_by_me) {
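
link_fixdel() above performs a lock-free push onto the fixdel list using a compare-and-swap retry loop. A self-contained C11 sketch of that insertion pattern follows; the node type is hypothetical, and the real code additionally bails out when the table is no longer fixed.

#include <stdatomic.h>
#include <stdio.h>

struct node { int slot; struct node *next; };

static _Atomic(struct node *) head = NULL;

/* Read the head, point the new node at it, CAS it in; retry on a race. */
static void push(struct node *n)
{
    struct node *old = atomic_load_explicit(&head, memory_order_acquire);
    do {
        n->next = old;
    } while (!atomic_compare_exchange_weak_explicit(
                 &head, &old, n,
                 memory_order_release, memory_order_acquire));
}

int main(void)
{
    static struct node a = { 1, NULL }, b = { 2, NULL };
    push(&a);
    push(&b);
    for (struct node *p = atomic_load(&head); p; p = p->next)
        printf("slot %d\n", p->slot);
    return 0;
}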
@@ -178,14 +173,33 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix,
return 1;
}
+/* Remember a slot containing a pseudo-deleted item.
+ * Return false if we were raced by an unfixing thread
+ * and the object should be deleted for real.
+ */
+static int add_fixed_deletion(DbTableHash* tb, int ix,
+ erts_aint_t fixated_by_me)
+{
+ FixedDeletion* fixd = (FixedDeletion*) erts_db_alloc(ERTS_ALC_T_DB_FIX_DEL,
+ (DbTable *) tb,
+ sizeof(FixedDeletion));
+ ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion));
+ fixd->slot = ix;
+ fixd->all = 0;
+ return link_fixdel(tb, fixd, fixated_by_me);
+}
+
+
+static ERTS_INLINE int is_pseudo_deleted(HashDbTerm* p)
+{
+ return p->pseudo_deleted;
+}
-#define MAX_HASH 0xEFFFFFFFUL
-#define INVALID_HASH 0xFFFFFFFFUL
/* optimised version of make_hash (normal case? atomic key) */
#define MAKE_HASH(term) \
((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
- make_internal_hash(term, 0)) % MAX_HASH)
+ make_internal_hash(term, 0)) & MAX_HASH_MASK)
# define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1)
# define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck)
@@ -270,17 +284,22 @@ static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix,
}
-/*
- * Some special binary flags
- */
-#define BIN_FLAG_ALL_OBJECTS BIN_FLAG_USR1
-
static ERTS_INLINE void free_term(DbTableHash *tb, HashDbTerm* p)
{
db_free_term((DbTable*)tb, p, offsetof(HashDbTerm, dbterm));
}
+static ERTS_INLINE void free_term_list(DbTableHash *tb, HashDbTerm* p)
+{
+ while (p) {
+ HashDbTerm* next = p->next;
+ free_term(tb, p);
+ p = next;
+ }
+}
+
+
/*
* Local types
*/
@@ -290,9 +309,6 @@ struct mp_prefound {
};
struct mp_info {
- int all_objects; /* True if complete objects are always
- * returned from the match_spec (can use
- * copy_shallow on the return value) */
int something_can_match; /* The match_spec is not "impossible" */
int key_given;
struct mp_prefound dlists[10]; /* Default list of "pre-found" buckets */
@@ -402,7 +418,7 @@ static void db_print_hash(fmtfn_t to,
void *to_arg,
int show,
DbTable *tbl);
-static int db_free_table_hash(DbTable *tbl);
+static int db_free_empty_table_hash(DbTable *tbl);
static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds);
@@ -411,7 +427,7 @@ static void db_foreach_offheap_hash(DbTable *,
void (*)(ErlOffHeap *, void *),
void *);
-static int db_delete_all_objects_hash(Process* p, DbTable* tbl);
+static SWord db_delete_all_objects_hash(Process* p, DbTable* tbl, SWord reds);
#ifdef HARDDEBUG
static void db_check_table_hash(DbTableHash *tb);
#endif
@@ -436,7 +452,8 @@ static ERTS_INLINE void try_shrink(DbTableHash* tb)
static ERTS_INLINE int has_live_key(DbTableHash* tb, HashDbTerm* b,
Eterm key, HashValue hval)
{
- if (b->hvalue != hval) return 0;
+ if (b->hvalue != hval || is_pseudo_deleted(b))
+ return 0;
else {
Eterm itemKey = GETKEY(tb, b->dbterm.tpl);
ASSERT(!is_header(itemKey));
@@ -449,7 +466,8 @@ static ERTS_INLINE int has_live_key(DbTableHash* tb, HashDbTerm* b,
static ERTS_INLINE int has_key(DbTableHash* tb, HashDbTerm* b,
Eterm key, HashValue hval)
{
- if (b->hvalue != hval && b->hvalue != INVALID_HASH) return 0;
+ if (b->hvalue != hval)
+ return 0;
else {
Eterm itemKey = GETKEY(tb, b->dbterm.tpl);
ASSERT(!is_header(itemKey));
@@ -513,7 +531,7 @@ DbTableMethod db_hash =
db_select_replace_continue_hash,
db_take_hash,
db_delete_all_objects_hash,
- db_free_table_hash,
+ db_free_empty_table_hash,
db_free_table_continue_hash,
db_print_hash,
db_foreach_offheap_hash,
@@ -570,51 +588,61 @@ SWord db_unfix_table_hash(DbTableHash *tb)
SWord work = 0;
ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)
- || (erts_lc_rwmtx_is_rlocked(&tb->common.rwlock)
- && !tb->common.is_thread_safe));
+ || (erts_lc_rwmtx_is_rlocked(&tb->common.rwlock)
+ && !tb->common.is_thread_safe));
restart:
fixdel = (FixedDeletion*) erts_atomic_xchg_mb(&tb->fixdel,
- (erts_aint_t) NULL);
- while (fixdel != NULL) {
- FixedDeletion *fx = fixdel;
- int ix = fx->slot;
- HashDbTerm **bp;
- HashDbTerm *b;
- erts_rwmtx_t* lck = WLOCK_HASH(tb,ix);
-
- if (IS_FIXED(tb)) { /* interrupted by fixer */
- WUNLOCK_HASH(lck);
- restore_fixdel(tb,fixdel);
- if (!IS_FIXED(tb)) {
- goto restart; /* unfixed again! */
- }
- return work;
- }
- if (ix < NACTIVE(tb)) {
- bp = &BUCKET(tb, ix);
- b = *bp;
-
- while (b != NULL) {
- if (b->hvalue == INVALID_HASH) {
- *bp = b->next;
- free_term(tb, b);
- work++;
- b = *bp;
- } else {
- bp = &b->next;
- b = b->next;
- }
- }
- }
- /* else slot has been joined and purged by shrink() */
- WUNLOCK_HASH(lck);
- fixdel = fx->next;
- erts_db_free(ERTS_ALC_T_DB_FIX_DEL,
- (DbTable *) tb,
- (void *) fx,
- sizeof(FixedDeletion));
- ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
- work++;
+ (erts_aint_t) NULL);
+ while (fixdel) {
+ FixedDeletion *free_me;
+
+ do {
+ HashDbTerm **bp;
+ HashDbTerm *b;
+ HashDbTerm *free_us = NULL;
+ erts_rwmtx_t* lck;
+
+ lck = WLOCK_HASH(tb, fixdel->slot);
+
+ if (IS_FIXED(tb)) { /* interrupted by fixer */
+ WUNLOCK_HASH(lck);
+ restore_fixdel(tb,fixdel);
+ if (!IS_FIXED(tb)) {
+ goto restart; /* unfixed again! */
+ }
+ return work;
+ }
+ if (fixdel->slot < NACTIVE(tb)) {
+ bp = &BUCKET(tb, fixdel->slot);
+ b = *bp;
+
+ while (b != NULL) {
+ if (is_pseudo_deleted(b)) {
+ HashDbTerm* nxt = b->next;
+ b->next = free_us;
+ free_us = b;
+ work++;
+ b = *bp = nxt;
+ } else {
+ bp = &b->next;
+ b = b->next;
+ }
+ }
+ }
+ /* else slot has been joined and purged by shrink() */
+ WUNLOCK_HASH(lck);
+ free_term_list(tb, free_us);
+
+ }while (fixdel->all && fixdel->slot-- > 0);
+
+ free_me = fixdel;
+ fixdel = fixdel->next;
+ erts_db_free(ERTS_ALC_T_DB_FIX_DEL,
+ (DbTable *) tb,
+ (void *) free_me,
+ sizeof(FixedDeletion));
+ ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion));
+ work++;
}
/* ToDo: Maybe try grow/shrink the table as well */
@@ -764,8 +792,9 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
*/
if (tb->common.status & DB_SET) {
HashDbTerm* bnext = b->next;
- if (b->hvalue == INVALID_HASH) {
+ if (is_pseudo_deleted(b)) {
erts_atomic_inc_nob(&tb->common.nitems);
+ b->pseudo_deleted = 0;
}
else if (key_clash_fail) {
ret = DB_ERROR_BADKEY;
@@ -773,14 +802,14 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
}
q = replace_dbterm(tb, b, obj);
q->next = bnext;
- q->hvalue = hval; /* In case of INVALID_HASH */
+ ASSERT(q->hvalue == hval);
*bp = q;
goto Ldone;
}
else if (key_clash_fail) { /* && (DB_BAG || DB_DUPLICATE_BAG) */
q = b;
do {
- if (q->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(q)) {
ret = DB_ERROR_BADKEY;
goto Ldone;
}
@@ -792,9 +821,10 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
q = b;
do {
if (db_eq(&tb->common,obj,&q->dbterm)) {
- if (q->hvalue == INVALID_HASH) {
+ if (is_pseudo_deleted(q)) {
erts_atomic_inc_nob(&tb->common.nitems);
- q->hvalue = hval;
+ q->pseudo_deleted = 0;
+ ASSERT(q->hvalue == hval);
if (q != b) { /* must move to preserve key insertion order */
*qp = q->next;
q->next = b;
@@ -812,6 +842,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail)
Lnew:
q = new_dbterm(tb, obj);
q->hvalue = hval;
+ q->pseudo_deleted = 0;
q->next = b;
*bp = q;
nitems = erts_atomic_inc_read_nob(&tb->common.nitems);
@@ -839,7 +870,7 @@ get_term_list(Process *p, DbTableHash *tb, Eterm key, HashValue hval,
if (tb->common.status & (DB_BAG | DB_DUPLICATE_BAG)) {
while (b2 && has_key(tb, b2, key, hval)) {
- if (b2->hvalue != INVALID_HASH)
+ if (!is_pseudo_deleted(b2))
sz += b2->dbterm.size + 2;
b2 = b2->next;
@@ -935,7 +966,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
while(b2 != NULL && has_key(tb,b2,key,hval)) {
if (ndex > arityval(b2->dbterm.tpl[0])
- && b2->hvalue != INVALID_HASH) {
+ && !is_pseudo_deleted(b2)) {
retval = DB_ERROR_BADITEM;
goto done;
}
@@ -943,7 +974,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl,
}
b = b1;
while(b != b2) {
- if (b->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(b)) {
Eterm *hp;
Eterm copy = db_copy_element_from_ets(&tb->common, p,
&b->dbterm, ndex, &hp, 2);
@@ -978,6 +1009,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
int ix;
HashDbTerm** bp;
HashDbTerm* b;
+ HashDbTerm* free_us = NULL;
erts_rwmtx_t* lck;
int nitems_diff = 0;
@@ -993,16 +1025,17 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
if (nitems_diff == -1 && IS_FIXED(tb)
&& add_fixed_deletion(tb, ix, 0)) {
/* Pseudo remove (no need to keep several of same key) */
- b->hvalue = INVALID_HASH;
+ b->pseudo_deleted = 1;
} else {
- *bp = b->next;
- free_term(tb, b);
- b = *bp;
+ HashDbTerm* next = b->next;
+ b->next = free_us;
+ free_us = b;
+ b = *bp = next;
continue;
}
}
else {
- if (nitems_diff && b->hvalue != INVALID_HASH)
+ if (nitems_diff && !is_pseudo_deleted(b))
break;
}
bp = &b->next;
@@ -1013,6 +1046,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret)
erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
+ free_term_list(tb, free_us);
*ret = am_true;
return DB_ERROR_NONE;
}
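
db_erase_hash() now unlinks its victims onto a private free_us list while holding the bucket lock and calls free_term_list() only after the lock is released, keeping deallocation out of the critical section. A simplified single-threaded sketch of that pattern, with no-op stand-ins for the lock:

#include <stdio.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

static void lock(void)   { /* bucket lock would be taken here */ }
static void unlock(void) { /* ...and released here */ }

static void free_list(struct node *p)
{
    while (p) {
        struct node *next = p->next;
        free(p);
        p = next;
    }
}

static void erase(struct node **bucket, int key)
{
    struct node *free_us = NULL;
    struct node **bp;

    lock();
    for (bp = bucket; *bp; ) {
        if ((*bp)->key == key) {
            struct node *victim = *bp;
            *bp = victim->next;        /* unlink under the lock */
            victim->next = free_us;    /* park it on the private list */
            free_us = victim;
        } else {
            bp = &(*bp)->next;
        }
    }
    unlock();

    free_list(free_us);                /* expensive work outside the lock */
}

int main(void)
{
    struct node *bucket = NULL;
    for (int i = 0; i < 5; i++) {
        struct node *n = malloc(sizeof *n);
        n->key = i % 2;
        n->next = bucket;
        bucket = n;
    }
    erase(&bucket, 1);
    for (struct node *p = bucket; p; p = p->next)
        printf("key %d\n", p->key);
    free_list(bucket);
    return 0;
}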
@@ -1027,6 +1061,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
int ix;
HashDbTerm** bp;
HashDbTerm* b;
+ HashDbTerm* free_us = NULL;
erts_rwmtx_t* lck;
int nitems_diff = 0;
int nkeys = 0;
@@ -1045,13 +1080,14 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
if (db_eq(&tb->common,object, &b->dbterm)) {
--nitems_diff;
if (nkeys==1 && IS_FIXED(tb) && add_fixed_deletion(tb,ix,0)) {
- b->hvalue = INVALID_HASH; /* Pseudo remove */
+ b->pseudo_deleted = 1;
bp = &b->next;
b = b->next;
} else {
- *bp = b->next;
- free_term(tb, b);
- b = *bp;
+ HashDbTerm* next = b->next;
+ b->next = free_us;
+ free_us = b;
+ b = *bp = next;
}
if (tb->common.status & (DB_DUPLICATE_BAG)) {
continue;
@@ -1060,7 +1096,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
}
}
}
- else if (nitems_diff && b->hvalue != INVALID_HASH) {
+ else if (nitems_diff && !is_pseudo_deleted(b)) {
break;
}
bp = &b->next;
@@ -1071,6 +1107,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret)
erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
+ free_term_list(tb, free_us);
*ret = am_true;
return DB_ERROR_NONE;
}
@@ -1106,28 +1143,19 @@ static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret)
/*
- * This is just here so I can take care of the return value
- * that is to be sent during a trap (the BIF_TRAP macros explicitly returns)
- */
-static BIF_RETTYPE bif_trap1(Export *bif,
- Process *p,
- Eterm p1)
-{
- BIF_TRAP1(bif, p, p1);
-}
-
-
-/*
* Match traversal callbacks
*/
+typedef struct match_callbacks_t_ match_callbacks_t;
+struct match_callbacks_t_
+{
/* Called when no match is possible.
* context_ptr: Pointer to context
* ret: Pointer to traversal function term return.
*
* Both the direct return value and 'ret' are used as the traversal function return values.
*/
-typedef int (*mtraversal_on_nothing_can_match_t)(void* context_ptr, Eterm* ret);
+ int (*on_nothing_can_match)(match_callbacks_t* ctx, Eterm* ret);
/* Called for each match result.
* context_ptr: Pointer to context
@@ -1138,8 +1166,8 @@ typedef int (*mtraversal_on_nothing_can_match_t)(void* context_ptr, Eterm* ret);
*
* Should return 1 for successful match, 0 otherwise.
*/
-typedef int (*mtraversal_on_match_res_t)(void* context_ptr, Sint slot_ix, HashDbTerm*** current_ptr_ptr,
- Eterm match_res);
+ int (*on_match_res)(match_callbacks_t* ctx, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr, Eterm match_res);
/* Called when either we've matched enough elements in this cycle or EOT was reached.
* context_ptr: Pointer to context
@@ -1152,8 +1180,8 @@ typedef int (*mtraversal_on_match_res_t)(void* context_ptr, Sint slot_ix, HashDb
* Both the direct return value and 'ret' are used as the traversal function return values.
* If *mpp is set to NULL, it won't be deallocated (useful for trapping.)
*/
-typedef int (*mtraversal_on_loop_ended_t)(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret);
+ int (*on_loop_ended)(match_callbacks_t* ctx, Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp, Eterm* ret);
/* Called when it's time to trap
* context_ptr: Pointer to context
@@ -1165,7 +1193,11 @@ typedef int (*mtraversal_on_loop_ended_t)(void* context_ptr, Sint slot_ix, Sint
* Both the direct return value and 'ret' are used as the traversal function return values.
* If *mpp is set to NULL, it won't be deallocated (useful for trapping.)
*/
-typedef int (*mtraversal_on_trap_t)(void* context_ptr, Sint slot_ix, Sint got, Binary** mpp, Eterm* ret);
+ int (*on_trap)(match_callbacks_t* ctx, Sint slot_ix, Sint got, Binary** mpp,
+ Eterm* ret);
+
+};
+
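(Editorial note: the separate callback typedefs are folded into one match_callbacks_t "vtable" that each traversal context embeds as its first member, so a callback can cast the base pointer back to its concrete context. A minimal illustration of the pattern, assuming the declarations above; my_context_t and my_on_nothing_can_match are made-up names, not code from the patch:

    typedef struct {
        match_callbacks_t base;   /* must be first so the cast below is valid */
        Process* p;
        Eterm acc;
    } my_context_t;

    static int my_on_nothing_can_match(match_callbacks_t* ctx_base, Eterm* ret)
    {
        my_context_t* ctx = (my_context_t*) ctx_base;   /* downcast to concrete context */
        (void) ctx;
        *ret = NIL;
        return DB_ERROR_NONE;
    }

The select_* contexts further down follow exactly this layout and are handed to match_traverse() as &ctx.base.)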
/*
* Begin hash table match traversal
@@ -1178,11 +1210,7 @@ static int match_traverse(Process* p, DbTableHash* tb,
Eterm** hpp, /* Heap */
int lock_for_write, /* Set to 1 if we're going to delete or
modify existing terms */
- mtraversal_on_nothing_can_match_t on_nothing_can_match,
- mtraversal_on_match_res_t on_match_res,
- mtraversal_on_loop_ended_t on_loop_ended,
- mtraversal_on_trap_t on_trap,
- void* context_ptr, /* State for callbacks above */
+ match_callbacks_t* ctx,
Eterm* ret)
{
Sint slot_ix; /* Slot index */
@@ -1212,14 +1240,10 @@ static int match_traverse(Process* p, DbTableHash* tb,
if (!mpi.something_can_match) {
/* Can't possibly match anything */
- ret_value = on_nothing_can_match(context_ptr, ret);
+ ret_value = ctx->on_nothing_can_match(ctx, ret);
goto done;
}
- if (mpi.all_objects) {
- mpi.mp->intern.flags |= BIN_FLAG_ALL_OBJECTS;
- }
-
/*
* Look for initial slot / bucket
*/
@@ -1235,7 +1259,8 @@ static int match_traverse(Process* p, DbTableHash* tb,
}
slot_ix = next_slot_function(tb,slot_ix,&lck);
if (slot_ix == 0) {
- ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, &mpi.mp, ret);
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left,
+ &mpi.mp, ret);
goto done;
}
}
@@ -1253,11 +1278,11 @@ static int match_traverse(Process* p, DbTableHash* tb,
*/
for(;;) {
if (*current_ptr != NULL) {
- if ((*current_ptr)->hvalue != INVALID_HASH) {
- match_res = db_match_dbterm(&tb->common, p, mpi.mp, 0,
+ if (!is_pseudo_deleted(*current_ptr)) {
+ match_res = db_match_dbterm(&tb->common, p, mpi.mp,
&(*current_ptr)->dbterm, hpp, 2);
saved_current = *current_ptr;
- if (on_match_res(context_ptr, slot_ix, &current_ptr, match_res)) {
+ if (ctx->on_match_res(ctx, slot_ix, &current_ptr, match_res)) {
++got;
}
--iterations_left;
@@ -1271,7 +1296,7 @@ static int match_traverse(Process* p, DbTableHash* tb,
else if (mpi.key_given) { /* Key is bound */
unlock_hash_function(lck);
if (current_list_pos == mpi.num_lists) {
- ret_value = on_loop_ended(context_ptr, -1, got, iterations_left, &mpi.mp, ret);
+ ret_value = ctx->on_loop_ended(ctx, -1, got, iterations_left, &mpi.mp, ret);
goto done;
} else {
slot_ix = mpi.lists[current_list_pos].ix;
@@ -1296,18 +1321,18 @@ static int match_traverse(Process* p, DbTableHash* tb,
* Since many heap fragments will make the GC slower, trap and GC now.
*/
unlock_hash_function(lck);
- ret_value = on_trap(context_ptr, slot_ix, got, &mpi.mp, ret);
+ ret_value = ctx->on_trap(ctx, slot_ix, got, &mpi.mp, ret);
goto done;
}
current_ptr = &BUCKET(tb,slot_ix);
}
}
- ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, &mpi.mp, ret);
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left, &mpi.mp, ret);
done:
/* We should only jump directly to this label if
- * we've already called on_nothing_can_match / on_loop_ended / on_trap
+     * we've already called ctx->on_nothing_can_match / on_loop_ended / on_trap
*/
if (mpi.mp != NULL) {
erts_bin_free(mpi.mp);
@@ -1332,13 +1357,9 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
Binary** mpp, /* Existing match program */
int lock_for_write, /* Set to 1 if we're going to delete or
modify existing terms */
- mtraversal_on_match_res_t on_match_res,
- mtraversal_on_loop_ended_t on_loop_ended,
- mtraversal_on_trap_t on_trap,
- void* context_ptr, /* For callbacks */
+ match_callbacks_t* ctx,
Eterm* ret)
{
- int all_objects = (*mpp)->intern.flags & BIN_FLAG_ALL_OBJECTS;
HashDbTerm** current_ptr; /* Refers to either the bucket pointer or
* the 'next' pointer in the previous term
*/
@@ -1362,7 +1383,7 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
|| (chunk_size && got >= chunk_size))
{
/* Already got all or enough in the match_list */
- ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, mpp, ret);
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left, mpp, ret);
goto done;
}
@@ -1380,11 +1401,11 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
current_ptr = &BUCKET(tb,slot_ix);
for(;;) {
if (*current_ptr != NULL) {
- if ((*current_ptr)->hvalue != INVALID_HASH) {
- match_res = db_match_dbterm(&tb->common, p, *mpp, all_objects,
+ if (!is_pseudo_deleted(*current_ptr)) {
+ match_res = db_match_dbterm(&tb->common, p, *mpp,
&(*current_ptr)->dbterm, hpp, 2);
saved_current = *current_ptr;
- if (on_match_res(context_ptr, slot_ix, &current_ptr, match_res)) {
+ if (ctx->on_match_res(ctx, slot_ix, &current_ptr, match_res)) {
++got;
}
--iterations_left;
@@ -1410,14 +1431,14 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
* Since many heap fragments will make the GC slower, trap and GC now.
*/
unlock_hash_function(lck);
- ret_value = on_trap(context_ptr, slot_ix, got, mpp, ret);
+ ret_value = ctx->on_trap(ctx, slot_ix, got, mpp, ret);
goto done;
}
current_ptr = &BUCKET(tb,slot_ix);
}
}
- ret_value = on_loop_ended(context_ptr, slot_ix, got, iterations_left, mpp, ret);
+ ret_value = ctx->on_loop_ended(ctx, slot_ix, got, iterations_left, mpp, ret);
done:
/* We should only jump directly to this label if
@@ -1434,7 +1455,7 @@ done:
* as well as their continuation-handling counterparts.
*/
-static ERTS_INLINE int on_mtraversal_simple_trap(Export* trap_function,
+static ERTS_INLINE int on_simple_trap(Export* trap_function,
Process* p,
DbTableHash* tb,
Eterm tid,
@@ -1480,16 +1501,16 @@ static ERTS_INLINE int on_mtraversal_simple_trap(Export* trap_function,
make_small(slot_ix),
mpb,
egot);
- *ret = bif_trap1(trap_function, p, continuation);
+ ERTS_BIF_PREP_TRAP1(*ret, trap_function, p, continuation);
return DB_ERROR_NONE;
}
-static ERTS_INLINE int unpack_simple_mtraversal_continuation(Eterm continuation,
- Eterm** tptr_ptr,
- Eterm* tid_ptr,
- Sint* slot_ix_p,
- Binary** mpp,
- Sint* got_p)
+static ERTS_INLINE int unpack_simple_continuation(Eterm continuation,
+ Eterm** tptr_ptr,
+ Eterm* tid_ptr,
+ Sint* slot_ix_p,
+ Binary** mpp,
+ Sint* got_p)
{
Eterm* tptr;
ASSERT(is_tuple(continuation));
@@ -1524,6 +1545,7 @@ static ERTS_INLINE int unpack_simple_mtraversal_continuation(Eterm continuation,
#define MAX_SELECT_CHUNK_ITERATIONS 1000
typedef struct {
+ match_callbacks_t base;
Process* p;
DbTableHash* tb;
Eterm tid;
@@ -1531,83 +1553,86 @@ typedef struct {
Sint chunk_size;
Eterm match_list;
Eterm* prev_continuation_tptr;
-} mtraversal_select_chunk_context_t;
+} select_chunk_context_t;
-static int mtraversal_select_chunk_on_nothing_can_match(void* context_ptr, Eterm* ret) {
- mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
- *ret = (sc_context_ptr->chunk_size > 0 ? am_EOT : NIL);
+static int select_chunk_on_nothing_can_match(match_callbacks_t* ctx_base, Eterm* ret)
+{
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
+ *ret = (ctx->chunk_size > 0 ? am_EOT : NIL);
return DB_ERROR_NONE;
}
-static int mtraversal_select_chunk_on_match_res(void* context_ptr, Sint slot_ix,
- HashDbTerm*** current_ptr_ptr,
- Eterm match_res)
+static int select_chunk_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
{
- mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
if (is_value(match_res)) {
- sc_context_ptr->match_list = CONS(sc_context_ptr->hp, match_res, sc_context_ptr->match_list);
+ ctx->match_list = CONS(ctx->hp, match_res, ctx->match_list);
return 1;
}
return 0;
}
-static int mtraversal_select_chunk_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret)
+static int select_chunk_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
{
- mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
Eterm mpb;
if (iterations_left == MAX_SELECT_CHUNK_ITERATIONS) {
/* We didn't get to iterate a single time, which means EOT */
- ASSERT(sc_context_ptr->match_list == NIL);
- *ret = (sc_context_ptr->chunk_size > 0 ? am_EOT : NIL);
+ ASSERT(ctx->match_list == NIL);
+ *ret = (ctx->chunk_size > 0 ? am_EOT : NIL);
return DB_ERROR_NONE;
}
else {
ASSERT(iterations_left < MAX_SELECT_CHUNK_ITERATIONS);
- BUMP_REDS(sc_context_ptr->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
- if (sc_context_ptr->chunk_size) {
+ BUMP_REDS(ctx->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ if (ctx->chunk_size) {
Eterm continuation;
Eterm rest = NIL;
Sint rest_size = 0;
- if (got > sc_context_ptr->chunk_size) { /* Split list in return value and 'rest' */
- Eterm tmp = sc_context_ptr->match_list;
- rest = sc_context_ptr->match_list;
- while (got-- > sc_context_ptr->chunk_size + 1) {
+ if (got > ctx->chunk_size) { /* Split list in return value and 'rest' */
+ Eterm tmp = ctx->match_list;
+ rest = ctx->match_list;
+ while (got-- > ctx->chunk_size + 1) {
tmp = CDR(list_val(tmp));
++rest_size;
}
++rest_size;
- sc_context_ptr->match_list = CDR(list_val(tmp));
+ ctx->match_list = CDR(list_val(tmp));
CDR(list_val(tmp)) = NIL; /* Destructive, the list has never
been in 'user space' */
}
if (rest != NIL || slot_ix >= 0) { /* Need more calls */
- Eterm tid = sc_context_ptr->tid;
- sc_context_ptr->hp = HAllocX(sc_context_ptr->p,
- 3 + 7 + ERTS_MAGIC_REF_THING_SIZE,
- ERTS_MAGIC_REF_THING_SIZE);
- mpb = erts_db_make_match_prog_ref(sc_context_ptr->p, *mpp, &sc_context_ptr->hp);
+ Eterm tid = ctx->tid;
+ ctx->hp = HAllocX(ctx->p,
+ 3 + 7 + ERTS_MAGIC_REF_THING_SIZE,
+ ERTS_MAGIC_REF_THING_SIZE);
+ mpb = erts_db_make_match_prog_ref(ctx->p, *mpp, &ctx->hp);
if (is_atom(tid))
- tid = erts_db_make_tid(sc_context_ptr->p,
- &sc_context_ptr->tb->common);
+ tid = erts_db_make_tid(ctx->p,
+ &ctx->tb->common);
continuation = TUPLE6(
- sc_context_ptr->hp,
+ ctx->hp,
tid,
make_small(slot_ix),
- make_small(sc_context_ptr->chunk_size),
+ make_small(ctx->chunk_size),
mpb, rest,
make_small(rest_size));
*mpp = NULL; /* Otherwise the caller will destroy it */
- sc_context_ptr->hp += 7;
- *ret = TUPLE2(sc_context_ptr->hp, sc_context_ptr->match_list, continuation);
+ ctx->hp += 7;
+ *ret = TUPLE2(ctx->hp, ctx->match_list, continuation);
return DB_ERROR_NONE;
} else { /* All data is exhausted */
- if (sc_context_ptr->match_list != NIL) { /* No more data to search but still a
+ if (ctx->match_list != NIL) { /* No more data to search but still a
result to return to the caller */
- sc_context_ptr->hp = HAlloc(sc_context_ptr->p, 3);
- *ret = TUPLE2(sc_context_ptr->hp, sc_context_ptr->match_list, am_EOT);
+ ctx->hp = HAlloc(ctx->p, 3);
+ *ret = TUPLE2(ctx->hp, ctx->match_list, am_EOT);
return DB_ERROR_NONE;
            } else { /* Reached the end of the table with no data to return */
*ret = am_EOT;
@@ -1615,82 +1640,88 @@ static int mtraversal_select_chunk_on_loop_ended(void* context_ptr, Sint slot_ix
}
}
}
- *ret = sc_context_ptr->match_list;
+ *ret = ctx->match_list;
return DB_ERROR_NONE;
}
}
-static int mtraversal_select_chunk_on_trap(void* context_ptr, Sint slot_ix, Sint got,
- Binary** mpp, Eterm* ret)
+static int select_chunk_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
{
- mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
Eterm mpb;
Eterm continuation;
Eterm* hp;
- BUMP_ALL_REDS(sc_context_ptr->p);
+ BUMP_ALL_REDS(ctx->p);
- if (sc_context_ptr->prev_continuation_tptr == NULL) {
- Eterm tid = sc_context_ptr->tid;
+ if (ctx->prev_continuation_tptr == NULL) {
+ Eterm tid = ctx->tid;
/* First time we're trapping */
- hp = HAllocX(sc_context_ptr->p, 7 + ERTS_MAGIC_REF_THING_SIZE,
+ hp = HAllocX(ctx->p, 7 + ERTS_MAGIC_REF_THING_SIZE,
ERTS_MAGIC_REF_THING_SIZE);
if (is_atom(tid))
- tid = erts_db_make_tid(sc_context_ptr->p, &sc_context_ptr->tb->common);
- mpb = erts_db_make_match_prog_ref(sc_context_ptr->p, *mpp, &hp);
+ tid = erts_db_make_tid(ctx->p, &ctx->tb->common);
+ mpb = erts_db_make_match_prog_ref(ctx->p, *mpp, &hp);
continuation = TUPLE6(
hp,
tid,
make_small(slot_ix),
- make_small(sc_context_ptr->chunk_size),
+ make_small(ctx->chunk_size),
mpb,
- sc_context_ptr->match_list,
+ ctx->match_list,
make_small(got));
*mpp = NULL; /* otherwise the caller will destroy it */
}
else {
/* Not the first time we're trapping; reuse continuation terms */
- hp = HAlloc(sc_context_ptr->p, 7);
+ hp = HAlloc(ctx->p, 7);
continuation = TUPLE6(
hp,
- sc_context_ptr->prev_continuation_tptr[1],
+ ctx->prev_continuation_tptr[1],
make_small(slot_ix),
- sc_context_ptr->prev_continuation_tptr[3],
- sc_context_ptr->prev_continuation_tptr[4],
- sc_context_ptr->match_list,
+ ctx->prev_continuation_tptr[3],
+ ctx->prev_continuation_tptr[4],
+ ctx->match_list,
make_small(got));
}
- *ret = bif_trap1(&ets_select_continue_exp, sc_context_ptr->p, continuation);
+ ERTS_BIF_PREP_TRAP1(*ret, &ets_select_continue_exp, ctx->p,
+ continuation);
return DB_ERROR_NONE;
}
-static int db_select_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, int reverse, Eterm *ret) {
+static int db_select_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern,
+ int reverse, Eterm *ret)
+{
return db_select_chunk_hash(p, tbl, tid, pattern, 0, reverse, ret);
}
-static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Sint chunk_size,
+static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Sint chunk_size,
int reverse, Eterm *ret)
{
- mtraversal_select_chunk_context_t sc_context;
- sc_context.p = p;
- sc_context.tb = &tbl->hash;
- sc_context.tid = tid;
- sc_context.hp = NULL;
- sc_context.chunk_size = chunk_size;
- sc_context.match_list = NIL;
- sc_context.prev_continuation_tptr = NULL;
+ select_chunk_context_t ctx;
+
+ ctx.base.on_nothing_can_match = select_chunk_on_nothing_can_match;
+ ctx.base.on_match_res = select_chunk_on_match_res;
+ ctx.base.on_loop_ended = select_chunk_on_loop_ended;
+    ctx.base.on_trap = select_chunk_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.hp = NULL;
+ ctx.chunk_size = chunk_size;
+ ctx.match_list = NIL;
+ ctx.prev_continuation_tptr = NULL;
return match_traverse(
- sc_context.p, sc_context.tb,
+ ctx.p, ctx.tb,
pattern, NULL,
- sc_context.chunk_size,
+ ctx.chunk_size,
MAX_SELECT_CHUNK_ITERATIONS,
- &sc_context.hp, 0,
- mtraversal_select_chunk_on_nothing_can_match,
- mtraversal_select_chunk_on_match_res,
- mtraversal_select_chunk_on_loop_ended,
- mtraversal_select_chunk_on_trap,
- &sc_context, ret);
+ &ctx.hp, 0,
+ &ctx.base, ret);
}
/*
@@ -1699,47 +1730,50 @@ static int db_select_chunk_hash(Process *p, DbTable *tbl, Eterm tid, Eterm patte
*
*/
-static int mtraversal_select_chunk_continue_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret)
+static
+int select_chunk_continue_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
{
- mtraversal_select_chunk_context_t* sc_context_ptr = (mtraversal_select_chunk_context_t*) context_ptr;
+ select_chunk_context_t* ctx = (select_chunk_context_t*) ctx_base;
Eterm continuation;
Eterm rest = NIL;
Eterm* hp;
ASSERT(iterations_left <= MAX_SELECT_CHUNK_ITERATIONS);
- BUMP_REDS(sc_context_ptr->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
- if (sc_context_ptr->chunk_size) {
+ BUMP_REDS(ctx->p, MAX_SELECT_CHUNK_ITERATIONS - iterations_left);
+ if (ctx->chunk_size) {
Sint rest_size = 0;
- if (got > sc_context_ptr->chunk_size) {
+ if (got > ctx->chunk_size) {
/* Cannot write destructively here,
the list may have
been in user space */
- hp = HAlloc(sc_context_ptr->p, (got - sc_context_ptr->chunk_size) * 2);
- while (got-- > sc_context_ptr->chunk_size) {
- rest = CONS(hp, CAR(list_val(sc_context_ptr->match_list)), rest);
+ hp = HAlloc(ctx->p, (got - ctx->chunk_size) * 2);
+ while (got-- > ctx->chunk_size) {
+ rest = CONS(hp, CAR(list_val(ctx->match_list)), rest);
hp += 2;
- sc_context_ptr->match_list = CDR(list_val(sc_context_ptr->match_list));
+ ctx->match_list = CDR(list_val(ctx->match_list));
++rest_size;
}
}
if (rest != NIL || slot_ix >= 0) {
- hp = HAlloc(sc_context_ptr->p, 3 + 7);
+ hp = HAlloc(ctx->p, 3 + 7);
continuation = TUPLE6(
hp,
- sc_context_ptr->prev_continuation_tptr[1],
+ ctx->prev_continuation_tptr[1],
make_small(slot_ix),
- sc_context_ptr->prev_continuation_tptr[3],
- sc_context_ptr->prev_continuation_tptr[4],
+ ctx->prev_continuation_tptr[3],
+ ctx->prev_continuation_tptr[4],
rest,
make_small(rest_size));
hp += 7;
- *ret = TUPLE2(hp, sc_context_ptr->match_list, continuation);
+ *ret = TUPLE2(hp, ctx->match_list, continuation);
return DB_ERROR_NONE;
} else {
- if (sc_context_ptr->match_list != NIL) {
- hp = HAlloc(sc_context_ptr->p, 3);
- *ret = TUPLE2(hp, sc_context_ptr->match_list, am_EOT);
+ if (ctx->match_list != NIL) {
+ hp = HAlloc(ctx->p, 3);
+ *ret = TUPLE2(hp, ctx->match_list, am_EOT);
return DB_ERROR_NONE;
} else {
*ret = am_EOT;
@@ -1747,15 +1781,17 @@ static int mtraversal_select_chunk_continue_on_loop_ended(void* context_ptr, Sin
}
}
}
- *ret = sc_context_ptr->match_list;
+ *ret = ctx->match_list;
return DB_ERROR_NONE;
}
/*
* This is called when select traps
*/
-static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
- mtraversal_select_chunk_context_t sc_context = {0};
+static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation,
+ Eterm* ret)
+{
+ select_chunk_context_t ctx;
Eterm* tptr;
Eterm tid;
Binary* mp;
@@ -1790,21 +1826,21 @@ static int db_select_continue_hash(Process* p, DbTable* tbl, Eterm continuation,
match_list = tptr[5];
/* Proceed */
- sc_context.p = p;
- sc_context.tb = &tbl->hash;
- sc_context.tid = tid;
- sc_context.hp = NULL;
- sc_context.chunk_size = chunk_size;
- sc_context.match_list = match_list;
- sc_context.prev_continuation_tptr = tptr;
+ ctx.base.on_match_res = select_chunk_on_match_res;
+ ctx.base.on_loop_ended = select_chunk_continue_on_loop_ended;
+ ctx.base.on_trap = select_chunk_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.hp = NULL;
+ ctx.chunk_size = chunk_size;
+ ctx.match_list = match_list;
+ ctx.prev_continuation_tptr = tptr;
return match_traverse_continue(
- sc_context.p, sc_context.tb, sc_context.chunk_size,
- iterations_left, &sc_context.hp, slot_ix, got, &mp, 0,
- mtraversal_select_chunk_on_match_res, /* Reuse callback */
- mtraversal_select_chunk_continue_on_loop_ended,
- mtraversal_select_chunk_on_trap, /* Reuse callback */
- &sc_context, ret);
+ ctx.p, ctx.tb, ctx.chunk_size,
+ iterations_left, &ctx.hp, slot_ix, got, &mp, 0,
+ &ctx.base, ret);
badparam:
*ret = NIL;
@@ -1823,75 +1859,83 @@ badparam:
#define MAX_SELECT_COUNT_ITERATIONS 1000
typedef struct {
+ match_callbacks_t base;
Process* p;
DbTableHash* tb;
Eterm tid;
- Eterm* hp;
Eterm* prev_continuation_tptr;
-} mtraversal_select_count_context_t;
+} select_count_context_t;
-static int mtraversal_select_count_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+static int select_count_on_nothing_can_match(match_callbacks_t* ctx_base,
+ Eterm* ret)
+{
*ret = make_small(0);
return DB_ERROR_NONE;
}
-static int mtraversal_select_count_on_match_res(void* context_ptr, Sint slot_ix,
- HashDbTerm*** current_ptr_ptr,
- Eterm match_res)
+static int select_count_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
{
return (match_res == am_true);
}
-static int mtraversal_select_count_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret)
+static int select_count_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
{
- mtraversal_select_count_context_t* scnt_context_ptr = (mtraversal_select_count_context_t*) context_ptr;
+ select_count_context_t* ctx = (select_count_context_t*) ctx_base;
ASSERT(iterations_left <= MAX_SELECT_COUNT_ITERATIONS);
- BUMP_REDS(scnt_context_ptr->p, MAX_SELECT_COUNT_ITERATIONS - iterations_left);
- *ret = erts_make_integer(got, scnt_context_ptr->p);
+ BUMP_REDS(ctx->p, MAX_SELECT_COUNT_ITERATIONS - iterations_left);
+ *ret = erts_make_integer(got, ctx->p);
return DB_ERROR_NONE;
}
-static int mtraversal_select_count_on_trap(void* context_ptr, Sint slot_ix, Sint got,
- Binary** mpp, Eterm* ret)
+static int select_count_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
{
- mtraversal_select_count_context_t* scnt_context_ptr = (mtraversal_select_count_context_t*) context_ptr;
- return on_mtraversal_simple_trap(
+ select_count_context_t* ctx = (select_count_context_t*) ctx_base;
+ return on_simple_trap(
&ets_select_count_continue_exp,
- scnt_context_ptr->p,
- scnt_context_ptr->tb,
- scnt_context_ptr->tid,
- scnt_context_ptr->prev_continuation_tptr,
+ ctx->p,
+ ctx->tb,
+ ctx->tid,
+ ctx->prev_continuation_tptr,
slot_ix, got, mpp, ret);
}
-static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) {
- mtraversal_select_count_context_t scnt_context = {0};
+static int db_select_count_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ select_count_context_t ctx;
Sint iterations_left = MAX_SELECT_COUNT_ITERATIONS;
Sint chunk_size = 0;
- scnt_context.p = p;
- scnt_context.tb = &tbl->hash;
- scnt_context.tid = tid;
- scnt_context.hp = NULL;
- scnt_context.prev_continuation_tptr = NULL;
+ ctx.base.on_nothing_can_match = select_count_on_nothing_can_match;
+ ctx.base.on_match_res = select_count_on_match_res;
+ ctx.base.on_loop_ended = select_count_on_loop_ended;
+ ctx.base.on_trap = select_count_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = NULL;
return match_traverse(
- scnt_context.p, scnt_context.tb,
+ ctx.p, ctx.tb,
pattern, NULL,
chunk_size, iterations_left, NULL, 0,
- mtraversal_select_count_on_nothing_can_match,
- mtraversal_select_count_on_match_res,
- mtraversal_select_count_on_loop_ended,
- mtraversal_select_count_on_trap,
- &scnt_context, ret);
+ &ctx.base, ret);
}
/*
* This is called when select_count traps
*/
-static int db_select_count_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
- mtraversal_select_count_context_t scnt_context = {0};
+static int db_select_count_continue_hash(Process* p, DbTable* tbl,
+ Eterm continuation, Eterm* ret)
+{
+ select_count_context_t ctx;
Eterm* tptr;
Eterm tid;
Binary* mp;
@@ -1900,25 +1944,24 @@ static int db_select_count_continue_hash(Process* p, DbTable* tbl, Eterm continu
Sint chunk_size = 0;
*ret = NIL;
- if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
*ret = NIL;
return DB_ERROR_BADPARAM;
}
- scnt_context.p = p;
- scnt_context.tb = &tbl->hash;
- scnt_context.tid = tid;
- scnt_context.hp = NULL;
- scnt_context.prev_continuation_tptr = tptr;
+ ctx.base.on_match_res = select_count_on_match_res;
+ ctx.base.on_loop_ended = select_count_on_loop_ended;
+ ctx.base.on_trap = select_count_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = tptr;
return match_traverse_continue(
- scnt_context.p, scnt_context.tb, chunk_size,
+ ctx.p, ctx.tb, chunk_size,
MAX_SELECT_COUNT_ITERATIONS,
NULL, slot_ix, got, &mp, 0,
- mtraversal_select_count_on_match_res, /* Reuse callback */
- mtraversal_select_count_on_loop_ended, /* Reuse callback */
- mtraversal_select_count_on_trap, /* Reuse callback */
- &scnt_context, ret);
+ &ctx.base, ret);
}
#undef MAX_SELECT_COUNT_ITERATIONS
@@ -1933,104 +1976,119 @@ static int db_select_count_continue_hash(Process* p, DbTable* tbl, Eterm continu
#define MAX_SELECT_DELETE_ITERATIONS 1000
typedef struct {
+ match_callbacks_t base;
Process* p;
DbTableHash* tb;
Eterm tid;
- Eterm* hp;
Eterm* prev_continuation_tptr;
erts_aint_t fixated_by_me;
Uint last_pseudo_delete;
-} mtraversal_select_delete_context_t;
+ HashDbTerm* free_us;
+} select_delete_context_t;
-static int mtraversal_select_delete_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+static int select_delete_on_nothing_can_match(match_callbacks_t* ctx_base,
+ Eterm* ret)
+{
*ret = make_small(0);
return DB_ERROR_NONE;
}
-static int mtraversal_select_delete_on_match_res(void* context_ptr, Sint slot_ix,
- HashDbTerm*** current_ptr_ptr,
- Eterm match_res)
+static int select_delete_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
{
HashDbTerm** current_ptr = *current_ptr_ptr;
- mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
+ select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
HashDbTerm* del;
if (match_res != am_true)
return 0;
- if (NFIXED(sd_context_ptr->tb) > sd_context_ptr->fixated_by_me) { /* fixated by others? */
- if (slot_ix != sd_context_ptr->last_pseudo_delete) {
- if (!add_fixed_deletion(sd_context_ptr->tb, slot_ix, sd_context_ptr->fixated_by_me))
+ if (NFIXED(ctx->tb) > ctx->fixated_by_me) { /* fixated by others? */
+ if (slot_ix != ctx->last_pseudo_delete) {
+ if (!add_fixed_deletion(ctx->tb, slot_ix, ctx->fixated_by_me))
goto do_erase;
- sd_context_ptr->last_pseudo_delete = slot_ix;
+ ctx->last_pseudo_delete = slot_ix;
}
- (*current_ptr)->hvalue = INVALID_HASH;
+ (*current_ptr)->pseudo_deleted = 1;
}
else {
do_erase:
del = *current_ptr;
*current_ptr = (*current_ptr)->next; // replace pointer to term using next
- free_term(sd_context_ptr->tb, del);
+ del->next = ctx->free_us;
+ ctx->free_us = del;
}
- erts_atomic_dec_nob(&sd_context_ptr->tb->common.nitems);
+ erts_atomic_dec_nob(&ctx->tb->common.nitems);
return 1;
}
-static int mtraversal_select_delete_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret)
+static int select_delete_on_loop_ended(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Sint iterations_left, Binary** mpp,
+ Eterm* ret)
{
- mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
+ select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
+ free_term_list(ctx->tb, ctx->free_us);
+ ctx->free_us = NULL;
ASSERT(iterations_left <= MAX_SELECT_DELETE_ITERATIONS);
- BUMP_REDS(sd_context_ptr->p, MAX_SELECT_DELETE_ITERATIONS - iterations_left);
+ BUMP_REDS(ctx->p, MAX_SELECT_DELETE_ITERATIONS - iterations_left);
if (got) {
- try_shrink(sd_context_ptr->tb);
+ try_shrink(ctx->tb);
}
- *ret = erts_make_integer(got, sd_context_ptr->p);
+ *ret = erts_make_integer(got, ctx->p);
return DB_ERROR_NONE;
}
-static int mtraversal_select_delete_on_trap(void* context_ptr, Sint slot_ix, Sint got,
- Binary** mpp, Eterm* ret)
+static int select_delete_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
{
- mtraversal_select_delete_context_t* sd_context_ptr = (mtraversal_select_delete_context_t*) context_ptr;
- return on_mtraversal_simple_trap(
+ select_delete_context_t* ctx = (select_delete_context_t*) ctx_base;
+ free_term_list(ctx->tb, ctx->free_us);
+ ctx->free_us = NULL;
+ return on_simple_trap(
&ets_select_delete_continue_exp,
- sd_context_ptr->p,
- sd_context_ptr->tb,
- sd_context_ptr->tid,
- sd_context_ptr->prev_continuation_tptr,
+ ctx->p,
+ ctx->tb,
+ ctx->tid,
+ ctx->prev_continuation_tptr,
slot_ix, got, mpp, ret);
}
-static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret) {
- mtraversal_select_delete_context_t sd_context = {0};
+static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid,
+ Eterm pattern, Eterm *ret)
+{
+ select_delete_context_t ctx;
Sint chunk_size = 0;
- sd_context.p = p;
- sd_context.tb = &tbl->hash;
- sd_context.tid = tid;
- sd_context.hp = NULL;
- sd_context.prev_continuation_tptr = NULL;
- sd_context.fixated_by_me = sd_context.tb->common.is_thread_safe ? 0 : 1; /* TODO: something nicer */
- sd_context.last_pseudo_delete = (Uint) -1;
+ ctx.base.on_nothing_can_match = select_delete_on_nothing_can_match;
+ ctx.base.on_match_res = select_delete_on_match_res;
+ ctx.base.on_loop_ended = select_delete_on_loop_ended;
+ ctx.base.on_trap = select_delete_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = NULL;
+ ctx.fixated_by_me = ctx.tb->common.is_thread_safe ? 0 : 1; /* TODO: something nicer */
+ ctx.last_pseudo_delete = (Uint) -1;
+ ctx.free_us = NULL;
return match_traverse(
- sd_context.p, sd_context.tb,
+ ctx.p, ctx.tb,
pattern, NULL,
chunk_size,
MAX_SELECT_DELETE_ITERATIONS, NULL, 1,
- mtraversal_select_delete_on_nothing_can_match,
- mtraversal_select_delete_on_match_res,
- mtraversal_select_delete_on_loop_ended,
- mtraversal_select_delete_on_trap,
- &sd_context, ret);
+ &ctx.base, ret);
}
/*
* This is called when select_delete traps
*/
-static int db_select_delete_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret) {
- mtraversal_select_delete_context_t sd_context = {0};
+static int db_select_delete_continue_hash(Process* p, DbTable* tbl,
+ Eterm continuation, Eterm* ret)
+{
+ select_delete_context_t ctx;
Eterm* tptr;
Eterm tid;
Binary* mp;
@@ -2038,27 +2096,27 @@ static int db_select_delete_continue_hash(Process* p, DbTable* tbl, Eterm contin
Sint slot_ix;
Sint chunk_size = 0;
- if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
*ret = NIL;
return DB_ERROR_BADPARAM;
}
- sd_context.p = p;
- sd_context.tb = &tbl->hash;
- sd_context.tid = tid;
- sd_context.hp = NULL;
- sd_context.prev_continuation_tptr = tptr;
- sd_context.fixated_by_me = ONLY_WRITER(p, sd_context.tb) ? 0 : 1; /* TODO: something nicer */
- sd_context.last_pseudo_delete = (Uint) -1;
+ ctx.base.on_match_res = select_delete_on_match_res;
+ ctx.base.on_loop_ended = select_delete_on_loop_ended;
+ ctx.base.on_trap = select_delete_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = tptr;
+ ctx.fixated_by_me = ONLY_WRITER(p, ctx.tb) ? 0 : 1; /* TODO: something nicer */
+ ctx.last_pseudo_delete = (Uint) -1;
+ ctx.free_us = NULL;
return match_traverse_continue(
- sd_context.p, sd_context.tb, chunk_size,
+ ctx.p, ctx.tb, chunk_size,
MAX_SELECT_DELETE_ITERATIONS,
NULL, slot_ix, got, &mp, 1,
- mtraversal_select_delete_on_match_res, /* Reuse callback */
- mtraversal_select_delete_on_loop_ended, /* Reuse callback */
- mtraversal_select_delete_on_trap, /* Reuse callback */
- &sd_context, ret);
+ &ctx.base, ret);
}
#undef MAX_SELECT_DELETE_ITERATIONS
@@ -2073,24 +2131,26 @@ static int db_select_delete_continue_hash(Process* p, DbTable* tbl, Eterm contin
#define MAX_SELECT_REPLACE_ITERATIONS 1000
typedef struct {
+ match_callbacks_t base;
Process* p;
DbTableHash* tb;
Eterm tid;
- Eterm* hp;
Eterm* prev_continuation_tptr;
-} mtraversal_select_replace_context_t;
+} select_replace_context_t;
-static int mtraversal_select_replace_on_nothing_can_match(void* context_ptr, Eterm* ret) {
+static int select_replace_on_nothing_can_match(match_callbacks_t* ctx_base,
+ Eterm* ret)
+{
*ret = make_small(0);
return DB_ERROR_NONE;
}
-static int mtraversal_select_replace_on_match_res(void* context_ptr, Sint slot_ix,
- HashDbTerm*** current_ptr_ptr,
- Eterm match_res)
+static int select_replace_on_match_res(match_callbacks_t* ctx_base, Sint slot_ix,
+ HashDbTerm*** current_ptr_ptr,
+ Eterm match_res)
{
- mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
- DbTableHash* tb = sr_context_ptr->tb;
+ select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
+ DbTableHash* tb = ctx->tb;
HashDbTerm* new;
HashDbTerm* next;
HashValue hval;
@@ -2106,6 +2166,7 @@ static int mtraversal_select_replace_on_match_res(void* context_ptr, Sint slot_i
new = new_dbterm(tb, match_res);
new->next = next;
new->hvalue = hval;
+ new->pseudo_deleted = 0;
free_term(tb, **current_ptr_ptr);
**current_ptr_ptr = new; /* replace 'next' pointer in previous object */
*current_ptr_ptr = &((**current_ptr_ptr)->next); /* advance to next object */
@@ -2114,35 +2175,37 @@ static int mtraversal_select_replace_on_match_res(void* context_ptr, Sint slot_i
return 0;
}
-static int mtraversal_select_replace_on_loop_ended(void* context_ptr, Sint slot_ix, Sint got,
- Sint iterations_left, Binary** mpp, Eterm* ret)
+static int select_replace_on_loop_ended(match_callbacks_t* ctx_base, Sint slot_ix,
+ Sint got, Sint iterations_left,
+ Binary** mpp, Eterm* ret)
{
- mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
+ select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
ASSERT(iterations_left <= MAX_SELECT_REPLACE_ITERATIONS);
/* the more objects we've replaced, the more reductions we've consumed */
- BUMP_REDS(sr_context_ptr->p,
+ BUMP_REDS(ctx->p,
MIN(MAX_SELECT_REPLACE_ITERATIONS * 2,
(MAX_SELECT_REPLACE_ITERATIONS - iterations_left) + (int)got));
- *ret = erts_make_integer(got, sr_context_ptr->p);
+ *ret = erts_make_integer(got, ctx->p);
return DB_ERROR_NONE;
}
-static int mtraversal_select_replace_on_trap(void* context_ptr, Sint slot_ix, Sint got,
- Binary** mpp, Eterm* ret)
+static int select_replace_on_trap(match_callbacks_t* ctx_base,
+ Sint slot_ix, Sint got,
+ Binary** mpp, Eterm* ret)
{
- mtraversal_select_replace_context_t* sr_context_ptr = (mtraversal_select_replace_context_t*) context_ptr;
- return on_mtraversal_simple_trap(
+ select_replace_context_t* ctx = (select_replace_context_t*) ctx_base;
+ return on_simple_trap(
&ets_select_replace_continue_exp,
- sr_context_ptr->p,
- sr_context_ptr->tb,
- sr_context_ptr->tid,
- sr_context_ptr->prev_continuation_tptr,
+ ctx->p,
+ ctx->tb,
+ ctx->tid,
+ ctx->prev_continuation_tptr,
slot_ix, got, mpp, ret);
}
static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pattern, Eterm *ret)
{
- mtraversal_select_replace_context_t sr_context = {0};
+ select_replace_context_t ctx;
Sint chunk_size = 0;
/* Bag implementation presented both semantic consistency and performance issues,
@@ -2150,22 +2213,21 @@ static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pat
*/
ASSERT(!(tbl->hash.common.status & DB_BAG));
- sr_context.p = p;
- sr_context.tb = &tbl->hash;
- sr_context.tid = tid;
- sr_context.hp = NULL;
- sr_context.prev_continuation_tptr = NULL;
+ ctx.base.on_nothing_can_match = select_replace_on_nothing_can_match;
+ ctx.base.on_match_res = select_replace_on_match_res;
+ ctx.base.on_loop_ended = select_replace_on_loop_ended;
+ ctx.base.on_trap = select_replace_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = NULL;
return match_traverse(
- sr_context.p, sr_context.tb,
+ ctx.p, ctx.tb,
pattern, db_match_keeps_key,
chunk_size,
MAX_SELECT_REPLACE_ITERATIONS, NULL, 1,
- mtraversal_select_replace_on_nothing_can_match,
- mtraversal_select_replace_on_match_res,
- mtraversal_select_replace_on_loop_ended,
- mtraversal_select_replace_on_trap,
- &sr_context, ret);
+ &ctx.base, ret);
}
/*
@@ -2173,7 +2235,7 @@ static int db_select_replace_hash(Process *p, DbTable *tbl, Eterm tid, Eterm pat
*/
static int db_select_replace_continue_hash(Process* p, DbTable* tbl, Eterm continuation, Eterm* ret)
{
- mtraversal_select_replace_context_t sr_context = {0};
+ select_replace_context_t ctx;
Eterm* tptr;
Eterm tid ;
Binary* mp;
@@ -2182,26 +2244,25 @@ static int db_select_replace_continue_hash(Process* p, DbTable* tbl, Eterm conti
Sint chunk_size = 0;
*ret = NIL;
- if (unpack_simple_mtraversal_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
+ if (unpack_simple_continuation(continuation, &tptr, &tid, &slot_ix, &mp, &got)) {
*ret = NIL;
return DB_ERROR_BADPARAM;
}
/* Proceed */
- sr_context.p = p;
- sr_context.tb = &tbl->hash;
- sr_context.tid = tid;
- sr_context.hp = NULL;
- sr_context.prev_continuation_tptr = tptr;
+ ctx.base.on_match_res = select_replace_on_match_res;
+ ctx.base.on_loop_ended = select_replace_on_loop_ended;
+ ctx.base.on_trap = select_replace_on_trap;
+ ctx.p = p;
+ ctx.tb = &tbl->hash;
+ ctx.tid = tid;
+ ctx.prev_continuation_tptr = tptr;
return match_traverse_continue(
- sr_context.p, sr_context.tb, chunk_size,
+ ctx.p, ctx.tb, chunk_size,
MAX_SELECT_REPLACE_ITERATIONS,
NULL, slot_ix, got, &mp, 1,
- mtraversal_select_replace_on_match_res, /* Reuse callback */
- mtraversal_select_replace_on_loop_ended, /* Reuse callback */
- mtraversal_select_replace_on_trap, /* Reuse callback */
- &sr_context, ret);
+ &ctx.base, ret);
}
@@ -2209,6 +2270,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
{
DbTableHash *tb = &tbl->hash;
HashDbTerm **bp, *b;
+ HashDbTerm *free_us = NULL;
HashValue hval = MAKE_HASH(key);
erts_rwmtx_t *lck = WLOCK_HASH(tb, hval);
int ix = hash_to_ix(tb, hval);
@@ -2226,12 +2288,13 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
&& add_fixed_deletion(tb, ix, 0)) {
/* Pseudo remove (no need to keep several of same key) */
bp = &b->next;
- b->hvalue = INVALID_HASH;
+ b->pseudo_deleted = 1;
b = b->next;
} else {
- *bp = b->next;
- free_term(tb, b);
- b = *bp;
+ HashDbTerm* next = b->next;
+ b->next = free_us;
+ free_us = b;
+ b = *bp = next;
}
}
break;
@@ -2242,6 +2305,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret)
erts_atomic_add_nob(&tb->common.nitems, nitems_diff);
try_shrink(tb);
}
+ free_term_list(tb, free_us);
return DB_ERROR_NONE;
}
@@ -2255,25 +2319,52 @@ void db_initialize_hash(void)
}
-int db_mark_all_deleted_hash(DbTable *tbl)
+static SWord db_mark_all_deleted_hash(DbTable *tbl, SWord reds)
{
+ const int LOOPS_PER_REDUCTION = 8;
DbTableHash *tb = &tbl->hash;
- HashDbTerm* list;
+ FixedDeletion* fixdel;
+ SWord loops = reds * LOOPS_PER_REDUCTION;
int i;
ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb));
- for (i = 0; i < NACTIVE(tb); i++) {
- if ((list = BUCKET(tb,i)) != NULL) {
- add_fixed_deletion(tb, i, 0);
- do {
- list->hvalue = INVALID_HASH;
- list = list->next;
- }while(list != NULL);
- }
+ fixdel = (FixedDeletion*) erts_atomic_read_nob(&tb->fixdel);
+ if (fixdel && fixdel->trap) {
+ /* Continue after trap */
+ ASSERT(fixdel->all);
+ ASSERT(fixdel->slot < NACTIVE(tb));
+ i = fixdel->slot;
+ }
+ else {
+ /* First call */
+ fixdel = erts_db_alloc(ERTS_ALC_T_DB_FIX_DEL,
+ (DbTable *) tb,
+ sizeof(FixedDeletion));
+ ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion));
+ link_fixdel(tb, fixdel, 0);
+ i = 0;
}
+
+ do {
+ HashDbTerm* b;
+ for (b = BUCKET(tb,i); b; b = b->next)
+ b->pseudo_deleted = 1;
+ } while (++i < NACTIVE(tb) && --loops > 0);
+
+ if (i < NACTIVE(tb)) {
+ /* Yield */
+ fixdel->slot = i;
+ fixdel->all = 0;
+ fixdel->trap = 1;
+ return -1;
+ }
+
+ fixdel->slot = NACTIVE(tb) - 1;
+ fixdel->all = 1;
+ fixdel->trap = 0;
erts_atomic_set_nob(&tb->common.nitems, 0);
- return DB_ERROR_NONE;
+ return loops < 0 ? 0 : loops / LOOPS_PER_REDUCTION;
}
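(Editorial note: db_mark_all_deleted_hash now works against a reduction budget: it converts reds into a loop budget, records its progress in the FixedDeletion record via the slot/all/trap fields, and returns a negative value when the budget is exhausted so the caller can trap and resume. A hedged sketch of how such a return convention is driven; the outer loop and budget value are assumptions for illustration, not code from the patch:

    static void mark_all_deleted_fully(DbTable* tbl)
    {
        const SWord budget = 64;                       /* per-slice budget, illustrative */
        while (db_mark_all_deleted_hash(tbl, budget) < 0) {
            /* the emulator would trap/yield here and reschedule the process;
             * this sketch simply retries with a fresh budget */
        }
    }

db_delete_all_objects_hash() further down uses the same convention, propagating a negative result so the delete-all operation can yield.)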
@@ -2316,7 +2407,7 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
continue;
erts_print(to, to_arg, "%d: [", i);
while(list != 0) {
- if (list->hvalue == INVALID_HASH)
+ if (is_pseudo_deleted(list))
erts_print(to, to_arg, "*");
if (tb->common.compress) {
Eterm key = GETKEY(tb, list->dbterm.tpl);
@@ -2335,9 +2426,9 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
}
}
-/* release all memory occupied by a single table */
-static int db_free_table_hash(DbTable *tbl)
+static int db_free_empty_table_hash(DbTable *tbl)
{
+ ASSERT(NITEMS(tbl) == 0);
while (db_free_table_continue_hash(tbl, ERTS_SWORD_MAX) < 0)
;
return 0;
@@ -2415,7 +2506,6 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
mpi->num_lists = 0;
mpi->key_given = 1;
mpi->something_can_match = 0;
- mpi->all_objects = 1;
mpi->mp = NULL;
for (lst = pattern; is_list(lst); lst = CDR(list_val(lst)))
@@ -2468,7 +2558,6 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern,
if (!is_list(body) || CDR(list_val(body)) != NIL ||
CAR(list_val(body)) != am_DollarUnderscore) {
- mpi->all_objects = 0;
}
++i;
if (!(mpi->key_given)) {
@@ -2682,7 +2771,7 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
if (!sz) {
ptr = ptr1;
while(ptr != ptr2) {
- if (ptr->hvalue != INVALID_HASH)
+ if (!is_pseudo_deleted(ptr))
sz += ptr->dbterm.size + 2;
ptr = ptr->next;
}
@@ -2693,7 +2782,7 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
ptr = ptr1;
while(ptr != ptr2) {
- if (ptr->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(ptr)) {
copy = db_copy_object_from_ets(&tb->common, &ptr->dbterm, &hp, &MSO(p));
list = CONS(hp, copy, list);
hp += 2;
@@ -2786,7 +2875,7 @@ static void grow(DbTableHash* tb, int nitems)
p = *pnext;
to_pnext = &BUCKET(tb, to_ix);
while (p != NULL) {
- if (p->hvalue == INVALID_HASH) { /* rare but possible with fine locking */
+ if (is_pseudo_deleted(p)) { /* rare but possible with fine locking */
*pnext = p->next;
free_term(tb, p);
p = *pnext;
@@ -2859,7 +2948,7 @@ static void shrink(DbTableHash* tb, int nitems)
* as we must step through "src" anyway to purge pseudo deleted.
*/
while(*bp != NULL) {
- if ((*bp)->hvalue == INVALID_HASH) {
+ if (is_pseudo_deleted(*bp)) {
HashDbTerm* deleted = *bp;
*bp = deleted->next;
free_term(tb, deleted);
@@ -2917,7 +3006,7 @@ static HashDbTerm* next_live(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr
ERTS_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr));
for ( ; list != NULL; list = list->next) {
- if (list->hvalue != INVALID_HASH)
+ if (!is_pseudo_deleted(list))
return list;
}
@@ -2926,7 +3015,7 @@ static HashDbTerm* next_live(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr
list = BUCKET(tb,i);
while (list != NULL) {
- if (list->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(list)) {
*iptr = i;
return list;
}
@@ -2959,7 +3048,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
break;
}
if (has_key(tb, b, key, hval)) {
- if (b->hvalue != INVALID_HASH) {
+ if (!is_pseudo_deleted(b)) {
goto Ldone;
}
break;
@@ -2989,16 +3078,18 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj,
HashDbTerm *q = new_dbterm(tb, obj);
q->hvalue = hval;
+ q->pseudo_deleted = 0;
q->next = NULL;
*bp = b = q;
flags |= DB_INC_TRY_GROW;
} else {
HashDbTerm *q, *next = b->next;
- ASSERT(b->hvalue == INVALID_HASH);
+ ASSERT(is_pseudo_deleted(b));
q = replace_dbterm(tb, b, obj);
q->next = next;
- q->hvalue = hval;
+ ASSERT(q->hvalue == hval);
+ q->pseudo_deleted = 0;
*bp = b = q;
erts_atomic_inc_nob(&tb->common.nitems);
}
@@ -3036,7 +3127,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
if (handle->flags & DB_NEW_OBJECT && cret != DB_ERROR_NONE) {
if (IS_FIXED(tb) && add_fixed_deletion(tb, hash_to_ix(tb, b->hvalue),
0)) {
- b->hvalue = INVALID_HASH;
+ b->pseudo_deleted = 1;
} else {
*bp = b->next;
free_me = b;
@@ -3073,16 +3164,19 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle)
return;
}
-static int db_delete_all_objects_hash(Process* p, DbTable* tbl)
+static SWord db_delete_all_objects_hash(Process* p, DbTable* tbl, SWord reds)
{
if (IS_FIXED(tbl)) {
- db_mark_all_deleted_hash(tbl);
+ reds = db_mark_all_deleted_hash(tbl, reds);
} else {
- db_free_table_hash(tbl);
+ reds = db_free_table_continue_hash(tbl, reds);
+ if (reds < 0)
+ return reds;
+
db_create_hash(p, tbl);
erts_atomic_set_nob(&tbl->hash.common.nitems, 0);
}
- return 0;
+ return reds;
}
void db_foreach_offheap_hash(DbTable *tbl,
@@ -3125,7 +3219,7 @@ void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats)
len = 0;
for (b = BUCKET(tb,ix); b!=NULL; b=b->next) {
len++;
- if (b->hvalue == INVALID_HASH)
+ if (is_pseudo_deleted(b))
++kept_items;
}
sum += len;
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index 7d27609825..08e5b13db1 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -24,13 +24,26 @@
#include "erl_db_util.h" /* DbTerm & DbTableCommon */
typedef struct fixed_deletion {
- int slot;
+ UWord slot : sizeof(UWord)*8 - 2;
+ UWord all : 1;
+ UWord trap : 1;
struct fixed_deletion *next;
} FixedDeletion;
+
+typedef Uint32 HashVal;
+
typedef struct hash_db_term {
struct hash_db_term* next; /* next bucket */
- HashValue hvalue; /* stored hash value */
+#if SIZEOF_VOID_P == 4
+ Uint32 hvalue : 31; /* stored hash value */
+ Uint32 pseudo_deleted : 1;
+# define MAX_HASH_MASK (((Uint32)1 << 31)-1)
+#elif SIZEOF_VOID_P == 8
+ Uint32 hvalue;
+ Uint32 pseudo_deleted;
+# define MAX_HASH_MASK ((Uint32)(Sint32)-1)
+#endif
DbTerm dbterm; /* The actual term */
} HashDbTerm;
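(Editorial note: the INVALID_HASH sentinel is replaced by an explicit pseudo_deleted flag, packed as a bit-field next to a 31-bit hvalue on 32-bit platforms and kept as a plain Uint32 pair on 64-bit ones. This flag is what the is_pseudo_deleted() calls throughout the .c hunks test. The accessor itself is not visible in this excerpt; a plausible sketch, assuming it lives with the other inline helpers in erl_db_hash.c:

    static ERTS_INLINE int is_pseudo_deleted(HashDbTerm* b)
    {
        return b->pseudo_deleted;
    }

MAX_HASH_MASK presumably exists so freshly computed hash values can be truncated to fit the narrower hvalue field.)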
@@ -86,9 +99,6 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret);
int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret);
-/* not yet in method table */
-int db_mark_all_deleted_hash(DbTable *tbl);
-
typedef struct {
float avg_chain_len;
float std_dev_chain_len;
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index 5a276b9d88..0692583dd4 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -170,11 +170,6 @@ static ERTS_INLINE TreeDbTerm* replace_dbterm(DbTableTree *tb, TreeDbTerm* old,
#define DIR_END 2
/*
- * Special binary flag
- */
-#define BIN_FLAG_ALL_OBJECTS BIN_FLAG_USR1
-
-/*
* Number of records to delete before trapping.
*/
#define DELETE_RECORD_LIMIT 12000
@@ -218,9 +213,6 @@ static void do_dump_tree2(DbTableTree*, int to, void *to_arg, int show,
* functions.
*/
struct mp_info {
- int all_objects; /* True if complete objects are always
- * returned from the match_spec (can use
- * copy_shallow on the return value) */
int something_can_match; /* The match_spec is not "impossible" */
int some_limitation; /* There is some limitation on the search
* area, i. e. least and/or most is set.*/
@@ -248,7 +240,6 @@ struct select_context {
Eterm *lastobj;
Sint32 max;
int keypos;
- int all_objects;
Sint got;
Sint chunk_size;
};
@@ -263,7 +254,6 @@ struct select_count_context {
Eterm *lastobj;
Sint32 max;
int keypos;
- int all_objects;
Sint got;
};
@@ -293,7 +283,6 @@ struct select_replace_context {
Eterm *lastobj;
Sint32 max;
int keypos;
- int all_objects;
Sint replaced;
};
@@ -428,7 +417,7 @@ static int db_select_replace_continue_tree(Process *p, DbTable *tbl,
static int db_take_tree(Process *, DbTable *, Eterm, Eterm *);
static void db_print_tree(fmtfn_t to, void *to_arg,
int show, DbTable *tbl);
-static int db_free_table_tree(DbTable *tbl);
+static int db_free_empty_table_tree(DbTable *tbl);
static SWord db_free_table_continue_tree(DbTable *tbl, SWord);
@@ -436,7 +425,7 @@ static void db_foreach_offheap_tree(DbTable *,
void (*)(ErlOffHeap *, void *),
void *);
-static int db_delete_all_objects_tree(Process* p, DbTable* tbl);
+static SWord db_delete_all_objects_tree(Process* p, DbTable* tbl, SWord reds);
#ifdef HARDDEBUG
static void db_check_table_tree(DbTable *tbl);
@@ -481,7 +470,7 @@ DbTableMethod db_tree =
db_select_replace_continue_tree,
db_take_tree,
db_delete_all_objects_tree,
- db_free_table_tree,
+ db_free_empty_table_tree,
db_free_table_continue_tree,
db_print_tree,
db_foreach_offheap_tree,
@@ -992,7 +981,6 @@ static int db_select_continue_tree(Process *p,
sc.lastobj = NULL;
sc.max = 1000;
sc.keypos = tb->common.keypos;
- sc.all_objects = mp->intern.flags & BIN_FLAG_ALL_OBJECTS;
sc.chunk_size = chunk_size;
reverse = unsigned_val(tptr[7]);
sc.got = signed_val(tptr[8]);
@@ -1143,7 +1131,6 @@ static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
}
sc.mp = mpi.mp;
- sc.all_objects = mpi.all_objects;
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
@@ -1183,8 +1170,6 @@ static int db_select_tree(Process *p, DbTable *tbl, Eterm tid,
sz = size_object(key);
hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
- if (mpi.all_objects)
- (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
mpb= erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
@@ -1346,7 +1331,6 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
}
sc.mp = mpi.mp;
- sc.all_objects = mpi.all_objects;
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
@@ -1381,8 +1365,6 @@ static int db_select_count_tree(Process *p, DbTable *tbl, Eterm tid,
hp += BIG_UINT_HEAP_SIZE;
}
key = copy_struct(key, sz, &hp, &MSO(p));
- if (mpi.all_objects)
- (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
@@ -1449,7 +1431,6 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
}
sc.mp = mpi.mp;
- sc.all_objects = mpi.all_objects;
if (!mpi.got_partial && mpi.some_limitation &&
CMP_EQ(mpi.least,mpi.most)) {
@@ -1506,8 +1487,6 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
sz = size_object(key);
hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
- if (mpi.all_objects)
- (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
@@ -1532,8 +1511,6 @@ static int db_select_chunk_tree(Process *p, DbTable *tbl, Eterm tid,
hp = HAlloc(p, 9 + sz + ERTS_MAGIC_REF_THING_SIZE);
key = copy_struct(key, sz, &hp, &MSO(p));
- if (mpi.all_objects)
- (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE8
(hp,
@@ -1882,7 +1859,6 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
}
sc.mp = mpi.mp;
- sc.all_objects = mpi.all_objects;
stack = get_static_stack(tb);
if (!mpi.got_partial && mpi.some_limitation &&
@@ -1928,8 +1904,6 @@ static int db_select_replace_tree(Process *p, DbTable *tbl, Eterm tid,
hp += BIG_UINT_HEAP_SIZE;
}
key = copy_struct(key, sz, &hp, &MSO(p));
- if (mpi.all_objects)
- (mpi.mp)->intern.flags |= BIN_FLAG_ALL_OBJECTS;
mpb = erts_db_make_match_prog_ref(p,mpi.mp,&hp);
continuation = TUPLE5
@@ -1995,8 +1969,9 @@ static void db_print_tree(fmtfn_t to, void *to_arg,
}
/* release all memory occupied by a single table */
-static int db_free_table_tree(DbTable *tbl)
+static int db_free_empty_table_tree(DbTable *tbl)
{
+ ASSERT(tbl->tree.root == NULL);
while (db_free_table_continue_tree(tbl, ERTS_SWORD_MAX) < 0)
;
return 1;
@@ -2023,12 +1998,14 @@ static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds)
return reds;
}
-static int db_delete_all_objects_tree(Process* p, DbTable* tbl)
+static SWord db_delete_all_objects_tree(Process* p, DbTable* tbl, SWord reds)
{
- db_free_table_tree(tbl);
+ reds = db_free_table_continue_tree(tbl, reds);
+ if (reds < 0)
+ return reds;
db_create_tree(p, tbl);
erts_atomic_set_nob(&tbl->tree.common.nitems, 0);
- return 0;
+ return reds;
}
static void do_db_tree_foreach_offheap(TreeDbTerm *,
@@ -2214,7 +2191,6 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
mpi->got_partial = 0;
mpi->something_can_match = 0;
mpi->mp = NULL;
- mpi->all_objects = 1;
mpi->save_term = NULL;
for (lst = pattern; is_list(lst); lst = CDR(list_val(lst)))
@@ -2264,7 +2240,6 @@ static int analyze_pattern(DbTableTree *tb, Eterm pattern,
if (!is_list(body) || CDR(list_val(body)) != NIL ||
CAR(list_val(body)) != am_DollarUnderscore) {
- mpi->all_objects = 0;
}
++i;
@@ -3339,8 +3314,7 @@ static int doit_select(DbTableTree *tb, TreeDbTerm *this, void *ptr,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0))) {
return 0;
}
- ret = db_match_dbterm(&tb->common,sc->p,sc->mp,sc->all_objects,
- &this->dbterm, &hp, 2);
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, &hp, 2);
if (is_value(ret)) {
sc->accum = CONS(hp, ret, sc->accum);
}
@@ -3371,8 +3345,7 @@ static int doit_select_count(DbTableTree *tb, TreeDbTerm *this, void *ptr,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0)) {
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, 0,
- &this->dbterm, NULL, 0);
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, NULL, 0);
if (ret == am_true) {
++(sc->got);
}
@@ -3401,8 +3374,7 @@ static int doit_select_chunk(DbTableTree *tb, TreeDbTerm *this, void *ptr,
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, sc->all_objects,
- &this->dbterm, &hp, 2);
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, &hp, 2);
if (is_value(ret)) {
++(sc->got);
sc->accum = CONS(hp, ret, sc->accum);
@@ -3437,8 +3409,7 @@ static int doit_select_delete(DbTableTree *tb, TreeDbTerm *this, void *ptr,
cmp_partly_bound(sc->end_condition,
GETKEY_WITH_POS(sc->keypos, this->dbterm.tpl)) > 0)
return 0;
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, 0,
- &this->dbterm, NULL, 0);
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &this->dbterm, NULL, 0);
if (ret == am_true) {
key = GETKEY(sc->tb, this->dbterm.tpl);
linkout_tree(sc->tb, key);
@@ -3465,8 +3436,7 @@ static int doit_select_replace(DbTableTree *tb, TreeDbTerm **this, void *ptr,
GETKEY_WITH_POS(sc->keypos, (*this)->dbterm.tpl)) > 0)) {
return 0;
}
- ret = db_match_dbterm(&tb->common, sc->p, sc->mp, 0,
- &(*this)->dbterm, NULL, 0);
+ ret = db_match_dbterm(&tb->common, sc->p, sc->mp, &(*this)->dbterm, NULL, 0);
if (is_value(ret)) {
TreeDbTerm* new;
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 6354abfd1f..f1d47326b4 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -644,6 +644,12 @@ static DMCGuardBif guard_tab[] =
DBIF_ALL
},
{
+ am_is_map_key,
+ &is_map_key_2,
+ 2,
+ DBIF_ALL
+ },
+ {
am_bit_size,
&bit_size_1,
1,
@@ -2764,9 +2770,7 @@ Eterm db_format_dmc_err_info(Process *p, DMCErrInfo *ei)
sys_strcpy(buff,tmp->error_string);
sl = sys_strlen(buff);
shp = HAlloc(p, sl * 2 + 5);
- sev = (tmp->severity == dmcWarning) ?
- am_atom_put("warning",7) :
- am_error;
+ sev = (tmp->severity == dmcWarning) ? am_warning : am_error;
tlist = buf_to_intlist(&shp, buff, sl, NIL);
tpl = TUPLE2(shp, sev, tlist);
shp += 3;
@@ -5174,7 +5178,7 @@ BIF_RETTYPE match_spec_test_3(BIF_ALIST_3)
{
Eterm res;
#ifdef DMC_DEBUG
- if (BIF_ARG_3 == am_atom_put("dis",3)) {
+ if (BIF_ARG_3 == ERTS_MAKE_AM("dis")) {
test_disassemble_next = 1;
BIF_RET(am_true);
} else
@@ -5285,7 +5289,7 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
erts_free(ERTS_ALC_T_DB_TMP, arr);
}
erts_bin_free(mps);
- ret = TUPLE4(hp, am_atom_put("ok",2), res, flg, lint_res);
+ ret = TUPLE4(hp, am_ok, res, flg, lint_res);
}
return ret;
}
@@ -5327,7 +5331,7 @@ void db_free_tmp_uncompressed(DbTerm* obj)
}
Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog,
- int all, DbTerm* obj, Eterm** hpp, Uint extra)
+ DbTerm* obj, Eterm** hpp, Uint extra)
{
enum erts_pam_run_flags flags;
Uint32 dummy;
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 6b126f35d6..73d242449e 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -32,6 +32,7 @@
** DMC_DEBUG does NOT need DEBUG, but DEBUG needs DMC_DEBUG
*/
#define DMC_DEBUG 1
+#define ETS_DBG_FORCE_TRAP 1
#endif
/*
@@ -180,10 +181,9 @@ typedef struct db_table_method
Eterm* ret);
int (*db_take)(Process *, DbTable *, Eterm, Eterm *);
- int (*db_delete_all_objects)(Process* p,
- DbTable* db /* [in out] */ );
+ SWord (*db_delete_all_objects)(Process* p, DbTable* db, SWord reds);
- int (*db_free_table)(DbTable* db /* [in out] */ );
+ int (*db_free_empty_table)(DbTable* db);
SWord (*db_free_table_continue)(DbTable* db, SWord reds);
void (*db_print)(fmtfn_t to,
@@ -267,6 +267,10 @@ typedef struct db_table_common {
Uint32 status; /* bit masks defined below */
int keypos; /* defaults to 1 */
int compress;
+
+#ifdef ETS_DBG_FORCE_TRAP
+ erts_atomic_t dbg_force_trap; /* &1 force enabled, &2 trap this call */
+#endif
} DbTableCommon;
/* These are status bit patterns */
@@ -281,9 +285,7 @@ typedef struct db_table_common {
#define DB_FINE_LOCKED (1 << 8) /* write_concurrency */
#define DB_FREQ_READ (1 << 9) /* read_concurrency */
#define DB_NAMED_TABLE (1 << 10)
-
-#define ERTS_ETS_TABLE_TYPES (DB_BAG|DB_SET|DB_DUPLICATE_BAG|DB_ORDERED_SET\
- |DB_FINE_LOCKED|DB_FREQ_READ|DB_NAMED_TABLE)
+#define DB_BUSY (1 << 11)
#define IS_HASH_TABLE(Status) (!!((Status) & \
(DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
@@ -469,7 +471,7 @@ Binary *db_match_compile(Eterm *matchexpr, Eterm *guards,
/* Returns newly allocated MatchProg binary with refc == 0*/
Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog,
- int all, DbTerm* obj, Eterm** hpp, Uint extra);
+ DbTerm* obj, Eterm** hpp, Uint extra);
Eterm db_prog_match(Process *p, Process *self,
Binary *prog, Eterm term,
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 0692cea0ee..a65dbbf42b 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -413,21 +413,20 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
{
int cost;
- if (p->flags & F_HIBERNATE_SCHED) {
+ if (p->flags & (F_HIBERNATE_SCHED|F_HIPE_RECV_LOCKED)) {
/*
* We just hibernated. We do *not* want to mess
* up the hibernation by an ordinary GC...
+ *
+ * OR
+ *
+ * We left a receive in HiPE with the message
+ * queue lock still held, and we do not want to
+ * do a GC while holding the message queue lock...
*/
return result;
}
-#ifdef HIPE
- if (p->hipe_smp.have_receive_locks) {
- /* Do not want to GC with message queue locked... */
- return result;
- }
-#endif
-
if (!p->mbuf) {
/* Must have GC:d in BIF call... invalidate live_hf_end */
live_hf_end = ERTS_INVALID_HFRAG_PTR;
diff --git a/erts/emulator/beam/erl_map.c b/erts/emulator/beam/erl_map.c
index f577b017c3..48154b5d0f 100644
--- a/erts/emulator/beam/erl_map.c
+++ b/erts/emulator/beam/erl_map.c
@@ -43,6 +43,7 @@
*
* DONE:
* - erlang:is_map/1
+ * - erlang:is_map_key/2
* - erlang:map_size/1
* - erlang:map_get/2
*
@@ -919,7 +920,7 @@ static int hxnodecmp(hxnode_t *a, hxnode_t *b) {
return -1;
}
-/* maps:is_key/2 */
+/* maps:is_key/2 and erlang:is_map_key/2 */
BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) {
if (is_map(BIF_ARG_2)) {
@@ -929,6 +930,10 @@ BIF_RETTYPE maps_is_key_2(BIF_ALIST_2) {
BIF_ERROR(BIF_P, BADMAP);
}
+BIF_RETTYPE is_map_key_2(BIF_ALIST_2) {
+ BIF_RET(maps_is_key_2(BIF_CALL_ARGS));
+}
+
/* maps:keys/1 */
BIF_RETTYPE maps_keys_1(BIF_ALIST_1) {
@@ -3053,7 +3058,7 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
Uint path_length = 0;
Uint *path_rest = NULL;
int i, elems, orig_elems;
- Eterm node = map, res, *path_ptr = NULL, *hp;
+ Eterm node = map, res, *patch_ptr = NULL, *hp;
/* A stack WSTACK is used when traversing the hashmap.
* It contains: node, idx, sz, ptr
@@ -3112,15 +3117,22 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
}
if (type == iterator) {
- /* iterator uses the format {K, V, {K, V, {K, V, [Path | Map]}}},
- * so each element is 4 words large */
+ /*
+ * Iterator uses the format {K1, V1, {K2, V2, {K3, V3, [Path | Map]}}},
+ * so each element is 4 words large.
+ * To make iteration order independent of input reductions
+ * the KV-pairs are here built in DESTRUCTIVE non-reverse order.
+ */
hp = HAlloc(BIF_P, 4 * elems);
- res = am_none;
} else {
- /* list used the format [Path, Map, {K,V}, {K,V} | BIF_ARG_3],
- * so each element is 2+3 words large */
+ /*
+ * List used the format [Path, Map, {K3,V3}, {K2,V2}, {K1,V1} | BIF_ARG_3],
+ * so each element is 2+3 words large.
+ * To make list order independent of input reductions
+ * the KV-pairs are here built in FUNCTIONAL reverse order
+ * as this is how the list as a whole is constructed.
+ */
hp = HAlloc(BIF_P, (2 + 3) * elems);
- res = BIF_ARG_3;
}
orig_elems = elems;
@@ -3144,12 +3156,15 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
if (is_list(ptr[PATH_ELEM(curr_path)])) {
Eterm *lst = list_val(ptr[PATH_ELEM(curr_path)]);
if (type == iterator) {
- res = TUPLE3(hp, CAR(lst), CDR(lst), res); hp += 4;
- /* Note where we should patch the Iterator is needed */
- path_ptr = hp-1;
+ res = make_tuple(hp);
+ hp[0] = make_arityval(3);
+ hp[1] = CAR(lst);
+ hp[2] = CDR(lst);
+ patch_ptr = &hp[3];
+ hp += 4;
} else {
Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3;
- res = CONS(hp, tup, res); hp += 2;
+ res = CONS(hp, tup, BIF_ARG_3); hp += 2;
}
elems--;
break;
@@ -3183,7 +3198,12 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
while (idx < sz && elems != 0 && is_list(ptr[idx])) {
Eterm *lst = list_val(ptr[idx]);
if (type == iterator) {
- res = TUPLE3(hp, CAR(lst), CDR(lst), res); hp += 4;
+ *patch_ptr = make_tuple(hp);
+ hp[0] = make_arityval(3);
+ hp[1] = CAR(lst);
+ hp[2] = CDR(lst);
+ patch_ptr = &hp[3];
+ hp += 4;
} else {
Eterm tup = TUPLE2(hp, CAR(lst), CDR(lst)); hp += 3;
res = CONS(hp, tup, res); hp += 2;
@@ -3281,7 +3301,7 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
if (type == iterator) {
hp = HAlloc(BIF_P, 2);
- *path_ptr = CONS(hp, path, map); hp += 2;
+ *patch_ptr = CONS(hp, path, map); hp += 2;
} else {
hp = HAlloc(BIF_P, 4);
res = CONS(hp, map, res); hp += 2;
@@ -3289,6 +3309,7 @@ BIF_RETTYPE erts_internal_map_next_3(BIF_ALIST_3) {
}
} else {
if (type == iterator) {
+ *patch_ptr = am_none;
HRelease(BIF_P, hp + 4 * elems, hp);
} else {
HRelease(BIF_P, hp + (2+3) * elems, hp);
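The erl_map.c hunks above replace the build-in-reverse approach with a "patch pointer": each 4-word iterator tuple is written directly, and patch_ptr remembers the slot (the third element of the previous tuple) where the next tuple, or the final [Path | Map] / 'none' terminator, must be written. The standalone sketch below (plain C with invented names, not ERTS code) shows the same idiom on an ordinary singly linked list: build in source order by always remembering where the next link has to be patched in.

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int value; struct node *next; };

    int main(void)
    {
        int input[] = {1, 2, 3, 4};
        struct node *head = NULL;
        struct node **patch_ptr = &head;      /* slot where the next node must be linked */
        size_t i;

        for (i = 0; i < sizeof(input)/sizeof(input[0]); i++) {
            struct node *n = malloc(sizeof *n);
            if (!n)
                return 1;
            n->value = input[i];
            n->next = NULL;
            *patch_ptr = n;                   /* destructively patch the previous link */
            patch_ptr = &n->next;             /* remember where the following node goes */
        }
        *patch_ptr = NULL;                    /* terminate, like '*patch_ptr = am_none' above */

        for (struct node *n = head; n; n = n->next)
            printf("%d ", n->value);          /* prints 1 2 3 4: source order, no reversal */
        printf("\n");
        return 0;
    }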
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index bea7a0fe86..507cc989d2 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -379,7 +379,10 @@ queue_messages(Process* receiver,
erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
}
- erts_proc_notify_new_message(receiver, receiver_locks);
+ if (last == &first->next)
+ erts_proc_notify_new_message(receiver, receiver_locks);
+ else
+ erts_proc_notify_new_sig(receiver, state, ERTS_PSFLG_ACTIVE);
}
static ERTS_INLINE
diff --git a/erts/emulator/beam/erl_monitor_link.c b/erts/emulator/beam/erl_monitor_link.c
index 70f36fb6b7..48d9bd4ca5 100644
--- a/erts/emulator/beam/erl_monitor_link.c
+++ b/erts/emulator/beam/erl_monitor_link.c
@@ -630,7 +630,9 @@ erts_monitor_tree_lookup_create(ErtsMonitor **root, int *created, Uint16 type,
ErtsMonitor *res;
ErtsMonitorCreateCtxt cctxt = {type, origin};
- ERTS_ML_ASSERT(type == ERTS_MON_TYPE_NODE || type == ERTS_MON_TYPE_NODES);
+ ERTS_ML_ASSERT(type == ERTS_MON_TYPE_NODE
+ || type == ERTS_MON_TYPE_NODES
+ || type == ERTS_MON_TYPE_SUSPEND);
res = (ErtsMonitor *) ml_rbt_lookup_create((ErtsMonLnkNode **) root,
target, create_monitor,
@@ -760,11 +762,13 @@ erts_monitor_create(Uint16 type, Eterm ref, Eterm orgn, Eterm trgt, Eterm name)
switch (type) {
case ERTS_MON_TYPE_PROC:
case ERTS_MON_TYPE_PORT:
- case ERTS_MON_TYPE_TIME_OFFSET:
if (is_nil(name)) {
ErtsMonitorDataHeap *mdhp;
ErtsORefThing *ortp;
+ case ERTS_MON_TYPE_TIME_OFFSET:
+
+ ERTS_ML_ASSERT(is_nil(name));
ERTS_ML_ASSERT(is_immed(orgn) && is_immed(trgt));
ERTS_ML_ASSERT(is_internal_ordinary_ref(ref));
@@ -860,10 +864,38 @@ erts_monitor_create(Uint16 type, Eterm ref, Eterm orgn, Eterm trgt, Eterm name)
mdep->dist = NULL;
break;
}
- case ERTS_MON_TYPE_SUSPEND:
- ERTS_INTERNAL_ERROR("Use erts_monitor_suspend_create() instead...");
- mdp = NULL;
+ case ERTS_MON_TYPE_SUSPEND: {
+ ErtsMonitorSuspend *msp;
+
+ ERTS_ML_ASSERT(is_nil(name));
+ ERTS_ML_ASSERT(is_nil(ref));
+ ERTS_ML_ASSERT(is_internal_pid(orgn) && is_internal_pid(trgt));
+
+ msp = erts_alloc(ERTS_ALC_T_MONITOR_SUSPEND,
+ sizeof(ErtsMonitorSuspend));
+ mdp = &msp->md;
+ ERTS_ML_ASSERT(((void *) mdp) == ((void *) msp));
+
+ mdp->ref = NIL;
+
+ mdp->origin.other.item = trgt;
+ mdp->origin.offset = (Uint16) offsetof(ErtsMonitorData, origin);
+ mdp->origin.key_offset = (Uint16) offsetof(ErtsMonitor, other.item);
+ ERTS_ML_ASSERT(mdp->origin.key_offset >= mdp->origin.offset);
+ mdp->origin.flags = (Uint16) ERTS_ML_FLG_EXTENDED;
+ mdp->origin.type = type;
+
+ mdp->target.other.item = orgn;
+ mdp->target.offset = (Uint16) offsetof(ErtsMonitorData, target);
+ mdp->target.key_offset = (Uint16) offsetof(ErtsMonitor, other.item);
+ mdp->target.flags = ERTS_ML_FLG_TARGET|ERTS_ML_FLG_EXTENDED;
+ mdp->target.type = type;
+
+ msp->next = NULL;
+ erts_atomic_init_relb(&msp->state, 0);
+
break;
+ }
default:
ERTS_INTERNAL_ERROR("Invalid monitor type");
mdp = NULL;
@@ -887,10 +919,11 @@ erts_monitor_destroy__(ErtsMonitorData *mdp)
ERTS_ML_ASSERT(!(mdp->target.flags & ERTS_ML_FLG_IN_TABLE));
ERTS_ML_ASSERT((mdp->origin.flags & ERTS_ML_FLGS_SAME)
== (mdp->target.flags & ERTS_ML_FLGS_SAME));
- ERTS_ML_ASSERT(mdp->origin.type != ERTS_MON_TYPE_SUSPEND);
if (!(mdp->origin.flags & ERTS_ML_FLG_EXTENDED))
erts_free(ERTS_ALC_T_MONITOR, mdp);
+ else if (mdp->origin.type == ERTS_MON_TYPE_SUSPEND)
+ erts_free(ERTS_ALC_T_MONITOR_SUSPEND, mdp);
else {
ErtsMonitorDataExtended *mdep = (ErtsMonitorDataExtended *) mdp;
ErlOffHeap oh;
@@ -927,10 +960,10 @@ erts_monitor_size(ErtsMonitor *mon)
Uint size, refc;
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
- ERTS_ML_ASSERT(mon->type != ERTS_MON_TYPE_SUSPEND);
-
if (!(mon->flags & ERTS_ML_FLG_EXTENDED))
size = sizeof(ErtsMonitorDataHeap);
+ else if (mon->type == ERTS_MON_TYPE_SUSPEND)
+ size = sizeof(ErtsMonitorSuspend);
else {
ErtsMonitorDataExtended *mdep;
Uint hsz = 0;
@@ -957,54 +990,6 @@ erts_monitor_size(ErtsMonitor *mon)
return size / refc;
}
-
-/* suspend monitors... */
-
-ErtsMonitorSuspend *
-erts_monitor_suspend_create(Eterm pid)
-{
- ErtsMonitorSuspend *msp;
-
- ERTS_ML_ASSERT(is_internal_pid(pid));
-
- msp = erts_alloc(ERTS_ALC_T_SUSPEND_MON,
- sizeof(ErtsMonitorSuspend));
- msp->mon.offset = (Uint16) offsetof(ErtsMonitorSuspend, mon);
- msp->mon.key_offset = (Uint16) offsetof(ErtsMonitor, other.item);
- msp->mon.other.item = pid;
- msp->mon.flags = 0;
- msp->mon.type = ERTS_MON_TYPE_SUSPEND;
- msp->pending = 0;
- msp->active = 0;
- return msp;
-}
-
-static ErtsMonLnkNode *
-create_monitor_suspend(Eterm pid, void *unused)
-{
- ErtsMonitorSuspend *msp = erts_monitor_suspend_create(pid);
- return (ErtsMonLnkNode *) &msp->mon;
-}
-
-ErtsMonitorSuspend *
-erts_monitor_suspend_tree_lookup_create(ErtsMonitor **root, int *created,
- Eterm pid)
-{
- ErtsMonitor *mon;
- mon = (ErtsMonitor *) ml_rbt_lookup_create((ErtsMonLnkNode **) root,
- pid, create_monitor_suspend,
- NULL,
- created);
- return erts_monitor_suspend(mon);
-}
-
-void
-erts_monitor_suspend_destroy(ErtsMonitorSuspend *msp)
-{
- ERTS_ML_ASSERT(!(msp->mon.flags & ERTS_ML_FLG_IN_TABLE));
- erts_free(ERTS_ALC_T_SUSPEND_MON, msp);
-}
-
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Link Operations *
* *
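The suspend-monitor case added to erts_monitor_create() above stores offsetof() values in origin.offset and target.offset so that generic code (for example erts_monitor_to_data()) can step back from an embedded ErtsMonitor to the ErtsMonitorData/ErtsMonitorSuspend that contains it. Below is a minimal standalone illustration of that offset trick, using invented names rather than the ERTS structures.

    #include <stddef.h>
    #include <stdio.h>

    struct member { unsigned short offset; };

    struct container {
        long payload;
        struct member origin;     /* embedded member, like ErtsMonitor inside ErtsMonitorData */
        struct member target;
    };

    /* Recover the containing struct from a pointer to either embedded member. */
    static struct container *to_container(struct member *m)
    {
        return (struct container *) ((char *) m - m->offset);
    }

    int main(void)
    {
        struct container c;
        c.payload = 4711;
        c.origin.offset = (unsigned short) offsetof(struct container, origin);
        c.target.offset = (unsigned short) offsetof(struct container, target);

        /* Given only &c.target we can still get back to c itself. */
        printf("%ld\n", to_container(&c.target)->payload);   /* prints 4711 */
        return 0;
    }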
diff --git a/erts/emulator/beam/erl_monitor_link.h b/erts/emulator/beam/erl_monitor_link.h
index 603aead8cc..9ff8aa509a 100644
--- a/erts/emulator/beam/erl_monitor_link.h
+++ b/erts/emulator/beam/erl_monitor_link.h
@@ -246,15 +246,28 @@
*
* --- ERTS_MON_TYPE_SUSPEND -------------------------------------
*
- * Suspend monitor.
+ * Suspend monitor. A local process (origin) suspends another
+ * local process (target).
*
- * Other Item: Suspendee process identifier
- * Key: Suspendee process identifier
- *
- * Valid keys are only ordinary internal references.
+ * Origin:
+ * Other Item: Process identifier of suspendee
+ * (target)
+ * Key: Process identifier of suspendee
+ * (target)
+ * Target:
+ * Other Item: Process identifier of suspender
+ * (origin)
+ * Key: Process identifier of suspender
+ * (origin)
+ * Shared:
+ * Next: Pointer to another suspend monitor
+ * State: Number of suspends and a flag
+ * indicating if the suspend is
+ * active or not.
*
- * This type of monitor is a bit strange and the whole process
- * suspend functionality should be improved...
+ * The origin part of the monitor is stored in the monitor tree of
+ * the origin process, and the target part of the monitor is stored
+ * in the monitor list for local targets on the target process.
*
*
*
@@ -638,11 +651,15 @@ struct ErtsMonitorDataExtended__ {
Eterm heap[1]; /* heap start... */
};
-typedef struct {
- ErtsMonitor mon;
- int pending;
- int active;
-} ErtsMonitorSuspend;
+typedef struct ErtsMonitorSuspend__ ErtsMonitorSuspend;
+
+struct ErtsMonitorSuspend__ {
+ ErtsMonitorData md; /* origin = suspender; target = suspendee */
+ ErtsMonitorSuspend *next;
+ erts_atomic_t state;
+};
+#define ERTS_MSUSPEND_STATE_FLG_ACTIVE ((erts_aint_t) (((Uint) 1) << (sizeof(Uint)*8 - 1)))
+#define ERTS_MSUSPEND_STATE_COUNTER_MASK (~ERTS_MSUSPEND_STATE_FLG_ACTIVE)
/*
* --- Monitor tree operations ---
@@ -1094,24 +1111,25 @@ int erts_monitor_list_foreach_delete_yielding(ErtsMonitor **list,
*
* @brief Create a monitor
*
- * Can create all types of monitors exept for suspend monitors
+ * Can create all types of monitors
*
* When the function is called it is assumed that:
* - 'ref' is an internal ordinary reference if type is ERTS_MON_TYPE_PROC,
* ERTS_MON_TYPE_PORT, ERTS_MON_TYPE_TIME_OFFSET, or ERTS_MON_TYPE_RESOURCE
- * - 'ref' is NIL if type is ERTS_MON_TYPE_NODE or ERTS_MON_TYPE_NODES
+ * - 'ref' is NIL if type is ERTS_MON_TYPE_NODE, ERTS_MON_TYPE_NODES, or
+ * ERTS_MON_TYPE_SUSPEND
* - 'ref' is an ordinary internal reference or an external reference if
* type is ERTS_MON_TYPE_DIST_PROC
* - 'name' is an atom or NIL if type is ERTS_MON_TYPE_PROC,
* ERTS_MON_TYPE_PORT, or ERTS_MON_TYPE_DIST_PROC
* - 'name' is NIL if type is ERTS_MON_TYPE_TIME_OFFSET, ERTS_MON_TYPE_RESOURCE,
- * ERTS_MON_TYPE_NODE, or ERTS_MON_TYPE_NODES
+ * ERTS_MON_TYPE_NODE, ERTS_MON_TYPE_NODES, or ERTS_MON_TYPE_SUSPEND
* If the above is not true, bad things will happen.
*
* @param[in] type ERTS_MON_TYPE_PROC, ERTS_MON_TYPE_PORT,
* ERTS_MON_TYPE_TIME_OFFSET, ERTS_MON_TYPE_DIST_PROC,
* ERTS_MON_TYPE_RESOURCE, ERTS_MON_TYPE_NODE,
- * or ERTS_MON_TYPE_NODES
+ * ERTS_MON_TYPE_NODES, or ERTS_MON_TYPE_SUSPEND
*
* @param[in] ref A reference or NIL depending on type
*
@@ -1119,6 +1137,10 @@ int erts_monitor_list_foreach_delete_yielding(ErtsMonitor **list,
*
* @param[in] target The key of the target
*
+ * @param[in] name An atom (the name) or NIL depending on type
+ *
+ * @returns A pointer to the monitor data structure
+ *
*/
ErtsMonitorData *erts_monitor_create(Uint16 type, Eterm ref, Eterm origin,
Eterm target, Eterm name);
@@ -1347,7 +1369,8 @@ erts_monitor_to_data(ErtsMonitor *mon)
ERTS_ML_ASSERT(erts_monitor_origin_offset == (size_t) mdp->origin.offset);
ERTS_ML_ASSERT(!!(mdp->target.flags & ERTS_ML_FLG_TARGET));
ERTS_ML_ASSERT(erts_monitor_target_offset == (size_t) mdp->target.offset);
- if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES) {
+ if (mon->type == ERTS_MON_TYPE_NODE || mon->type == ERTS_MON_TYPE_NODES
+ || mon->type == ERTS_MON_TYPE_SUSPEND) {
ERTS_ML_ASSERT(erts_monitor_node_key_offset == (size_t) mdp->origin.key_offset);
ERTS_ML_ASSERT(erts_monitor_node_key_offset == (size_t) mdp->target.key_offset);
}
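ERTS_MSUSPEND_STATE_FLG_ACTIVE above reserves the top bit of the atomic state word for the "active" flag, leaving the remaining bits (ERTS_MSUSPEND_STATE_COUNTER_MASK) for the suspend count. The small standalone model below mirrors that packing, using a plain unsigned long in place of erts_aint_t (an assumption for illustration only).

    #include <stdio.h>

    typedef unsigned long word_t;                    /* stand-in for erts_aint_t */

    #define FLG_ACTIVE   ((word_t) 1 << (sizeof(word_t) * 8 - 1))   /* top bit */
    #define COUNTER_MASK (~FLG_ACTIVE)                               /* low bits */

    int main(void)
    {
        word_t state = 0;

        state += 2;                   /* two outstanding suspend requests counted */
        state |= FLG_ACTIVE;          /* the suspend has actually taken effect */

        printf("count=%lu active=%d\n",
               (unsigned long) (state & COUNTER_MASK),
               (state & FLG_ACTIVE) != 0);           /* count=2 active=1 */
        return 0;
    }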
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index e208792868..0fbf0eb03a 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -1255,8 +1255,10 @@ size_t enif_binary_to_term(ErlNifEnv *dst_env,
if (is_non_value(*term)) {
return 0;
}
- erts_factory_close(&factory);
- cache_env(dst_env);
+ if (size > 0) {
+ erts_factory_close(&factory);
+ cache_env(dst_env);
+ }
ASSERT(bp > data);
return bp - data;
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 4a3671df0c..3953a4c2e9 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -1567,8 +1567,9 @@ fail:
erts_port_dec_refc(pp);
if (ptp) {
- abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT,
- ptp->type, &ptp->u.alive.td, 0);
+ if (ptp->type == ERTS_PORT_TASK_PROC_SIG)
+ abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT,
+ ptp->type, &ptp->u.alive.td, 0);
port_task_free(ptp);
}
diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c
index 5165cd22a5..1aa390d94d 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.c
+++ b/erts/emulator/beam/erl_proc_sig_queue.c
@@ -39,6 +39,7 @@
#include "big.h"
#include "erl_gc.h"
#include "bif.h"
+#include "erl_bif_unique.h"
#include "erl_proc_sig_queue.h"
#include "dtrace-wrapper.h"
@@ -49,7 +50,7 @@
* Note that not all signals are handled using this functionality!
*/
-#define ERTS_SIG_Q_OP_MAX 11
+#define ERTS_SIG_Q_OP_MAX 13
#define ERTS_SIG_Q_OP_EXIT 0
#define ERTS_SIG_Q_OP_EXIT_LINKED 1
@@ -62,7 +63,9 @@
#define ERTS_SIG_Q_OP_TRACE_CHANGE_STATE 8
#define ERTS_SIG_Q_OP_PERSISTENT_MON_MSG 9
#define ERTS_SIG_Q_OP_IS_ALIVE 10
-#define ERTS_SIG_Q_OP_PROCESS_INFO ERTS_SIG_Q_OP_MAX
+#define ERTS_SIG_Q_OP_PROCESS_INFO 11
+#define ERTS_SIG_Q_OP_SYNC_SUSPEND 12
+#define ERTS_SIG_Q_OP_RPC ERTS_SIG_Q_OP_MAX
#define ERTS_SIG_Q_TYPE_MAX (ERTS_MON_LNK_TYPE_MAX + 5)
@@ -154,6 +157,17 @@ typedef struct {
} ErtsIsAliveRequest;
typedef struct {
+ Eterm message;
+ Eterm requester;
+ int async;
+} ErtsSyncSuspendRequest;
+
+typedef struct {
+ ErtsMonitorSuspend *mon;
+ ErtsMessage *sync;
+} ErtsProcSigPendingSuspend;
+
+typedef struct {
ErtsSignalCommon common;
Sint refc;
Sint delayed_len;
@@ -176,6 +190,15 @@ typedef struct {
#define ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE ((Sint) -1)
#define ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC ((Sint) -2)
+typedef struct {
+ ErtsSignalCommon common;
+ Eterm requester;
+ Eterm (*func)(Process *, void *, int *, ErlHeapFragment **);
+ void *arg;
+ Eterm ref;
+ ErtsORefThing oref_thing;
+} ErtsProcSigRPC;
+
static int handle_msg_tracing(Process *c_p,
ErtsSigRecvTracing *tracing,
ErtsMessage ***next_nm_sig);
@@ -400,13 +423,13 @@ sig_enqueue_trace(Process *c_p, ErtsMessage **sigp, int op,
}
static void
-sig_enqueue_trace_cleanup(ErtsMessage *first, ErtsSignal *sig, ErtsMessage *last)
+sig_enqueue_trace_cleanup(ErtsMessage *first, ErtsSignal *sig)
{
ErtsMessage *tmp;
/* The usual case; no tracing signals... */
- if (sig == (ErtsSignal *) first && sig == (ErtsSignal *) last) {
- sig->common.next = NULL;
+ if (sig == (ErtsSignal *) first) {
+ ASSERT(sig->common.next == NULL);
return;
}
@@ -422,6 +445,8 @@ sig_enqueue_trace_cleanup(ErtsMessage *first, ErtsSignal *sig, ErtsMessage *last
case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
destroy_trace_info((ErtsSigTraceInfo *) tmp_free);
break;
+ case ERTS_SIG_Q_OP_MONITOR:
+ break; /* ignore flushed pending signal */
default:
ERTS_INTERNAL_ERROR("Unexpected signal op");
break;
@@ -519,41 +544,42 @@ erts_aint32_t erts_enqueue_signals(Process *rp, ErtsMessage *first,
return enqueue_signals(rp, first, last, last_next, num_msgs, in_state);
}
-static ERTS_INLINE void
-ensure_dirty_proc_handled(Eterm pid,
- erts_aint32_t state,
- erts_aint32_t prio)
+void
+erts_make_dirty_proc_handled(Eterm pid,
+ erts_aint32_t state,
+ erts_aint32_t prio)
{
- if (state & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
- Eterm *hp;
- ErtsMessage *mp;
- Process *sig_handler;
+ Eterm *hp;
+ ErtsMessage *mp;
+ Process *sig_handler;
- if (prio < 0)
- prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
+ ASSERT(state & (ERTS_PSFLG_DIRTY_RUNNING |
+ ERTS_PSFLG_DIRTY_RUNNING_SYS));
- switch (prio) {
- case PRIORITY_MAX:
- sig_handler = erts_dirty_process_signal_handler_max;
- break;
- case PRIORITY_HIGH:
- sig_handler = erts_dirty_process_signal_handler_high;
- break;
- default:
- sig_handler = erts_dirty_process_signal_handler;
- break;
- }
+ if (prio < 0)
+ prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
- /* Make sure signals are handled... */
- mp = erts_alloc_message(0, &hp);
- erts_queue_message(sig_handler, 0, mp, pid, am_system);
+ switch (prio) {
+ case PRIORITY_MAX:
+ sig_handler = erts_dirty_process_signal_handler_max;
+ break;
+ case PRIORITY_HIGH:
+ sig_handler = erts_dirty_process_signal_handler_high;
+ break;
+ default:
+ sig_handler = erts_dirty_process_signal_handler;
+ break;
}
+
+ /* Make sure signals are handled... */
+ mp = erts_alloc_message(0, &hp);
+ erts_queue_message(sig_handler, 0, mp, pid, am_system);
}
static void
check_push_msgq_len_offs_marker(Process *rp, ErtsSignal *sig);
+
static int
proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
{
@@ -643,7 +669,7 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
first_last_done:
sig->common.specific.next = NULL;
- /* may add signals before and/or after sig */
+ /* may add signals before sig */
sig_enqueue_trace(c_p, sigp, op, rp, &last_next);
last->next = NULL;
@@ -664,29 +690,18 @@ first_last_done:
erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
if (res == 0) {
+ sig_enqueue_trace_cleanup(first, sig);
if (pend_sig) {
+ erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc);
if (sig == pend_sig) {
/* We did a switch, callers signal is now pending (still ok) */
ASSERT(esdp->pending_signal.sig);
res = 1;
}
- else {
- ASSERT(first == (ErtsMessage*)pend_sig);
- first = first->next;
- }
- erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc);
}
- sig_enqueue_trace_cleanup(first, sig, last);
- }
-
- if (!(state & (ERTS_PSFLG_EXITING
- | ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_SIG_IN_Q))) {
- /* Schedule process... */
- state = erts_proc_sys_schedule(rp, state, 0);
}
-
- ensure_dirty_proc_handled(rp->common.id, state, -1);
+ else
+ erts_proc_notify_new_sig(rp, state, 0);
if (!is_normal_sched)
erts_proc_dec_refc(rp);
@@ -742,7 +757,10 @@ maybe_elevate_sig_handling_prio(Process *c_p, Eterm other)
if (res) {
/* ensure handled if dirty executing... */
state = erts_atomic32_read_nob(&rp->state);
- ensure_dirty_proc_handled(other, state, my_prio);
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ erts_make_dirty_proc_handled(other, state, my_prio);
+ }
}
}
}
@@ -1311,6 +1329,8 @@ erts_proc_sig_send_monitor_down(ErtsMonitor *mon, Eterm reason)
/* Pass signal using old monitor structure... */
ErtsSignal *sig;
+ send_using_monitor_struct:
+
mon->other.item = reason; /* Pass immed reason via other.item... */
sig = (ErtsSignal *) mon;
sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_MONITOR_DOWN,
@@ -1322,6 +1342,18 @@ erts_proc_sig_send_monitor_down(ErtsMonitor *mon, Eterm reason)
ErtsMonitorData *mdp = erts_monitor_to_data(mon);
Eterm from_tag, monitored, heap[3];
+ if (mon->type == ERTS_MON_TYPE_SUSPEND) {
+ /*
+ * Set reason to 'undefined', since the exit
+ * reason is not used for suspend monitors,
+ * and send using the monitor structure. This
+ * is because we do not want to trigger
+ * unnecessary memory allocation etc...
+ */
+ reason = am_undefined;
+ goto send_using_monitor_struct;
+ }
+
if (!(mon->flags & ERTS_ML_FLG_NAME)) {
from_tag = monitored = mdp->origin.other.item;
if (is_external_pid(from_tag)) {
@@ -1599,7 +1631,173 @@ erts_proc_sig_send_process_info_request(Process *c_p,
else
erts_free(ERTS_ALC_T_SIG_DATA, pis);
return res;
-}
+}
+
+void
+erts_proc_sig_send_sync_suspend(Process *c_p, Eterm to, Eterm tag, Eterm reply)
+{
+ ErlHeapFragment *hfrag;
+ Uint hsz, tag_sz;
+ Eterm *hp, *start_hp, tag_cpy, msg, default_reply;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+ ErtsSyncSuspendRequest *ssusp;
+ int async_suspend;
+
+ tag_sz = size_object(tag);
+
+ hsz = 3 + tag_sz + sizeof(ErtsSyncSuspendRequest)/sizeof(Eterm);
+
+ mp = erts_alloc_message(hsz, &hp);
+ hfrag = &mp->hfrag;
+ mp->next = NULL;
+ ohp = &hfrag->off_heap;
+ start_hp = hp;
+
+ tag_cpy = copy_struct(tag, tag_sz, &hp, ohp);
+
+ async_suspend = is_non_value(reply);
+ default_reply = async_suspend ? am_suspended : reply;
+
+ msg = TUPLE2(hp, tag_cpy, default_reply);
+ hp += 3;
+
+ hfrag->used_size = hp - start_hp;
+
+ ssusp = (ErtsSyncSuspendRequest *) (char *) hp;
+ ssusp->message = msg;
+ ssusp->requester = c_p->common.id;
+ ssusp->async = async_suspend;
+
+ ERL_MESSAGE_TERM(mp) = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_SYNC_SUSPEND,
+ ERTS_SIG_Q_TYPE_UNDEFINED,
+ 0);
+ ERL_MESSAGE_TOKEN(mp) = NIL;
+ ERL_MESSAGE_FROM(mp) = am_system;
+#ifdef USE_VM_PROBES
+ ERL_MESSAGE_DT_UTAG(mp) = NIL;
+#endif
+
+ if (proc_queue_signal(c_p, to, (ErtsSignal *) mp, ERTS_SIG_Q_OP_SYNC_SUSPEND))
+ (void) maybe_elevate_sig_handling_prio(c_p, to);
+ else {
+ Eterm *tp;
+ /* It wasn't alive; reply to ourselves... */
+ mp->next = NULL;
+ mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ tp = tuple_val(msg);
+ tp[2] = async_suspend ? am_badarg : am_exited;
+ erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN,
+ mp, msg, am_system);
+ }
+}
+
+Eterm
+erts_proc_sig_send_rpc_request(Process *c_p,
+ Eterm to,
+ int reply,
+ Eterm (*func)(Process *, void *, int *, ErlHeapFragment **),
+ void *arg)
+{
+ Eterm res;
+ ErtsProcSigRPC *sig = erts_alloc(ERTS_ALC_T_SIG_DATA,
+ sizeof(ErtsProcSigRPC));
+ sig->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_RPC,
+ ERTS_SIG_Q_TYPE_UNDEFINED,
+ 0);
+ sig->requester = reply ? c_p->common.id : NIL;
+ sig->func = func;
+ sig->arg = arg;
+
+ if (!reply) {
+ res = am_ok;
+ sig->ref = am_ok;
+ }
+ else {
+ res = erts_make_ref(c_p);
+
+ sys_memcpy((void *) &sig->oref_thing,
+ (void *) internal_ref_val(res),
+ sizeof(ErtsORefThing));
+
+ sig->ref = make_internal_ref(&sig->oref_thing);
+
+ ERTS_RECV_MARK_SAVE(c_p);
+ ERTS_RECV_MARK_SET(c_p);
+ }
+
+ if (proc_queue_signal(c_p, to, (ErtsSignal *) sig, ERTS_SIG_Q_OP_RPC))
+ (void) maybe_elevate_sig_handling_prio(c_p, to);
+ else {
+ erts_free(ERTS_ALC_T_SIG_DATA, sig);
+ res = THE_NON_VALUE;
+ if (reply)
+ JOIN_MESSAGE(c_p);
+ }
+
+ return res;
+}
+
+static int
+handle_rpc(Process *c_p, ErtsProcSigRPC *rpc, int cnt, int limit, int *yieldp)
+{
+ Process *rp;
+ ErlHeapFragment *bp = NULL;
+ Eterm res;
+ Uint hsz;
+ int reds, out_cnt;
+
+ /*
+ * reds in:
+ * Reductions left.
+ *
+ * reds out:
+ * Absolute value of reds out equals consumed
+ * amount of reds. If a negative value, force
+ * a yield.
+ */
+
+ reds = (limit - cnt) / ERTS_SIG_REDS_CNT_FACTOR;
+ if (reds <= 0)
+ reds = 1;
+
+ res = (*rpc->func)(c_p, rpc->arg, &reds, &bp);
+
+ if (reds < 0) {
+ /* Force yield... */
+ *yieldp = !0;
+ reds *= -1;
+ }
+
+ out_cnt = reds*ERTS_SIG_REDS_CNT_FACTOR;
+
+ hsz = 3 + sizeof(ErtsORefThing)/sizeof(Eterm);
+
+ rp = erts_proc_lookup(rpc->requester);
+ if (!rp) {
+ if (bp)
+ free_message_buffer(bp);
+ }
+ else {
+ Eterm *hp, msg, ref;
+ ErtsMessage *mp = erts_alloc_message(hsz, &hp);
+
+ sys_memcpy((void *) hp, (void *) &rpc->oref_thing,
+ sizeof(rpc->oref_thing));
+
+ ref = make_internal_ref(hp);
+ hp += sizeof(rpc->oref_thing)/sizeof(Eterm);
+ msg = TUPLE2(hp, ref, res);
+
+ mp->hfrag.next = bp;
+
+ erts_queue_proc_message(c_p, rp, 0, mp, msg);
+ }
+
+ erts_free(ERTS_ALC_T_SIG_DATA, rpc);
+
+ return out_cnt;
+}
static void
is_alive_response(Process *c_p, ErtsMessage *mp, int is_alive)
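handle_rpc() above documents a reduction-count convention for the callback: the remaining budget is passed in through the int pointer, the absolute value written back is the amount consumed, and a negative value additionally forces a yield. Below is a standalone sketch of the same convention with an invented callback (it does not use the ERTS signature or types).

    #include <stdio.h>
    #include <stdlib.h>

    /* Invented callback following the reds in/out convention described above. */
    static int do_work(int *redsp)
    {
        int budget = *redsp;          /* reds in: reductions left */
        int used = 5;                 /* pretend the work cost 5 reductions */

        if (used >= budget) {
            *redsp = -used;           /* negative: consumed 'used' AND force a yield */
            return 0;                 /* not finished */
        }
        *redsp = used;                /* positive: consumed 'used', no yield needed */
        return 1;                     /* finished */
    }

    int main(void)
    {
        int reds = 3;                 /* small budget on purpose */
        int done = do_work(&reds);
        int consumed = abs(reds);
        int yield = reds < 0;

        printf("done=%d consumed=%d yield=%d\n", done, consumed, yield);
        return 0;
    }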
@@ -2643,6 +2841,155 @@ handle_process_info(Process *c_p, ErtsSigRecvTracing *tracing,
return ((int) reds)*4 + 8;
}
+static void
+handle_suspend(Process *c_p, ErtsMonitor *mon, int *yieldp)
+{
+ erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
+
+ ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND);
+
+ if (!(state & ERTS_PSFLG_DIRTY_RUNNING)) {
+ ErtsMonitorSuspend *msp;
+ erts_aint_t mstate;
+
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
+ mstate = erts_atomic_read_bor_acqb(&msp->state,
+ ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)); (void) mstate;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ *yieldp = !0;
+ }
+ else {
+ /* Executing dirty; delay suspend... */
+ ErtsProcSigPendingSuspend *psusp;
+ ErtsMonitorSuspend *msp;
+
+ psusp = ERTS_PROC_GET_PENDING_SUSPEND(c_p);
+ if (!psusp) {
+ psusp = erts_alloc(ERTS_ALC_T_SIG_DATA,
+ sizeof(ErtsProcSigPendingSuspend));
+ psusp->mon = NULL;
+ psusp->sync = NULL;
+ ERTS_PROC_SET_PENDING_SUSPEND(c_p, (void *) psusp);
+ }
+
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
+
+ msp->next = psusp->mon;
+ psusp->mon = msp;
+
+ erts_atomic32_inc_nob(&msp->md.refc);
+ }
+}
+
+static void
+sync_suspend_reply(Process *c_p, ErtsMessage *mp, erts_aint32_t state)
+{
+ /*
+ * Sender prepared the message for us. Just patch
+ * the result if necessary. The default prepared
+ * result is 'false'.
+ */
+ Process *rp;
+ ErtsSyncSuspendRequest *ssusp;
+
+ ssusp = (ErtsSyncSuspendRequest *) (char *) (&mp->hfrag.mem[0]
+ + mp->hfrag.used_size);
+
+ ASSERT(ERTS_SIG_IS_NON_MSG(mp));
+ ASSERT(ERTS_PROC_SIG_OP(((ErtsSignal *) mp)->common.tag)
+ == ERTS_SIG_Q_OP_SYNC_SUSPEND);
+ ASSERT(mp->hfrag.alloc_size > mp->hfrag.used_size);
+ ASSERT((mp->hfrag.alloc_size - mp->hfrag.used_size)*sizeof(UWord)
+ >= sizeof(ErtsSyncSuspendRequest));
+ ASSERT(is_internal_pid(ssusp->requester));
+ ASSERT(ssusp->requester != c_p->common.id);
+ ASSERT(is_tuple_arity(ssusp->message, 2));
+ ASSERT(is_immed(tuple_val(ssusp->message)[2]));
+
+ ERL_MESSAGE_TERM(mp) = ssusp->message;
+ mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
+ mp->next = NULL;
+
+ rp = erts_proc_lookup(ssusp->requester);
+ if (!rp)
+ erts_cleanup_messages(mp);
+ else {
+ if ((state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_SUSPENDED)) != ERTS_PSFLG_SUSPENDED) {
+ /* Not suspended -> patch result... */
+ if (state & ERTS_PSFLG_EXITING) {
+ Eterm *tp = tuple_val(ssusp->message);
+ tp[2] = ssusp->async ? am_exited : am_badarg;
+ }
+ else {
+ Eterm *tp = tuple_val(ssusp->message);
+ ASSERT(!(state & ERTS_PSFLG_SUSPENDED));
+ tp[2] = ssusp->async ? am_not_suspended : am_internal_error;
+ }
+ }
+ erts_queue_proc_message(c_p, rp, 0, mp, ssusp->message);
+ }
+}
+
+static void
+handle_sync_suspend(Process *c_p, ErtsMessage *mp)
+{
+ ErtsProcSigPendingSuspend *psusp;
+
+ psusp = (ErtsProcSigPendingSuspend *) ERTS_PROC_GET_PENDING_SUSPEND(c_p);
+ if (!psusp)
+ sync_suspend_reply(c_p, mp, erts_atomic32_read_nob(&c_p->state));
+ else {
+ mp->next = psusp->sync;
+ psusp->sync = mp;
+ }
+}
+
+void
+erts_proc_sig_handle_pending_suspend(Process *c_p)
+{
+ ErtsMonitorSuspend *msp;
+ ErtsMessage *sync;
+ ErtsProcSigPendingSuspend *psusp;
+ erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);
+
+ psusp = (ErtsProcSigPendingSuspend *) ERTS_PROC_GET_PENDING_SUSPEND(c_p);
+
+ msp = psusp->mon;
+
+ while (msp) {
+ ErtsMonitorSuspend *next_msp = msp->next;
+ msp->next = NULL;
+ if (!(state & ERTS_PSFLG_EXITING)
+ && erts_monitor_is_in_table(&msp->md.target)) {
+ erts_aint_t mstate;
+
+ mstate = erts_atomic_read_bor_acqb(&msp->state,
+ ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE)); (void) mstate;
+ erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
+ }
+
+ erts_monitor_release(&msp->md.target);
+
+ msp = next_msp;
+ }
+
+ sync = psusp->sync;
+
+ while (sync) {
+ ErtsMessage *next_sync = sync->next;
+ sync->next = NULL;
+ sync_suspend_reply(c_p, sync, state);
+ sync = next_sync;
+ }
+
+ erts_free(ERTS_ALC_T_SIG_DATA, psusp);
+
+ ERTS_PROC_SET_PENDING_SUSPEND(c_p, NULL);
+}
+
/*
* Called in order to handle incoming signals.
*/
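erts_proc_sig_send_sync_suspend() above sizes the heap fragment so that an ErtsSyncSuspendRequest fits in the word area after hfrag.used_size, and sync_suspend_reply() later recovers it from &mp->hfrag.mem[0] + mp->hfrag.used_size. The standalone sketch below mimics that layout with invented types: a single word-sized array serves both as a small "heap" and as out-of-band storage for a C struct placed after the used words.

    #include <stdio.h>
    #include <string.h>

    typedef unsigned long word_t;                    /* stand-in for Eterm-sized words */

    struct request { word_t message; int async; };   /* plays the role of ErtsSyncSuspendRequest */

    #define USED_WORDS   3                            /* words used as "heap" (e.g. a 2-tuple) */
    #define EXTRA_WORDS  ((sizeof(struct request) + sizeof(word_t) - 1) / sizeof(word_t))

    int main(void)
    {
        word_t frag[USED_WORDS + EXTRA_WORDS];        /* one allocation for both parts */
        struct request in = { 4711, 1 }, out;

        memset(frag, 0, sizeof(frag));                /* the "heap" part would hold terms */

        /* Stash the request right after the used heap words... */
        memcpy(&frag[USED_WORDS], &in, sizeof(in));

        /* ...and recover it later, as sync_suspend_reply() does. */
        memcpy(&out, &frag[USED_WORDS], sizeof(out));

        printf("message=%lu async=%d\n", (unsigned long) out.message, out.async);
        return 0;
    }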
@@ -2653,7 +3000,7 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
{
Eterm tag;
erts_aint32_t state;
- int cnt, limit, abs_lim, msg_tracing;
+ int yield, cnt, limit, abs_lim, msg_tracing;
ErtsMessage *sig, ***next_nm_sig;
ErtsSigRecvTracing tracing;
@@ -2673,6 +3020,7 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
limit = *redsp;
*redsp = 0;
+ yield = 0;
if (!c_p->sig_qs.cont) {
if (state == -1)
@@ -2786,6 +3134,18 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
cnt += handle_nodedown(c_p, sig, mdp, next_nm_sig);
}
break;
+ case ERTS_MON_TYPE_SUSPEND:
+ tmon = (ErtsMonitor *) sig;
+ ASSERT(erts_monitor_is_target(tmon));
+ ASSERT(!erts_monitor_is_in_table(tmon));
+ mdp = erts_monitor_to_data(tmon);
+ if (erts_monitor_is_in_table(&mdp->origin)) {
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p),
+ &mdp->origin);
+ omon = &mdp->origin;
+ }
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ break;
default:
ERTS_INTERNAL_ERROR("invalid monitor type");
break;
@@ -2849,9 +3209,13 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
if (mon->type == ERTS_MON_TYPE_DIST_PROC)
erts_monitor_tree_insert(&ERTS_P_MONITORS(c_p), mon);
- else
+ else {
erts_monitor_list_insert(&ERTS_P_LT_MONITORS(c_p), mon);
+ if (mon->type == ERTS_MON_TYPE_SUSPEND)
+ handle_suspend(c_p, mon, &yield);
+ }
ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ cnt += 2;
break;
}
@@ -2895,9 +3259,16 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
erts_monitor_tree_delete(&ERTS_P_MONITORS(c_p), tmon);
else {
erts_monitor_list_delete(&ERTS_P_LT_MONITORS(c_p), tmon);
- if (type == ERTS_MON_TYPE_RESOURCE) {
+ switch (type) {
+ case ERTS_MON_TYPE_RESOURCE:
erts_nif_demonitored((ErtsResource *) tmon->other.ptr);
cnt++;
+ break;
+ case ERTS_MON_TYPE_SUSPEND:
+ erts_resume(c_p, ERTS_PROC_LOCK_MAIN);
+ break;
+ default:
+ break;
}
}
erts_monitor_release_both(mdp);
@@ -3012,6 +3383,21 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
break;
+ case ERTS_SIG_Q_OP_SYNC_SUSPEND:
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ handle_sync_suspend(c_p, sig);
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+
+ case ERTS_SIG_Q_OP_RPC:
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ remove_nm_sig(c_p, sig, next_nm_sig);
+ cnt += handle_rpc(c_p, (ErtsProcSigRPC *) sig, cnt,
+ limit, &yield);
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+
case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE: {
Uint16 type = ERTS_PROC_SIG_TYPE(tag);
@@ -3169,6 +3555,15 @@ stop: {
*redsp = cnt/4 + 1;
+ if (yield) {
+ int vreds = max_reds - *redsp;
+ if (vreds > 0) {
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ esdp->virtual_reds += vreds;
+ }
+ *redsp = max_reds;
+ }
+
return res;
}
}
@@ -3277,6 +3672,8 @@ erts_proc_sig_handle_exit(Process *c_p, int *redsp)
case ERTS_MON_TYPE_PROC:
case ERTS_MON_TYPE_DIST_PROC:
case ERTS_MON_TYPE_NODE:
+ case ERTS_MON_TYPE_NODES:
+ case ERTS_MON_TYPE_SUSPEND:
erts_monitor_release((ErtsMonitor *) sig);
break;
default:
@@ -3332,6 +3729,17 @@ erts_proc_sig_handle_exit(Process *c_p, int *redsp)
handle_process_info(c_p, NULL, sig, next_nm_sig, 0);
break;
+ case ERTS_SIG_Q_OP_SYNC_SUSPEND:
+ handle_sync_suspend(c_p, sig);
+ break;
+
+ case ERTS_SIG_Q_OP_RPC: {
+ int yield = 0;
+ handle_rpc(c_p, (ErtsProcSigRPC *) sig,
+ cnt, limit, &yield);
+ break;
+ }
+
case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
destroy_trace_info((ErtsSigTraceInfo *) sig);
break;
@@ -3467,6 +3875,7 @@ erts_proc_sig_signal_size(ErtsSignal *sig)
}
break;
+ case ERTS_SIG_Q_OP_SYNC_SUSPEND:
case ERTS_SIG_Q_OP_PERSISTENT_MON_MSG:
case ERTS_SIG_Q_OP_IS_ALIVE:
size = ((ErtsMessage *) sig)->hfrag.alloc_size;
@@ -3522,6 +3931,10 @@ erts_proc_sig_signal_size(ErtsSignal *sig)
break;
}
+ case ERTS_SIG_Q_OP_RPC:
+ size = sizeof(ErtsProcSigRPC);
+ break;
+
default:
ERTS_INTERNAL_ERROR("Unknown signal");
break;
@@ -3598,17 +4011,13 @@ erts_proc_sig_receive_helper(Process *c_p,
*/
*get_outp = 0;
*msgpp = NULL;
+
return consumed_reds;
}
erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
- if (left_reds <= 0) {
- *get_outp = -1; /* yield */
- *msgpp = NULL;
-
- ASSERT(consumed_reds >= (fcalls - neg_o_reds));
- return consumed_reds;
- }
+ if (left_reds <= 0)
+ break; /* Yield */
/* handle newly arrived signals... */
}
@@ -3629,19 +4038,27 @@ erts_proc_sig_receive_helper(Process *c_p,
max_reds, !0);
consumed_reds += reds;
left_reds -= reds;
- /* we may have exited by an incoming signal... */
- if (state & ERTS_PSFLG_EXITING) {
+
+ /* we may have exited or been suspended by an incoming signal... */
+
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_SUSPENDED)) {
+ if (state & ERTS_PSFLG_SUSPENDED)
+ break; /* Yield */
+
/*
* Process need to schedule out in order
* to terminate. Prepare this a bit...
*/
+ ASSERT(state & ERTS_PSFLG_EXITING);
ASSERT(c_p->flags & F_DELAY_GC);
c_p->flags &= ~F_DELAY_GC;
c_p->arity = 0;
c_p->current = NULL;
+
*get_outp = 1;
*msgpp = NULL;
+
return consumed_reds;
}
@@ -3652,17 +4069,20 @@ erts_proc_sig_receive_helper(Process *c_p,
return consumed_reds;
}
- if (left_reds <= 0) {
- *get_outp = -1; /* yield */
- *msgpp = NULL;
-
- ASSERT(consumed_reds >= (fcalls - neg_o_reds));
- return consumed_reds;
- }
+ if (left_reds <= 0)
+ break; /* yield */
ASSERT(!c_p->sig_qs.cont);
/* Go fetch again... */
}
+
+ /* Yield... */
+
+ *get_outp = -1;
+ *msgpp = NULL;
+
+ ASSERT(consumed_reds >= (fcalls - neg_o_reds));
+ return consumed_reds;
}
static int
diff --git a/erts/emulator/beam/erl_proc_sig_queue.h b/erts/emulator/beam/erl_proc_sig_queue.h
index 8b7cd35f61..3fc2d06b2d 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.h
+++ b/erts/emulator/beam/erl_proc_sig_queue.h
@@ -33,6 +33,11 @@
* - Group leader
* - Is process alive
* - Process info request
+ * - Suspend request (monitor of suspend type)
+ * - Resume request (demonitor of suspend type)
+ * - Suspend cleanup (monitor down of suspend type)
+ * - Sync suspend
+ * - RPC request
* - Trace change
*
* The signal queue consists of three parts:
@@ -557,6 +562,102 @@ erts_proc_sig_send_process_info_request(Process *c_p,
Uint reserve_size,
Eterm ref);
+/**
+ *
+ * @brief Send a 'sync suspend' signal to a process.
+ *
+ * A response message '{Tag, Reply}' is sent to the
+ * sender when the request has been processed, where
+ * Tag is the term passed as the 'tag' argument. Reply
+ * is either 'suspended', 'not_suspended', or 'exited'
+ * if the operation is asynchronous; otherwise, it is
+ * the 'reply' argument, or 'badarg' if the process
+ * has terminated.
+ *
+ * This signal does *not* change the suspend state, only
+ * reads and replies with the state. This signal is typically
+ * sent after a suspend request (monitor of suspend type)
+ * signal has been sent to the process in order to get a
+ * response when the suspend monitor has been processed.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] tag Tag to use in response
+ * message to the sending
+ * process (i.e., c_p).
+ *
+ * @param[in] reply Reply to send if this
+ * is a synchronous operation;
+ * otherwise, THE_NON_VALUE.
+ */
+void
+erts_proc_sig_send_sync_suspend(Process *c_p, Eterm to,
+ Eterm tag, Eterm reply);
+
+/**
+ *
+ * @brief Send an 'rpc' signal to a process.
+ *
+ * The function 'func' will be executed in the
+ * context of the receiving process. A response
+ * message '{Ref, Result}' is sent to the sender
+ * when 'func' has been called. 'Ref' is the reference
+ * returned by this function and 'Result' is the
+ * term returned by 'func'. If the return value of
+ * 'func' is not an immediate term, 'func' has to
+ * allocate a heap fragment where the result is stored
+ * and update the heap fragment pointer pointer
+ * passed as fourth argument to point to it.
+ *
+ * If this function returns a reference, 'func' will
+ * be called in the context of the receiver. However,
+ * note that this might happen when the receiver is in
+ * an exiting state. The caller of this function
+ * *unconditionally* has to enter a receive that matches
+ * on the returned reference in all clauses as its next
+ * receive; otherwise, bad things will happen!
+ *
+ * If THE_NON_VALUE is returned, the receiver did not
+ * exist. The signal was not sent, and no specific
+ * receive has to be entered by the caller.
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ *
+ * @param[in] to Identifier of receiver process.
+ *
+ * @param[in] reply Non-zero if a reply is wanted.
+ *
+ * @param[in] func Function to execute in the
+ * context of the receiver.
+ * First argument will be a
+ * pointer to the process struct
+ * of the receiver process.
+ * Second argument will be 'arg'
+ * (see below). Third argument
+ * will be a pointer to an in/out
+ * reduction counter. Fourth
+ * argument will be a pointer to
+ * a pointer to a heap fragment
+ * for storage of the result
+ * returned from 'func' (i.e. an
+ * 'out' parameter).
+ *
+ * @param[in] arg Void pointer to argument
+ * to pass as second argument
+ * in call of 'func'.
+ *
+ * @returns If the request was sent,
+ * an internal ordinary
+ * reference; otherwise,
+ * THE_NON_VALUE (non-existing
+ * receiver).
+ */
+Eterm
+erts_proc_sig_send_rpc_request(Process *c_p,
+ Eterm to,
+ int reply,
+ Eterm (*func)(Process *, void *, int *, ErlHeapFragment **),
+ void *arg);
/*
* End of send operations of currently supported process signals.
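The documentation above for erts_proc_sig_send_rpc_request() describes a signal that carries a function pointer plus an argument, is executed in the receiver's context, and is answered with a '{Ref, Result}' pair that the requester must selectively receive. Below is a plain-C model of that shape with invented names; it does not use the ERTS types or the real signal queue.

    #include <stdio.h>

    typedef long term_t;                       /* stand-in for Eterm */

    struct rpc_request {
        long ref;                              /* plays the role of the reference */
        term_t (*func)(void *receiver_state, void *arg);
        void *arg;
    };

    struct rpc_reply { long ref; term_t result; };

    /* Runs on the "receiver" side, in the receiver's own context. */
    static struct rpc_reply serve_rpc(void *receiver_state, struct rpc_request *req)
    {
        struct rpc_reply reply;
        reply.ref = req->ref;
        reply.result = req->func(receiver_state, req->arg);
        return reply;
    }

    /* Invented callback: peek at some receiver-local state. */
    static term_t read_counter(void *receiver_state, void *arg)
    {
        (void) arg;
        return *(term_t *) receiver_state;
    }

    int main(void)
    {
        term_t receiver_counter = 42;
        struct rpc_request req = { 1001, read_counter, NULL };
        struct rpc_reply rep = serve_rpc(&receiver_counter, &req);

        /* The requester would now match on rep.ref, like the selective
         * receive on the returned reference described above. */
        printf("{%ld, %ld}\n", rep.ref, rep.result);
        return 0;
    }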
@@ -733,17 +834,50 @@ Sint
erts_proc_sig_privqs_len(Process *c_p);
-/* SVERK: Doc me up! */
+/**
+ * @brief Enqueue list of signals on process.
+ *
+ * Message queue must be locked on receiving process.
+ *
+ * @param rp Receiving process.
+ * @param first First signal in list.
+ * @param last Last signal in list.
+ * @param last_next Pointer to next-pointer to last non-message signal
+ * or NULL if no non-message signal after 'first'.
+ * @param msg_cnt Number of message signals in list.
+ * @param in_state 'state' of rp.
+ *
+ * @return 'state' of rp.
+ */
erts_aint32_t
erts_enqueue_signals(Process *rp, ErtsMessage *first,
ErtsMessage **last, ErtsMessage **last_next,
Uint msg_cnt,
erts_aint32_t in_state);
-/* SVERK: Doc me up! */
+/**
+ *
+ * @brief Flush pending signal.
+ *
+ */
void
erts_proc_sig_send_pending(ErtsSchedulerData* esdp);
+/**
+ *
+ * @brief Schedule process to handle enqueued signal(s).
+ *
+ * @param rp Receiving process.
+ * @param state 'state' of rp.
+ * @param enable_flag Additional state flags to enable, like
+ * ERTS_PSFLG_ACTIVE if message has been enqueued.
+ */
+ERTS_GLB_INLINE void erts_proc_notify_new_sig(Process* rp, erts_aint32_t state,
+ erts_aint32_t enable_flag);
+
+void erts_make_dirty_proc_handled(Eterm pid, erts_aint32_t state,
+ erts_aint32_t prio);
+
typedef struct {
Uint size;
@@ -813,6 +947,21 @@ void
erts_proc_sig_clear_seq_trace_tokens(Process *c_p);
/**
+ *
+ * @brief Handle pending suspend requests
+ *
+ * Should be called by processes when they stop
+ * execution on a dirty scheduler if they have
+ * pending suspend requests (i.e. when
+ * ERTS_PROC_GET_PENDING_SUSPEND(c_p) != NULL).
+ *
+ * @param[in] c_p Pointer to executing
+ * process
+ */
+void
+erts_proc_sig_handle_pending_suspend(Process *c_p);
+
+/**
* @brief Initialize this functionality
*/
void erts_proc_sig_queue_init(void);
@@ -879,6 +1028,24 @@ erts_proc_sig_fetch(Process *proc)
return res;
}
+ERTS_GLB_INLINE void
+erts_proc_notify_new_sig(Process* rp, erts_aint32_t state,
+ erts_aint32_t enable_flag)
+{
+ if (~(state & (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_SIG_IN_Q))
+ | (~state & enable_flag)) {
+ /* Schedule process... */
+ state = erts_proc_sys_schedule(rp, state, enable_flag);
+ }
+
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ erts_make_dirty_proc_handled(rp->common.id, state, -1);
+ }
+}
+
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
#endif /* ERTS_PROC_SIG_QUEUE_H__ */
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index ad7ac27ac3..8253ec4f40 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -185,8 +185,6 @@ sched_get_busy_wait_params(ErtsSchedulerData *esdp)
return &sched_busy_wait_params[esdp->type];
}
-int erts_disable_proc_not_running_opt;
-
static ErtsAuxWorkData *aux_thread_aux_work_data;
static ErtsAuxWorkData *poll_thread_aux_work_data;
@@ -730,6 +728,11 @@ erts_pre_init_process(void)
= ERTS_PSD_DIST_ENTRY_GET_LOCKS;
erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks
= ERTS_PSD_DIST_ENTRY_SET_LOCKS;
+
+ erts_psd_required_locks[ERTS_PSD_PENDING_SUSPEND].get_locks
+ = ERTS_PSD_PENDING_SUSPEND_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_PENDING_SUSPEND].set_locks
+ = ERTS_PSD_PENDING_SUSPEND_SET_LOCKS;
#endif
}
@@ -744,7 +747,6 @@ void
erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
{
- erts_disable_proc_not_running_opt = 0;
erts_init_proc_lock(ncpu);
init_proclist_alloc();
@@ -6612,13 +6614,13 @@ change_proc_schedule_state(Process *p,
if (((n & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
- && (!(a & (ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)
- && (!(a & ERTS_PSFLG_ACTIVE)
- || (a & ERTS_PSFLG_SUSPENDED))))) {
+ & ((a & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
+ & !(a & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
/* We activated a previously inactive process */
profile_runnable_proc(p, am_active);
}
@@ -8553,427 +8555,22 @@ erts_start_schedulers(void)
}
}
-
-
-static void
-add_pend_suspend(Process *suspendee,
- Eterm originator_pid,
- void (*handle_func)(Process *,
- ErtsProcLocks,
- int,
- Eterm))
-{
- ErtsPendingSuspend *psp = erts_alloc(ERTS_ALC_T_PEND_SUSPEND,
- sizeof(ErtsPendingSuspend));
- psp->next = NULL;
-#ifdef DEBUG
-#if defined(ARCH_64)
- psp->end = (ErtsPendingSuspend *) 0xdeaddeaddeaddead;
-#else
- psp->end = (ErtsPendingSuspend *) 0xdeaddead;
-#endif
-#endif
- psp->pid = originator_pid;
- psp->handle_func = handle_func;
-
- if (suspendee->pending_suspenders)
- suspendee->pending_suspenders->end->next = psp;
- else
- suspendee->pending_suspenders = psp;
- suspendee->pending_suspenders->end = psp;
-}
-
-static void
-handle_pending_suspend(Process *p, ErtsProcLocks p_locks)
-{
- ErtsPendingSuspend *psp;
- int is_alive = !ERTS_PROC_IS_EXITING(p);
-
- ERTS_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS);
-
- /*
- * New pending suspenders might appear while we are processing
- * (since we may release the status lock on p while processing).
- */
- while (p->pending_suspenders) {
- psp = p->pending_suspenders;
- p->pending_suspenders = NULL;
- while (psp) {
- ErtsPendingSuspend *free_psp;
- (*psp->handle_func)(p, p_locks, is_alive, psp->pid);
- free_psp = psp;
- psp = psp->next;
- erts_free(ERTS_ALC_T_PEND_SUSPEND, (void *) free_psp);
- }
- }
-
-}
-
-static ERTS_INLINE void
-cancel_suspend_of_suspendee(Process *p, ErtsProcLocks p_locks)
-{
- if (is_not_nil(p->suspendee)) {
- ErtsMonitor *mon;
- Eterm suspendee = p->suspendee;
- Process *rp;
- if (!(p_locks & ERTS_PROC_LOCK_STATUS))
- erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- rp = erts_pid2proc(p, p_locks|ERTS_PROC_LOCK_STATUS,
- suspendee, ERTS_PROC_LOCK_STATUS);
- if (rp) {
- erts_resume(rp, ERTS_PROC_LOCK_STATUS);
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- }
- if (!(p_locks & ERTS_PROC_LOCK_STATUS))
- erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- p->suspendee = NIL;
-
- mon = erts_monitor_tree_lookup(p->suspend_monitors,
- suspendee);
- if (mon) {
- erts_monitor_tree_delete(&p->suspend_monitors,
- mon);
- erts_monitor_suspend_destroy(erts_monitor_suspend(mon));
- }
- }
-}
-
-static void
-handle_pend_sync_suspend(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm suspender_pid)
-{
- Process *suspender;
-
- ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
-
- suspender = erts_pid2proc(suspendee,
- suspendee_locks,
- suspender_pid,
- ERTS_PROC_LOCK_STATUS);
- if (suspender) {
- ASSERT(is_nil(suspender->suspendee));
- if (suspendee_alive) {
- erts_suspend(suspendee, suspendee_locks, NULL);
- suspender->suspendee = suspendee->common.id;
- }
- /* suspender is suspended waiting for suspendee to suspend;
- resume suspender */
- ASSERT(suspendee != suspender);
- resume_process(suspender, ERTS_PROC_LOCK_STATUS);
- erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
- }
-}
-
-static Process *
-pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks, int suspend)
-{
- Process *rp;
- int unlock_c_p_status;
-
- ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p));
-
- ERTS_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN);
- ERTS_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS));
-
- if (c_p->common.id == pid)
- return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
-
- if (c_p_locks & ERTS_PROC_LOCK_STATUS)
- unlock_c_p_status = 0;
- else {
- unlock_c_p_status = 1;
- erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
- }
-
- if (c_p->suspendee == pid) {
- /* Process previously suspended by c_p (below)... */
- ErtsProcLocks rp_locks = pid_locks|ERTS_PROC_LOCK_STATUS;
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, pid, rp_locks);
- c_p->suspendee = NIL;
- ASSERT(c_p->flags & F_P2PNR_RESCHED);
- c_p->flags &= ~F_P2PNR_RESCHED;
- if (!suspend && rp)
- resume_process(rp, rp_locks);
- }
- else {
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, ERTS_PROC_LOCK_STATUS);
-
- if (!rp) {
- c_p->flags &= ~F_P2PNR_RESCHED;
- goto done;
- }
-
- ASSERT(!(c_p->flags & F_P2PNR_RESCHED));
-
- /*
- * Suspend the other process in order to prevent
- * it from being selected for normal execution.
- * This will however not prevent it from being
- * selected for execution of a system task. If
- * it is selected for execution of a system task
- * we might be blocked for quite a while if the
- * try-lock below fails. That is, there is room
- * for improvement here...
- */
-
- if (!suspend_process(c_p, rp)) {
- /* Other process running */
-
- ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)
- & erts_atomic32_read_nob(&rp->state));
-
- if (!suspend
- && (erts_atomic32_read_nob(&rp->state)
- & ERTS_PSFLG_DIRTY_RUNNING)) {
- ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
- if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) {
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, pid_locks|ERTS_PROC_LOCK_STATUS);
- }
- goto done;
- }
-
- running:
-
- /*
- * If we got pending suspenders and suspend ourselves waiting
- * to suspend another process we might deadlock.
- * In this case we have to yield, be suspended by
- * someone else and then do it all over again.
- */
- if (!c_p->pending_suspenders) {
- /* Mark rp pending for suspend by c_p */
- add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend);
- ASSERT(is_nil(c_p->suspendee));
-
- /* Suspend c_p; when rp is suspended c_p will be resumed. */
- suspend_process(c_p, c_p);
- c_p->flags |= F_P2PNR_RESCHED;
- }
- /* Yield (caller is assumed to yield immediately in bif). */
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- rp = ERTS_PROC_LOCK_BUSY;
- }
- else {
- ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
- if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) {
- if ((ERTS_PSFLG_RUNNING_SYS|ERTS_PSFLG_DIRTY_RUNNING_SYS)
- & erts_atomic32_read_nob(&rp->state)) {
- /* Executing system task... */
- resume_process(rp, ERTS_PROC_LOCK_STATUS);
- goto running;
- }
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- /*
- * If we are unlucky, the process just got selected for
- * execution of a system task. In this case we may be
- * blocked here for quite a while... Execution of system
- * tasks are fortunately quite rare events. We try to
- * avoid this by checking if it is in a state executing
- * system tasks (above), but it will not prevent all
- * scenarios for a long block here...
- */
- rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, pid_locks|ERTS_PROC_LOCK_STATUS);
- if (!rp)
- goto done;
- }
-
- /*
- * The previous suspend has prevented the process
- * from being selected for normal execution regardless
- * of locks held or not held on it...
- */
-#ifdef DEBUG
- {
- erts_aint32_t state;
- state = erts_atomic32_read_nob(&rp->state);
- ASSERT(!(state & ERTS_PSFLG_RUNNING));
- }
-#endif
-
- if (!suspend)
- resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
- }
- }
-
- done:
-
- if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS))
- erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- if (unlock_c_p_status)
- erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
- return rp;
-}
-
-
-/*
- * Like erts_pid2proc() but:
- *
- * * At least ERTS_PROC_LOCK_MAIN have to be held on c_p.
- * * At least ERTS_PROC_LOCK_MAIN have to be taken on pid.
- * * It also waits for proc to be in a state != running and garbing.
- * * If ERTS_PROC_LOCK_BUSY is returned, the calling process has to
- * yield (ERTS_BIF_YIELD[0-3]()). c_p might in this case have been
- * suspended.
- */
-Process *
-erts_pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- return pid2proc_not_running(c_p, c_p_locks, pid, pid_locks, 0);
-}
-
-/*
- * erts_pid2proc_nropt() is normally the same as
- * erts_pid2proc_not_running(). However it is only
- * to be used when 'not running' is a pure optimization,
- * not a requirement.
- */
-
-Process *
-erts_pid2proc_nropt(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm pid, ErtsProcLocks pid_locks)
-{
- if (erts_disable_proc_not_running_opt)
- return erts_pid2proc(c_p, c_p_locks, pid, pid_locks);
- else
- return erts_pid2proc_not_running(c_p, c_p_locks, pid, pid_locks);
-}
-
-static ERTS_INLINE int
-do_bif_suspend_process(Process *c_p,
- ErtsMonitorSuspend *smon,
- Process *suspendee)
-{
- ASSERT(suspendee);
- ASSERT(!ERTS_PROC_IS_EXITING(suspendee));
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- & erts_proc_lc_my_proc_locks(suspendee));
- if (smon) {
- if (!smon->active) {
- if (!suspend_process(c_p, suspendee))
- return 0;
- }
- smon->active += smon->pending;
- ASSERT(smon->active);
- smon->pending = 0;
- return 1;
- }
- return 0;
-}
-
-static void
-handle_pend_bif_sync_suspend(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm suspender_pid)
-{
- Process *suspender;
-
- ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
-
- suspender = erts_pid2proc(suspendee,
- suspendee_locks,
- suspender_pid,
- ERTS_PROC_LOCK_STATUS);
- if (suspender) {
- ErtsMonitorSuspend *smon;
- ErtsMonitor *mon;
- mon = erts_monitor_tree_lookup(suspender->suspend_monitors,
- suspendee->common.id);
- smon = erts_monitor_suspend(mon);
-
- ASSERT(is_nil(suspender->suspendee));
- if (!suspendee_alive) {
- if (mon) {
- erts_monitor_tree_delete(&suspender->suspend_monitors,
- mon);
- erts_monitor_suspend_destroy(smon);
- }
- }
- else {
-#ifdef DEBUG
- int res =
-#endif
- do_bif_suspend_process(suspendee, smon, suspendee);
- ASSERT(!smon || res != 0);
- suspender->suspendee = suspendee->common.id;
- }
- /* suspender is suspended waiting for suspendee to suspend;
- resume suspender */
- ASSERT(suspender != suspendee);
- resume_process(suspender, ERTS_PROC_LOCK_STATUS);
- erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
- }
-}
-
-static void
-handle_pend_bif_async_suspend(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm suspender_pid)
-{
-
- Process *suspender;
-
- ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS);
-
- suspender = erts_pid2proc(suspendee,
- suspendee_locks,
- suspender_pid,
- ERTS_PROC_LOCK_STATUS);
- if (suspender) {
- ErtsMonitorSuspend *smon;
- ErtsMonitor *mon;
- mon = erts_monitor_tree_lookup(suspender->suspend_monitors,
- suspendee->common.id);
- smon = erts_monitor_suspend(mon);
- ASSERT(is_nil(suspender->suspendee));
- if (!suspendee_alive) {
- if (mon) {
- erts_monitor_tree_delete(&suspender->suspend_monitors,
- mon);
- erts_monitor_suspend_destroy(smon);
- }
- }
- else {
-#ifdef DEBUG
- int res =
-#endif
- do_bif_suspend_process(suspendee, smon, suspendee);
- ASSERT(!smon || res != 0);
- }
- erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
- }
-}
-
-
-/*
- * The erlang:suspend_process/2 BIF
- */
-
BIF_RETTYPE
-suspend_process_2(BIF_ALIST_2)
+erts_internal_suspend_process_2(BIF_ALIST_2)
{
Eterm res;
- Process* suspendee = NULL;
- ErtsMonitorSuspend *smon;
- ErtsProcLocks xlocks = (ErtsProcLocks) 0;
- int created;
-
- /* Options and default values: */
- int asynchronous = 0;
+ Eterm reply_tag = THE_NON_VALUE;
+ Eterm reply_res = THE_NON_VALUE;
+ int suspend;
+ int sync = 0;
+ int async = 0;
int unless_suspending = 0;
-
+ erts_aint_t mstate;
+ ErtsMonitorSuspend *msp;
+ ErtsMonitorData *mdp;
if (BIF_P->common.id == BIF_ARG_1)
- goto badarg; /* We are not allowed to suspend ourselves */
+ BIF_RET(am_badarg); /* We are not allowed to suspend ourselves */
if (is_not_nil(BIF_ARG_2)) {
/* Parse option list */
@@ -8987,191 +8584,127 @@ suspend_process_2(BIF_ALIST_2)
unless_suspending = 1;
break;
case am_asynchronous:
- asynchronous = 1;
+ async = 1;
break;
- default:
- goto badarg;
+ default: {
+ if (is_tuple_arity(arg, 2)) {
+ Eterm *tp = tuple_val(arg);
+ if (tp[1] == am_asynchronous) {
+ async = 1;
+ reply_tag = tp[2];
+ break;
+ }
+ }
+ BIF_RET(am_badarg);
}
+ }
arg = CDR(lp);
- }
+ }
if (is_not_nil(arg))
- goto badarg;
+ BIF_RET(am_badarg);
}
- xlocks = ERTS_PROC_LOCK_STATUS;
-
- erts_proc_lock(BIF_P, xlocks);
-
- suspendee = erts_pid2proc(BIF_P,
- ERTS_PROC_LOCK_MAIN|xlocks,
- BIF_ARG_1,
- ERTS_PROC_LOCK_STATUS);
- if (!suspendee)
- goto no_suspendee;
-
- smon = erts_monitor_suspend_tree_lookup_create(&BIF_P->suspend_monitors,
- &created,
- BIF_ARG_1);
-
- if (asynchronous) {
- /* --- Asynchronous suspend begin ---------------------------------- */
-
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- & erts_proc_lc_my_proc_locks(BIF_P));
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- == erts_proc_lc_my_proc_locks(suspendee));
-
- if (smon->active) {
- smon->active += smon->pending;
- smon->pending = 0;
- if (unless_suspending)
- res = am_false;
- else if (smon->active == INT_MAX)
- goto system_limit;
- else {
- smon->active++;
- res = am_true;
- }
- /* done */
- }
- else {
- /* We havn't got any active suspends on the suspendee */
- if (smon->pending && unless_suspending)
- res = am_false;
- else {
- if (smon->pending == INT_MAX)
- goto system_limit;
-
- smon->pending++;
-
- if (!do_bif_suspend_process(BIF_P, smon, suspendee))
- add_pend_suspend(suspendee,
- BIF_P->common.id,
- handle_pend_bif_async_suspend);
-
- res = am_true;
- }
- /* done */
- }
- /* --- Asynchronous suspend end ------------------------------------ */
- }
- else /* if (!asynchronous) */ {
- /* --- Synchronous suspend begin ----------------------------------- */
-
- ERTS_LC_ASSERT(((ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_STATUS)
- & erts_proc_lc_my_proc_locks(BIF_P))
- == (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_STATUS));
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
- == erts_proc_lc_my_proc_locks(suspendee));
-
- if (BIF_P->suspendee == BIF_ARG_1) {
- /* We are back after a yield and the suspendee
- has been suspended on behalf of us. */
- ASSERT(smon->active >= 1);
- BIF_P->suspendee = NIL;
- res = (!unless_suspending || smon->active == 1
- ? am_true
- : am_false);
- /* done */
- }
- else if (smon->active) {
- if (unless_suspending)
- res = am_false;
- else {
- smon->active++;
- res = am_true;
- }
- /* done */
- }
- else {
- /* We haven't got any active suspends on the suspendee */
-
- /*
- * If we have pending suspenders and suspend ourselves waiting
- * to suspend another process, or suspend another process
- * we might deadlock. In this case we have to yield,
- * be suspended by someone else, and then do it all over again.
- */
- if (BIF_P->pending_suspenders)
- goto yield;
-
- if (!unless_suspending && smon->pending == INT_MAX)
- goto system_limit;
- if (!unless_suspending || smon->pending == 0)
- smon->pending++;
-
- if (do_bif_suspend_process(BIF_P, smon, suspendee)) {
- res = (!unless_suspending || smon->active == 1
- ? am_true
- : am_false);
- /* done */
- }
- else {
- /* Mark suspendee pending for suspend by BIF_P */
- add_pend_suspend(suspendee,
- BIF_P->common.id,
- handle_pend_bif_sync_suspend);
-
- ASSERT(is_nil(BIF_P->suspendee));
-
- /*
- * Suspend BIF_P; when suspendee is suspended, BIF_P
- * will be resumed and this BIF will be called again.
- * This time with BIF_P->suspendee == BIF_ARG_1 (see
- * above).
- */
- suspend_process(BIF_P, BIF_P);
- goto yield;
- }
- }
- /* --- Synchronous suspend end ------------------------------------- */
- }
-
-#ifdef DEBUG
- {
- erts_aint32_t state = erts_atomic32_read_acqb(&suspendee->state);
- ASSERT((state & ERTS_PSFLG_SUSPENDED)
- || (asynchronous && smon->pending));
- ASSERT((state & ERTS_PSFLG_SUSPENDED)
- || !smon->active);
+ if (!unless_suspending) {
+ ErtsMonitor *mon;
+ mon = erts_monitor_tree_lookup_create(&ERTS_P_MONITORS(BIF_P),
+ &suspend,
+ ERTS_MON_TYPE_SUSPEND,
+ BIF_P->common.id,
+ BIF_ARG_1);
+ ASSERT(mon->other.item == BIF_ARG_1);
+
+ mdp = erts_monitor_to_data(mon);
+ msp = (ErtsMonitorSuspend *) mdp;
+
+ mstate = erts_atomic_inc_read_relb(&msp->state);
+ ASSERT(suspend || (mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) > 1);
+ sync = !async & !suspend & !(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ suspend = !!suspend; /* ensure 0|1 */
+ res = am_true;
}
-#endif
-
- erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- erts_proc_unlock(BIF_P, xlocks);
- BIF_RET(res);
-
- system_limit:
- ERTS_BIF_PREP_ERROR(res, BIF_P, SYSTEM_LIMIT);
- goto do_return;
-
- no_suspendee: {
+ else {
ErtsMonitor *mon;
- BIF_P->suspendee = NIL;
- mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1);
+ mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(BIF_P),
+ BIF_ARG_1);
if (mon) {
- erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon);
- erts_monitor_suspend_destroy(erts_monitor_suspend(mon));
+ ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND);
+ mdp = erts_monitor_to_data(mon);
+ msp = (ErtsMonitorSuspend *) mdp;
+ mstate = erts_atomic_read_nob(&msp->state);
+ ASSERT((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) > 0);
+ mdp = NULL;
+ sync = !async & !(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ suspend = 0;
+ res = am_false;
+ }
+ else {
+ mdp = erts_monitor_create(ERTS_MON_TYPE_SUSPEND, NIL,
+ BIF_P->common.id,
+ BIF_ARG_1, NIL);
+ mon = &mdp->origin;
+ erts_monitor_tree_insert(&ERTS_P_MONITORS(BIF_P), mon);
+ msp = (ErtsMonitorSuspend *) mdp;
+ mstate = erts_atomic_inc_read_relb(&msp->state);
+ ASSERT(!(mstate & ERTS_MSUSPEND_STATE_FLG_ACTIVE));
+ suspend = !0;
+ res = am_true;
}
}
- badarg:
- ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
- goto do_return;
+ if (suspend) {
+ erts_aint32_t state;
+ Process *rp;
+ int send_sig = 0;
+
+ /* Process states that prevent a direct suspend; they are passed as
+ * extra fail states to erts_try_lock_sig_free_proc() below. */
+ state = (ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS);
+
+ rp = erts_try_lock_sig_free_proc(BIF_ARG_1,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
+ &state);
+ if (!rp)
+ goto noproc;
+ if (rp == ERTS_PROC_LOCK_BUSY)
+ send_sig = !0;
+ else {
+ send_sig = !suspend_process(BIF_P, rp);
+ if (!send_sig) {
+ erts_monitor_list_insert(&ERTS_P_LT_MONITORS(rp), &mdp->target);
+ erts_atomic_read_bor_relb(&msp->state,
+ ERTS_MSUSPEND_STATE_FLG_ACTIVE);
+ }
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ }
+ if (send_sig) {
+ if (erts_proc_sig_send_monitor(&mdp->target, BIF_ARG_1))
+ sync = !async;
+ else {
+ noproc:
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(BIF_P), &mdp->origin);
+ erts_monitor_release_both(mdp);
+ if (!async)
+ res = am_badarg;
+ }
+ }
+ }
- yield:
- ERTS_BIF_PREP_YIELD2(res, bif_export[BIF_suspend_process_2],
- BIF_P, BIF_ARG_1, BIF_ARG_2);
-
- do_return:
- if (suspendee)
- erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- if (xlocks)
- erts_proc_unlock(BIF_P, xlocks);
- return res;
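+ /* Synchronous request that is not yet fulfilled: the result will be
+ * delivered in a message tagged with a fresh reference, so save a
+ * receive marker that lets the caller wait for that reply efficiently. */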
+ if (sync) {
+ ASSERT(is_non_value(reply_tag));
+ reply_res = res;
+ reply_tag = res = erts_make_ref(BIF_P);
+ ERTS_RECV_MARK_SAVE(BIF_P);
+ ERTS_RECV_MARK_SET(BIF_P);
+ }
-}
+ if (is_value(reply_tag))
+ erts_proc_sig_send_sync_suspend(BIF_P, BIF_ARG_1, reply_tag, reply_res);
+ BIF_RET(res);
+}
/*
* The erlang:resume_process/1 BIF
@@ -9181,90 +8714,32 @@ BIF_RETTYPE
resume_process_1(BIF_ALIST_1)
{
ErtsMonitor *mon;
- ErtsMonitorSuspend *smon;
- Process *suspendee;
- int is_active;
+ ErtsMonitorSuspend *msp;
+ erts_aint_t mstate;
if (BIF_P->common.id == BIF_ARG_1)
BIF_ERROR(BIF_P, BADARG);
- erts_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS);
- mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1);
- smon = erts_monitor_suspend(mon);
-
- if (!smon) {
+ mon = erts_monitor_tree_lookup(ERTS_P_MONITORS(BIF_P),
+ BIF_ARG_1);
+ if (!mon) {
/* No previous suspend or dead suspendee */
- goto error;
- }
- else if (smon->pending) {
- smon->pending--;
- ASSERT(smon->pending >= 0);
- if (smon->active) {
- smon->active += smon->pending;
- smon->pending = 0;
- }
- is_active = smon->active;
- }
- else if (smon->active) {
- smon->active--;
- ASSERT(smon->pending == 0);
- is_active = 1;
- }
- else {
- /* No previous suspend or dead suspendee */
- goto no_suspendee;
+ BIF_ERROR(BIF_P, BADARG);
}
- if (smon->active || smon->pending || !is_active) {
- /* Leave the suspendee as it is; just verify that it is still alive */
- suspendee = erts_proc_lookup(BIF_ARG_1);
- if (!suspendee)
- goto no_suspendee;
+ ASSERT(mon->type == ERTS_MON_TYPE_SUSPEND);
+ msp = (ErtsMonitorSuspend *) erts_monitor_to_data(mon);
- }
- else {
- /* Resume */
- suspendee = erts_pid2proc(BIF_P,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
- BIF_ARG_1,
- ERTS_PROC_LOCK_STATUS);
- if (!suspendee) {
- mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1);
- smon = erts_monitor_suspend(mon);
- if (!mon)
- goto error;
- goto no_suspendee;
- }
-
- ASSERT(mon == erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1));
+ mstate = erts_atomic_dec_read_relb(&msp->state);
- ASSERT(ERTS_PSFLG_SUSPENDED
- & erts_atomic32_read_nob(&suspendee->state));
- ASSERT(BIF_P != suspendee);
- resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
-
- erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- }
+ ASSERT((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) >= 0);
- if (!smon->active && !smon->pending) {
- ASSERT(mon);
- erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon);
- erts_monitor_suspend_destroy(smon);
+ if ((mstate & ERTS_MSUSPEND_STATE_COUNTER_MASK) == 0) {
+ erts_monitor_tree_delete(&ERTS_P_MONITORS(BIF_P), mon);
+ erts_proc_sig_send_demonitor(mon);
}
- erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
-
BIF_RET(am_true);
-
- no_suspendee:
- /* cleanup */
- ASSERT(mon);
- erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon);
- erts_monitor_suspend_destroy(smon);
-
- error:
- erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
- BIF_ERROR(BIF_P, BADARG);
}
BIF_RETTYPE
@@ -9610,6 +9085,17 @@ scheduler_gc_proc(Process *c_p, int reds_left)
return reds;
}
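+/* Callback for erts_proc_dec_refc_free_func(): release the run queue
+ * lock just before the process struct is freed and take it again
+ * afterwards, so the lock is not held across erts_free_proc(). */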
+static void
+unlock_lock_rq(int pre_free, void *vrq)
+{
+ ErtsRunQueue *rq = vrq;
+ if (pre_free)
+ erts_runq_unlock(rq);
+ else
+ erts_runq_lock(rq);
+}
+
+
/*
* schedule() is called from BEAM (process_main()) or HiPE
* (hipe_mode_switch()) when the current process is to be
@@ -9694,12 +9180,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ASSERT(esdp->current_process == p
|| esdp->free_process == p);
- sched_out_proc:
-
- ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
reds = actual_reds = calls - esdp->virtual_reds;
+ internal_sched_out_proc:
+
+ ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+ ASSERT(p->scheduler_data || ERTS_SCHEDULER_IS_DIRTY(esdp));
+
ASSERT(actual_reds >= 0);
if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
@@ -9741,11 +9229,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
/* have to re-read state after taking lock */
state = erts_atomic32_read_nob(&p->state);
- if (p->pending_suspenders)
- handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_TRACE
- | ERTS_PROC_LOCK_STATUS));
-
esdp->reductions += reds;
{
@@ -9782,7 +9265,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
if (dec_refc)
- erts_proc_dec_refc(p);
+ erts_proc_dec_refc_free_func(p,
+ unlock_lock_rq,
+ (void *) rq);
}
ASSERT(!esdp->free_process);
@@ -10134,6 +9619,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
state = erts_atomic32_read_nob(&p->state);
if (is_normal_sched) {
+ ASSERT(!p->scheduler_data);
+ p->scheduler_data = esdp;
if ((!!(state & ERTS_PSFLGS_DIRTY_WORK))
& (!(state & ERTS_PSFLG_ACTIVE_SYS))) {
/* Migrate to dirty scheduler... */
@@ -10141,8 +9628,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
goto sched_out_proc;
}
- ASSERT(!p->scheduler_data);
- p->scheduler_data = esdp;
}
else {
if (!(state & ERTS_PSFLGS_DIRTY_WORK)) {
@@ -10195,8 +9680,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (is_normal_sched) {
if (state & ERTS_PSFLG_RUNNING_SYS) {
if (state & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)) {
- int local_only = !!(p->flags & F_LOCAL_SIGS_ONLY);
- if (!local_only || (state & ERTS_PSFLG_SIG_Q)) {
+ int local_only = (!!(p->flags & F_LOCAL_SIGS_ONLY)
+ & !(state & ERTS_PSFLG_SUSPENDED));
+ if (!local_only | !!(state & ERTS_PSFLG_SIG_Q)) {
int sig_reds;
/*
* If we have dirty work scheduled we allow
@@ -10282,7 +9768,17 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
p->fcalls = reds;
-
+ if (reds != context_reds) {
+ actual_reds = context_reds - reds - esdp->virtual_reds;
+ ASSERT(actual_reds >= 0);
+ esdp->virtual_reds = 0;
+ p->reds += actual_reds;
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+ reds,
+ actual_reds);
+ }
+
ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
ASSERT(erts_proc_read_refc(p) > 0);
@@ -10332,6 +9828,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
#endif
return p;
+
+ sched_out_proc:
+ actual_reds = context_reds;
+ actual_reds -= reds;
+ actual_reds -= esdp->virtual_reds;
+ reds = actual_reds;
+ goto internal_sched_out_proc;
+
}
}
@@ -11844,7 +11348,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef HIPE
hipe_init_process(&p->hipe);
- hipe_init_process_smp(&p->hipe_smp);
#endif
p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz);
p->old_hend = p->old_htop = p->old_heap = NULL;
@@ -11893,7 +11396,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
ERTS_P_LINKS(p) = NULL;
ERTS_P_MONITORS(p) = NULL;
ERTS_P_LT_MONITORS(p) = NULL;
- p->suspend_monitors = NULL;
ASSERT(is_pid(parent->group_leader));
@@ -11950,8 +11452,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->trace_msg_q = NULL;
p->scheduler_data = NULL;
- p->suspendee = NIL;
- p->pending_suspenders = NULL;
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
p->fp_exception = 0;
@@ -12118,7 +11618,6 @@ void erts_init_empty_process(Process *p)
ERTS_P_MONITORS(p) = NULL;
ERTS_P_LT_MONITORS(p) = NULL;
ERTS_P_LINKS(p) = NULL; /* List of links */
- p->suspend_monitors = NULL;
p->sig_qs.first = NULL;
p->sig_qs.last = &p->sig_qs.first;
p->sig_qs.cont = NULL;
@@ -12169,7 +11668,6 @@ void erts_init_empty_process(Process *p)
#ifdef HIPE
hipe_init_process(&p->hipe);
- hipe_init_process_smp(&p->hipe_smp);
#endif
INIT_HOLE_CHECK(p);
@@ -12182,8 +11680,6 @@ void erts_init_empty_process(Process *p)
erts_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
p->scheduler_data = NULL;
- p->suspendee = NIL;
- p->pending_suspenders = NULL;
erts_proc_lock_init(p);
erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
erts_init_runq_proc(p, ERTS_RUNQ_IX(0), 0);
@@ -12221,7 +11717,6 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(ERTS_P_MONITORS(p) == NULL);
ASSERT(ERTS_P_LT_MONITORS(p) == NULL);
ASSERT(ERTS_P_LINKS(p) == NULL);
- ASSERT(p->suspend_monitors == NULL);
ASSERT(p->sig_qs.first == NULL);
ASSERT(p->sig_qs.len == 0);
ASSERT(p->bif_timers == NULL);
@@ -12235,8 +11730,6 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->sig_inq.first == NULL);
ASSERT(p->sig_inq.len == 0);
- ASSERT(p->suspendee == NIL);
- ASSERT(p->pending_suspenders == NULL);
/* Thing that erts_cleanup_empty_process() cleans up */
@@ -12342,8 +11835,6 @@ delete_process(Process* p)
erts_cleanup_messages(p->sig_qs.cont);
p->sig_qs.cont = NULL;
- ASSERT(!p->suspend_monitors);
-
p->fvalue = NIL;
}
@@ -12423,6 +11914,7 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
if (erts_monitor_is_target(mon)) {
/* We are being watched... */
switch (mon->type) {
+ case ERTS_MON_TYPE_SUSPEND:
case ERTS_MON_TYPE_PROC:
erts_proc_sig_send_monitor_down(mon, reason);
mon = NULL;
@@ -12494,6 +11986,7 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
else { /* Origin monitor */
/* We are watching someone else... */
switch (mon->type) {
+ case ERTS_MON_TYPE_SUSPEND:
case ERTS_MON_TYPE_PROC:
erts_proc_sig_send_demonitor(mon);
mon = NULL;
@@ -12646,21 +12139,6 @@ erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
erts_link_release(lnk);
}
-static void
-resume_suspend_monitor(ErtsMonitor *mon, void *vc_p)
-{
- ErtsMonitorSuspend *smon = erts_monitor_suspend(mon);
- Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
- smon->mon.other.item, ERTS_PROC_LOCK_STATUS);
- if (suspendee) {
- ASSERT(suspendee != vc_p);
- if (smon->active)
- resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
- erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
- }
- erts_monitor_suspend_destroy(smon);
-}
-
/* this function finishes a process and propagates exit messages - called
by process_main when a process dies */
void
@@ -12692,8 +12170,6 @@ erts_do_exit_process(Process* p, Eterm reason)
set_self_exiting(p, reason, NULL, NULL, NULL);
- cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL);
-
if (IS_TRACED(p)) {
if (IS_TRACED_FL(p, F_TRACE_CALLS))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
@@ -12826,11 +12302,6 @@ erts_continue_exit_process(Process *p)
p->flags &= ~F_USING_DDLL;
}
- if (p->suspend_monitors)
- erts_monitor_tree_foreach_delete(&p->suspend_monitors,
- resume_suspend_monitor,
- p);
-
/*
* The registered name *should* be the last "erlang resource" to
* cleanup.
@@ -13038,7 +12509,13 @@ erts_try_lock_sig_free_proc(Eterm pid, ErtsProcLocks locks,
erts_aint32_t *statep)
{
Process *rp = erts_proc_lookup_raw(pid);
+ erts_aint32_t fail_state = ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q;
erts_aint32_t state;
+ ErtsProcLocks tmp_locks = ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ;
+
+ tmp_locks |= locks;
+ if (statep)
+ fail_state |= *statep;
if (!rp) {
if (statep)
@@ -13055,28 +12532,28 @@ erts_try_lock_sig_free_proc(Eterm pid, ErtsProcLocks locks,
if (state & ERTS_PSFLG_FREE)
return NULL;
- if (state & (ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q))
+ if (state & fail_state)
return ERTS_PROC_LOCK_BUSY;
- if (!locks)
- return rp;
-
- if (erts_proc_trylock(rp, locks) == EBUSY)
+ if (erts_proc_trylock(rp, tmp_locks) == EBUSY)
return ERTS_PROC_LOCK_BUSY;
state = erts_atomic32_read_nob(&rp->state);
if (statep)
*statep = state;
- if (state & ERTS_PSFLG_FREE) {
- erts_proc_unlock(rp, locks);
- return NULL;
+ if ((state & fail_state)
+ || rp->sig_inq.first
+ || rp->sig_qs.cont) {
+ erts_proc_unlock(rp, tmp_locks);
+ if (state & ERTS_PSFLG_FREE)
+ return NULL;
+ else
+ return ERTS_PROC_LOCK_BUSY;
}
- if (state & (ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q)) {
- erts_proc_unlock(rp, locks);
- return ERTS_PROC_LOCK_BUSY;
- }
+ if (tmp_locks != locks)
+ erts_proc_unlock(rp, tmp_locks & ~locks);
return rp;
}
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index b66272194c..a60e117bab 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -805,14 +805,15 @@ erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_ETS_OWNED_TABLES 6
#define ERTS_PSD_ETS_FIXED_TABLES 7
#define ERTS_PSD_DIST_ENTRY 8
-#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 9 /* keep last... */
+#define ERTS_PSD_PENDING_SUSPEND 9
+#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 10 /* keep last... */
-#define ERTS_PSD_SIZE 10
+#define ERTS_PSD_SIZE 11
#if !defined(HIPE)
# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
# undef ERTS_PSD_SIZE
-# define ERTS_PSD_SIZE 9
+# define ERTS_PSD_SIZE 10
#endif
typedef struct {
@@ -849,6 +850,9 @@ typedef struct {
#define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_PENDING_SUSPEND_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_PENDING_SUSPEND_SET_LOCKS ERTS_PROC_LOCK_MAIN
+
typedef struct {
ErtsProcLocks get_locks;
ErtsProcLocks set_locks;
@@ -884,20 +888,6 @@ typedef struct {
typedef struct ErtsProcSysTask_ ErtsProcSysTask;
typedef struct ErtsProcSysTaskQs_ ErtsProcSysTaskQs;
-
-typedef struct ErtsPendingSuspend_ ErtsPendingSuspend;
-struct ErtsPendingSuspend_ {
- ErtsPendingSuspend *next;
- ErtsPendingSuspend *end;
- Eterm pid;
- void (*handle_func)(Process *suspendee,
- ErtsProcLocks suspendee_locks,
- int suspendee_alive,
- Eterm pid);
-};
-
-
-
/* Defines to ease the change of memory architecture */
# define HEAP_START(p) (p)->heap
# define HEAP_TOP(p) (p)->htop
@@ -992,9 +982,6 @@ struct process {
Process *next; /* Pointer to next process in run queue */
- ErtsMonitor *suspend_monitors; /* Processes suspended by this process via
- erlang:suspend_process/1 */
-
ErtsSignalPrivQueues sig_qs; /* Signal queues */
ErtsBifTimers *bif_timers; /* Bif timers aiming at this process */
@@ -1058,12 +1045,7 @@ struct process {
ErlTraceMessageQueue *trace_msg_q;
erts_proc_lock_t lock;
ErtsSchedulerData *scheduler_data;
- Eterm suspendee;
- ErtsPendingSuspend *pending_suspenders;
erts_atomic_t run_queue;
-#ifdef HIPE
- struct hipe_process_state_smp hipe_smp;
-#endif
#ifdef CHECK_FOR_HOLES
Eterm* last_htop; /* No need to scan the heap below this point. */
@@ -1380,7 +1362,7 @@ extern int erts_system_profile_ts_type;
#define F_DISTRIBUTION (1 << 6) /* Process used in distribution */
#define F_USING_DDLL (1 << 7) /* Process has used the DDLL interface */
#define F_HAVE_BLCKD_MSCHED (1 << 8) /* Process has blocked multi-scheduling */
-#define F_P2PNR_RESCHED (1 << 9) /* Process has been rescheduled via erts_pid2proc_not_running() */
+#define F_UNUSED (1 << 9)
#define F_FORCE_GC (1 << 10) /* Force gc at process in-scheduling */
#define F_DISABLE_GC (1 << 11) /* Disable GC (see below) */
#define F_OFF_HEAP_MSGQ (1 << 12) /* Off heap msg queue */
@@ -1397,10 +1379,12 @@ extern int erts_system_profile_ts_type;
#define F_DIRTY_MAJOR_GC (1 << 23) /* Dirty major GC scheduled */
#define F_DIRTY_MINOR_GC (1 << 24) /* Dirty minor GC scheduled */
#define F_HIBERNATED (1 << 25) /* Hibernated */
-#define F_LOCAL_SIGS_ONLY (1 << 26)
+#define F_LOCAL_SIGS_ONLY (1 << 26) /* Handle privq sigs only */
#define F_TRAP_EXIT (1 << 27) /* Trapping exit */
-#define F_DEFERRED_SAVED_LAST (1 << 28)
-#define F_DELAYED_PSIGQS_LEN (1 << 29)
+#define F_DEFERRED_SAVED_LAST (1 << 28) /* Deferred sig_qs.saved_last */
+#define F_DELAYED_PSIGQS_LEN (1 << 29) /* Delayed update of sig_qs.len */
+#define F_HIPE_RECV_LOCKED (1 << 30) /* HiPE message queue locked */
+#define F_HIPE_RECV_YIELD (1 << 31) /* HiPE receive yield */
/*
* F_DISABLE_GC and F_DELAY_GC are similar. Both will prevent
@@ -2048,6 +2032,11 @@ erts_psd_set(Process *p, int ix, void *data)
#define ERTS_PROC_SET_DIST_ENTRY(P, DE) \
((DistEntry *) erts_psd_set((P), ERTS_PSD_DIST_ENTRY, (void *) (DE)))
+#define ERTS_PROC_GET_PENDING_SUSPEND(P) \
+ ((void *) erts_psd_get((P), ERTS_PSD_PENDING_SUSPEND))
+#define ERTS_PROC_SET_PENDING_SUSPEND(P, PS) \
+ ((void *) erts_psd_set((P), ERTS_PSD_PENDING_SUSPEND, (void *) (PS)))
+
#ifdef HIPE
#define ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(P) \
((struct saved_calls *) erts_psd_get((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF))
@@ -2612,16 +2601,6 @@ Process *erts_try_lock_sig_free_proc(Eterm pid,
ErtsProcLocks locks,
erts_aint32_t *statep);
-Process *erts_pid2proc_not_running(Process *,
- ErtsProcLocks,
- Eterm,
- ErtsProcLocks);
-Process *erts_pid2proc_nropt(Process *c_p,
- ErtsProcLocks c_p_locks,
- Eterm pid,
- ErtsProcLocks pid_locks);
-extern int erts_disable_proc_not_running_opt;
-
#ifdef DEBUG
#define ERTS_ASSERT_IS_NOT_EXITING(P) \
do { ASSERT(!ERTS_PROC_IS_EXITING((P))); } while (0)
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 43f396c547..bd38eca4dc 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -921,6 +921,9 @@ ERTS_GLB_INLINE int erts_proc_trylock(Process *, ErtsProcLocks);
ERTS_GLB_INLINE void erts_proc_inc_refc(Process *);
ERTS_GLB_INLINE void erts_proc_dec_refc(Process *);
+ERTS_GLB_INLINE void erts_proc_dec_refc_free_func(Process *p,
+ void (*func)(int, void *),
+ void *arg);
ERTS_GLB_INLINE void erts_proc_add_refc(Process *, Sint);
ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *);
@@ -993,6 +996,21 @@ ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p)
}
}
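+/* Like erts_proc_dec_refc(), but when the reference count drops to zero
+ * the callback is invoked around the free: func(!0, arg) right before
+ * erts_free_proc() and func(0, arg) right after, e.g. so that a lock can
+ * be released while the process struct is freed. */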
+ERTS_GLB_INLINE void erts_proc_dec_refc_free_func(Process *p,
+ void (*func)(int, void *),
+ void *arg)
+{
+ Sint referred;
+ ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
+ referred = erts_ptab_atmc_dec_test_refc(&p->common);
+ if (!referred) {
+ ASSERT(ERTS_PROC_IS_EXITING(p));
+ (*func)(!0, arg);
+ erts_free_proc(p);
+ (*func)(0, arg);
+ }
+}
+
ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc)
{
Sint referred;
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 4f91d9ad07..29c698e34f 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -1860,7 +1860,7 @@ void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) {
SysCpuTime t;
SysTimespec tp;
- sys_get_proc_cputime(t, tp);
+ sys_get_cputime(t, tp);
*microsec = (Uint)(tp.tv_nsec / 1000);
t = (tp.tv_sec / 1000000);
*megasec = (Uint)(t % 1000000);
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 065a560b52..f4161b14f2 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -625,20 +625,6 @@ erts_get_system_profile(void) {
return profile;
}
-
-#ifdef HAVE_ERTS_NOW_CPU
-# define GET_NOW(m, s, u) \
-do { \
- if (erts_cpu_timestamp) \
- erts_get_now_cpu(m, s, u); \
- else \
- get_now(m, s, u); \
-} while (0)
-#else
-# define GET_NOW(m, s, u) do {get_now(m, s, u);} while (0)
-#endif
-
-
static void
write_sys_msg_to_port(Eterm unused_to,
Port* trace_port,
@@ -2629,6 +2615,38 @@ erts_tracer_to_term(Process *p, ErtsTracer tracer)
}
}
+Eterm
+erts_build_tracer_to_term(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, ErtsTracer tracer)
+{
+ Eterm res;
+ Eterm state;
+ Uint sz;
+
+ if (ERTS_TRACER_IS_NIL(tracer))
+ return am_false;
+
+ state = ERTS_TRACER_STATE(tracer);
+ sz = is_immed(state) ? 0 : size_object(state);
+
+ if (szp)
+ *szp += sz;
+
+ if (hpp)
+ res = is_immed(state) ? state : copy_struct(state, sz, hpp, ohp);
+ else
+ res = THE_NON_VALUE;
+
+ if (ERTS_TRACER_MODULE(tracer) != am_erl_tracer) {
+ if (szp)
+ *szp += 3;
+ if (hpp) {
+ res = TUPLE2(*hpp, ERTS_TRACER_MODULE(tracer), res);
+ *hpp += 3;
+ }
+ }
+
+ return res;
+}
static ERTS_INLINE int
send_to_tracer_nif_raw(Process *c_p, Process *tracee,
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index dbf7ebd2a1..3228e19809 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -198,6 +198,8 @@ int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks,
ErtsPTabElementCommon *t_p);
int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p);
Eterm erts_tracer_to_term(Process *p, ErtsTracer tracer);
+Eterm erts_build_tracer_to_term(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, ErtsTracer tracer);
+
ErtsTracer erts_term_to_tracer(Eterm prefix, Eterm term);
void erts_tracer_replace(ErtsPTabElementCommon *t_p,
const ErtsTracer new_tracer);
diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c
index 8673e029e6..d225916ac5 100644
--- a/erts/emulator/beam/erl_unicode.c
+++ b/erts/emulator/beam/erl_unicode.c
@@ -79,23 +79,23 @@ void erts_init_unicode(void)
max_loop_limit = CONTEXT_REDS * LOOP_FACTOR;
/* Non visual BIFs to trap to. */
erts_init_trap_export(&characters_to_utf8_trap_exp,
- am_erlang, am_atom_put("characters_to_utf8_trap",23), 3,
+ am_erlang, ERTS_MAKE_AM("characters_to_utf8_trap"), 3,
&characters_to_utf8_trap);
erts_init_trap_export(&characters_to_list_trap_1_exp,
- am_erlang, am_atom_put("characters_to_list_trap_1",25), 3,
+ am_erlang, ERTS_MAKE_AM("characters_to_list_trap_1"), 3,
&characters_to_list_trap_1);
erts_init_trap_export(&characters_to_list_trap_2_exp,
- am_erlang, am_atom_put("characters_to_list_trap_2",25), 3,
+ am_erlang, ERTS_MAKE_AM("characters_to_list_trap_2"), 3,
&characters_to_list_trap_2);
erts_init_trap_export(&characters_to_list_trap_3_exp,
- am_erlang, am_atom_put("characters_to_list_trap_3",25), 3,
+ am_erlang, ERTS_MAKE_AM("characters_to_list_trap_3"), 3,
&characters_to_list_trap_3);
erts_init_trap_export(&characters_to_list_trap_4_exp,
- am_erlang, am_atom_put("characters_to_list_trap_4",25), 1,
+ am_erlang, ERTS_MAKE_AM("characters_to_list_trap_4"), 1,
&characters_to_list_trap_4);
c_to_b_int_trap_exportp = erts_export_put(am_unicode,am_characters_to_binary_int,2);
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index d74052d8b2..87709edc15 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -1972,8 +1972,7 @@ static void do_send_logger_message(Eterm gl, Eterm tag, Eterm format, Eterm args
break;
}
- md = MAP2(hp, am_emulator, am_true,
- am_atom_put("tag", 3), el_tag);
+ md = MAP2(hp, am_emulator, am_true, ERTS_MAKE_AM("tag"), el_tag);
hp += MAP2_SZ;
if (is_nil(gl) && is_non_value(pid)) {
@@ -1994,14 +1993,14 @@ static void do_send_logger_message(Eterm gl, Eterm tag, Eterm format, Eterm args
/* no gl */
md = MAP3(hp,
am_error_logger, md,
- am_atom_put("gl", 2), gl,
+ ERTS_MAKE_AM("gl"), gl,
am_time, time);
hp += MAP3_SZ;
pid = NIL;
} else {
md = MAP4(hp,
am_error_logger, md,
- am_atom_put("gl", 2), gl,
+ ERTS_MAKE_AM("gl"), gl,
am_pid, pid,
am_time, time);
hp += MAP4_SZ;
diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c
index bc9a700204..0a65e317ed 100644
--- a/erts/emulator/hipe/hipe_mode_switch.c
+++ b/erts/emulator/hipe/hipe_mode_switch.c
@@ -490,16 +490,21 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
/* same semantics, different debug trace messages */
/* XXX: BEAM has different entries for the locked and unlocked
cases. HiPE doesn't, so we must check dynamically. */
- if (p->hipe_smp.have_receive_locks)
- p->hipe_smp.have_receive_locks = 0;
+ if (p->flags & F_HIPE_RECV_LOCKED)
+ p->flags &= ~F_HIPE_RECV_LOCKED;
else
erts_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
p->i = hipe_beam_pc_resume;
p->arity = 0;
if (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING)
ASSERT(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_ACTIVE);
- else
+ else if (!(p->flags & F_HIPE_RECV_YIELD))
erts_atomic32_read_band_relb(&p->state, ~ERTS_PSFLG_ACTIVE);
+ else {
+ /* Yielded from receive */
+ ERTS_VBUMP_ALL_REDS(p);
+ p->flags &= ~F_HIPE_RECV_YIELD;
+ }
erts_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
do_schedule:
{
@@ -522,7 +527,7 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
p = erts_schedule(NULL, p, reds_in - p->fcalls);
ERTS_REQ_PROC_MAIN_LOCK(p);
ASSERT(!(p->flags & F_HIPE_MODE));
- p->hipe_smp.have_receive_locks = 0;
+ p->flags &= ~F_HIPE_RECV_LOCKED;
reg = p->scheduler_data->x_reg_array;
}
{
diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c
index 24078af046..211ce0492a 100644
--- a/erts/emulator/hipe/hipe_native_bif.c
+++ b/erts/emulator/hipe/hipe_native_bif.c
@@ -144,8 +144,8 @@ BIF_RETTYPE nbif_impl_hipe_set_timeout(NBIF_ALIST_1)
else {
int tres = erts_set_proc_timer_term(p, timeout_value);
if (tres != 0) { /* Wrong time */
- if (p->hipe_smp.have_receive_locks) {
- p->hipe_smp.have_receive_locks = 0;
+ if (p->flags & F_HIPE_RECV_LOCKED) {
+ p->flags &= ~F_HIPE_RECV_LOCKED;
erts_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
}
BIF_ERROR(p, EXC_TIMEOUT_VALUE);
@@ -549,19 +549,14 @@ Eterm hipe_check_get_msg(Process *c_p)
c_p->i = NULL;
c_p->arity = 0;
c_p->current = NULL;
- (void) erts_proc_sig_receive_helper(c_p, CONTEXT_REDS, 0,
+ (void) erts_proc_sig_receive_helper(c_p, CONTEXT_REDS/4, 0,
&msgp, &get_out);
/* FIXME: Need to bump reductions... */
if (!msgp) {
if (get_out) {
- if (get_out < 0) {
- /*
- * FIXME: We should get out yielding
- * here...
- */
- goto next_message;
- }
- /* Go exit... */
+ if (get_out < 0)
+ c_p->flags |= F_HIPE_RECV_YIELD; /* yield... */
+ /* else: go exit... */
return THE_NON_VALUE;
}
@@ -573,7 +568,7 @@ Eterm hipe_check_get_msg(Process *c_p)
*/
/* XXX: BEAM doesn't need this */
- c_p->hipe_smp.have_receive_locks = 1;
+ c_p->flags |= F_HIPE_RECV_LOCKED;
c_p->flags &= ~F_DELAY_GC;
return THE_NON_VALUE;
}
@@ -618,8 +613,8 @@ void hipe_clear_timeout(Process *c_p)
*/
/* XXX: BEAM has different entries for the locked and unlocked
cases. HiPE doesn't, so we must check dynamically. */
- if (c_p->hipe_smp.have_receive_locks) {
- c_p->hipe_smp.have_receive_locks = 0;
+ if (c_p->flags & F_HIPE_RECV_LOCKED) {
+ c_p->flags &= ~F_HIPE_RECV_LOCKED;
erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
}
if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
diff --git a/erts/emulator/hipe/hipe_process.h b/erts/emulator/hipe/hipe_process.h
index ef14c75f6c..18354ba0a6 100644
--- a/erts/emulator/hipe/hipe_process.h
+++ b/erts/emulator/hipe/hipe_process.h
@@ -82,13 +82,4 @@ static __inline__ void hipe_delete_process(struct hipe_process_state *p)
erts_free(ERTS_ALC_T_HIPE_STK, (void*)p->nstack);
}
-struct hipe_process_state_smp {
- int have_receive_locks;
-};
-
-static __inline__ void hipe_init_process_smp(struct hipe_process_state_smp *p)
-{
- p->have_receive_locks = 0;
-}
-
#endif /* HIPE_PROCESS_H */
diff --git a/erts/emulator/nifs/unix/unix_prim_file.c b/erts/emulator/nifs/unix/unix_prim_file.c
index 1637f9cb71..2b112dda76 100644
--- a/erts/emulator/nifs/unix/unix_prim_file.c
+++ b/erts/emulator/nifs/unix/unix_prim_file.c
@@ -512,8 +512,8 @@ int efile_sync(efile_data_t *d, int data_only) {
}
int efile_advise(efile_data_t *d, Sint64 offset, Sint64 length, enum efile_advise_t advise) {
- efile_unix_t *u = (efile_unix_t*)d;
#ifdef HAVE_POSIX_FADVISE
+ efile_unix_t *u = (efile_unix_t*)d;
int p_advise;
switch(advise) {
diff --git a/erts/emulator/nifs/win32/win_prim_file.c b/erts/emulator/nifs/win32/win_prim_file.c
index 8058350b25..044bee62cf 100644
--- a/erts/emulator/nifs/win32/win_prim_file.c
+++ b/erts/emulator/nifs/win32/win_prim_file.c
@@ -24,6 +24,7 @@
#include "prim_file_nif.h"
+#include <winioctl.h>
#include <windows.h>
#include <strsafe.h>
#include <wchar.h>
@@ -671,11 +672,48 @@ static int is_executable_file(const efile_path_t *path) {
return 0;
}
+/* Returns whether the path refers to a link-like object, e.g. a junction
+ * point, symbolic link, or mounted folder. */
+static int is_name_surrogate(const efile_path_t *path) {
+ HANDLE handle;
+ int result;
+
+ handle = CreateFileW((const WCHAR*)path->data, GENERIC_READ,
+ FILE_SHARE_FLAGS, NULL, OPEN_EXISTING,
+ FILE_FLAG_OPEN_REPARSE_POINT |
+ FILE_FLAG_BACKUP_SEMANTICS,
+ NULL);
+ result = 0;
+
+ if(handle != INVALID_HANDLE_VALUE) {
+ REPARSE_GUID_DATA_BUFFER reparse_buffer;
+ DWORD unused_length;
+ BOOL success;
+
+ success = DeviceIoControl(handle,
+ FSCTL_GET_REPARSE_POINT, NULL, 0,
+ &reparse_buffer, sizeof(reparse_buffer),
+ &unused_length, NULL);
+
+ /* ERROR_MORE_DATA is tolerated since we're guaranteed to have filled
+ * the field we want. */
+ if(success || GetLastError() == ERROR_MORE_DATA) {
+ result = IsReparseTagNameSurrogate(reparse_buffer.ReparseTag);
+ }
+
+ CloseHandle(handle);
+ }
+
+ return result;
+}
+
posix_errno_t efile_read_info(const efile_path_t *path, int follow_links, efile_fileinfo_t *result) {
BY_HANDLE_FILE_INFORMATION native_file_info;
DWORD attributes;
+ int is_link;
sys_memset(&native_file_info, 0, sizeof(native_file_info));
+ is_link = 0;
attributes = GetFileAttributesW((WCHAR*)path->data);
@@ -696,7 +734,11 @@ posix_errno_t efile_read_info(const efile_path_t *path, int follow_links, efile_
} else {
HANDLE handle;
- if(follow_links && (attributes & FILE_ATTRIBUTE_REPARSE_POINT)) {
+ if(attributes & FILE_ATTRIBUTE_REPARSE_POINT) {
+ is_link = is_name_surrogate(path);
+ }
+
+ if(follow_links && is_link) {
posix_errno_t posix_errno;
efile_path_t resolved_path;
@@ -737,7 +779,7 @@ posix_errno_t efile_read_info(const efile_path_t *path, int follow_links, efile_
}
}
- if(attributes & FILE_ATTRIBUTE_REPARSE_POINT) {
+ if(is_link) {
result->type = EFILE_FILETYPE_SYMLINK;
/* This should be _S_IFLNK, but the old driver always set
* non-directories to _S_IFREG. */
@@ -917,6 +959,10 @@ posix_errno_t efile_read_link(ErlNifEnv *env, const efile_path_t *path, ERL_NIF_
return EINVAL;
}
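+ /* Only name-surrogate reparse points (symbolic links, junction points,
+ * mounted folders) are links; for any other reparse tag we behave as
+ * for an ordinary file. */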
+ if(!is_name_surrogate(path)) {
+ return EINVAL;
+ }
+
posix_errno = internal_read_link(path, &result_bin);
if(posix_errno == 0) {
diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c
index 5dadd8a5a6..145503bea7 100644
--- a/erts/emulator/sys/common/erl_mmap.c
+++ b/erts/emulator/sys/common/erl_mmap.c
@@ -2514,8 +2514,8 @@ Eterm erts_mmap_debug_info(Process* p)
sizeof(values)/sizeof(*values),
tags, values);
- sa_list = TUPLE2(hp, am_atom_put("sa_free_segs",12), sa_list); hp+=3;
- sua_list = TUPLE2(hp, am_atom_put("sua_free_segs",13), sua_list); hp+=3;
+ sa_list = TUPLE2(hp, ERTS_MAKE_AM("sa_free_segs"), sa_list); hp+=3;
+ sua_list = TUPLE2(hp, ERTS_MAKE_AM("sua_free_segs"), sua_list); hp+=3;
list = CONS(hp, sua_list, list); hp+=2;
list = CONS(hp, sa_list, list); hp+=2;
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index e367d565a7..5bfe5a8e2d 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -264,7 +264,7 @@ erts_os_monotonic_time(void)
ERTS_GLB_INLINE void
erts_os_times(ErtsMonotonicTime *mtimep, ErtsSystemTime *stimep)
{
- return (*erts_sys_time_data__.r.o.os_times)(mtimep, stimep);
+ (*erts_sys_time_data__.r.o.os_times)(mtimep, stimep);
}
#endif /* ERTS_OS_TIMES_INLINE_FUNC_PTR_CALL__ */
@@ -292,6 +292,8 @@ erts_sys_perf_counter()
/*
* Functions for measuring CPU time
+ *
+ * Note that gethrvtime measures time per process while clock_gettime measures it per thread.
*/
#if (defined(HAVE_GETHRVTIME) || defined(HAVE_CLOCK_GETTIME_CPU_TIME))
@@ -300,15 +302,15 @@ typedef struct timespec SysTimespec;
#if defined(HAVE_GETHRVTIME)
#define sys_gethrvtime() gethrvtime()
-#define sys_get_proc_cputime(t,tp) (t) = sys_gethrvtime(), \
- (tp).tv_sec = (time_t)((t)/1000000000LL), \
- (tp).tv_nsec = (long)((t)%1000000000LL)
+#define sys_get_cputime(t,tp) (t) = sys_gethrvtime(), \
+ (tp).tv_sec = (time_t)((t)/1000000000LL), \
+ (tp).tv_nsec = (long)((t)%1000000000LL)
int sys_start_hrvtime(void);
int sys_stop_hrvtime(void);
#elif defined(HAVE_CLOCK_GETTIME_CPU_TIME)
#define sys_clock_gettime(cid,tp) clock_gettime((cid),&(tp))
-#define sys_get_proc_cputime(t,tp) sys_clock_gettime(CLOCK_PROCESS_CPUTIME_ID,(tp))
+#define sys_get_cputime(t,tp) sys_clock_gettime(CLOCK_THREAD_CPUTIME_ID,(tp))
#endif
#endif
diff --git a/erts/emulator/test/code_SUITE.erl b/erts/emulator/test/code_SUITE.erl
index 661a2ee6c9..9c6dc3ff83 100644
--- a/erts/emulator/test/code_SUITE.erl
+++ b/erts/emulator/test/code_SUITE.erl
@@ -957,7 +957,7 @@ erl_544(Config) when is_list(Config) ->
StackFun = fun(_, _, _) -> false end,
FormatFun = fun (Term, _) -> io_lib:format("~tp", [Term]) end,
Formated =
- lib:format_stacktrace(1, Stack, StackFun, FormatFun),
+ erl_error:format_stacktrace(1, Stack, StackFun, FormatFun),
true = is_list(Formated),
ok
after
diff --git a/erts/emulator/test/distribution_SUITE.erl b/erts/emulator/test/distribution_SUITE.erl
index e40d346e10..45dd922ff0 100644
--- a/erts/emulator/test/distribution_SUITE.erl
+++ b/erts/emulator/test/distribution_SUITE.erl
@@ -73,7 +73,7 @@
dist_evil_parallel_receiver/0]).
%% epmd_module exports
--export([start_link/0, register_node/2, register_node/3, port_please/2]).
+-export([start_link/0, register_node/2, register_node/3, port_please/2, address_please/3]).
suite() ->
[{ct_hooks,[ts_install_cth]},
@@ -797,7 +797,7 @@ show_term(Term) ->
%% Tests behaviour after net_kernel:stop (OTP-2586).
stop_dist(Config) when is_list(Config) ->
- Str = os:cmd(atom_to_list(lib:progname())
+ Str = os:cmd(ct:get_progname()
++ " -noshell -pa "
++ proplists:get_value(data_dir, Config)
++ " -s run"),
@@ -974,9 +974,9 @@ dist_auto_connect_start(Name, Value) when is_list(Name), is_atom(Value) ->
ModuleDir = filename:dirname(code:which(?MODULE)),
ValueStr = atom_to_list(Value),
Cookie = atom_to_list(erlang:get_cookie()),
- Cmd = lists:concat(
+ Cmd = lists:append(
[%"xterm -e ",
- atom_to_list(lib:progname()),
+ ct:get_progname(),
% " -noinput ",
" -detached ",
long_or_short(), " ", Name,
@@ -2086,6 +2086,11 @@ port_please(_Name, _Ip) ->
{port, Port, Version}
end.
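+%% Address resolution callback; the test nodes all run on the local
+%% host, so always answer with the loopback address.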
+address_please(_Name, _Address, _AddressFamily) ->
+ %% Use localhost.
+ IP = {127,0,0,1},
+ {ok, IP}.
+
%%% Utilities
timestamp() ->
diff --git a/erts/emulator/test/map_SUITE.erl b/erts/emulator/test/map_SUITE.erl
index 43807b4388..f93c637650 100644
--- a/erts/emulator/test/map_SUITE.erl
+++ b/erts/emulator/test/map_SUITE.erl
@@ -38,6 +38,7 @@
t_map_size/1,
t_map_get/1,
t_is_map/1,
+ t_is_map_key/1,
%% Specific Map BIFs
t_bif_map_get/1,
@@ -718,7 +719,47 @@ t_map_get(Config) when is_list(Config) ->
true = if map_get(a, M2) =:= 1 -> true; true -> false end,
false = if map_get(x, M2) =:= 1 -> true; true -> false end,
do_badmap(fun
- (T) when map_get(T, x) =:= 1 -> ok;
+ (T) when map_get(x, T) =:= 1 -> ok;
+ (T) -> false = is_map(T)
+ end),
+ ok.
+
+t_is_map_key(Config) when is_list(Config) ->
+ %% small map
+ true = is_map_key(a, id(#{a=>1})),
+ true = is_map_key(b, id(#{a=>1, b=>2})),
+ true = is_map_key("hello", id(#{a=>1, "hello"=>"hi"})),
+ true = is_map_key({1,1.0}, id(#{a=>a, {1,1.0}=>"tuple hi"})),
+
+ M0 = id(#{ k1=>"v1", <<"k2">> => <<"v3">> }),
+ true = is_map_key(<<"k2">>, M0#{<<"k2">> => "v4"}),
+
+ %% large map
+ M1 = maps:from_list([{I,I}||I<-lists:seq(1,100)] ++
+ [{a,1},{b,2},{"hello","hi"},{{1,1.0},"tuple hi"},
+ {k1,"v1"},{<<"k2">>,"v3"}]),
+ true = is_map_key(a, M1),
+ true = is_map_key(b, M1),
+ true = is_map_key("hello", M1),
+ true = is_map_key({1,1.0}, M1),
+ true = is_map_key(<<"k2">>, M1),
+
+ %% error cases
+ do_badmap(fun(T) ->
+ {'EXIT',{{badmap,T},[{erlang,is_map_key,_,_}|_]}} =
+ (catch is_map_key(a, T))
+ end),
+
+ false = is_map_key({1,1}, id(#{{1,1.0}=>"tuple"})),
+ false = is_map_key(a, id(#{})),
+ false = is_map_key(a, id(#{b=>1, c=>2})),
+
+ %% in guards
+ M2 = id(#{a=>1}),
+ true = if is_map_key(a, M2) -> true; true -> false end,
+ false = if is_map_key(x, M2) -> true; true -> false end,
+ do_badmap(fun
+ (T) when is_map_key(T, x) =:= 1 -> ok;
(T) -> false = is_map(T)
end),
ok.
diff --git a/erts/emulator/test/match_spec_SUITE.erl b/erts/emulator/test/match_spec_SUITE.erl
index c1bc01f01e..4415d8d1b9 100644
--- a/erts/emulator/test/match_spec_SUITE.erl
+++ b/erts/emulator/test/match_spec_SUITE.erl
@@ -898,6 +898,13 @@ maps(Config) when is_list(Config) ->
{ok,false,[],[]} = erlang:match_spec_test(not_a_map, [{'$1',[{map_get,b,'$1'}],['$_']}], table),
{ok,true,[],[]} = erlang:match_spec_test(#{a => true}, [{'$1',[{map_get,a,'$1'}],[true]}], table),
+ {ok,true,[],[]} = erlang:match_spec_test(#{a => 1}, [{'$1',[],[{is_map_key,a,'$1'}]}], table),
+ {ok,false,[],[]} = erlang:match_spec_test(#{a => 1}, [{'$1',[],[{is_map_key,b,'$1'}]}], table),
+ {ok,'EXIT',[],[]} = erlang:match_spec_test(not_a_map, [{'$1',[],[{is_map_key,a,'$1'}]}], table),
+ {ok,false,[],[]} = erlang:match_spec_test(#{a => 1}, [{'$1',[{is_map_key,b,'$1'}],['$_']}], table),
+ {ok,false,[],[]} = erlang:match_spec_test(not_a_map, [{'$1',[{is_map_key,b,'$1'}],['$_']}], table),
+ {ok,true,[],[]} = erlang:match_spec_test(#{a => true}, [{'$1',[{is_map_key,a,'$1'}],[true]}], table),
+
%% large maps
Ls0 = [{I,<<I:32>>}||I <- lists:seq(1,415)],
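The is_map_key guard exercised above also works in ETS match specifications; a minimal sketch (table name and contents are illustrative, not part of the patch), selecting the keys of all objects whose map value contains the key size:

    T = ets:new(example_tab, [set]),
    true = ets:insert(T, [{a, #{size => 1}}, {b, #{}}]),
    [a] = ets:select(T, [{{'$1', '$2'}, [{is_map_key, size, '$2'}], ['$1']}]).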
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index df521311e3..100fa006e7 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -2663,16 +2663,23 @@ nif_term_to_binary(Config) ->
nif_binary_to_term(Config) ->
ensure_lib_loaded(Config),
- T = {#{ok => nok}, <<0:8096>>, lists:seq(1,100)},
+ BigMap = maps:from_list([{I,-I} || I <- lists:seq(1,100)]),
+ [nif_binary_to_term_do(T)
+ || T <- [{#{ok => nok}, <<0:8096>>, lists:seq(1,100)},
+ atom, 42, self(), BigMap]],
+ ok.
+
+nif_binary_to_term_do(T) ->
+ Dummy = [true|false],
Bin = term_to_binary(T),
Len = byte_size(Bin),
- {Len,T} = binary_to_term_nif(Bin, undefined, 0),
+ {Len,T,Dummy} = binary_to_term_nif(Bin, undefined, 0),
Len = binary_to_term_nif(Bin, self(), 0),
- T = receive M -> M after 1000 -> timeout end,
+ {T,Dummy} = receive M -> M after 1000 -> timeout end,
- {Len, T} = binary_to_term_nif(Bin, undefined, ?ERL_NIF_BIN2TERM_SAFE),
+ {Len,T,Dummy} = binary_to_term_nif(Bin, undefined, ?ERL_NIF_BIN2TERM_SAFE),
false = binary_to_term_nif(<<131,100,0,14,"undefined_atom">>,
- undefined, ?ERL_NIF_BIN2TERM_SAFE),
+ undefined, ?ERL_NIF_BIN2TERM_SAFE),
false = binary_to_term_nif(Bin, undefined, 1),
ok.
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index a0aef60cf1..155bda6df0 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -2405,7 +2405,7 @@ static ERL_NIF_TERM term_to_binary(ErlNifEnv* env, int argc, const ERL_NIF_TERM
static ERL_NIF_TERM binary_to_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ErlNifBinary bin;
- ERL_NIF_TERM term, ret_term;
+ ERL_NIF_TERM term, dummy, ret_term;
ErlNifPid pid;
ErlNifEnv *msg_env = env;
unsigned int opts;
@@ -2418,6 +2418,9 @@ static ERL_NIF_TERM binary_to_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM
|| !enif_get_uint(env, argv[2], &opts))
return enif_make_badarg(env);
+ /* build dummy heap term first to provoke OTP-15080 */
+ dummy = enif_make_list_cell(msg_env, atom_true, atom_false);
+
ret = enif_binary_to_term(msg_env, bin.data, bin.size, &term,
(ErlNifBinaryToTerm)opts);
if (!ret)
@@ -2425,11 +2428,12 @@ static ERL_NIF_TERM binary_to_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM
ret_term = enif_make_uint64(env, ret);
if (msg_env != env) {
- enif_send(env, &pid, msg_env, term);
+ enif_send(env, &pid, msg_env,
+ enif_make_tuple2(msg_env, term, dummy));
enif_free_env(msg_env);
return ret_term;
} else {
- return enif_make_tuple2(env, ret_term, term);
+ return enif_make_tuple3(env, ret_term, term, dummy);
}
}
diff --git a/erts/emulator/test/port_SUITE.erl b/erts/emulator/test/port_SUITE.erl
index 5b39d05df8..eb9b94a316 100644
--- a/erts/emulator/test/port_SUITE.erl
+++ b/erts/emulator/test/port_SUITE.erl
@@ -965,7 +965,7 @@ env_slave(File, Env) ->
env_slave(File, Env, Body) ->
file:write_file(File, term_to_binary(Body)),
- Program = atom_to_list(lib:progname()),
+ Program = ct:get_progname(),
Dir = filename:dirname(code:which(?MODULE)),
Cmd = Program ++ " -pz " ++ Dir ++
" -noinput -run " ++ ?MODULE_STRING ++ " env_slave_main " ++
@@ -1129,7 +1129,7 @@ try_bad_args(Args) ->
cd(Config) when is_list(Config) ->
ct:timetrap({minutes, 1}),
- Program = atom_to_list(lib:progname()),
+ Program = ct:get_progname(),
DataDir = proplists:get_value(data_dir, Config),
TestDir = filename:join(DataDir, "dir"),
Cmd = Program ++ " -pz " ++ DataDir ++
@@ -1191,7 +1191,7 @@ cd(Config) when is_list(Config) ->
%% be relative the new cwd and not the original
cd_relative(Config) ->
- Program = atom_to_list(lib:progname()),
+ Program = ct:get_progname(),
DataDir = proplists:get_value(data_dir, Config),
TestDir = filename:join(DataDir, "dir"),
@@ -1214,7 +1214,7 @@ cd_relative(Config) ->
relative_cd() ->
- Program = atom_to_list(lib:progname()),
+ Program = ct:get_progname(),
ok = file:set_cwd(".."),
{ok, Cwd} = file:get_cwd(),
diff --git a/erts/emulator/test/sensitive_SUITE.erl b/erts/emulator/test/sensitive_SUITE.erl
index c3e303bbd1..9b23a30e88 100644
--- a/erts/emulator/test/sensitive_SUITE.erl
+++ b/erts/emulator/test/sensitive_SUITE.erl
@@ -413,7 +413,7 @@ my_process_info(Pid, Tag) ->
t_process_display(Config) when is_list(Config) ->
Dir = filename:dirname(code:which(?MODULE)),
- Cmd = atom_to_list(lib:progname()) ++ " -noinput -pa " ++ Dir ++
+ Cmd = ct:get_progname() ++ " -noinput -pa " ++ Dir ++
" -run " ++ ?MODULE_STRING ++ " remote_process_display",
io:put_chars(Cmd),
P = open_port({spawn,Cmd}, [in,stderr_to_stdout,eof]),
diff --git a/erts/emulator/test/system_info_SUITE.erl b/erts/emulator/test/system_info_SUITE.erl
index fdf4aab24d..7309908337 100644
--- a/erts/emulator/test/system_info_SUITE.erl
+++ b/erts/emulator/test/system_info_SUITE.erl
@@ -37,6 +37,7 @@
-export([process_count/1, system_version/1, misc_smoke_tests/1,
heap_size/1, wordsize/1, memory/1, ets_limit/1, atom_limit/1,
+ ets_count/1,
atom_count/1]).
suite() ->
@@ -45,6 +46,7 @@ suite() ->
all() ->
[process_count, system_version, misc_smoke_tests,
+ ets_count,
heap_size, wordsize, memory, ets_limit, atom_limit, atom_count].
%%%
@@ -478,6 +480,21 @@ get_node_name(Config) ->
++ "-"
++ integer_to_list(erlang:unique_integer([positive]))).
+ets_count(Config) when is_list(Config) ->
+ [ets_count_do([Type | Named])
+ || Type <- [set, bag, duplicate_bag, ordered_set],
+ Named <- [[named_table], []]
+ ],
+ ok.
+
+ets_count_do(Opts) ->
+ Before = erlang:system_info(ets_count),
+ T = ets:new(?MODULE, Opts),
+ After = erlang:system_info(ets_count),
+ After = Before + 1,
+ ets:delete(T),
+ Before = erlang:system_info(ets_count).
+
%% Verify system_info(ets_limit) reflects max ETS table settings.
ets_limit(Config0) when is_list(Config0) ->
diff --git a/erts/emulator/test/system_profile_SUITE.erl b/erts/emulator/test/system_profile_SUITE.erl
index c9be54f668..ae27bfe9df 100644
--- a/erts/emulator/test/system_profile_SUITE.erl
+++ b/erts/emulator/test/system_profile_SUITE.erl
@@ -95,18 +95,20 @@ do_runnable_procs({TsType, TsTypeFlag}) ->
% FIXME: Set #laps and #nodes in config file
Nodes = 10,
Laps = 10,
- Master = ring(Nodes),
+ All = ring(Nodes, [link,monitor]),
+ [Master | _] = All,
undefined = erlang:system_profile(Pid, [runnable_procs]++TsTypeFlag),
% loop a message
ok = ring_message(Master, message, Laps),
+ ok = kill_ring(Master),
+ [receive {'DOWN', _, process, P, _} -> ok end || P <- All],
Events = get_profiler_events(),
- kill_em_all = kill_ring(Master),
erlang:system_profile(undefined, []),
put(master, Master),
put(laps, Laps),
true = has_runnable_event(TsType, Events),
Pids = sort_events_by_pid(Events),
- ok = check_events(TsType, Pids),
+ ok = check_events(TsType, Pids, (Laps+1)*2+2, (Laps+1)*2),
erase(),
exit(Pid,kill),
ok.
@@ -139,7 +141,7 @@ do_runnable_ports({TsType, TsTypeFlag}, Config) ->
erlang:system_profile(undefined, []),
true = has_runnable_event(TsType, Events),
Pids = sort_events_by_pid(Events),
- ok = check_events(TsType, Pids),
+ ok = check_events(TsType, Pids, Laps*2+2, Laps*2),
erase(),
exit(Pid,kill),
ok.
@@ -171,12 +173,12 @@ dont_profile_profiler(Config) when is_list(Config) ->
Nodes = 10,
Laps = 10,
- Master = ring(Nodes),
+ [Master|_] = ring(Nodes, [link]),
undefined = erlang:system_profile(Pid, [runnable_procs]),
% loop a message
ok = ring_message(Master, message, Laps),
erlang:system_profile(undefined, []),
- kill_em_all = kill_ring(Master),
+ ok = kill_ring(Master),
Events = get_profiler_events(),
false = has_profiler_pid_event(Events, Pid),
@@ -248,27 +250,28 @@ check_block_system({TsType, TsTypeFlag}, Nodes) ->
%%% Check events
-check_events(_TsType, []) -> ok;
-check_events(TsType, [Pid | Pids]) ->
+check_events(_TsType, [], _, _) -> ok;
+check_events(TsType, [Pid | Pids], ExpMaster, ExpMember) ->
Master = get(master),
- Laps = get(laps),
CheckPids = get(pids),
{Events, N} = get_pid_events(Pid),
ok = check_event_flow(Events),
ok = check_event_ts(TsType, Events),
IsMember = lists:member(Pid, CheckPids),
- case Pid of
- Master ->
- io:format("Expected ~p and got ~p profile events from ~p: ok~n", [Laps*2+2, N, Pid]),
- N = Laps*2 + 2,
- check_events(TsType, Pids);
- Pid when IsMember == true ->
- io:format("Expected ~p and got ~p profile events from ~p: ok~n", [Laps*2, N, Pid]),
- N = Laps*2,
- check_events(TsType, Pids);
- Pid ->
- check_events(TsType, Pids)
- end.
+ {Title,Exp} = case Pid of
+ Master -> {master,ExpMaster};
+ Pid when IsMember == true -> {member,ExpMember};
+ _ -> {other,N}
+ end,
+ ok = case N of
+ Exp -> ok;
+ _ ->
+ io:format("Expected ~p and got ~p profile events from ~p ~p:~n~p~n",
+ [Exp, N, Title, Pid, Events]),
+ error
+ end,
+ check_events(TsType, Pids, ExpMaster, ExpMember).
+
%% timestamp consistency check for descending timestamps
@@ -296,7 +299,13 @@ check_event_ts(TsType, [{Pid, _, _, TS1}=Event | Events], {Pid,_,_,TS0}) ->
%% consistency check for active vs. inactive activity (runnable)
check_event_flow(Events) ->
- check_event_flow(Events, undefined).
+ case check_event_flow(Events, undefined) of
+ ok -> ok;
+ Error ->
+ io:format("Events = ~p\n", [Events]),
+ Error
+ end.
+
check_event_flow([], _) -> ok;
check_event_flow([Event | PidEvents], undefined) ->
check_event_flow(PidEvents, Event);
@@ -336,10 +345,11 @@ sort_events_by_pid([Event | Events],Pids) ->
%% API
% Returns master pid
-ring(N) ->
- Pids = build_ring(N, []),
+ring(N, SpawnOpt) ->
+ Pids = build_ring(N, [], SpawnOpt),
put(pids, Pids),
- setup_ring(Pids).
+ setup_ring(Pids),
+ Pids.
ring_message(Master, Message, Laps) ->
Master ! {message, Master, Laps, Message},
@@ -347,13 +357,19 @@ ring_message(Master, Message, Laps) ->
{laps_complete, Master} -> ok
end.
-kill_ring(Master) -> Master ! kill_em_all.
+kill_ring(Master) ->
+ Master ! kill_em_all,
+ ok.
%% Process ring helpers
-build_ring(0, Pids) -> Pids;
-build_ring(N, Pids) ->
- build_ring(N - 1, [spawn_link(?MODULE, ring_loop, [undefined]) | Pids]).
+build_ring(0, Pids, _) -> Pids;
+build_ring(N, Pids, SpawnOpt) ->
+ Pid = case spawn_opt(?MODULE, ring_loop, [undefined], SpawnOpt) of
+ {P,_} -> P;
+ P -> P
+ end,
+ build_ring(N-1, [Pid | Pids], SpawnOpt).
setup_ring([Master | Relayers]) ->
% Relayers may not include the master pid
@@ -382,15 +398,13 @@ ring_loop(RelayTo) ->
{message, Master, Lap, Msg}=Message ->
case {self(), Lap} of
{Master, 0} ->
- get(supervisor) ! {laps_complete, self()},
- ring_loop(RelayTo);
+ get(supervisor) ! {laps_complete, self()};
{Master, Lap} ->
- RelayTo ! {message, Master, Lap - 1, Msg},
- ring_loop(RelayTo);
+ RelayTo ! {message, Master, Lap - 1, Msg};
_ ->
- RelayTo ! Message,
- ring_loop(RelayTo)
- end
+ RelayTo ! Message
+ end,
+ ring_loop(RelayTo)
end.
%%%
diff --git a/erts/emulator/test/trace_SUITE.erl b/erts/emulator/test/trace_SUITE.erl
index def25dba7d..138aefb29c 100644
--- a/erts/emulator/test/trace_SUITE.erl
+++ b/erts/emulator/test/trace_SUITE.erl
@@ -29,7 +29,7 @@
receive_trace/1, link_receive_call_correlation/1, self_send/1,
timeout_trace/1, send_trace/1,
procs_trace/1, dist_procs_trace/1, procs_new_trace/1,
- suspend/1, mutual_suspend/1, suspend_exit/1, suspender_exit/1,
+ suspend/1, suspend_exit/1, suspender_exit/1,
suspend_system_limit/1, suspend_opts/1, suspend_waiting/1,
new_clear/1, existing_clear/1, tracer_die/1,
set_on_spawn/1, set_on_first_spawn/1, cpu_timestamp/1,
@@ -53,7 +53,7 @@ all() ->
[cpu_timestamp, receive_trace, link_receive_call_correlation,
self_send, timeout_trace,
send_trace, procs_trace, dist_procs_trace, suspend,
- mutual_suspend, suspend_exit, suspender_exit,
+ suspend_exit, suspender_exit,
suspend_system_limit, suspend_opts, suspend_waiting,
new_clear, existing_clear, tracer_die, set_on_spawn,
set_on_first_spawn, set_on_link, set_on_first_link,
@@ -1234,55 +1234,6 @@ do_suspend(Pid, N) ->
erlang:yield(),
do_suspend(Pid, N-1).
-
-
-mutual_suspend(Config) when is_list(Config) ->
- TimeoutSecs = 5*60,
- ct:timetrap({seconds, TimeoutSecs}),
- Parent = self(),
- Fun = fun () ->
- receive
- {go, Pid} ->
- do_mutual_suspend(Pid, 100000)
- end,
- Parent ! {done, self()},
- receive after infinity -> ok end
- end,
- P1 = spawn_link(Fun),
- P2 = spawn_link(Fun),
- T1 = erlang:start_timer((TimeoutSecs - 5)*1000, self(), oops),
- T2 = erlang:start_timer((TimeoutSecs - 5)*1000, self(), oops),
- P1 ! {go, P2},
- P2 ! {go, P1},
- Res1 = receive
- {done, P1} -> done;
- {timeout,T1,_} -> timeout
- end,
- Res2 = receive
- {done, P2} -> done;
- {timeout,T2,_} -> timeout
- end,
- P1S = process_info(P1, status),
- P2S = process_info(P2, status),
- io:format("P1S=~p P2S=~p", [P1S, P2S]),
- false = {status, suspended} == P1S,
- false = {status, suspended} == P2S,
- unlink(P1), exit(P1, bang),
- unlink(P2), exit(P2, bang),
- done = Res1,
- done = Res2,
- ok.
-
-do_mutual_suspend(_Pid, 0) ->
- ok;
-do_mutual_suspend(Pid, N) ->
- %% Suspend a process and test that it is suspended.
- true = erlang:suspend_process(Pid),
- {status, suspended} = process_info(Pid, status),
- %% Unsuspend the process.
- true = erlang:resume_process(Pid),
- do_mutual_suspend(Pid, N-1).
-
suspend_exit(Config) when is_list(Config) ->
ct:timetrap({minutes, 2}),
rand:seed(exsplus, {4711,17,4711}),
@@ -1513,7 +1464,8 @@ suspend_opts(Config) when is_list(Config) ->
dbl_async = AA,
synced = S,
async_once = AO} = Acc) ->
- erlang:suspend_process(Tok, [asynchronous]),
+ Tag = {make_ref(), self()},
+ erlang:suspend_process(Tok, [{asynchronous, Tag}]),
Res = case {suspend_count(Tok), N rem 4} of
{0, 2} ->
erlang:suspend_process(Tok,
@@ -1549,7 +1501,11 @@ suspend_opts(Config) when is_list(Config) ->
_ ->
Acc
end,
- erlang:resume_process(Tok),
+ receive
+ {Tag, Result} ->
+ suspended = Result,
+ erlang:resume_process(Tok)
+ end,
erlang:yield(),
Res
end,
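The rewritten loop above exercises the {asynchronous, Tag} suspend option introduced in this change set: the caller later receives a {Tag, suspended} reply once the suspend has taken effect. A minimal sketch of that protocol (the function name is illustrative):

suspend_sync(Pid) ->
    Tag = {make_ref(), self()},
    erlang:suspend_process(Pid, [{asynchronous, Tag}]),
    receive
        {Tag, suspended} ->
            %% Pid is now known to be suspended; resume it again.
            true = erlang:resume_process(Pid)
    end.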
diff --git a/erts/emulator/test/tracer_SUITE.erl b/erts/emulator/test/tracer_SUITE.erl
index e1362ef07a..070462b0f1 100644
--- a/erts/emulator/test/tracer_SUITE.erl
+++ b/erts/emulator/test/tracer_SUITE.erl
@@ -623,7 +623,7 @@ test(Event, TraceFlag, Tc, Expect, _Removes, Dies) ->
Expect(Pid1, State1, Opts),
receive M11 -> ct:fail({unexpected, M11}) after 0 -> ok end,
- if not Dies ->
+ if not Dies andalso Event /= in ->
{flags, [TraceFlag]} = erlang:trace_info(Pid1, flags),
{tracer, {tracer_test, State1}} = erlang:trace_info(Pid1, tracer),
erlang:trace(Pid1, false, [TraceFlag]);
@@ -640,7 +640,7 @@ test(Event, TraceFlag, Tc, Expect, _Removes, Dies) ->
Expect(Pid1T, State1, Opts#{ scheduler_id => number,
timestamp => timestamp}),
receive M11T -> ct:fail({unexpected, M11T}) after 0 -> ok end,
- if not Dies ->
+ if not Dies andalso Event /= in ->
{flags, [scheduler_id, TraceFlag, timestamp]}
= erlang:trace_info(Pid1T, flags),
{tracer, {tracer_test, State1}} = erlang:trace_info(Pid1T, tracer),
@@ -655,7 +655,7 @@ test(Event, TraceFlag, Tc, Expect, _Removes, Dies) ->
Tc(Pid2),
ok = trace_delivered(Pid2),
receive M2 -> ct:fail({unexpected, M2}) after 0 -> ok end,
- if not Dies ->
+ if not Dies andalso Event /= in ->
{flags, [TraceFlag]} = erlang:trace_info(Pid2, flags),
{tracer, {tracer_test, State2}} = erlang:trace_info(Pid2, tracer),
erlang:trace(Pid2, false, [TraceFlag]);
diff --git a/erts/etc/common/heart.c b/erts/etc/common/heart.c
index bc353e384e..8f1e89b638 100644
--- a/erts/etc/common/heart.c
+++ b/erts/etc/common/heart.c
@@ -825,11 +825,8 @@ write_message(fd, mp)
int fd;
struct msg *mp;
{
- int len;
- char* tmp;
+ int len = ntohs(mp->len);
- tmp = (char*) &(mp->len);
- len = (*tmp * 256) + *(tmp+1);
if ((len == 0) || (len > MSG_BODY_SIZE)) {
return MSG_HDR_SIZE;
} /* cc68k wants (char *) */
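The hunk above replaces hand-rolled big-endian byte arithmetic with ntohs(), since the heart message length field is a 16-bit value in network byte order. A hedged Erlang-side sketch of decoding the same framing (the helper name is illustrative):

%% A heart message is a 16-bit big-endian length followed by the body.
decode_heart_msg(<<Len:16/big, Rest/binary>>) when Len > 0, Len =< byte_size(Rest) ->
    <<Body:Len/binary, _/binary>> = Rest,
    {ok, Body};
decode_heart_msg(_Other) ->
    {error, bad_message}.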
diff --git a/erts/etc/unix/etp-commands.in b/erts/etc/unix/etp-commands.in
index e5ef819444..39e378193a 100644
--- a/erts/etc/unix/etp-commands.in
+++ b/erts/etc/unix/etp-commands.in
@@ -1232,6 +1232,142 @@ end
# Commands for special term bunches.
#
+define etp-sig-int
+ set $etp_sig_is_message = 0
+ set $etp_sig_tag = ($arg0)->m[0]
+ if ($etp_sig_tag & 0x3) != 0 || $etp_sig_tag == etp_the_non_value
+ set $etp_sig_is_message = !0
+ # A message
+ if $etp_sig_tag != etp_the_non_value
+ etp-1 $etp_sig_tag 0
+ else
+ print "!ENCODED-DIST-MSG"
+ end
+ if ($arg0)->m[1] != $etp_nil
+ printf " @token= "
+ etp-1 ($arg0)->m[1] 0
+ end
+ printf " @from= "
+ etp-1 ($arg0)->m[2] 0
+ else
+ if ($etp_sig_tag & 0x3f) != 0x30
+ print "!INVALID-SIGNAL"
+ else
+ set $etp_sig_op = (($etp_sig_tag >> 6) & 0xff)
+ set $etp_sig_type = (($etp_sig_tag >> 14) & 0xff)
+ if $etp_sig_op == 0
+ printf "!EXIT[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 1
+ printf "!EXIT-LINKED[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 2
+ printf "!MONITOR-DOWN[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 3
+ printf "!MONITOR[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 4
+ printf "!DEMONITOR[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 5
+ printf "!LINK[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 6
+ printf "!UNLINK[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 7
+ printf "!GROUP-LEADER[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 8
+ printf "!TRACE-CHANGE-STATE[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 9
+ printf "!PERSISTENT-MONITOR-MESSAGE[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 10
+ printf "!IS-ALIVE[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 11
+ printf "!PROCESS-INFO[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 12
+ printf "!SYNC-SUSPEND[%d]", $etp_sig_type
+ else
+ if $etp_sig_op == 13
+ printf "!RPC[%d]", $etp_sig_type
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+end
+
+
+define etp-sigq-int
+# Args: ErlMessageQueue*
+#
+# Non-reentrant
+#
+ set $etp_sig = ($arg0)
+ set $etp_sig_save = ($arg1)
+ set $etp_sig_save_last = ($arg2)
+ set $etp_sigq_msig_len = 0
+ set $etp_sigq_nmsig_len = 0
+
+ printf " ["
+ while $etp_sig != (void *) 0
+ set $etp_sig_next = $etp_sig->next
+ if $etp_sig != ($arg0)
+ printf " "
+ end
+ etp-sig-int $etp_sig
+ if $etp_sig_is_message
+ set $etp_sigq_msig_len++
+ else
+ set $etp_sigq_nmsig_len++
+ end
+ if $etp_sig_next
+ printf ","
+ end
+ if $etp_sig_save && *$etp_sig_save == $etp_sig
+ printf " %% <== SAVE"
+ else
+ if $etp_sig_save_last && *$etp_sig_save_last == $etp_sig
+ printf " %% <== SAVED_LAST"
+ else
+ end
+ end
+ if $etp_sig_next
+ printf "\n"
+ end
+ set $etp_sig = $etp_sig_next
+ end
+ printf "]\n\n"
+ printf " Message signals: %d\n", $etp_sigq_msig_len
+ printf " Non-message signals: %d\n\n", $etp_sigq_nmsig_len
+end
+
+define etp-sigqs
+ printf " --- Inner signal queue (message queue) ---\n"
+ etp-sigq-int ($arg0)->sig_qs.first ($arg0)->sig_qs.save ($arg0)->sig_qs.saved_last
+ printf " --- Middle signal queue ---\n"
+ etp-sigq-int ($arg0)->sig_qs.cont ($arg0)->sig_qs.save ($arg0)->sig_qs.saved_last
+ printf " --- Outer queue ---\n"
+ etp-sigq-int ($arg0)->sig_inq.first ($arg0)->sig_qs.save ($arg0)->sig_qs.saved_last
+end
+
define etp-msgq
# Args: ErlMessageQueue*
#
@@ -1937,7 +2073,7 @@ document etp-proc-flags
%---------------------------------------------------------------------------
end
-define etp-process-info
+define etp-process-info-int
# Args: Process*
#
printf " Pid: "
@@ -2000,6 +2136,17 @@ define etp-process-info
etp-1 ((Eterm)($etp_proc->parent))
printf "\n Pointer: (Process *) %p\n", $etp_proc
end
+ if ($arg1)
+ etp-sigqs $etp_proc
+ end
+end
+
+define etp-process-info
+ etp-process-info-int ($arg0) 0
+end
+
+define etp-process-info-x
+ etp-process-info-int ($arg0) !0
end
document etp-process-info
@@ -2010,7 +2157,7 @@ document etp-process-info
%---------------------------------------------------------------------------
end
-define etp-processes
+define etp-processes-int
if (!erts_initialized)
printf "No processes, since system isn't initialized!\n"
else
@@ -2026,7 +2173,7 @@ define etp-processes
if ($proc != ((Process *) 0) && $proc != $invalid_proc)
printf "---\n"
printf " Pix: %d\n", $proc_ix
- etp-process-info $proc
+ etp-process-info-int $proc ($arg0)
set $proc_cnt--
end
if $proc_ix == $proc_printile
@@ -2039,6 +2186,14 @@ define etp-processes
end
end
+define etp-processes
+ etp-processes-int 0
+end
+
+define etp-processes-x
+ etp-processes-int !0
+end
+
document etp-processes
%---------------------------------------------------------------------------
% etp-processes
diff --git a/erts/lib_src/Makefile.in b/erts/lib_src/Makefile.in
index 48660f7c71..dfaf664a18 100644
--- a/erts/lib_src/Makefile.in
+++ b/erts/lib_src/Makefile.in
@@ -334,7 +334,7 @@ ETHREAD_LIB=
endif
-ifneq ($(CREATE_DIRS),)
+ifneq ($(strip $(CREATE_DIRS)),)
_create_dirs := $(shell mkdir -p $(CREATE_DIRS))
endif
diff --git a/erts/preloaded/ebin/erl_prim_loader.beam b/erts/preloaded/ebin/erl_prim_loader.beam
index 10545086b5..495d306a23 100644
--- a/erts/preloaded/ebin/erl_prim_loader.beam
+++ b/erts/preloaded/ebin/erl_prim_loader.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index d3fbc8eb61..99f11495a5 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_dirty_process_signal_handler.beam b/erts/preloaded/ebin/erts_dirty_process_signal_handler.beam
index 8d9ca3fcae..1013b8de0c 100644
--- a/erts/preloaded/ebin/erts_dirty_process_signal_handler.beam
+++ b/erts/preloaded/ebin/erts_dirty_process_signal_handler.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_internal.beam b/erts/preloaded/ebin/erts_internal.beam
index cdfdaf9640..73bd730eaa 100644
--- a/erts/preloaded/ebin/erts_internal.beam
+++ b/erts/preloaded/ebin/erts_internal.beam
Binary files differ
diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam
index 0473eda5fb..1b458fc5da 100644
--- a/erts/preloaded/ebin/init.beam
+++ b/erts/preloaded/ebin/init.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_file.beam b/erts/preloaded/ebin/prim_file.beam
index d1b3f5dca4..2f1e55442a 100644
--- a/erts/preloaded/ebin/prim_file.beam
+++ b/erts/preloaded/ebin/prim_file.beam
Binary files differ
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index 53e90a4f2d..f4f31b1e4b 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -135,7 +135,7 @@
-export([insert_element/3]).
-export([integer_to_binary/1, integer_to_list/1]).
-export([iolist_size/1, iolist_to_binary/1, iolist_to_iovec/1]).
--export([is_alive/0, is_builtin/3, is_process_alive/1, length/1, link/1]).
+-export([is_alive/0, is_builtin/3, is_map_key/2, is_process_alive/1, length/1, link/1]).
-export([list_to_atom/1, list_to_binary/1]).
-export([list_to_bitstring/1, list_to_existing_atom/1, list_to_float/1]).
-export([list_to_integer/1, list_to_integer/2]).
@@ -1121,6 +1121,13 @@ is_alive() ->
is_builtin(_Module, _Function, _Arity) ->
erlang:nif_error(undefined).
+%% Shadowed by erl_bif_types: erlang:is_map_key/2
+-spec is_map_key(Key, Map) -> boolean() when
+ Key :: term(),
+ Map :: map().
+is_map_key(_,_) ->
+ erlang:nif_error(undef).
+
%% is_process_alive/1
-spec is_process_alive(Pid) -> boolean() when
Pid :: pid().
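erlang:is_map_key/2, exported above, is a guard BIF, so it can be used where maps:is_key/2 cannot. A minimal usage sketch (module and function names are illustrative):

-module(map_key_sketch).
-export([price/1]).

%% is_map_key/2 and map_get/2 are both allowed in guards.
price(Item) when is_map_key(price, Item) ->
    map_get(price, Item);
price(_Item) ->
    undefined.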
@@ -1514,8 +1521,21 @@ pre_loaded() ->
-spec erlang:process_display(Pid, Type) -> true when
Pid :: pid(),
Type :: backtrace.
-process_display(_Pid, _Type) ->
- erlang:nif_error(undefined).
+process_display(Pid, Type) ->
+ case case erts_internal:process_display(Pid, Type) of
+ Ref when erlang:is_reference(Ref) ->
+ receive
+ {Ref, Res} ->
+ Res
+ end;
+ Res ->
+ Res
+ end of
+ badarg ->
+ erlang:error(badarg, [Pid, Type]);
+ Result ->
+ Result
+ end.
%% process_flag/3
-spec process_flag(Pid, Flag, Value) -> OldValue when
@@ -1523,8 +1543,15 @@ process_display(_Pid, _Type) ->
Flag :: save_calls,
Value :: non_neg_integer(),
OldValue :: non_neg_integer().
-process_flag(_Pid, _Flag, _Value) ->
- erlang:nif_error(undefined).
+process_flag(Pid, Flag, Value) ->
+ case case erts_internal:process_flag(Pid, Flag, Value) of
+ Ref when erlang:is_reference(Ref) ->
+ receive {Ref, Res} -> Res end;
+ Res -> Res
+ end of
+ badarg -> erlang:error(badarg, [Pid, Flag, Value]);
+ Result -> Result
+ end.
%% process_info/1
-spec process_info(Pid) -> Info when
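process_display/2 and process_flag/3 above now follow the same wrapper pattern: the erts_internal BIF either answers immediately or hands back a reference, in which case the final result arrives as a {Ref, Result} message once the target process has handled the signal, and a badarg result is turned into erlang:error/2 by the caller. A generic sketch of that pattern, assuming the request is wrapped in a fun (the real wrappers call the BIFs directly):

%% Request is a zero-arity fun that calls the erts_internal BIF.
await_signal_reply(Request) ->
    case Request() of
        Ref when is_reference(Ref) ->
            receive {Ref, Result} -> Result end;
        Result ->
            Result
    end.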
@@ -1678,12 +1705,26 @@ setnode(_P1, _P2) ->
erlang:nif_error(undefined).
%% setnode/3
--spec erlang:setnode(P1, P2, P3) -> dist_handle() when
- P1 :: atom(),
- P2 :: port(),
- P3 :: {term(), term(), term(), term()}.
-setnode(_P1, _P2, _P3) ->
- erlang:nif_error(undefined).
+-spec erlang:setnode(Node, DistCtrlr, Opts) -> dist_handle() when
+ Node :: atom(),
+ DistCtrlr :: port() | pid(),
+ Opts :: {integer(), integer(), atom(), atom()}.
+setnode(Node, DistCtrlr, {Flags, Ver, IC, OC} = Opts) when erlang:is_atom(IC),
+ erlang:is_atom(OC) ->
+ case case erts_internal:create_dist_channel(Node, DistCtrlr,
+ Flags, Ver) of
+ {ok, DH} -> DH;
+ {message, Ref} -> receive {Ref, Res} -> Res end;
+ Err -> Err
+ end of
+ Error when erlang:is_atom(Error) ->
+ erlang:error(Error, [Node, DistCtrlr, Opts]);
+ DHandle ->
+ DHandle
+ end;
+setnode(Node, DistCtrlr, Opts) ->
+ erlang:error(badarg, [Node, DistCtrlr, Opts]).
+
%% size/1
%% Shadowed by erl_bif_types: erlang:size/1
@@ -1742,9 +1783,32 @@ start_timer(_Time, _Dest, _Msg, _Options) ->
-spec erlang:suspend_process(Suspendee, OptList) -> boolean() when
Suspendee :: pid(),
OptList :: [Opt],
- Opt :: unless_suspending | asynchronous.
-suspend_process(_Suspendee, _OptList) ->
- erlang:nif_error(undefined).
+ Opt :: unless_suspending | asynchronous | {asynchronous, term()}.
+suspend_process(Suspendee, OptList) ->
+ case case erts_internal:suspend_process(Suspendee, OptList) of
+ Ref when erlang:is_reference(Ref) ->
+ receive {Ref, Res} -> Res end;
+ Res ->
+ Res
+ end of
+ true -> true;
+ false -> false;
+ Error -> erlang:error(Error, [Suspendee, OptList])
+ end.
+
+-spec erlang:suspend_process(Suspendee) -> 'true' when
+ Suspendee :: pid().
+suspend_process(Suspendee) ->
+ case case erts_internal:suspend_process(Suspendee, []) of
+ Ref when erlang:is_reference(Ref) ->
+ receive {Ref, Res} -> Res end;
+ Res ->
+ Res
+ end of
+ true -> true;
+ false -> erlang:error(internal_error, [Suspendee]);
+ Error -> erlang:error(Error, [Suspendee])
+ end.
%% system_monitor/0
-spec erlang:system_monitor() -> MonSettings when
@@ -3038,15 +3102,6 @@ send_nosuspend(Pid, Msg, Opts) ->
localtime_to_universaltime(Localtime) ->
erlang:localtime_to_universaltime(Localtime, undefined).
--spec erlang:suspend_process(Suspendee) -> 'true' when
- Suspendee :: pid().
-suspend_process(P) ->
- case catch erlang:suspend_process(P, []) of
- {'EXIT', {Reason, _}} -> erlang:error(Reason, [P]);
- {'EXIT', Reason} -> erlang:error(Reason, [P]);
- Res -> Res
- end.
-
%%
%% Port BIFs
%%
@@ -3723,8 +3778,8 @@ blocks_size([], Acc) ->
Acc.
get_fix_proc([{ProcType, A1, U1}| Rest], {A0, U0}) when ProcType == proc;
- ProcType == monitor_sh;
- ProcType == nlink_sh;
+ ProcType == monitor;
+ ProcType == link;
ProcType == msg_ref;
ProcType == ll_ptimer;
ProcType == hl_ptimer;
@@ -3848,7 +3903,6 @@ aa_mem_data(#memory{processes = Proc,
processes_used = ProcU,
system = Sys} = Mem,
[{ProcData, Sz} | Rest]) when ProcData == bif_timer;
- ProcData == link_lh;
ProcData == process_table ->
aa_mem_data(Mem#memory{processes = Proc+Sz,
processes_used = ProcU+Sz,
diff --git a/erts/preloaded/src/erts_code_purger.erl b/erts/preloaded/src/erts_code_purger.erl
index fd214228c7..c41532ed87 100644
--- a/erts/preloaded/src/erts_code_purger.erl
+++ b/erts/preloaded/src/erts_code_purger.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2016. All Rights Reserved.
+%% Copyright Ericsson AB 2016-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@
-export([start/0, purge/1, soft_purge/1, pending_purge_lambda/3,
finish_after_on_load/2]).
--spec start() -> term().
+-spec start() -> no_return().
start() ->
register(erts_code_purger, self()),
process_flag(trap_exit, true),
diff --git a/erts/preloaded/src/erts_dirty_process_signal_handler.erl b/erts/preloaded/src/erts_dirty_process_signal_handler.erl
index ab71790b9d..381f81ef14 100644
--- a/erts/preloaded/src/erts_dirty_process_signal_handler.erl
+++ b/erts/preloaded/src/erts_dirty_process_signal_handler.erl
@@ -50,10 +50,12 @@ handle_request(Pid) when is_pid(Pid) ->
handle_incoming_signals(Pid, 0);
handle_request({Requester, Target, Prio,
{SysTaskOp, ReqId, Arg} = Op} = Request) ->
- case handle_sys_task(Requester, Target, SysTaskOp, ReqId, Arg) of
- true ->
+ case handle_sys_task(Requester, Target, SysTaskOp, ReqId, Arg, 0) of
+ done ->
ok;
- false ->
+ busy ->
+ self() ! Request;
+ normal ->
%% Target has stopped executing dirty since the
%% initial request was made. Dispatch the
%% request to target and let it handle it itself...
@@ -83,15 +85,19 @@ handle_incoming_signals(Pid, N) ->
_Res -> ok
end.
-handle_sys_task(Requester, Target, check_process_code, ReqId, Module) ->
- case erts_internal:is_process_executing_dirty(Target) of
- false ->
- false;
- true ->
- _ = check_process(Requester, Target, ReqId, Module),
- true
+handle_sys_task(Requester, Target, check_process_code, ReqId, Module, N) ->
+ case erts_internal:check_dirty_process_code(Target, Module) of
+ Bool when Bool == true; Bool == false ->
+ Requester ! {check_process_code, ReqId, Bool},
+ done;
+ busy ->
+ case N > 5 of
+ true ->
+ busy;
+ false ->
+ handle_sys_task(Requester, Target, check_process_code,
+ ReqId, Module, N+1)
+ end;
+ Res ->
+ Res
end.
-
-check_process(Requester, Target, ReqId, Module) ->
- Result = erts_internal:check_dirty_process_code(Target, Module),
- Requester ! {check_process_code, ReqId, Result}.
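The rewritten handler above bounds the number of immediate retries when erts_internal:check_dirty_process_code/2 reports busy, and re-posts the request to its own mailbox if the target is still busy after that. A hedged sketch of the same bounded-retry shape (names are illustrative):

retry_while_busy(CheckFun, Request) ->
    retry_while_busy(CheckFun, Request, 0).

retry_while_busy(CheckFun, Request, N) ->
    case CheckFun() of
        busy when N > 5 ->
            %% Still busy: put the request back in our own mailbox for later.
            self() ! Request,
            busy;
        busy ->
            retry_while_busy(CheckFun, Request, N + 1);
        Result ->
            Result
    end.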
diff --git a/erts/preloaded/src/erts_internal.erl b/erts/preloaded/src/erts_internal.erl
index 79169b7d23..88f47e917b 100644
--- a/erts/preloaded/src/erts_internal.erl
+++ b/erts/preloaded/src/erts_internal.erl
@@ -82,6 +82,14 @@
-export([gather_alloc_histograms/1, gather_carrier_info/1]).
+-export([suspend_process/2]).
+
+-export([process_display/2]).
+
+-export([process_flag/3]).
+
+-export([create_dist_channel/4]).
+
%%
%% Await result of send to port
%%
@@ -303,7 +311,8 @@ get_cpc_opts([{allow_gc, AllowGC} | Options], Async) when AllowGC == true;
get_cpc_opts([], Async) ->
Async.
--spec check_dirty_process_code(Pid,Module) -> 'true' | 'false' when
+-spec check_dirty_process_code(Pid, Module) -> Result when
+ Result :: boolean() | 'normal' | 'busy',
Pid :: pid(),
Module :: module().
check_dirty_process_code(_Pid,_Module) ->
@@ -644,3 +653,41 @@ gather_alloc_histograms(_) ->
gather_carrier_info(_) ->
erlang:nif_error(undef).
+
+-spec suspend_process(Suspendee, OptList) -> Result when
+ Result :: boolean() | 'badarg' | reference(),
+ Suspendee :: pid(),
+ OptList :: [Opt],
+ Opt :: unless_suspending | asynchronous | {asynchronous, term()}.
+
+suspend_process(_Suspendee, _OptList) ->
+ erlang:nif_error(undefined).
+
+%% process_display/2
+-spec process_display(Pid, Type) -> 'true' | 'badarg' | reference() when
+ Pid :: pid(),
+ Type :: backtrace.
+process_display(_Pid, _Type) ->
+ erlang:nif_error(undefined).
+
+%% process_flag/3
+-spec process_flag(Pid, Flag, Value) -> OldValue | 'badarg' | reference() when
+ Pid :: pid(),
+ Flag :: save_calls,
+ Value :: non_neg_integer(),
+ OldValue :: non_neg_integer().
+process_flag(_Pid, _Flag, _Value) ->
+ erlang:nif_error(undefined).
+
+-spec create_dist_channel(Node, DistCtrlr, Flags, Ver) -> Result when
+ Node :: atom(),
+ DistCtrlr :: port() | pid(),
+ Flags :: integer(),
+ Ver :: integer(),
+ Result :: {'ok', erlang:dist_handle()}
+ | {'message', reference()}
+ | 'badarg'
+ | 'system_limit'.
+
+create_dist_channel(_Node, _DistCtrlr, _Flags, _Ver) ->
+ erlang:nif_error(undefined).
diff --git a/erts/preloaded/src/prim_file.erl b/erts/preloaded/src/prim_file.erl
index 558a0f883f..2921b9df1d 100644
--- a/erts/preloaded/src/prim_file.erl
+++ b/erts/preloaded/src/prim_file.erl
@@ -562,7 +562,7 @@ list_dir_convert([RawName | Rest], SkipInvalid, Result) ->
logger ! {log,warning,"Non-unicode filename ~p ignored\n", [RawName],
#{pid=>self(),
gl=>group_leader(),
- time=>erlang:monotonic_time(microsecond),
+ time=>erlang:system_time(microsecond),
error_logger=>#{tag=>warning_msg}}},
list_dir_convert(Rest, SkipInvalid, Result);
{error, _} ->
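The prim_file change above switches the log event's time metadata from monotonic to system time: logger expects time to be wall-clock microseconds that can be formatted as a timestamp, whereas monotonic time has an arbitrary epoch. A minimal sketch of building such metadata:

log_meta() ->
    #{pid => self(),
      gl => group_leader(),
      %% Wall-clock time in microseconds, as expected by logger formatters.
      time => erlang:system_time(microsecond)}.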
diff --git a/erts/preloaded/src/zlib.erl b/erts/preloaded/src/zlib.erl
index a4ef42204d..6f53e67901 100644
--- a/erts/preloaded/src/zlib.erl
+++ b/erts/preloaded/src/zlib.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -87,7 +87,7 @@
%%------------------------------------------------------------------------
%% Public data types.
--type zstream() :: term().
+-type zstream() :: reference().
-type zflush() :: 'none' | 'sync' | 'full' | 'finish'.
-type zlevel() ::
@@ -102,11 +102,11 @@
-type zmethod() :: 'deflated'.
-record(zlib_opts, {
- stream :: zstream(),
- method :: term(),
- input_chunk_size :: integer(),
- output_chunk_size :: integer(),
- flush :: integer()
+ stream :: zstream() | 'undefined',
+ method :: function(),
+ input_chunk_size :: pos_integer(),
+ output_chunk_size :: pos_integer(),
+ flush :: non_neg_integer()
}).
%%------------------------------------------------------------------------
@@ -168,7 +168,7 @@ deflateInit_nif(_Z, _Level, _Method, _WindowBits, _MemLevel, _Strategy) ->
-spec deflateSetDictionary(Z, Dictionary) -> Adler32 when
Z :: zstream(),
Dictionary :: iodata(),
- Adler32 :: integer().
+ Adler32 :: non_neg_integer().
deflateSetDictionary(Z, Dictionary) ->
deflateSetDictionary_nif(Z, Dictionary).
deflateSetDictionary_nif(_Z, _Dictionary) ->
@@ -295,7 +295,7 @@ inflate(Z, Data) ->
Options :: list({exception_on_need_dict, boolean()}),
Decompressed :: iolist() |
{need_dictionary,
- Adler32 :: integer(),
+ Adler32 :: non_neg_integer(),
Output :: iolist()}.
inflate(Z, Data, Options) ->
enqueue_input(Z, Data),
@@ -357,7 +357,7 @@ exception_on_need_dict(Z, Output) when is_list(Output); is_binary(Output) ->
Result :: {continue, Output :: iolist()} |
{finished, Output :: iolist()} |
{need_dictionary,
- Adler32 :: integer(),
+ Adler32 :: non_neg_integer(),
Output :: iolist()}.
safeInflate(Z, Data) ->
enqueue_input(Z, Data),
@@ -389,7 +389,7 @@ getBufSize_nif(_Z) ->
-spec crc32(Z) -> CRC when
Z :: zstream(),
- CRC :: integer().
+ CRC :: non_neg_integer().
crc32(Z) ->
crc32_nif(Z).
crc32_nif(_Z) ->
@@ -398,7 +398,7 @@ crc32_nif(_Z) ->
-spec crc32(Z, Data) -> CRC when
Z :: zstream(),
Data :: iodata(),
- CRC :: integer().
+ CRC :: non_neg_integer().
crc32(Z, Data) when is_reference(Z) ->
erlang:crc32(Data);
crc32(_Z, _Data) ->
@@ -406,9 +406,9 @@ crc32(_Z, _Data) ->
-spec crc32(Z, PrevCRC, Data) -> CRC when
Z :: zstream(),
- PrevCRC :: integer(),
+ PrevCRC :: non_neg_integer(),
Data :: iodata(),
- CRC :: integer().
+ CRC :: non_neg_integer().
crc32(Z, CRC, Data) when is_reference(Z) ->
erlang:crc32(CRC, Data);
crc32(_Z, _CRC, _Data) ->
@@ -416,10 +416,10 @@ crc32(_Z, _CRC, _Data) ->
-spec crc32_combine(Z, CRC1, CRC2, Size2) -> CRC when
Z :: zstream(),
- CRC :: integer(),
- CRC1 :: integer(),
- CRC2 :: integer(),
- Size2 :: integer().
+ CRC :: non_neg_integer(),
+ CRC1 :: non_neg_integer(),
+ CRC2 :: non_neg_integer(),
+ Size2 :: non_neg_integer().
crc32_combine(Z, CRC1, CRC2, Size2) when is_reference(Z) ->
erlang:crc32_combine(CRC1, CRC2, Size2);
crc32_combine(_Z, _CRC1, _CRC2, _Size2) ->
@@ -428,7 +428,7 @@ crc32_combine(_Z, _CRC1, _CRC2, _Size2) ->
-spec adler32(Z, Data) -> CheckSum when
Z :: zstream(),
Data :: iodata(),
- CheckSum :: integer().
+ CheckSum :: non_neg_integer().
adler32(Z, Data) when is_reference(Z) ->
erlang:adler32(Data);
adler32(_Z, _Data) ->
@@ -436,9 +436,9 @@ adler32(_Z, _Data) ->
-spec adler32(Z, PrevAdler, Data) -> CheckSum when
Z :: zstream(),
- PrevAdler :: integer(),
+ PrevAdler :: non_neg_integer(),
Data :: iodata(),
- CheckSum :: integer().
+ CheckSum :: non_neg_integer().
adler32(Z, Adler, Data) when is_reference(Z) ->
erlang:adler32(Adler, Data);
adler32(_Z, _Adler, _Data) ->
@@ -446,10 +446,10 @@ adler32(_Z, _Adler, _Data) ->
-spec adler32_combine(Z, Adler1, Adler2, Size2) -> Adler when
Z :: zstream(),
- Adler :: integer(),
- Adler1 :: integer(),
- Adler2 :: integer(),
- Size2 :: integer().
+ Adler :: non_neg_integer(),
+ Adler1 :: non_neg_integer(),
+ Adler2 :: non_neg_integer(),
+ Size2 :: non_neg_integer().
adler32_combine(Z, Adler1, Adler2, Size2) when is_reference(Z) ->
erlang:adler32_combine(Adler1, Adler2, Size2);
adler32_combine(_Z, _Adler1, _Adler2, _Size2) ->
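The spec tightening above reflects that a zstream() is a NIF resource reference and that all checksum results are non-negative integers. A small usage sketch:

checksum(Data) ->
    Z = zlib:open(),                 % Z is a reference()
    CRC = zlib:crc32(Z, Data),       % non_neg_integer()
    ok = zlib:close(Z),
    CRC.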
diff --git a/erts/test/erlc_SUITE.erl b/erts/test/erlc_SUITE.erl
index 394ecc8964..622c4ec06b 100644
--- a/erts/test/erlc_SUITE.erl
+++ b/erts/test/erlc_SUITE.erl
@@ -505,7 +505,7 @@ run_command(Dir, {win32, _}, Cmd) ->
{BatchFile,
Run,
["@echo off\r\n",
- "set ERLC_EMULATOR=", atom_to_list(lib:progname()), "\r\n",
+ "set ERLC_EMULATOR=", ct:get_progname(), "\r\n",
Cmd, "\r\n",
"if errorlevel 1 echo _ERROR_\r\n",
"if not errorlevel 1 echo _OK_\r\n"]};
@@ -514,7 +514,7 @@ run_command(Dir, {unix, _}, Cmd) ->
{Name,
"/bin/sh " ++ Name,
["#!/bin/sh\n",
- "ERLC_EMULATOR='", atom_to_list(lib:progname()), "'\n",
+ "ERLC_EMULATOR='", ct:get_progname(), "'\n",
"export ERLC_EMULATOR\n",
Cmd, "\n",
"case $? in\n",
diff --git a/erts/vsn.mk b/erts/vsn.mk
index c3a62a5535..687c62343e 100644
--- a/erts/vsn.mk
+++ b/erts/vsn.mk
@@ -18,7 +18,7 @@
# %CopyrightEnd%
#
-VSN = 9.3
+VSN = 9.3.2
# Port number 4365 in 4.2
# Port number 4366 in 4.3
diff --git a/lib/common_test/doc/src/ct.xml b/lib/common_test/doc/src/ct.xml
index afd8741cd1..3d35ae4f54 100644
--- a/lib/common_test/doc/src/ct.xml
+++ b/lib/common_test/doc/src/ct.xml
@@ -572,6 +572,16 @@
</func>
<func>
+ <name>get_progname() -&gt; string()</name>
+ <fsummary>Returns the command used to start this Erlang instance.</fsummary>
+ <desc><marker id="get_progname-0"/>
+ <p>Returns the command used to start this Erlang instance.
+ If this information could not be found, the string
+ <c>"no_prog_name"</c> is returned.</p>
+ </desc>
+ </func>
+
+ <func>
<name>get_status() -&gt; TestStatus | {error, Reason} | no_tests_running</name>
<fsummary>Returns status of ongoing test.</fsummary>
<type>
diff --git a/lib/common_test/doc/src/ct_hooks_chapter.xml b/lib/common_test/doc/src/ct_hooks_chapter.xml
index 7ecc2e4298..3957c0f4a5 100644
--- a/lib/common_test/doc/src/ct_hooks_chapter.xml
+++ b/lib/common_test/doc/src/ct_hooks_chapter.xml
@@ -501,12 +501,13 @@
<tag><c>cth_log_redirect</c></tag>
<item>
<p>Built-in</p>
- <p>Captures all <c>error_logger</c> and SASL logging
- events and prints them to the current test case log. If an event cannot be
- associated with a test case, it is printed in the <c>Common Test</c> framework log.
+ <p>Captures all log events that would normally be printed by the default
+ logger handler, and prints them to the current test case log.
+ If an event cannot be associated with a test case, it is printed in
+ the <c>Common Test</c> framework log.
This happens for test cases running in parallel and events occurring

in-between test cases. You can configure the level of
- <seealso marker="sasl:sasl_app">SASL</seealso> events report
+ <seealso marker="sasl:sasl_app">SASL</seealso> reports
using the normal SASL mechanisms.</p>
</item>
<tag><c>cth_surefire</c></tag>
diff --git a/lib/common_test/src/Makefile b/lib/common_test/src/Makefile
index 2a2a9cb5bc..9adcf2f13b 100644
--- a/lib/common_test/src/Makefile
+++ b/lib/common_test/src/Makefile
@@ -166,4 +166,4 @@ release_tests_spec: opt
release_docs_spec: docs
# Include dependencies -- list below added by Kostis Sagonas
-$(EBIN)/cth_log_redirect.beam: ../../kernel/include/logger.hrl
+$(EBIN)/cth_log_redirect.beam: ../../kernel/include/logger.hrl ../../kernel/src/logger_internal.hrl
diff --git a/lib/common_test/src/ct.erl b/lib/common_test/src/ct.erl
index fd7fa07b81..14a9ec07cf 100644
--- a/lib/common_test/src/ct.erl
+++ b/lib/common_test/src/ct.erl
@@ -87,6 +87,7 @@
decrypt_config_file/2, decrypt_config_file/3]).
-export([get_target_name/1]).
+-export([get_progname/0]).
-export([parse_table/1, listenv/1]).
-export([remaining_test_procs/0]).
@@ -975,7 +976,20 @@ make_priv_dir() ->
%%% belongs to.
get_target_name(Handle) ->
ct_util:get_target_name(Handle).
-
+
+%%%-----------------------------------------------------------------
+%%% @doc Return the command used to start (this) erlang
+
+-spec get_progname() -> string().
+
+get_progname() ->
+ case init:get_argument(progname) of
+ {ok, [[Prog]]} ->
+ Prog;
+ _Other ->
+ "no_prog_name"
+ end.
+
%%%-----------------------------------------------------------------
%%% @spec parse_table(Data) -> {Heading,Table}
%%% Data = [string()]
@@ -1006,7 +1020,6 @@ parse_table(Data) ->
listenv(Telnet) ->
ct_util:listenv(Telnet).
-
%%%-----------------------------------------------------------------
%%% @spec testcases(TestDir, Suite) -> Testcases | {error,Reason}
%%% TestDir = string()
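With ct:get_progname/0 in place, callers that previously relied on atom_to_list(lib:progname()) can use it directly, for example when composing a command line to start another node. A hedged sketch (the function name is illustrative):

erl_command(ExtraArgs) ->
    %% ct:get_progname/0 returns e.g. "erl" or a full cerl invocation,
    %% or "no_prog_name" if -progname is not available.
    lists:flatten([ct:get_progname(), " ", ExtraArgs]).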
diff --git a/lib/common_test/src/test_server_ctrl.erl b/lib/common_test/src/test_server_ctrl.erl
index 1ae6c8c7c7..67645cac08 100644
--- a/lib/common_test/src/test_server_ctrl.erl
+++ b/lib/common_test/src/test_server_ctrl.erl
@@ -4382,7 +4382,7 @@ do_format_exception(Reason={Error,Stack}) ->
PF = fun(Term, I) ->
io_lib:format("~." ++ integer_to_list(I) ++ "tp", [Term])
end,
- case catch lib:format_exception(1, error, Error, Stack, StackFun, PF, utf8) of
+ case catch erl_error:format_exception(1, error, Error, Stack, StackFun, PF, utf8) of
{'EXIT',_R} ->
{"~tp",Reason};
Formatted ->
diff --git a/lib/common_test/src/test_server_node.erl b/lib/common_test/src/test_server_node.erl
index b2d4f199c3..76588e6887 100644
--- a/lib/common_test/src/test_server_node.erl
+++ b/lib/common_test/src/test_server_node.erl
@@ -591,7 +591,7 @@ cast_to_list(X) -> lists:flatten(io_lib:format("~tw", [X])).
%%% this
%%%
pick_erl_program(default) ->
- cast_to_list(lib:progname());
+ ct:get_progname();
pick_erl_program(L) ->
P = random_element(L),
case P of
@@ -600,7 +600,7 @@ pick_erl_program(L) ->
{release, S} ->
find_release(S);
this ->
- cast_to_list(lib:progname())
+ ct:get_progname()
end.
%% This is an attempt to distinguish between spaces in the program
@@ -611,8 +611,8 @@ pick_erl_program(L) ->
%% ({prog,String}) or if the -program switch to beam is used and
%% includes arguments (typically done by cerl in OTP test environment
%% in order to ensure that slave/peer nodes are started with the same
-%% emulator and flags as the test node. The return from lib:progname()
-%% could then typically be '/<full_path_to>/cerl -gcov').
+%% emulator and flags as the test node. The return from ct:get_progname()
+%% could then typically be "/<full_path_to>/cerl -gcov").
quote_progname(Progname) ->
do_quote_progname(string:lexemes(Progname," ")).
diff --git a/lib/common_test/test_server/ts_erl_config.erl b/lib/common_test/test_server/ts_erl_config.erl
index c7fe4ccf83..e37fa844bb 100644
--- a/lib/common_test/test_server/ts_erl_config.erl
+++ b/lib/common_test/test_server/ts_erl_config.erl
@@ -358,7 +358,15 @@ link_library(_LibName,_Other) ->
%% Returns emulator specific variables.
emu_vars(Vars) ->
[{is_source_build, is_source_build()},
- {erl_name, atom_to_list(lib:progname())}|Vars].
+ {erl_name, get_progname()}|Vars].
+
+get_progname() ->
+ case init:get_argument(progname) of
+ {ok, [[Prog]]} ->
+ Prog;
+ _Other ->
+ "no_prog_name"
+ end.
is_source_build() ->
string:find(erlang:system_info(system_version), "source") =/= nomatch.
diff --git a/lib/common_test/test_server/ts_run.erl b/lib/common_test/test_server/ts_run.erl
index 3f594236bc..5dbbaca916 100644
--- a/lib/common_test/test_server/ts_run.erl
+++ b/lib/common_test/test_server/ts_run.erl
@@ -199,7 +199,7 @@ make_command(Vars, Spec, State) ->
TestPath = filename:nativename(TestDir),
Erl = case os:getenv("TS_RUN_VALGRIND") of
false ->
- atom_to_list(lib:progname());
+ ct:get_progname();
_ ->
case State#state.file of
Dir when is_list(Dir) ->
diff --git a/lib/compiler/src/beam_asm.erl b/lib/compiler/src/beam_asm.erl
index 5ef340c831..df0321e85a 100644
--- a/lib/compiler/src/beam_asm.erl
+++ b/lib/compiler/src/beam_asm.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -475,7 +475,7 @@ encode_alloc_list_1([{floats,Floats}|T], Dict, Acc0) ->
encode_alloc_list_1([], Dict, Acc) ->
{iolist_to_binary(Acc),Dict}.
--spec encode(non_neg_integer(), integer()) -> iodata().
+-spec encode(non_neg_integer(), integer()) -> iolist() | integer().
encode(Tag, N) when N < 0 ->
encode1(Tag, negative_to_bytes(N));
diff --git a/lib/compiler/src/beam_dead.erl b/lib/compiler/src/beam_dead.erl
index dbbaae05eb..762c7bdf9e 100644
--- a/lib/compiler/src/beam_dead.erl
+++ b/lib/compiler/src/beam_dead.erl
@@ -392,6 +392,26 @@ backward([{bif,'or',{f,To0},[Dst,{atom,false}],Dst}=I|Is], D,
_ ->
backward(Is, D, [I|Acc])
end;
+backward([{bif,map_get,{f,FF},[Key,Map],_}=I0,
+ {test,has_map_fields,{f,FT}=F,[Map|Keys0]}=I1|Is], D, Acc) when FF =/= 0 ->
+ case shortcut_label(FF, D) of
+ FT ->
+ case lists:delete(Key, Keys0) of
+ [] ->
+ backward([I0|Is], D, Acc);
+ Keys ->
+ Test = {test,has_map_fields,F,[Map|Keys]},
+ backward([Test|Is], D, [I0|Acc])
+ end;
+ _ ->
+ backward([I1|Is], D, [I0|Acc])
+ end;
+backward([{bif,map_get,{f,FF},[_,Map],_}=I0,
+ {test,is_map,{f,FT},[Map]}=I1|Is], D, Acc) when FF =/= 0 ->
+ case shortcut_label(FF, D) of
+ FT -> backward([I0|Is], D, Acc);
+ _ -> backward([I1|Is], D, [I0|Acc])
+ end;
backward([I|Is], D, Acc) ->
backward(Is, D, [I|Acc]);
backward([], _D, Acc) -> Acc.
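The two backward/3 clauses added above let a map_get instruction subsume an immediately preceding has_map_fields or is_map test when both branch to the same failure label. At the source level this corresponds to guard sequences like the following, where the explicit key and map tests become redundant (sketch based on the map_SUITE test added later in this diff; is_map_key itself compiles to an is_map plus has_map_fields test, per the v3_codegen change below):

check(M) when is_map(M) andalso is_map_key(a, M) andalso map_get(a, M) =:= 1 ->
    true;
check(_) ->
    false.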
diff --git a/lib/compiler/src/beam_peep.erl b/lib/compiler/src/beam_peep.erl
index eb3192fe8f..920fb00397 100644
--- a/lib/compiler/src/beam_peep.erl
+++ b/lib/compiler/src/beam_peep.erl
@@ -77,6 +77,12 @@ peep([{bif,tuple_size,_,[_]=Ops,Dst}=I|Is], SeenTests0, Acc) ->
%% Kill all remembered tests that depend on the destination register.
SeenTests = kill_seen(Dst, SeenTests1),
peep(Is, SeenTests, [I|Acc]);
+peep([{bif,map_get,_,[Key,Map],Dst}=I|Is], SeenTests0, Acc) ->
+ %% Pretend that we have seen {test,has_map_fields,_,[Map,Key]}
+ SeenTests1 = gb_sets:add({has_map_fields,[Map,Key]}, SeenTests0),
+ %% Kill all remembered tests that depend on the destination register.
+ SeenTests = kill_seen(Dst, SeenTests1),
+ peep(Is, SeenTests, [I|Acc]);
peep([{bif,_,_,_,Dst}=I|Is], SeenTests0, Acc) ->
%% Kill all remembered tests that depend on the destination register.
SeenTests = kill_seen(Dst, SeenTests0),
diff --git a/lib/compiler/src/beam_type.erl b/lib/compiler/src/beam_type.erl
index 28f36db399..12da8c9446 100644
--- a/lib/compiler/src/beam_type.erl
+++ b/lib/compiler/src/beam_type.erl
@@ -462,6 +462,9 @@ update({set,[D],[Index,Reg],{bif,element,_}}, Ts0) ->
end,
Ts = tdb_meet(Reg, {tuple,min_size,MinSize,[]}, Ts0),
tdb_store(D, any, Ts);
+update({set,[D],[_Key,Map],{bif,map_get,_}}, Ts0) ->
+ Ts = tdb_meet(Map, map, Ts0),
+ tdb_store(D, any, Ts);
update({set,[D],Args,{bif,N,_}}, Ts) ->
Ar = length(Args),
BoolOp = erl_internal:new_type_test(N, Ar) orelse
diff --git a/lib/compiler/src/cerl_trees.erl b/lib/compiler/src/cerl_trees.erl
index c7a129b42c..533c984221 100644
--- a/lib/compiler/src/cerl_trees.erl
+++ b/lib/compiler/src/cerl_trees.erl
@@ -351,10 +351,9 @@ mapfold(F, S0, T) ->
mapfold(fun(T0, A) -> {T0, A} end, F, S0, T).
-%% @spec mapfold(Pre, Post, Initial::term(), Tree::cerl()) ->
-%% {cerl(), term()}
-%%
-%% Pre = Post = (cerl(), term()) -> {cerl(), term()}
+%% @spec mapfold(Pre, Post, Initial::term(), Tree::cerl()) -> {cerl(), term()}
+%% Pre = (cerl(), term()) -> {cerl(), term()}
+%% Post = (cerl(), term()) -> {cerl(), term()}
%%
%% @doc Does a combined map/fold operation on the nodes of the
%% tree. It begins by calling <code>Pre</code> on the tree, using the
diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl
index c6a0056a70..a37b2064b2 100644
--- a/lib/compiler/src/compile.erl
+++ b/lib/compiler/src/compile.erl
@@ -295,7 +295,7 @@ format_error_reason({Reason, Stack}) when is_list(Stack) ->
end,
FormatFun = fun (Term, _) -> io_lib:format("~tp", [Term]) end,
[io_lib:format("~tp", [Reason]),"\n\n",
- lib:format_stacktrace(1, Stack, StackFun, FormatFun)];
+ erl_error:format_stacktrace(1, Stack, StackFun, FormatFun)];
format_error_reason(Reason) ->
io_lib:format("~tp", [Reason]).
diff --git a/lib/compiler/src/erl_bifs.erl b/lib/compiler/src/erl_bifs.erl
index 70b36f029e..a7452aebc8 100644
--- a/lib/compiler/src/erl_bifs.erl
+++ b/lib/compiler/src/erl_bifs.erl
@@ -94,6 +94,7 @@ is_pure(erlang, is_function, 1) -> true;
is_pure(erlang, is_integer, 1) -> true;
is_pure(erlang, is_list, 1) -> true;
is_pure(erlang, is_map, 1) -> true;
+is_pure(erlang, is_map_key, 2) -> true;
is_pure(erlang, is_number, 1) -> true;
is_pure(erlang, is_pid, 1) -> true;
is_pure(erlang, is_port, 1) -> true;
diff --git a/lib/compiler/src/sys_core_fold.erl b/lib/compiler/src/sys_core_fold.erl
index a13bdedaf9..47042c2393 100644
--- a/lib/compiler/src/sys_core_fold.erl
+++ b/lib/compiler/src/sys_core_fold.erl
@@ -1275,13 +1275,18 @@ let_subst_list([], [], _) -> {[],[],[]}.
%%pattern(Pat, Sub) -> pattern(Pat, Sub, Sub).
pattern(#c_var{}=Pat, Isub, Osub) ->
- case sub_is_val(Pat, Isub) of
+ case sub_is_in_scope(Pat, Isub) of
true ->
+ %% This variable either has a substitution or is used in
+ %% the variable list of an enclosing `let`. In either
+ %% case, it must be renamed to an unused name to avoid
+ %% name capture problems.
V1 = make_var_name(),
Pat1 = #c_var{name=V1},
{Pat1,sub_set_var(Pat, Pat1, sub_add_scope([V1], Osub))};
false ->
- {Pat,sub_del_var(Pat, Osub)}
+ %% This variable has never been used. Add it to the scope.
+ {Pat,sub_add_scope([Pat#c_var.name], Osub)}
end;
pattern(#c_literal{}=Pat, _, Osub) -> {Pat,Osub};
pattern(#c_cons{anno=Anno,hd=H0,tl=T0}, Isub, Osub0) ->
@@ -1460,8 +1465,8 @@ is_subst(_) -> false.
%% sub_set_name(Name, Value, #sub{}) -> #sub{}.
%% sub_del_var(Var, #sub{}) -> #sub{}.
%% sub_subst_var(Var, Value, #sub{}) -> [{Name,Value}].
-%% sub_is_val(Var, #sub{}) -> boolean().
-%% sub_add_scope(#sub{}) -> #sub{}
+%% sub_is_in_scope(Var, #sub{}) -> boolean().
+%% sub_add_scope([Var], #sub{}) -> #sub{}
%% sub_subst_scope(#sub{}) -> #sub{}
%%
%% We use the variable name as key so as not have problems with
@@ -1496,18 +1501,6 @@ sub_set_name(V, Val, #sub{v=S,s=Scope,t=Tdb0}=Sub) ->
Tdb = copy_type(V, Val, Tdb1),
Sub#sub{v=orddict:store(V, Val, S),s=cerl_sets:add_element(V, Scope),t=Tdb}.
-sub_del_var(#c_var{name=V}, #sub{v=S,s=Scope,t=Tdb}=Sub) ->
- %% Profiling shows that for programs with many record operations,
- %% sub_del_var/2 is a bottleneck. Since the scope contains all
- %% variables that are live, we know that V cannot be present in S
- %% if it is not in the scope.
- case cerl_sets:is_element(V, Scope) of
- false ->
- Sub#sub{s=cerl_sets:add_element(V, Scope)};
- true ->
- Sub#sub{v=orddict:erase(V, S),t=kill_types(V, Tdb)}
- end.
-
sub_subst_var(#c_var{name=V}, Val, #sub{v=S0}) ->
%% Fold chained substitutions.
[{V,Val}] ++ [ {K,Val} || {K,#c_var{name=V1}} <- S0, V1 =:= V].
@@ -1533,16 +1526,8 @@ sub_subst_scope_1([H|T], Key, Acc) ->
sub_subst_scope_1(T, Key-1, [{Key,#c_var{name=H}}|Acc]);
sub_subst_scope_1([], _, Acc) -> Acc.
-sub_is_val(#c_var{name=V}, #sub{v=S,s=Scope}) ->
- %% When the bottleneck in sub_del_var/2 was eliminated, this
- %% became the new bottleneck. Since the scope contains all
- %% live variables, a variable V can only be the target for
- %% a substitution if it is in the scope.
- cerl_sets:is_element(V, Scope) andalso v_is_value(V, S).
-
-v_is_value(Var, [{_,#c_var{name=Var}}|_]) -> true;
-v_is_value(Var, [_|T]) -> v_is_value(Var, T);
-v_is_value(_, []) -> false.
+sub_is_in_scope(#c_var{name=V}, #sub{s=Scope}) ->
+ cerl_sets:is_element(V, Scope).
%% warn_no_clause_match(CaseOrig, CaseOpt) -> ok
%% Generate a warning if none of the user-specified clauses
diff --git a/lib/compiler/src/sys_core_inline.erl b/lib/compiler/src/sys_core_inline.erl
index 8c1f69d5de..eee3594922 100644
--- a/lib/compiler/src/sys_core_inline.erl
+++ b/lib/compiler/src/sys_core_inline.erl
@@ -143,7 +143,7 @@ inline_inline(#ifun{body=B,weight=Iw}=If, Is) ->
case find_inl(F, A, Is) of
#ifun{vars=Vs,body=B2,weight=W} when W < Iw ->
#c_let{vars=Vs,
- arg=core_lib:make_values(As),
+ arg=kill_id_anns(core_lib:make_values(As)),
body=kill_id_anns(B2)};
_Other -> Call
end;
@@ -160,7 +160,7 @@ inline_func(#fstat{def={Name,F0}}=Fstat, Is) ->
case find_inl(F, A, Is) of
#ifun{vars=Vs,body=B} ->
{#c_let{vars=Vs,
- arg=core_lib:make_values(As),
+ arg=kill_id_anns(core_lib:make_values(As)),
body=kill_id_anns(B)},
true}; %Have modified
_Other -> {Call,Mod}
diff --git a/lib/compiler/src/v3_codegen.erl b/lib/compiler/src/v3_codegen.erl
index 8e73b613a0..9652a8476d 100644
--- a/lib/compiler/src/v3_codegen.erl
+++ b/lib/compiler/src/v3_codegen.erl
@@ -589,6 +589,7 @@ is_gc_bif(element, 2) -> false;
is_gc_bif(get, 1) -> false;
is_gc_bif(tuple_size, 1) -> false;
is_gc_bif(map_get, 2) -> false;
+is_gc_bif(is_map_key, 2) -> false;
is_gc_bif(Bif, Arity) ->
not (erl_internal:bool_op(Bif, Arity) orelse
erl_internal:new_type_test(Bif, Arity) orelse
@@ -1620,6 +1621,11 @@ test_cg(is_boolean, [#k_atom{val=Val}], Fail, I, Vdb, Bef, St) ->
false -> [{jump,{f,Fail}}]
end,
{Is,Aft,St};
+test_cg(is_map_key, As, Fail, I, Vdb, Bef, St) ->
+ [Key,Map] = cg_reg_args(As, Bef),
+ Aft = clear_dead(Bef, I, Vdb),
+ F = {f,Fail},
+ {[{test,is_map,F,[Map]},{test,has_map_fields,F,Map,{list,[Key]}}],Aft,St};
test_cg(Test, As, Fail, I, Vdb, Bef, St) ->
Args = cg_reg_args(As, Bef),
Aft = clear_dead(Bef, I, Vdb),
diff --git a/lib/compiler/test/bs_match_SUITE.erl b/lib/compiler/test/bs_match_SUITE.erl
index 235956a714..3b6ffa8d68 100644
--- a/lib/compiler/test/bs_match_SUITE.erl
+++ b/lib/compiler/test/bs_match_SUITE.erl
@@ -330,6 +330,11 @@ save_restore(Config) when is_list(Config) ->
{"-",<<"x">>} = nnn(C),
{"-",<<"x">>} = ooo(C),
+ a = multiple_matches(<<777:16>>, <<777:16>>),
+ b = multiple_matches(<<777:16>>, <<999:16>>),
+ c = multiple_matches(<<777:16>>, <<57:8>>),
+ d = multiple_matches(<<17:8>>, <<1111:16>>),
+
Bin = <<-1:64>>,
case bad_float_unpack_match(Bin) of
-1 -> ok;
@@ -357,6 +362,11 @@ nnn(<<Char, Tail/binary>>) -> {[Char],Tail}. %% Buggy Tail!
ooo(<<" - ", Tail/binary>>) -> Tail;
ooo(<<Char, Tail/binary>>) -> {[Char],Tail}.
+multiple_matches(<<Y:16>>, <<Y:16>>) -> a;
+multiple_matches(<<_:16>>, <<_:16>>) -> b;
+multiple_matches(<<_:16>>, <<_:8>>) -> c;
+multiple_matches(<<_:8>>, <<_:16>>) -> d.
+
bad_float_unpack_match(<<F:64/float>>) -> F;
bad_float_unpack_match(<<I:64/integer-signed>>) -> I.
diff --git a/lib/compiler/test/core_SUITE.erl b/lib/compiler/test/core_SUITE.erl
index 0e07e8dd2e..d07cd3b8b7 100644
--- a/lib/compiler/test/core_SUITE.erl
+++ b/lib/compiler/test/core_SUITE.erl
@@ -29,7 +29,7 @@
bs_shadowed_size_var/1,
cover_v3_kernel_1/1,cover_v3_kernel_2/1,cover_v3_kernel_3/1,
cover_v3_kernel_4/1,cover_v3_kernel_5/1,
- non_variable_apply/1]).
+ non_variable_apply/1,name_capture/1]).
-include_lib("common_test/include/ct.hrl").
@@ -58,7 +58,7 @@ groups() ->
bs_shadowed_size_var,
cover_v3_kernel_1,cover_v3_kernel_2,cover_v3_kernel_3,
cover_v3_kernel_4,cover_v3_kernel_5,
- non_variable_apply
+ non_variable_apply,name_capture
]}].
@@ -93,6 +93,7 @@ end_per_group(_GroupName, Config) ->
?comp(cover_v3_kernel_4).
?comp(cover_v3_kernel_5).
?comp(non_variable_apply).
+?comp(name_capture).
try_it(Mod, Conf) ->
Src = filename:join(proplists:get_value(data_dir, Conf),
diff --git a/lib/compiler/test/core_SUITE_data/name_capture.core b/lib/compiler/test/core_SUITE_data/name_capture.core
new file mode 100644
index 0000000000..0969f95b72
--- /dev/null
+++ b/lib/compiler/test/core_SUITE_data/name_capture.core
@@ -0,0 +1,110 @@
+module 'name_capture' ['module_info'/0,
+ 'module_info'/1,
+ 'name_capture'/0]
+ attributes ['compile' =
+ [{'inline',[{'badarg_exit',2}]}]]
+'name_capture'/0 =
+ fun () ->
+ case <> of
+ <> when 'true' ->
+ let <_0> =
+ catch
+ apply 'first'/1
+ ('badarg')
+ in case _0 of
+ <{'EXIT',{'badarg',_7}}> when 'true' ->
+ let <Seq> =
+ call 'lists':'seq'
+ (7, 17)
+ in case apply 'first'/1
+ ({'ok',Seq}) of
+ <_8>
+ when call 'erlang':'=:='
+ (_8,
+ Seq) ->
+ let <SomeOtherTerm> =
+ {'some','other','term'}
+ in let <_5> =
+ catch
+ apply 'first'/1
+ (SomeOtherTerm)
+ in case _5 of
+ <{'EXIT',_9}>
+ when call 'erlang':'=:='
+ (_9,
+ SomeOtherTerm) ->
+ 'ok'
+ <_6> when 'true' ->
+ primop 'match_fail'
+ ({'badmatch',_6})
+ end
+ <_3> when 'true' ->
+ primop 'match_fail'
+ ({'badmatch',_3})
+ end
+ <_1> when 'true' ->
+ primop 'match_fail'
+ ({'badmatch',_1})
+ end
+ <> when 'true' ->
+ primop 'match_fail'
+ ({'function_clause'})
+ end
+'first'/1 =
+ fun (_0) ->
+ case _0 of
+ <Tab> when 'true' ->
+ let <_1> =
+ apply 'treq'/2
+ (Tab, 'first')
+ %% The _1 variable in the `let` must be renamed
+ %% to avoid a name capture problem.
+ in let <_0,_1> =
+ <_1,[Tab|[]]>
+ in case <_0,_1> of
+ <'badarg',A> when 'true' ->
+ call 'erlang':'error'
+ ('badarg', A)
+ <{'ok',Reply},_X_A> when 'true' ->
+ Reply
+ <Reply,_X_A> when 'true' ->
+ call 'erlang':'exit'
+ (Reply)
+ <_3,_2> when 'true' ->
+ primop 'match_fail'
+ ({'function_clause',_3,_2})
+ end
+ <_2> when 'true' ->
+ primop 'match_fail'
+ ({'function_clause',_2})
+ end
+'treq'/2 =
+ fun (_0,_1) ->
+ case <_0,_1> of
+ <Action,_4> when 'true' ->
+ Action
+ <_3,_2> when 'true' ->
+ primop 'match_fail'
+ ({'function_clause',_3,_2})
+ end
+'module_info'/0 =
+ fun () ->
+ case <> of
+ <> when 'true' ->
+ call 'erlang':'get_module_info'
+ ('name_capture')
+ <> when 'true' ->
+ primop 'match_fail'
+ ({'function_clause'})
+ end
+'module_info'/1 =
+ fun (_0) ->
+ case _0 of
+ <X> when 'true' ->
+ call 'erlang':'get_module_info'
+ ('name_capture', X)
+ <_1> when 'true' ->
+ primop 'match_fail'
+ ({'function_clause',_1})
+ end
+end
diff --git a/lib/compiler/test/map_SUITE.erl b/lib/compiler/test/map_SUITE.erl
index e98c295da6..6badc7a8b8 100644
--- a/lib/compiler/test/map_SUITE.erl
+++ b/lib/compiler/test/map_SUITE.erl
@@ -1203,12 +1203,18 @@ t_guard_bifs(Config) when is_list(Config) ->
true = map_guard_empty_2(),
true = map_guard_head(#{a=>1}),
false = map_guard_head([]),
+ true = map_get_head(#{a=>1}),
+ false = map_get_head([]),
+ true = map_is_key_head(#{a=>1}),
+ false = map_is_key_head(#{}),
true = map_guard_body(#{a=>1}),
false = map_guard_body({}),
true = map_guard_pattern(#{a=>1, <<"hi">> => "hi" }),
false = map_guard_pattern("list"),
true = map_guard_tautology(),
true = map_guard_ill_map_size(),
+ true = map_field_check_sequence(#{a=>1}),
+ false = map_field_check_sequence(#{}),
ok.
map_guard_empty() when is_map(#{}); false -> true.
@@ -1218,6 +1224,12 @@ map_guard_empty_2() when true; #{} andalso false -> true.
map_guard_head(M) when is_map(M) -> true;
map_guard_head(_) -> false.
+map_get_head(M) when map_get(a, M) =:= 1 -> true;
+map_get_head(_) -> false.
+
+map_is_key_head(M) when is_map_key(a, M) -> true;
+map_is_key_head(M) -> false.
+
map_guard_body(M) -> is_map(M).
map_guard_pattern(#{}) -> true;
@@ -1227,6 +1239,12 @@ map_guard_tautology() when #{} =:= #{}; true -> true.
map_guard_ill_map_size() when true; map_size(0) -> true.
+map_field_check_sequence(M)
+ when is_map(M) andalso is_map_key(a, M) andalso (map_get(a, M) == 1) ->
+ true;
+map_field_check_sequence(_) ->
+ false.
+
t_guard_sequence(Config) when is_list(Config) ->
{1, "a"} = map_guard_sequence_1(#{seq=>1,val=>id("a")}),
{2, "b"} = map_guard_sequence_1(#{seq=>2,val=>id("b")}),
diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c
index dbb6bf8135..2c69dbb5ff 100644
--- a/lib/crypto/c_src/crypto.c
+++ b/lib/crypto/c_src/crypto.c
@@ -60,7 +60,6 @@
#include <openssl/rand.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
-#include <openssl/engine.h>
#include <openssl/err.h>
/* Helper macro to construct a OPENSSL_VERSION_NUMBER.
@@ -102,8 +101,10 @@
# undef FIPS_SUPPORT
# endif
+# if LIBRESSL_VERSION_NUMBER < PACKED_OPENSSL_VERSION_PLAIN(2,7,0)
/* LibreSSL wants the 1.0.1 API */
# define NEED_EVP_COMPATIBILITY_FUNCTIONS
+# endif
#endif
@@ -112,8 +113,10 @@
#endif
-#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION_PLAIN(1,0,0)
-# define HAS_EVP_PKEY_CTX
+#ifndef HAS_LIBRESSL
+# if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION_PLAIN(1,0,0)
+# define HAS_EVP_PKEY_CTX
+# endif
#endif
@@ -121,10 +124,6 @@
#include <openssl/modes.h>
#endif
-#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION(0,9,8,'h')
-#define HAS_ENGINE_SUPPORT
-#endif
-
#include "crypto_callback.h"
#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION_PLAIN(0,9,8) \
@@ -185,6 +184,19 @@
# undef HAVE_RSA_SSLV23_PADDING
#endif
+#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION(0,9,8,'h') \
+ && defined(HAVE_EC)
+/* If OPENSSL_NO_EC is set, there will be an error in ec.h included from engine.h
+ So if EC is disabled, you can't use Engine either....
+*/
+# define HAS_ENGINE_SUPPORT
+#endif
+
+
+#if defined(HAS_ENGINE_SUPPORT)
+# include <openssl/engine.h>
+#endif
+
#if defined(HAVE_CMAC)
#include <openssl/cmac.h>
#endif
@@ -500,7 +512,6 @@ static ERL_NIF_TERM aes_gcm_decrypt_NO_EVP(ErlNifEnv* env, int argc, const ERL_N
static ERL_NIF_TERM chacha20_poly1305_encrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM chacha20_poly1305_decrypt(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
-static int get_engine_load_cmd_list(ErlNifEnv* env, const ERL_NIF_TERM term, char **cmds, int i);
static ERL_NIF_TERM engine_by_id_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM engine_init_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM engine_finish_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
@@ -528,10 +539,12 @@ static int term2point(ErlNifEnv* env, ERL_NIF_TERM term,
static ERL_NIF_TERM bin_from_bn(ErlNifEnv* env, const BIGNUM *bn);
#ifdef HAS_ENGINE_SUPPORT
+static int get_engine_load_cmd_list(ErlNifEnv* env, const ERL_NIF_TERM term, char **cmds, int i);
static int zero_terminate(ErlNifBinary bin, char **buf);
#endif
static int library_refc = 0; /* number of users of this dynamic library */
+static int library_initialized = 0;
static ErlNifFunc nif_funcs[] = {
{"info_lib", 0, info_lib},
@@ -993,14 +1006,14 @@ static int initialize(ErlNifEnv* env, ERL_NIF_TERM load_info)
PRINTF_ERR0("CRYPTO: Could not open resource type 'ENGINE_CTX'");
return __LINE__;
}
+#endif
- if (library_refc > 0) {
+ if (library_initialized) {
/* Repeated loading of this library (module upgrade).
* Atoms and callbacks are already set, we are done.
*/
return 0;
}
-#endif
atom_true = enif_make_atom(env,"true");
atom_false = enif_make_atom(env,"false");
@@ -1107,10 +1120,6 @@ static int initialize(ErlNifEnv* env, ERL_NIF_TERM load_info)
atom_password = enif_make_atom(env,"password");
#endif
- init_digest_types(env);
- init_cipher_types(env);
- init_algorithms_types(env);
-
#ifdef HAVE_DYNAMIC_CRYPTO_LIB
{
void* handle;
@@ -1156,6 +1165,11 @@ static int initialize(ErlNifEnv* env, ERL_NIF_TERM load_info)
}
#endif /* OPENSSL_THREADS */
+ init_digest_types(env);
+ init_cipher_types(env);
+ init_algorithms_types(env);
+
+ library_initialized = 1;
return 0;
}
@@ -3057,10 +3071,11 @@ static ERL_NIF_TERM dh_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_
if ((dhkey = EVP_PKEY_new())
&& (params = EVP_PKEY_new())
&& EVP_PKEY_set1_DH(params, dh_params) /* set the key referenced by params to dh_params.
- dh_params (and params) must be freed by us*/
+ dh_params (and params) must be freed */
&& (ctx = EVP_PKEY_CTX_new(params, NULL))
&& EVP_PKEY_keygen_init(ctx)
- && EVP_PKEY_keygen(ctx, &dhkey)
+ && EVP_PKEY_keygen(ctx, &dhkey) /* "performs a key generation operation, the
+ generated key is written to ppkey." (=last arg) */
&& (dh_params = EVP_PKEY_get1_DH(dhkey)) /* return the referenced key. dh_params and dhkey must be freed */
) {
#else
@@ -3094,8 +3109,8 @@ static ERL_NIF_TERM dh_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_
DH_free(dh_params);
#ifdef HAS_EVP_PKEY_CTX
if (ctx) EVP_PKEY_CTX_free(ctx);
- /* if (dhkey) EVP_PKEY_free(dhkey); */
- /* if (params) EVP_PKEY_free(params); */
+ if (dhkey) EVP_PKEY_free(dhkey);
+ if (params) EVP_PKEY_free(params);
#endif
return ret;
}
@@ -3193,7 +3208,7 @@ static ERL_NIF_TERM dh_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_T
if (dh_pub) DH_free(dh_pub);
#ifdef HAS_EVP_PKEY_CTX
if (ctx) EVP_PKEY_CTX_free(ctx);
- /* if (my_priv_key) EVP_PKEY_free(my_priv_key); */
+ if (my_priv_key) EVP_PKEY_free(my_priv_key);
/* if (peer_pub_key) EVP_PKEY_free(peer_pub_key); */
#endif
return ret;
@@ -5407,9 +5422,9 @@ static ERL_NIF_TERM engine_get_id_nif(ErlNifEnv* env, int argc, const ERL_NIF_TE
#endif
}
+#ifdef HAS_ENGINE_SUPPORT
static int get_engine_load_cmd_list(ErlNifEnv* env, const ERL_NIF_TERM term, char **cmds, int i)
{
-#ifdef HAS_ENGINE_SUPPORT
ERL_NIF_TERM head, tail;
const ERL_NIF_TERM *tmp_tuple;
ErlNifBinary tmpbin;
@@ -5454,10 +5469,8 @@ static int get_engine_load_cmd_list(ErlNifEnv* env, const ERL_NIF_TERM term, cha
cmds[i] = NULL;
return 0;
}
-#else
- return atom_notsup;
-#endif
}
+#endif
static ERL_NIF_TERM engine_get_all_methods_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* () */
diff --git a/lib/crypto/c_src/otp_test_engine.c b/lib/crypto/c_src/otp_test_engine.c
index 5c6122c06a..d0e23a2a3e 100644
--- a/lib/crypto/c_src/otp_test_engine.c
+++ b/lib/crypto/c_src/otp_test_engine.c
@@ -24,10 +24,8 @@
#include <stdio.h>
#include <string.h>
-#include <openssl/engine.h>
#include <openssl/md5.h>
#include <openssl/rsa.h>
-#include <openssl/pem.h>
#define PACKED_OPENSSL_VERSION(MAJ, MIN, FIX, P) \
((((((((MAJ << 8) | MIN) << 8 ) | FIX) << 8) | (P-'a'+1)) << 4) | 0xf)
@@ -40,6 +38,21 @@
#define OLD
#endif
+#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION(0,9,8,'o') \
+ && !defined(OPENSSL_NO_EC) \
+ && !defined(OPENSSL_NO_ECDH) \
+ && !defined(OPENSSL_NO_ECDSA)
+# define HAVE_EC
+#endif
+
+#if defined(HAVE_EC)
+/* If OPENSSL_NO_EC is set, there will be an error in ec.h included from engine.h
+ So if EC is disabled, you can't use Engine either....
+*/
+#include <openssl/engine.h>
+#include <openssl/pem.h>
+
+
static const char *test_engine_id = "MD5";
static const char *test_engine_name = "MD5 test engine";
@@ -262,3 +275,5 @@ int pem_passwd_cb_fun(char *buf, int size, int rwflag, void *password)
return 0;
}
}
+
+#endif
diff --git a/lib/crypto/doc/src/notes.xml b/lib/crypto/doc/src/notes.xml
index 1f788a4e35..66619c9e11 100644
--- a/lib/crypto/doc/src/notes.xml
+++ b/lib/crypto/doc/src/notes.xml
@@ -31,6 +31,28 @@
</header>
<p>This document describes the changes made to the Crypto application.</p>
+<section><title>Crypto 4.2.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ If OPENSSL_NO_EC was set, the compilation of the crypto
+ nifs failed.</p>
+ <p>
+ Own Id: OTP-15073</p>
+ </item>
+ <item>
+ <p>
+ C-compile errors for LibreSSL 2.7.0 - 2.7.2 fixed</p>
+ <p>
+ Own Id: OTP-15074 Aux Id: ERL-618 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Crypto 4.2.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/crypto/vsn.mk b/lib/crypto/vsn.mk
index 3432f00836..778aff9d13 100644
--- a/lib/crypto/vsn.mk
+++ b/lib/crypto/vsn.mk
@@ -1 +1 @@
-CRYPTO_VSN = 4.2.1
+CRYPTO_VSN = 4.2.2
diff --git a/lib/debugger/src/dbg_icmd.erl b/lib/debugger/src/dbg_icmd.erl
index 4cd3dce670..55cbada53b 100644
--- a/lib/debugger/src/dbg_icmd.erl
+++ b/lib/debugger/src/dbg_icmd.erl
@@ -467,7 +467,7 @@ mark_break(Cm, LineNo, Le) ->
parse_cmd(Cmd, LineNo) ->
{ok,Tokens,_} = erl_scan:string(Cmd, LineNo, [text]),
- {ok,Forms,Bs} = lib:extended_parse_exprs(Tokens),
+ {ok,Forms,Bs} = erl_eval:extended_parse_exprs(Tokens),
{Forms, Bs}.
%%====================================================================
diff --git a/lib/debugger/src/dbg_wx_win.erl b/lib/debugger/src/dbg_wx_win.erl
index f1298154ab..fea94156c1 100644
--- a/lib/debugger/src/dbg_wx_win.erl
+++ b/lib/debugger/src/dbg_wx_win.erl
@@ -275,7 +275,7 @@ entry(Parent, Title, Prompt, {Type, Value}) ->
verify(Type, Str) ->
case erl_scan:string(Str, 1, [text]) of
{ok, Tokens, _EndLine} when Type==term ->
- case lib:extended_parse_term(Tokens++[{dot, erl_anno:new(1)}]) of
+ case erl_eval:extended_parse_term(Tokens++[{dot, erl_anno:new(1)}]) of
{ok, Value} -> {edit, Value};
_Error ->
ignore
diff --git a/lib/dialyzer/src/dialyzer.erl b/lib/dialyzer/src/dialyzer.erl
index 1538174d4a..185c8c9ae6 100644
--- a/lib/dialyzer/src/dialyzer.erl
+++ b/lib/dialyzer/src/dialyzer.erl
@@ -409,6 +409,10 @@ message_to_string({extra_range, [M, F, A, ExtraRanges, SigRange]}) ->
io_lib:format("The specification for ~w:~tw/~w states that the function"
" might also return ~ts but the inferred return is ~ts\n",
[M, F, A, ExtraRanges, SigRange]);
+message_to_string({missing_range, [M, F, A, ExtraRanges, ContrRange]}) ->
+ io_lib:format("The success typing for ~w:~tw/~w implies that the function"
+ " might also return ~ts but the specification return is ~ts\n",
+ [M, F, A, ExtraRanges, ContrRange]);
message_to_string({overlapping_contract, [M, F, A]}) ->
io_lib:format("Overloaded contract for ~w:~tw/~w has overlapping domains;"
" such contracts are currently unsupported and are simply ignored\n",
diff --git a/lib/dialyzer/src/dialyzer_contracts.erl b/lib/dialyzer/src/dialyzer_contracts.erl
index 0df15e55f9..af7f4385ad 100644
--- a/lib/dialyzer/src/dialyzer_contracts.erl
+++ b/lib/dialyzer/src/dialyzer_contracts.erl
@@ -197,9 +197,11 @@ check_contracts(Contracts, Callgraph, FunTypes, ModOpaques) ->
false ->
[{MFA, Contract}|NewContracts]
end;
- {error, {extra_range, _, _}} ->
- %% do not treat extra range as an error in this check
- %% since that prevents discovering other actual errors
+ {range_warnings, _} ->
+ %% do not treat extra range, either in contract or
+ %% in success typing, as an error in this check
+ %% since that prevents discovering other actual
+ %% errors
[{MFA, Contract}|NewContracts];
{error, _Error} -> NewContracts
end;
@@ -210,14 +212,26 @@ check_contracts(Contracts, Callgraph, FunTypes, ModOpaques) ->
end,
orddict:from_list(lists:foldl(FoldFun, [], orddict:to_list(FunTypes))).
+-type check_contract_return() ::
+ 'ok'
+ | {'error',
+ 'invalid_contract'
+ | {'opaque_mismatch', erl_types:erl_type()}
+ | {'overlapping_contract', [module() | atom() | byte()]}
+ | string()}
+ | {'range_warnings',
+ [{'error', {'extra_range' | 'missing_range',
+ erl_types:erl_type(),
+ erl_types:erl_type()}}]}.
+
%% Checks all components of a contract
--spec check_contract(#contract{}, erl_types:erl_type()) -> 'ok' | {'error', term()}.
+-spec check_contract(#contract{}, erl_types:erl_type()) -> check_contract_return().
check_contract(Contract, SuccType) ->
check_contract(Contract, SuccType, 'universe').
-spec check_contract(#contract{}, erl_types:erl_type(), erl_types:opaques()) ->
- 'ok' | {'error', term()}.
+ check_contract_return().
check_contract(#contract{contracts = Contracts}, SuccType, Opaques) ->
try
@@ -290,15 +304,23 @@ check_contract_inf_list([], _SuccType, _Opaques, OM) ->
check_extraneous([], _SuccType) -> ok;
check_extraneous([C|Cs], SuccType) ->
case check_extraneous_1(C, SuccType) of
- ok -> check_extraneous(Cs, SuccType);
- Error -> Error
+ {error, invalid_contract} = Error ->
+ Error;
+ {error, {extra_range, _, _}} = Error ->
+ {range_warnings, [Error | check_missing(C, SuccType)]};
+ ok ->
+ case check_missing(C, SuccType) of
+ [] -> check_extraneous(Cs, SuccType);
+ ErrorL -> {range_warnings, ErrorL}
+ end
end.
check_extraneous_1(Contract, SuccType) ->
CRng = erl_types:t_fun_range(Contract),
CRngs = erl_types:t_elements(CRng),
STRng = erl_types:t_fun_range(SuccType),
- ?debug("CR = ~tp\nSR = ~tp\n", [CRngs, STRng]),
+ ?debug("\nCR = ~ts\nSR = ~ts\n", [erl_types:t_to_string(CRng),
+ erl_types:t_to_string(STRng)]),
case [CR || CR <- CRngs,
erl_types:t_is_none(erl_types:t_inf(CR, STRng))] of
[] ->
@@ -341,6 +363,18 @@ map_part(Type) ->
is_empty_map(Type) ->
erl_types:t_is_equal(Type, erl_types:t_from_term(#{})).
+check_missing(Contract, SuccType) ->
+ CRng = erl_types:t_fun_range(Contract),
+ STRng = erl_types:t_fun_range(SuccType),
+ STRngs = erl_types:t_elements(STRng),
+ ?debug("\nCR = ~ts\nSR = ~ts\n", [erl_types:t_to_string(CRng),
+ erl_types:t_to_string(STRng)]),
+ case [STR || STR <- STRngs,
+ erl_types:t_is_none(erl_types:t_inf(STR, CRng))] of
+ [] -> [];
+ STRs -> [{error, {missing_range, erl_types:t_sup(STRs), CRng}}]
+ end.
+
%% This is the heart of the "range function"
-spec process_contracts([contract_pair()], [erl_types:erl_type()]) ->
erl_types:erl_type().
@@ -712,22 +746,30 @@ get_invalid_contract_warnings_funs([{MFA, {FileLine, Contract, _Xtra}}|Left],
[W|Acc];
{error, {overlapping_contract, []}} ->
[overlapping_contract_warning(MFA, WarningInfo)|Acc];
- {error, {extra_range, ExtraRanges, STRange}} ->
- Warn =
- case t_from_forms_without_remote(Contract#contract.forms,
- MFA, RecDict) of
- {ok, NoRemoteType} ->
- CRet = erl_types:t_fun_range(NoRemoteType),
- erl_types:t_is_subtype(ExtraRanges, CRet);
- unsupported ->
- true
- end,
- case Warn of
- true ->
- [extra_range_warning(MFA, WarningInfo, ExtraRanges, STRange)|Acc];
- false ->
- Acc
- end;
+ {range_warnings, Errors} ->
+ Fun =
+ fun({error, {extra_range, ExtraRanges, STRange}}, Acc0) ->
+ Warn =
+ case t_from_forms_without_remote(Contract#contract.forms,
+ MFA, RecDict) of
+ {ok, NoRemoteType} ->
+ CRet = erl_types:t_fun_range(NoRemoteType),
+ erl_types:t_is_subtype(ExtraRanges, CRet);
+ unsupported ->
+ true
+ end,
+ case Warn of
+ true ->
+ [extra_range_warning(MFA, WarningInfo,
+ ExtraRanges, STRange)|Acc0];
+ false ->
+ Acc0
+ end;
+ ({error, {missing_range, ExtraRanges, CRange}}, Acc0) ->
+ [missing_range_warning(MFA, WarningInfo,
+ ExtraRanges, CRange)|Acc0]
+ end,
+ lists:foldl(Fun, Acc, Errors);
{error, Msg} ->
[{?WARN_CONTRACT_SYNTAX, WarningInfo, Msg}|Acc];
ok ->
@@ -745,6 +787,9 @@ get_invalid_contract_warnings_funs([{MFA, {FileLine, Contract, _Xtra}}|Left],
{error, _} ->
[invalid_contract_warning(MFA, WarningInfo, BifSig, RecDict)
|Acc];
+ {range_warnings, _} ->
+ picky_contract_check(CSig, BifSig, MFA, WarningInfo,
+ Contract, RecDict, Acc);
ok ->
picky_contract_check(CSig, BifSig, MFA, WarningInfo,
Contract, RecDict, Acc)
@@ -778,6 +823,12 @@ extra_range_warning({M, F, A}, WarningInfo, ExtraRanges, STRange) ->
{?WARN_CONTRACT_SUPERTYPE, WarningInfo,
{extra_range, [M, F, A, ERangesStr, STRangeStr]}}.
+missing_range_warning({M, F, A}, WarningInfo, ExtraRanges, CRange) ->
+ ERangesStr = erl_types:t_to_string(ExtraRanges),
+ CRangeStr = erl_types:t_to_string(CRange),
+ {?WARN_CONTRACT_SUBTYPE, WarningInfo,
+ {missing_range, [M, F, A, ERangesStr, CRangeStr]}}.
+
picky_contract_check(CSig0, Sig0, MFA, WarningInfo, Contract, RecDict, Acc) ->
CSig = erl_types:t_abstract_records(CSig0, RecDict),
Sig = erl_types:t_abstract_records(Sig0, RecDict),
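The {range_warnings, ...} return introduced above bundles both directions of a range mismatch: extra_range (the contract promises more than the success typing) and missing_range (the success typing returns more than the contract admits). A minimal sketch of how check_missing/2 derives the latter, using only erl_types calls already visible in this hunk (illustration, not part of the patch; assumes erl_types is available on the code path):

    %% Suppose the success typing of some f/0 is 'ok' | integer(),
    %% while the contract says -spec f() -> 'ok'.
    CRng  = erl_types:t_atom(ok),                      % contract range
    STRng = erl_types:t_sup(erl_types:t_atom(ok),
                            erl_types:t_integer()),    % success-typing range
    Missing = [T || T <- erl_types:t_elements(STRng),
                    erl_types:t_is_none(erl_types:t_inf(T, CRng))],
    %% Missing =:= [integer()], so check_missing/2 would yield
    %% [{error, {missing_range, integer(), 'ok'}}], reported as range_warnings.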
diff --git a/lib/dialyzer/src/dialyzer_dataflow.erl b/lib/dialyzer/src/dialyzer_dataflow.erl
index c5f93a3392..45b4abb253 100644
--- a/lib/dialyzer/src/dialyzer_dataflow.erl
+++ b/lib/dialyzer/src/dialyzer_dataflow.erl
@@ -102,6 +102,8 @@
| 'undefined', % race
fun_homes :: dict:dict(label(), mfa())
| 'undefined', % race
+ reachable_funs :: sets:set(label())
+ | 'undefined', % race
plt :: dialyzer_plt:plt()
| 'undefined', % race
opaques :: [type()]
@@ -269,9 +271,11 @@ traverse(Tree, Map, State) ->
case state__warning_mode(State) of
true -> {State, Map, Type};
false ->
- State2 = state__add_work(get_label(Tree), State),
+ FunLbl = get_label(Tree),
+ State2 = state__add_work(FunLbl, State),
State3 = state__update_fun_env(Tree, Map, State2),
- {State3, Map, Type}
+ State4 = state__add_reachable(FunLbl, State3),
+ {State4, Map, Type}
end;
'let' ->
handle_let(Tree, Map, State);
@@ -3039,25 +3043,35 @@ state__new(Callgraph, Codeserver, Tree, Plt, Module, Records) ->
{TreeMap, FunHomes} = build_tree_map(Tree, Callgraph),
Funs = dict:fetch_keys(TreeMap),
FunTab = init_fun_tab(Funs, dict:new(), TreeMap, Callgraph, Plt),
- ExportedFuns =
- [Fun || Fun <- Funs--[top], dialyzer_callgraph:is_escaping(Fun, Callgraph)],
- Work = init_work(ExportedFuns),
+ ExportedFunctions =
+ [Fun ||
+ Fun <- Funs--[top],
+ dialyzer_callgraph:is_escaping(Fun, Callgraph),
+ dialyzer_callgraph:lookup_name(Fun, Callgraph) =/= error
+ ],
+ Work = init_work(ExportedFunctions),
Env = lists:foldl(fun(Fun, Env) -> dict:store(Fun, map__new(), Env) end,
dict:new(), Funs),
#state{callgraph = Callgraph, codeserver = Codeserver,
envs = Env, fun_tab = FunTab, fun_homes = FunHomes, opaques = Opaques,
plt = Plt, races = dialyzer_races:new(), records = Records,
warning_mode = false, warnings = [], work = Work, tree_map = TreeMap,
- module = Module}.
+ module = Module, reachable_funs = sets:new()}.
state__warning_mode(#state{warning_mode = WM}) ->
WM.
state__set_warning_mode(#state{tree_map = TreeMap, fun_tab = FunTab,
- races = Races} = State) ->
+ races = Races, callgraph = Callgraph,
+ reachable_funs = ReachableFuns} = State) ->
?debug("==========\nStarting warning pass\n==========\n", []),
Funs = dict:fetch_keys(TreeMap),
- State#state{work = init_work([top|Funs--[top]]),
+ Work =
+ [Fun ||
+ Fun <- Funs--[top],
+ dialyzer_callgraph:lookup_name(Fun, Callgraph) =/= error orelse
+ sets:is_element(Fun, ReachableFuns)],
+ State#state{work = init_work(Work),
fun_tab = FunTab, warning_mode = true,
races = dialyzer_races:put_race_analysis(true, Races)}.
@@ -3149,7 +3163,8 @@ state__get_race_warnings(#state{races = Races} = State) ->
State1#state{races = Races1}.
state__get_warnings(#state{tree_map = TreeMap, fun_tab = FunTab,
- callgraph = Callgraph, plt = Plt} = State) ->
+ callgraph = Callgraph, plt = Plt,
+ reachable_funs = ReachableFuns} = State) ->
FoldFun =
fun({top, _}, AccState) -> AccState;
({FunLbl, Fun}, AccState) ->
@@ -3184,7 +3199,12 @@ state__get_warnings(#state{tree_map = TreeMap, fun_tab = FunTab,
GenRet = dialyzer_contracts:get_contract_return(C),
not t_is_unit(GenRet)
end,
- case Warn of
+ %% Do not output warnings for unreachable funs.
+ case
+ Warn andalso
+ (dialyzer_callgraph:lookup_name(FunLbl, Callgraph) =/= error
+ orelse sets:is_element(FunLbl, ReachableFuns))
+ of
true ->
case classify_returns(Fun) of
no_match ->
@@ -3255,6 +3275,10 @@ state__get_args_and_status(Tree, #state{fun_tab = FunTab}) ->
{ok, {ArgTypes, _}} -> {ArgTypes, true}
end.
+state__add_reachable(FunLbl, #state{reachable_funs = ReachableFuns}=State) ->
+ NewReachableFuns = sets:add_element(FunLbl, ReachableFuns),
+ State#state{reachable_funs = NewReachableFuns}.
+
build_tree_map(Tree, Callgraph) ->
Fun =
fun(T, {Dict, Homes, FunLbls} = Acc) ->
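The new reachable_funs field records every anonymous fun whose body was actually traversed; during the warning pass only funs that either have a name in the callgraph or appear in that set are revisited, which is what suppresses warnings for code inside unreachable funs. A small sketch of the bookkeeping (illustration only; FunLabel is a hypothetical integer core label):

    Reachable0 = sets:new(),
    FunLabel   = 42,                                        % hypothetical label
    Reachable1 = sets:add_element(FunLabel, Reachable0),    % cf. state__add_reachable/2
    Keep = fun(Fun, Callgraph) ->
                   dialyzer_callgraph:lookup_name(Fun, Callgraph) =/= error
                       orelse sets:is_element(Fun, Reachable1)
           end.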
diff --git a/lib/dialyzer/src/typer.erl b/lib/dialyzer/src/typer.erl
index 9d3d9ce438..4b99f5f72e 100644
--- a/lib/dialyzer/src/typer.erl
+++ b/lib/dialyzer/src/typer.erl
@@ -401,7 +401,7 @@ get_type({{M, F, A} = MFA, Range, Arg}, CodeServer, Records) ->
Sig = erl_types:t_fun(Arg, Range),
case dialyzer_contracts:check_contract(Contract, Sig) of
ok -> {{F, A}, {contract, Contract}};
- {error, {extra_range, _, _}} ->
+ {range_warnings, _} ->
{{F, A}, {contract, Contract}};
{error, {overlapping_contract, []}} ->
{{F, A}, {contract, Contract}};
diff --git a/lib/dialyzer/test/options1_SUITE_data/results/compiler b/lib/dialyzer/test/options1_SUITE_data/results/compiler
index cbb5115c91..e1dc038800 100644
--- a/lib/dialyzer/test/options1_SUITE_data/results/compiler
+++ b/lib/dialyzer/test/options1_SUITE_data/results/compiler
@@ -28,7 +28,7 @@ cerl_inline.erl:2750: The pattern <{[], L, D}, Vs> can never match the type <[1.
cerl_inline.erl:2752: The pattern <{[], _L, D}, Vs> can never match the type <[1..255,...],[any()]>
cerl_inline.erl:2754: The pattern <{F, L, D}, Vs> can never match the type <[1..255,...],[any()]>
cerl_inline.erl:2756: The pattern <{F, _L, D}, Vs> can never match the type <[1..255,...],[any()]>
-compile.erl:788: The pattern {'error', Es} can never match the type {'ok',<<_:64,_:_*8>>}
+compile.erl:792: The pattern {'error', Es} can never match the type {'ok',<<_:64,_:_*8>>}
core_lint.erl:473: The pattern <{'c_atom', _, 'all'}, 'binary', _Def, St> can never match the type <_,#c_nil{} | {'c_atom' | 'c_char' | 'c_float' | 'c_int' | 'c_string' | 'c_tuple',_,_} | #c_cons{hd::#c_nil{} | {'c_atom' | 'c_char' | 'c_float' | 'c_int' | 'c_string' | 'c_tuple',_,_} | #c_cons{hd::{_,_} | {_,_,_} | {_,_,_,_},tl::{_,_} | {_,_,_} | {_,_,_,_}},tl::#c_nil{} | {'c_atom' | 'c_char' | 'c_float' | 'c_int' | 'c_string' | 'c_tuple',_,_} | #c_cons{hd::{_,_} | {_,_,_} | {_,_,_,_},tl::{_,_} | {_,_,_} | {_,_,_,_}}},[any()],_>
core_lint.erl:505: The pattern <_Req, 'unknown', St> can never match the type <non_neg_integer(),non_neg_integer(),_>
sys_pre_expand.erl:625: Call to missing or unexported function erlang:hash/2
diff --git a/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl b/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl
index 7e5ccde2fd..6838cf6734 100644
--- a/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl
+++ b/lib/dialyzer/test/options1_SUITE_data/src/compiler/compile.erl
@@ -228,11 +228,15 @@ os_process_size() ->
case os:type() of
{unix, sunos} ->
Size = os:cmd("ps -o vsz -p " ++ os:getpid() ++ " | tail -1"),
- list_to_integer(lib:nonl(Size));
+ list_to_integer(nonl(Size));
_ ->
0
end.
+nonl([$\n]) -> [];
+nonl([]) -> [];
+nonl([H|T]) -> [H|nonl(T)].
+
run_tc({Name,Fun}, St) ->
Before0 = statistics(runtime),
Val = (catch Fun(St)),
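The test module now carries its own nonl/1, since the change set moves away from the old lib module. For reference, its behaviour (sketch, not part of the patch):

    %% nonl/1 drops a single trailing newline, leaving everything else intact.
    "1234"   = nonl("1234\n"),
    "12\n34" = nonl("12\n34"),
    []       = nonl([]).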
diff --git a/lib/dialyzer/test/overspecs_SUITE_data/dialyzer_options b/lib/dialyzer/test/overspecs_SUITE_data/dialyzer_options
new file mode 100644
index 0000000000..ff4517e59d
--- /dev/null
+++ b/lib/dialyzer/test/overspecs_SUITE_data/dialyzer_options
@@ -0,0 +1 @@
+{dialyzer_options, [{warnings, [overspecs]}]}.
diff --git a/lib/dialyzer/test/overspecs_SUITE_data/results/iodata b/lib/dialyzer/test/overspecs_SUITE_data/results/iodata
new file mode 100644
index 0000000000..d9c70330ec
--- /dev/null
+++ b/lib/dialyzer/test/overspecs_SUITE_data/results/iodata
@@ -0,0 +1,2 @@
+
+iodata.erl:7: The success typing for iodata:encode/2 implies that the function might also return integer() but the specification return is binary() | maybe_improper_list(binary() | maybe_improper_list(any(),binary() | []) | byte(),binary() | [])
diff --git a/lib/dialyzer/test/overspecs_SUITE_data/results/iolist b/lib/dialyzer/test/overspecs_SUITE_data/results/iolist
new file mode 100644
index 0000000000..ca556f017c
--- /dev/null
+++ b/lib/dialyzer/test/overspecs_SUITE_data/results/iolist
@@ -0,0 +1,2 @@
+
+iolist.erl:7: The success typing for iolist:encode/2 implies that the function might also return integer() but the specification return is maybe_improper_list(binary() | maybe_improper_list(any(),binary() | []) | byte(),binary() | [])
diff --git a/lib/dialyzer/test/overspecs_SUITE_data/src/iodata.erl b/lib/dialyzer/test/overspecs_SUITE_data/src/iodata.erl
new file mode 100644
index 0000000000..caa44f6c91
--- /dev/null
+++ b/lib/dialyzer/test/overspecs_SUITE_data/src/iodata.erl
@@ -0,0 +1,41 @@
+-module(iodata).
+
+%% A small part of beam_asm.
+
+-export([encode/2]).
+
+-spec encode(non_neg_integer(), integer()) -> iodata(). % extra range binary()
+
+encode(Tag, N) when Tag >= 0, N < 0 ->
+ encode1(Tag, negative_to_bytes(N));
+encode(Tag, N) when Tag >= 0, N < 16 ->
+ (N bsl 4) bor Tag; % not in the specification
+encode(Tag, N) when Tag >= 0, N < 16#800 ->
+ [((N bsr 3) band 2#11100000) bor Tag bor 2#00001000, N band 16#ff];
+encode(Tag, N) when Tag >= 0 ->
+ encode1(Tag, to_bytes(N)).
+
+encode1(Tag, Bytes) ->
+ case iolist_size(Bytes) of
+ Num when 2 =< Num, Num =< 8 ->
+ [((Num-2) bsl 5) bor 2#00011000 bor Tag| Bytes];
+ Num when 8 < Num ->
+ [2#11111000 bor Tag, encode(0, Num-9)| Bytes]
+ end.
+
+to_bytes(N) ->
+ Bin = binary:encode_unsigned(N),
+ case Bin of
+ <<0:1,_/bits>> -> Bin;
+ <<1:1,_/bits>> -> [0,Bin]
+ end.
+
+negative_to_bytes(N) when N >= -16#8000 ->
+ <<N:16>>;
+negative_to_bytes(N) ->
+ Bytes = byte_size(binary:encode_unsigned(-N)),
+ Bin = <<N:Bytes/unit:8>>,
+ case Bin of
+ <<0:1,_/bits>> -> [16#ff,Bin];
+ <<1:1,_/bits>> -> Bin
+ end.
diff --git a/lib/dialyzer/test/overspecs_SUITE_data/src/iolist.erl b/lib/dialyzer/test/overspecs_SUITE_data/src/iolist.erl
new file mode 100644
index 0000000000..7cceeda24e
--- /dev/null
+++ b/lib/dialyzer/test/overspecs_SUITE_data/src/iolist.erl
@@ -0,0 +1,41 @@
+-module(iolist).
+
+%% A small part of beam_asm.
+
+-export([encode/2]).
+
+-spec encode(non_neg_integer(), integer()) -> iolist().
+
+encode(Tag, N) when Tag >= 0, N < 0 ->
+ encode1(Tag, negative_to_bytes(N));
+encode(Tag, N) when Tag >= 0, N < 16 ->
+ (N bsl 4) bor Tag; % not in the specification
+encode(Tag, N) when Tag >= 0, N < 16#800 ->
+ [((N bsr 3) band 2#11100000) bor Tag bor 2#00001000, N band 16#ff];
+encode(Tag, N) when Tag >= 0 ->
+ encode1(Tag, to_bytes(N)).
+
+encode1(Tag, Bytes) ->
+ case iolist_size(Bytes) of
+ Num when 2 =< Num, Num =< 8 ->
+ [((Num-2) bsl 5) bor 2#00011000 bor Tag| Bytes];
+ Num when 8 < Num ->
+ [2#11111000 bor Tag, encode(0, Num-9)| Bytes]
+ end.
+
+to_bytes(N) ->
+ Bin = binary:encode_unsigned(N),
+ case Bin of
+ <<0:1,_/bits>> -> Bin;
+ <<1:1,_/bits>> -> [0,Bin]
+ end.
+
+negative_to_bytes(N) when N >= -16#8000 ->
+ <<N:16>>;
+negative_to_bytes(N) ->
+ Bytes = byte_size(binary:encode_unsigned(-N)),
+ Bin = <<N:Bytes/unit:8>>,
+ case Bin of
+ <<0:1,_/bits>> -> [16#ff,Bin];
+ <<1:1,_/bits>> -> Bin
+ end.
diff --git a/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_esi.erl b/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_esi.erl
index a48f73274b..ce144e061f 100644
--- a/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_esi.erl
+++ b/lib/dialyzer/test/r9c_SUITE_data/src/inets/mod_esi.erl
@@ -285,7 +285,7 @@ eval(Info,"GET",CGIBody,Modules) ->
"~n Modules: ~p",[Modules]),
case auth(CGIBody,Modules) of
true ->
- case lib:eval_str(string:concat(CGIBody,". ")) of
+ case eval_str(string:concat(CGIBody,". ")) of
{error,Reason} ->
?vlog("eval -> error:"
"~n Reason: ~p",[Reason]),
@@ -318,6 +318,48 @@ auth(CGIBody,Modules) ->
false
end.
+%% eval_str(InStr) -> {ok, OutStr} | {error, ErrStr}
+%% InStr must represent a body
+%% Note: If InStr is a binary it has to be a Latin-1 string.
+%% If you have a UTF-8 encoded binary you have to call
+%% unicode:characters_to_list/1 before the call to eval_str().
+
+-define(result(F,D), lists:flatten(io_lib:format(F, D))).
+
+-spec eval_str(string() | unicode:latin1_binary()) ->
+ {'ok', string()} | {'error', string()}.
+
+eval_str(Str) when is_list(Str) ->
+ case erl_scan:tokens([], Str, 0) of
+ {more, _} ->
+ {error, "Incomplete form (missing .<cr>)??"};
+ {done, {ok, Toks, _}, Rest} ->
+ case all_white(Rest) of
+ true ->
+ case erl_parse:parse_exprs(Toks) of
+ {ok, Exprs} ->
+ case catch erl_eval:exprs(Exprs, erl_eval:new_bindings()) of
+ {value, Val, _} ->
+ {ok, Val};
+ Other ->
+ {error, ?result("*** eval: ~p", [Other])}
+ end;
+ {error, {_Line, Mod, Args}} ->
+ Msg = ?result("*** ~ts",[Mod:format_error(Args)]),
+ {error, Msg}
+ end;
+ false ->
+ {error, ?result("Non-white space found after "
+ "end-of-form :~ts", [Rest])}
+ end
+ end.
+
+all_white([$\s|T]) -> all_white(T);
+all_white([$\n|T]) -> all_white(T);
+all_white([$\t|T]) -> all_white(T);
+all_white([]) -> true;
+all_white(_) -> false.
+
%%----------------------------------------------------------------------
%%Creates the environment list that will be the first arg to the
%%Functions that is called through the ErlScript Schema
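eval_str/1 above is effectively the old lib:eval_str/1 inlined as a module-local helper. A hedged usage sketch (assuming, for illustration only, that the helper were exported):

    {ok, 5}    = eval_str("2 + 3. "),     % evaluates a complete expression body
    {error, _} = eval_str("2 + . "),      % parse error is returned as a string
    {error, _} = eval_str("1. garbage").  % non-whitespace after end of form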
diff --git a/lib/dialyzer/test/small_SUITE_data/results/unused_funs b/lib/dialyzer/test/small_SUITE_data/results/unused_funs
new file mode 100644
index 0000000000..c468457ead
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/results/unused_funs
@@ -0,0 +1,5 @@
+
+unused_funs.erl:10: The pattern 'error' can never match the type 'other_error'
+unused_funs.erl:15: Function not_used/0 will never be called
+unused_funs.erl:19: Function foo/1 will never be called
+unused_funs.erl:7: Function test/0 has no local return
diff --git a/lib/dialyzer/test/small_SUITE_data/src/unused_funs.erl b/lib/dialyzer/test/small_SUITE_data/src/unused_funs.erl
new file mode 100644
index 0000000000..c24cf3ea81
--- /dev/null
+++ b/lib/dialyzer/test/small_SUITE_data/src/unused_funs.erl
@@ -0,0 +1,21 @@
+%% See also ERL-593.
+
+-module(unused_funs).
+
+-export([test/0]).
+
+test() -> % "has no local return"
+ Var = outer_scope,
+ case other_error of
+ error -> % "can never match"
+ %% No warnings "no local return" and "_ = 1 can never match 0" (!)
+ foo(fun() -> {Var, 1 = 0} end)
+ end.
+
+not_used() -> % "will never be called"
+ %% No warnings "no local return" and "1 can never match 0".
+ foo(fun() -> 1 = 0 end).
+
+foo(Fun) -> % "will never be called"
+ 1 = 0, % No pattern match warning (foo/1 is not traversed at all).
+ Fun().
diff --git a/lib/dialyzer/test/specdiffs_SUITE_data/dialyzer_options b/lib/dialyzer/test/specdiffs_SUITE_data/dialyzer_options
new file mode 100644
index 0000000000..56b36f2ed4
--- /dev/null
+++ b/lib/dialyzer/test/specdiffs_SUITE_data/dialyzer_options
@@ -0,0 +1 @@
+{dialyzer_options, [{warnings, [specdiffs]}]}.
diff --git a/lib/dialyzer/test/specdiffs_SUITE_data/results/iodata b/lib/dialyzer/test/specdiffs_SUITE_data/results/iodata
new file mode 100644
index 0000000000..3fb12fe000
--- /dev/null
+++ b/lib/dialyzer/test/specdiffs_SUITE_data/results/iodata
@@ -0,0 +1,3 @@
+
+iodata.erl:7: The specification for iodata:encode/2 states that the function might also return binary() but the inferred return is nonempty_maybe_improper_list(<<_:8,_:_*8>> | nonempty_maybe_improper_list(<<_:8,_:_*8>> | nonempty_maybe_improper_list(any(),<<_:8,_:_*8>> | []) | byte(),<<_:8,_:_*8>> | []) | integer(),<<_:8,_:_*8>> | []) | integer()
+iodata.erl:7: The success typing for iodata:encode/2 implies that the function might also return integer() but the specification return is binary() | maybe_improper_list(binary() | maybe_improper_list(any(),binary() | []) | byte(),binary() | [])
diff --git a/lib/dialyzer/test/specdiffs_SUITE_data/results/iolist b/lib/dialyzer/test/specdiffs_SUITE_data/results/iolist
new file mode 100644
index 0000000000..ca556f017c
--- /dev/null
+++ b/lib/dialyzer/test/specdiffs_SUITE_data/results/iolist
@@ -0,0 +1,2 @@
+
+iolist.erl:7: The success typing for iolist:encode/2 implies that the function might also return integer() but the specification return is maybe_improper_list(binary() | maybe_improper_list(any(),binary() | []) | byte(),binary() | [])
diff --git a/lib/dialyzer/test/specdiffs_SUITE_data/src/iodata.erl b/lib/dialyzer/test/specdiffs_SUITE_data/src/iodata.erl
new file mode 100644
index 0000000000..caa44f6c91
--- /dev/null
+++ b/lib/dialyzer/test/specdiffs_SUITE_data/src/iodata.erl
@@ -0,0 +1,41 @@
+-module(iodata).
+
+%% A small part of beam_asm.
+
+-export([encode/2]).
+
+-spec encode(non_neg_integer(), integer()) -> iodata(). % extra range binary()
+
+encode(Tag, N) when Tag >= 0, N < 0 ->
+ encode1(Tag, negative_to_bytes(N));
+encode(Tag, N) when Tag >= 0, N < 16 ->
+ (N bsl 4) bor Tag; % not in the specification
+encode(Tag, N) when Tag >= 0, N < 16#800 ->
+ [((N bsr 3) band 2#11100000) bor Tag bor 2#00001000, N band 16#ff];
+encode(Tag, N) when Tag >= 0 ->
+ encode1(Tag, to_bytes(N)).
+
+encode1(Tag, Bytes) ->
+ case iolist_size(Bytes) of
+ Num when 2 =< Num, Num =< 8 ->
+ [((Num-2) bsl 5) bor 2#00011000 bor Tag| Bytes];
+ Num when 8 < Num ->
+ [2#11111000 bor Tag, encode(0, Num-9)| Bytes]
+ end.
+
+to_bytes(N) ->
+ Bin = binary:encode_unsigned(N),
+ case Bin of
+ <<0:1,_/bits>> -> Bin;
+ <<1:1,_/bits>> -> [0,Bin]
+ end.
+
+negative_to_bytes(N) when N >= -16#8000 ->
+ <<N:16>>;
+negative_to_bytes(N) ->
+ Bytes = byte_size(binary:encode_unsigned(-N)),
+ Bin = <<N:Bytes/unit:8>>,
+ case Bin of
+ <<0:1,_/bits>> -> [16#ff,Bin];
+ <<1:1,_/bits>> -> Bin
+ end.
diff --git a/lib/dialyzer/test/specdiffs_SUITE_data/src/iolist.erl b/lib/dialyzer/test/specdiffs_SUITE_data/src/iolist.erl
new file mode 100644
index 0000000000..7cceeda24e
--- /dev/null
+++ b/lib/dialyzer/test/specdiffs_SUITE_data/src/iolist.erl
@@ -0,0 +1,41 @@
+-module(iolist).
+
+%% A small part of beam_asm.
+
+-export([encode/2]).
+
+-spec encode(non_neg_integer(), integer()) -> iolist().
+
+encode(Tag, N) when Tag >= 0, N < 0 ->
+ encode1(Tag, negative_to_bytes(N));
+encode(Tag, N) when Tag >= 0, N < 16 ->
+ (N bsl 4) bor Tag; % not in the specification
+encode(Tag, N) when Tag >= 0, N < 16#800 ->
+ [((N bsr 3) band 2#11100000) bor Tag bor 2#00001000, N band 16#ff];
+encode(Tag, N) when Tag >= 0 ->
+ encode1(Tag, to_bytes(N)).
+
+encode1(Tag, Bytes) ->
+ case iolist_size(Bytes) of
+ Num when 2 =< Num, Num =< 8 ->
+ [((Num-2) bsl 5) bor 2#00011000 bor Tag| Bytes];
+ Num when 8 < Num ->
+ [2#11111000 bor Tag, encode(0, Num-9)| Bytes]
+ end.
+
+to_bytes(N) ->
+ Bin = binary:encode_unsigned(N),
+ case Bin of
+ <<0:1,_/bits>> -> Bin;
+ <<1:1,_/bits>> -> [0,Bin]
+ end.
+
+negative_to_bytes(N) when N >= -16#8000 ->
+ <<N:16>>;
+negative_to_bytes(N) ->
+ Bytes = byte_size(binary:encode_unsigned(-N)),
+ Bin = <<N:Bytes/unit:8>>,
+ case Bin of
+ <<0:1,_/bits>> -> [16#ff,Bin];
+ <<1:1,_/bits>> -> Bin
+ end.
diff --git a/lib/edoc/src/edoc_doclet.erl b/lib/edoc/src/edoc_doclet.erl
index f55cffe158..6cb3095507 100644
--- a/lib/edoc/src/edoc_doclet.erl
+++ b/lib/edoc/src/edoc_doclet.erl
@@ -40,7 +40,7 @@
-import(edoc_report, [report/2, warning/2]).
-%% @headerfile "edoc_doclet.hrl"
+%% @headerfile "../include/edoc_doclet.hrl"
-include("../include/edoc_doclet.hrl").
-define(EDOC_APP, edoc).
diff --git a/lib/erl_docgen/doc/src/notes.xml b/lib/erl_docgen/doc/src/notes.xml
index 2652b4b0c8..f75d2af5c4 100644
--- a/lib/erl_docgen/doc/src/notes.xml
+++ b/lib/erl_docgen/doc/src/notes.xml
@@ -31,7 +31,22 @@
</header>
<p>This document describes the changes made to the <em>erl_docgen</em> application.</p>
- <section><title>Erl_Docgen 0.7.2</title>
+ <section><title>Erl_Docgen 0.7.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p> Update makefile so db_funcs.xsl is a part of the
+ installed application. </p>
+ <p>
+ Own Id: OTP-15091</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erl_Docgen 0.7.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
<list>
diff --git a/lib/erl_docgen/priv/xsl/Makefile b/lib/erl_docgen/priv/xsl/Makefile
index d0dd227169..d381bd4cf7 100644
--- a/lib/erl_docgen/priv/xsl/Makefile
+++ b/lib/erl_docgen/priv/xsl/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2009-2016. All Rights Reserved.
+# Copyright Ericsson AB 2009-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -44,7 +44,8 @@ XSL_FILES = \
db_html.xsl \
db_html_params.xsl \
db_man.xsl \
- db_eix.xsl
+ db_eix.xsl \
+ db_funcs.xsl
# ----------------------------------------------------
diff --git a/lib/erl_docgen/vsn.mk b/lib/erl_docgen/vsn.mk
index 95b2329ac5..a556b73103 100644
--- a/lib/erl_docgen/vsn.mk
+++ b/lib/erl_docgen/vsn.mk
@@ -1 +1 @@
-ERL_DOCGEN_VSN = 0.7.2
+ERL_DOCGEN_VSN = 0.7.3
diff --git a/lib/erl_interface/src/prog/erl_call.c b/lib/erl_interface/src/prog/erl_call.c
index 7577a07a3e..66265b3e6a 100644
--- a/lib/erl_interface/src/prog/erl_call.c
+++ b/lib/erl_interface/src/prog/erl_call.c
@@ -517,6 +517,15 @@ int erl_call(int argc, char **argv)
}
}
+
+ /*
+ * If we loaded any module source code, we can free the buffer
+ * now. This buffer was allocated in read_stdin().
+ */
+ if (module != NULL) {
+ free(module);
+ }
+
/*
* Eval the Erlang functions read from stdin/
*/
@@ -795,8 +804,6 @@ static int get_module(char **mbuf, char **mname)
*mname = (char *) ei_chk_calloc(i+1, sizeof(char));
memcpy(*mname, start, i);
}
- if (*mbuf)
- free(*mbuf); /* Allocated in read_stdin() */
return len;
diff --git a/lib/hipe/cerl/erl_bif_types.erl b/lib/hipe/cerl/erl_bif_types.erl
index fe6ab0659c..48ce641ab9 100644
--- a/lib/hipe/cerl/erl_bif_types.erl
+++ b/lib/hipe/cerl/erl_bif_types.erl
@@ -665,6 +665,8 @@ type(erlang, is_map, 1, Xs, Opaques) ->
check_guard(X, fun (Y) -> t_is_map(Y, Opaques) end,
t_map(), Opaques) end,
strict(erlang, is_map, 1, Xs, Fun, Opaques);
+type(erlang, is_map_key, 2, Xs, Opaques) ->
+ type(maps, is_key, 2, Xs, Opaques);
type(erlang, is_number, 1, Xs, Opaques) ->
Fun = fun (X) ->
check_guard(X, fun (Y) -> t_is_number(Y, Opaques) end,
@@ -2374,6 +2376,8 @@ arg_types(erlang, is_list, 1) ->
[t_any()];
arg_types(erlang, is_map, 1) ->
[t_any()];
+arg_types(erlang, is_map_key, 2) ->
+ [t_any(), t_map()];
arg_types(erlang, is_number, 1) ->
[t_any()];
arg_types(erlang, is_pid, 1) ->
@@ -2396,7 +2400,7 @@ arg_types(erlang, map_size, 1) ->
[t_map()];
%% Guard bif, needs to be here.
arg_types(erlang, map_get, 2) ->
- [t_map(), t_any()];
+ [t_any(), t_map()];
arg_types(erlang, make_fun, 3) ->
[t_atom(), t_atom(), t_arity()];
arg_types(erlang, make_tuple, 2) ->
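Both new clauses follow the guard-BIF argument order, key first and map second; the arg_types fix for map_get above corrects exactly this. For reference:

    M = #{a => 1},
    true  = erlang:is_map_key(a, M),   % typed above via maps:is_key/2
    1     = erlang:map_get(a, M),
    false = erlang:is_map_key(b, M).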
diff --git a/lib/hipe/cerl/erl_types.erl b/lib/hipe/cerl/erl_types.erl
index a91da97f93..9abb4d31d9 100644
--- a/lib/hipe/cerl/erl_types.erl
+++ b/lib/hipe/cerl/erl_types.erl
@@ -108,13 +108,14 @@
t_is_bitstr/1, t_is_bitstr/2,
t_is_bitwidth/1,
t_is_boolean/1, t_is_boolean/2,
- %% t_is_byte/1,
- %% t_is_char/1,
+ t_is_byte/1,
+ t_is_char/1,
t_is_cons/1, t_is_cons/2,
t_is_equal/2,
t_is_fixnum/1,
t_is_float/1, t_is_float/2,
t_is_fun/1, t_is_fun/2,
+ t_is_identifier/1,
t_is_instance/2,
t_is_integer/1, t_is_integer/2,
t_is_list/1,
@@ -216,19 +217,8 @@
cache__new/0
]).
-%%-define(DO_ERL_TYPES_TEST, true).
-compile({no_auto_import,[min/2,max/2,map_get/2]}).
--ifdef(DO_ERL_TYPES_TEST).
--export([test/0]).
--else.
--define(NO_UNUSED, true).
--endif.
-
--ifndef(NO_UNUSED).
--export([t_is_identifier/1]).
--endif.
-
-export_type([erl_type/0, opaques/0, type_table/0,
var_table/0, cache/0]).
@@ -1190,12 +1180,10 @@ is_fun(_) -> false.
t_identifier() ->
?identifier(?any).
--ifdef(DO_ERL_TYPES_TEST).
--spec t_is_identifier(erl_type()) -> erl_type().
+-spec t_is_identifier(erl_type()) -> boolean().
t_is_identifier(?identifier(_)) -> true;
t_is_identifier(_) -> false.
--endif.
%%------------------------------------
@@ -1366,7 +1354,6 @@ is_integer1(_) -> false.
t_byte() ->
?byte.
--ifdef(DO_ERL_TYPES_TEST).
-spec t_is_byte(erl_type()) -> boolean().
t_is_byte(?int_range(neg_inf, _)) -> false;
@@ -1376,7 +1363,6 @@ t_is_byte(?int_range(From, To))
t_is_byte(?int_set(Set)) ->
(set_min(Set) >= 0) andalso (set_max(Set) =< ?MAX_BYTE);
t_is_byte(_) -> false.
--endif.
%%------------------------------------
@@ -5693,173 +5679,3 @@ family(L) ->
var_table__new() ->
maps:new().
-
-%%=============================================================================
-%% Consistency-testing function(s) below
-%%=============================================================================
-
--ifdef(DO_ERL_TYPES_TEST).
-
-test() ->
- Atom1 = t_atom(),
- Atom2 = t_atom(foo),
- Atom3 = t_atom(bar),
- true = t_is_atom(Atom2),
-
- True = t_atom(true),
- False = t_atom(false),
- Bool = t_boolean(),
- true = t_is_boolean(True),
- true = t_is_boolean(Bool),
- false = t_is_boolean(Atom1),
-
- Binary = t_binary(),
- true = t_is_binary(Binary),
-
- Bitstr = t_bitstr(),
- true = t_is_bitstr(Bitstr),
-
- Bitstr1 = t_bitstr(7, 3),
- true = t_is_bitstr(Bitstr1),
- false = t_is_binary(Bitstr1),
-
- Bitstr2 = t_bitstr(16, 8),
- true = t_is_bitstr(Bitstr2),
- true = t_is_binary(Bitstr2),
-
- ?bitstr(8, 16) = t_subtract(t_bitstr(4, 12), t_bitstr(8, 12)),
- ?bitstr(8, 16) = t_subtract(t_bitstr(4, 12), t_bitstr(8, 12)),
-
- Int1 = t_integer(),
- Int2 = t_integer(1),
- Int3 = t_integer(16#ffffffff),
- true = t_is_integer(Int2),
- true = t_is_byte(Int2),
- false = t_is_byte(Int3),
- false = t_is_byte(t_from_range(-1, 1)),
- true = t_is_byte(t_from_range(1, ?MAX_BYTE)),
-
- Tuple1 = t_tuple(),
- Tuple2 = t_tuple(3),
- Tuple3 = t_tuple([Atom1, Int1]),
- Tuple4 = t_tuple([Tuple1, Tuple2]),
- Tuple5 = t_tuple([Tuple3, Tuple4]),
- Tuple6 = t_limit(Tuple5, 2),
- Tuple7 = t_limit(Tuple5, 3),
- true = t_is_tuple(Tuple1),
-
- Port = t_port(),
- Pid = t_pid(),
- Ref = t_reference(),
- Identifier = t_identifier(),
- false = t_is_reference(Port),
- true = t_is_identifier(Port),
-
- Function1 = t_fun(),
- Function2 = t_fun(Pid),
- Function3 = t_fun([], Pid),
- Function4 = t_fun([Port, Pid], Pid),
- Function5 = t_fun([Pid, Atom1], Int2),
- true = t_is_fun(Function3),
-
- List1 = t_list(),
- List2 = t_list(t_boolean()),
- List3 = t_cons(t_boolean(), List2),
- List4 = t_cons(t_boolean(), t_atom()),
- List5 = t_cons(t_boolean(), t_nil()),
- List6 = t_cons_tl(List5),
- List7 = t_sup(List4, List5),
- List8 = t_inf(List7, t_list()),
- List9 = t_cons(),
- List10 = t_cons_tl(List9),
- true = t_is_boolean(t_cons_hd(List5)),
- true = t_is_list(List5),
- false = t_is_list(List4),
-
- Product1 = t_product([Atom1, Atom2]),
- Product2 = t_product([Atom3, Atom1]),
- Product3 = t_product([Atom3, Atom2]),
-
- Union1 = t_sup(Atom2, Atom3),
- Union2 = t_sup(Tuple2, Tuple3),
- Union3 = t_sup(Int2, Atom3),
- Union4 = t_sup(Port, Pid),
- Union5 = t_sup(Union4, Int1),
- Union6 = t_sup(Function1, Function2),
- Union7 = t_sup(Function4, Function5),
- Union8 = t_sup(True, False),
- true = t_is_boolean(Union8),
- Union9 = t_sup(Int2, t_integer(2)),
- true = t_is_byte(Union9),
- Union10 = t_sup(t_tuple([t_atom(true), ?any]),
- t_tuple([t_atom(false), ?any])),
-
- ?any = t_sup(Product3, Function5),
-
- Atom3 = t_inf(Union3, Atom1),
- Union2 = t_inf(Union2, Tuple1),
- Int2 = t_inf(Int1, Union3),
- Union4 = t_inf(Union4, Identifier),
- Port = t_inf(Union5, Port),
- Function4 = t_inf(Union7, Function4),
- ?none = t_inf(Product2, Atom1),
- Product3 = t_inf(Product1, Product2),
- Function5 = t_inf(Union7, Function5),
- true = t_is_byte(t_inf(Union9, t_number())),
- true = t_is_char(t_inf(Union9, t_number())),
-
- io:format("3? ~p ~n", [?int_set([3])]),
-
- RecDict = dict:store({foo, 2}, [bar, baz], dict:new()),
- Record1 = t_from_term({foo, [1,2], {1,2,3}}),
-
- Types = [
- Atom1,
- Atom2,
- Atom3,
- Binary,
- Int1,
- Int2,
- Tuple1,
- Tuple2,
- Tuple3,
- Tuple4,
- Tuple5,
- Tuple6,
- Tuple7,
- Ref,
- Port,
- Pid,
- Identifier,
- List1,
- List2,
- List3,
- List4,
- List5,
- List6,
- List7,
- List8,
- List9,
- List10,
- Function1,
- Function2,
- Function3,
- Function4,
- Function5,
- Product1,
- Product2,
- Record1,
- Union1,
- Union2,
- Union3,
- Union4,
- Union5,
- Union6,
- Union7,
- Union8,
- Union10,
- t_inf(Union10, t_tuple([t_atom(true), t_integer()]))
- ],
- io:format("~p\n", [[t_to_string(X, RecDict) || X <- Types]]).
-
--endif.
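With the DO_ERL_TYPES_TEST scaffolding removed, t_is_byte/1, t_is_char/1 and t_is_identifier/1 become ordinary exported predicates. A few values taken from the deleted self-test above (sketch only):

    true  = erl_types:t_is_byte(erl_types:t_from_range(1, 255)),
    false = erl_types:t_is_byte(erl_types:t_from_range(-1, 1)),
    true  = erl_types:t_is_identifier(erl_types:t_port()),
    false = erl_types:t_is_identifier(erl_types:t_atom()).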
diff --git a/lib/hipe/main/hipe.erl b/lib/hipe/main/hipe.erl
index 97814fe217..ac2e6c1e3b 100644
--- a/lib/hipe/main/hipe.erl
+++ b/lib/hipe/main/hipe.erl
@@ -852,8 +852,8 @@ finalize_fun_sequential({MFA, Icode}, Opts, Servers) ->
print_crash_message(What, Error, StackTrace) ->
StackFun = fun(_,_,_) -> false end,
FormatFun = fun (Term, _) -> io_lib:format("~p", [Term]) end,
- StackTrace = lib:format_stacktrace(1, StackTrace,
- StackFun, FormatFun),
+ StackTraceS = erl_error:format_stacktrace(1, StackTrace,
+ StackFun, FormatFun),
WhatS = case What of
{M,F,A} -> io_lib:format("~w:~w/~w", [M,F,A]);
Mod -> io_lib:format("~w", [Mod])
@@ -862,7 +862,7 @@ print_crash_message(What, Error, StackTrace) ->
"while compiling ~s~n"
"crash reason: ~p~n"
"~s~n",
- [WhatS, Error, StackTrace]).
+ [WhatS, Error, StackTraceS]).
pp_server_start(Opts) ->
set_architecture(Opts),
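The replacement call keeps the same arguments lib:format_stacktrace/4 took; only the module changed. A self-contained sketch of the pattern (assumes an OTP 21 shell; the error term and variable names are made up for illustration):

    StackFun  = fun(_Mod, _Fun, _Arity) -> false end,             % never elide frames
    FormatFun = fun(Term, _Indent) -> io_lib:format("~p", [Term]) end,
    try error(example_crash)
    catch error:_Reason:Stack ->
        io:format("~s~n",
                  [erl_error:format_stacktrace(1, Stack, StackFun, FormatFun)])
    end.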
diff --git a/lib/hipe/opt/hipe_schedule.erl b/lib/hipe/opt/hipe_schedule.erl
deleted file mode 100644
index 0f25940e3d..0000000000
--- a/lib/hipe/opt/hipe_schedule.erl
+++ /dev/null
@@ -1,1483 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% INSTRUCTION SCHEDULER
-%%
-%% This is a basic ILP cycle scheduler:
-%% * set cycle = 0
-%% * while ready[cycle] nonempty do
-%% - take x with greatest priority from ready[cycle]
-%% - try to schedule x;
-%% * if scheduling x was possible,
-%% - reserve resources
-%% - add x to schedule and delete x from dag
-%% - update earliest-time for all successor nodes
-%% as max[earliest[y],cycle+latency[x]]
-%% - if some node y now has no predecessors,
-%% add y to ready[earliest[y]]
-%% * if it was impossible, put x in ready[cycle+1]
-%% (= try again)
-%%
-%% We use the following data structures:
-%% 1. all nodes are numbered and indices used as array keys
-%% 2. priority per node can be computed statically or dynamically
-%% * statically: before scheduling, each node gets a priority value
-%% * dynamically: at each cycle, compute priorities for all ready nodes
-%% 3. earliest: earliest cycle of issue, starts at 0
-%% and is updated as predecessors issue
-%% 4. predecessors: number of predecessors (0 = ready to issue)
-%% 5. successors: list of {Latency,NodeID}
-%% 6. ready: an array indexed by cycle-time (integer), where
-%% ready nodes are kept.
-%% 7. resources: a resource representation (ADT) that answers
-%% certain queries, e.g., "can x be scheduled this cycle"
-%% and "reserve resources for x".
-%% 8. schedule: list of scheduled instructions {Instr,Cycle}
-%% in the order of issue
-%% 9. instructions: maps IDs back to instructions
-%%
-%% Inputs:
-%% - a list of {ID,Node} pairs (where ID is a unique key)
-%% - a dependence list {ID0,Latency,ID1}, which is used to
-%% build the DAG.
-%%
-%% Note that there is some leeway in how things are represented
-%% from here.
-%%
-%% MODIFICATIONS:
-%% - Some basic blocks are not worth scheduling (e.g., GC save/restore code)
-%% yet are pretty voluminous. How do we skip them?
-%% - Scheduling should be done at finalization time: when basic block is
-%% linearized and is definitely at Sparc assembly level, THEN reorder
-%% stuff.
-
--module(hipe_schedule).
--export([cfg/1, est_cfg/1, delete_node/5]).
-
--include("../sparc/hipe_sparc.hrl").
-
-%%-define(debug1,true).
-
--define(debug2(Str,Args),ok).
-%%-define(debug2(Str,Args),io:format(Str,Args)).
-
--define(debug3(Str,Args),ok).
-%%-define(debug3(Str,Args),io:format(Str,Args)).
-
--define(debug4(Str,Args),ok).
-%%-define(debug4(Str,Args),io:format(Str,Args)).
-
--define(debug5(Str,Args),ok).
-%%-define(debug5(Str,Args),io:format(Str,Args)).
-
--define(debug(Str,Args),ok).
-%%-define(debug(Str,Args),io:format(Str,Args)).
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cfg
-%% Argument : CFG - the control flow graph
-%% Returns : CFG - A new cfg with scheduled blocks
-%% Description : Takes each basic block and schedules them one by one.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cfg(CFG) ->
- ?debug3("CFG: ~n~p", [CFG]),
- update_all( [ {L,
- hipe_bb:mk_bb(
- block(L,hipe_bb:code(hipe_sparc_cfg:bb(CFG,L))) )}
- || L <- hipe_sparc_cfg:labels(CFG) ], CFG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : update_all
-%% Argument : Blocks - [{Label, Block}] , a list with labels and new code
-%% used for updating the old CFG.
-%% CFG - The old controlflow graph
-%% Returns : An updated controlflow graph.
-%% Description : Just swappes the basic blocks in the CFG to the scheduled one.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-update_all([],CFG) -> CFG;
-update_all([{L,NewB}|Ls],CFG) ->
- update_all(Ls,hipe_sparc_cfg:bb_add(CFG,L,NewB)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-est_cfg(CFG) ->
- update_all([ {L, hipe_bb:mk_bb(est_block(hipe_bb:code(hipe_sparc_cfg:bb(CFG,L))))}
- || L <- hipe_sparc_cfg:labels(CFG) ], CFG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% Provides an estimation of how quickly a block will execute.
-%% This is done by chaining all instructions in sequential order
-%% by 0-cycle dependences (which means they will never be reordered),
-%% then scheduling the mess.
-
-est_block([]) -> [];
-est_block([I]) -> [I];
-est_block(Blk) ->
- {IxBlk,DAG} = est_deps(Blk),
- Sch = bb(IxBlk,DAG),
- separate_block(Sch,IxBlk).
-
-est_deps(Blk) ->
- IxBlk = indexed_bb(Blk),
- DAG = deps(IxBlk),
- {IxBlk, chain_instrs(IxBlk,DAG)}.
-
-chain_instrs([{N,_}|Xs],DAG) ->
- chain_i(N,Xs,DAG).
-
-chain_i(_,[],DAG) -> DAG;
-chain_i(N,[{M,_}|Xs],DAG) ->
- NewDAG = dep_arc(N,zero_latency(),M,DAG),
- chain_i(M,Xs,NewDAG).
-
-zero_latency() -> 0.
-
-lookup_instr([{N,I}|_], N) -> I;
-lookup_instr([_|Xs], N) -> lookup_instr(Xs, N).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : block
-%% Argument : Instrs - [Instr], list of all the instructions in a basic
-%% block.
-%% Returns : A new scheduled block
-%% Description : Schedule a basic block
-%%
-%% Note: does not consider delay slots!
-%% (another argument for using only annulled delay slots?)
-%% * how do we add delay slots? somewhat tricky to
-%% reconcile with the sort of scheduling we consider.
-%% (as-early-as-possible)
-%% => rewrite scheduler into as-late-as-possible?
-%% (=> just reverse the dependence arcs??)
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-%% Don't fire up the scheduler if there's no work to do.
-block(_, []) ->
- [];
-block(_L, [I]) ->
- case hipe_sparc:is_any_branch(I) of
- true -> [hipe_sparc:nop_create(), I];
- false -> [I]
- end;
-block(_L, Blk) ->
- IxBlk = indexed_bb(Blk),
- case IxBlk of
- [{_N, I}] -> % comments and nops may have been removed.
- case hipe_sparc:is_any_branch(I) of
- true -> [hipe_sparc:nop_create(), I];
- false -> [I]
- end;
- _ ->
- Sch = bb(IxBlk, {DAG, _Preds} = deps(IxBlk)),
- {NewSch, NewIxBlk} = fill_delays(Sch, IxBlk, DAG),
- X = finalize_block(NewSch, NewIxBlk),
- debug1_stuff(Blk, DAG, IxBlk, Sch, X),
- X
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : fill_delays
-%% Argument : Sch - List of {{cycle, C}, {node, N}} : C = current cycle
-%% N = node index
-%% IxBlk - Indexed block [{N, Instr}]
-%% DAG - Dependence graph
-%% Returns : {NewSch, NewIxBlk} - vector with new schedule and vector
-%% with {N, Instr}
-%% Description : Goes through the schedule from back to front looking for
-%% branches/jumps. If one is found fill_del tries to find
-%% an instr to fill the delayslot.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-fill_delays(Sch, IxBlk, DAG) ->
- NewIxBlk = hipe_vectors:list_to_vector(IxBlk),
- %% NewSch = hipe_vectors:list_to_vector(Sch),
- NewSch = fill_del(length(Sch), hipe_vectors:list_to_vector(Sch),
- NewIxBlk, DAG),
- {NewSch, NewIxBlk}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : fill_del
-%% Argument : N - current index in the schedule
-%% Sch - schedule
-%% IxBlk - indexed block
-%% DAG - dependence graph
-%% Returns : Sch - New schedule with possibly a delay instr in the last
-%% position.
-%% Description : If a call/jump is found fill_branch_delay/fill_call_delay
-%% is called to find a delay-filler.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-fill_del(N, Sch, _IxBlk, _DAG) when N < 1 -> Sch;
-fill_del(N, Sch, IxBlk, DAG) ->
- Index = get_index(Sch, N),
- ?debug2("Index for ~p: ~p~nInstr: ~p~n",
- [N, Index, get_instr(IxBlk, Index)]),
- NewSch =
- case get_instr(IxBlk, Index) of
- #call_link{} ->
- fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
- #jmp_link{} ->
- fill_call_delay(N - 1, N, Sch, IxBlk, DAG);
- #jmp{} ->
- fill_call_delay(N - 1, N, Sch, IxBlk, DAG);
- #b{} ->
- fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
- #br{} ->
- fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
- #goto{} ->
- fill_branch_delay(N - 1, N, Sch, IxBlk, DAG);
- _Other ->
- Sch
- end,
- NewSch.
- %% fill_del(N - 1, NewSch, IxBlk, DAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : fill_call_delay
-%% Argument : Cand - index in schedule of delay-candidate
-%% Call - index in schedule of call
-%% Sch - schedule vector: < {{cycle,Ci},{node,Nj}}, ... >
-%% IxBlk - block vector: < {N, Instr1}, {N+1, Instr2} ... >
-%% DAG - dependence graph
-%% Returns : Sch - new updated schedule.
-%% Description : Searches backwards through the schedule trying to find an
-%% instr without conflicts with the Call-instr.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-fill_call_delay(Cand, _Call, Sch, _IxBlk, _DAG) when Cand < 1 -> Sch;
-fill_call_delay(Cand, Call, Sch, IxBlk, DAG) ->
- CandIndex = get_index(Sch, Cand),
- CallIndex = get_index(Sch, Call),
- CandI = get_instr(IxBlk, CandIndex),
- case move_or_alu(CandI) of
- true ->
- case single_depend(CandIndex, CallIndex, DAG) of
- false -> % Other instrs depends on Cand ...
- fill_call_delay(Cand - 1, Call, Sch, IxBlk, DAG);
-
- true ->
- CallI = get_instr(IxBlk, CallIndex),
-
- CandDefs = ordsets:from_list(hipe_sparc:defines(CandI)),
- %% CandUses = ordsets:from_list(hipe_sparc:uses(CandI)),
- %% CallDefs = ordsets:from_list(hipe_sparc:defines(CallI)),
- CallUses = ordsets:from_list(hipe_sparc:uses(CallI)),
-
- Args = case CallI of
- #jmp_link{} ->
- ordsets:from_list(
- hipe_sparc:jmp_link_args(CallI));
- #jmp{} ->
- ordsets:from_list(hipe_sparc:jmp_args(CallI));
- #call_link{} ->
- ordsets:from_list(
- hipe_sparc:call_link_args(CallI))
- end,
- CallUses2 = ordsets:subtract(CallUses, Args),
- Conflict = ordsets:intersection(CandDefs, CallUses2),
- %% io:format("single_depend -> true:~n ~p~n, ~p~n,~p~n",[CandI,CallI,DAG]),
- %% io:format("Cand = ~p~nCall = ~p~n",[CandI,CallI]),
- %% io:format("CandDefs = ~p~nCallDefs = ~p~n",[CandDefs,CallDefs]),
- %% io:format("CandUses = ~p~nCallUses = ~p~n",[CandUses,CallUses]),
- %% io:format("Args = ~p~nCallUses2 = ~p~n",[Args,CallUses2]),
- %% io:format("Conflict = ~p~n",[Conflict]),
-
- case Conflict of
- [] -> % No conflicts ==> Cand can fill delayslot after Call
- update_schedule(Cand, Call, Sch);
- _ -> % Conflict: try with preceeding instrs
- fill_call_delay(Cand - 1, Call, Sch, IxBlk, DAG)
- end
- end;
- false ->
- fill_call_delay(Cand - 1, Call, Sch, IxBlk, DAG)
- end.
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : fill_branch_delay
-%% Argument : Cand - index in schedule of delay-candidate
-%% Branch - index in schedule of branch
-%% Sch - schedule
-%% IxBlk - indexed block
-%% DAG - dependence graph
-%% Returns : Sch - new updated schedule.
-%% Description : Searches backwards through the schedule trying to find an
-%% instr without conflicts with the Branch-instr.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-fill_branch_delay(Cand, _Br, Sch, _IxBlk, _DAG) when Cand < 1 -> Sch;
-fill_branch_delay(Cand, Br, Sch, IxBlk, DAG) ->
- CandIndex = get_index(Sch, Cand),
- BrIndex = get_index(Sch, Br),
- CandI = get_instr(IxBlk, CandIndex),
- case move_or_alu(CandI) of
- true ->
- case single_depend(CandIndex, BrIndex, DAG) of
- false -> % Other instrs depends on Cand ...
- fill_branch_delay(Cand - 1, Br, Sch, IxBlk, DAG);
-
- true ->
- BrI = get_instr(IxBlk, BrIndex),
- CandDefs = ordsets:from_list(hipe_sparc:defines(CandI)),
- %% CandUses = ordsets:from_list(hipe_sparc:uses(CandI)),
- %% BrDefs = ordsets:from_list(hipe_sparc:defines(BrI)),
- BrUses = ordsets:from_list(hipe_sparc:uses(BrI)),
-
- Conflict = ordsets:intersection(CandDefs, BrUses),
- %% io:format("single_depend -> true: ~p~n, ~p~n,~p~n", [CandI, BrI, DAG]),
- %% io:format("Cand = ~p~nBr = ~p~n",[CandI,BrI]),
- %% io:format("CandDefs = ~p~nBrDefs = ~p~n",[CandDefs,BrDefs]),
- %% io:format("CandUses = ~p~nBrUses = ~p~n",[CandUses,BrUses]),
- %% io:format("Conflict = ~p~n",[Conflict]);
-
- case Conflict of
- [] -> % No conflicts ==>
- % Cand can fill delayslot after Branch
- update_schedule(Cand, Br, Sch);
- _ -> % Conflict: try with preceeding instrs
- fill_branch_delay(Cand - 1, Br, Sch, IxBlk, DAG)
- end
- end;
- false ->
- fill_branch_delay(Cand - 1, Br, Sch, IxBlk, DAG)
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : update_schedule
-%% Argument : From - the position from where to switch indexes in Sch
-%% To - the position to where to switch indexes in Sch
-%% Sch - schedule
-%% Returns : Sch - an updated schedule
-%% Description : If From is the delay-filler and To is the Call/jump, the
-%% schedule is updated so From gets index To, To gets index
-%% To - 1, and the nodes between From and To gets old_index - 1.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-update_schedule(To, To, Sch) ->
- {{cycle, C}, {node, _N} = Node} = hipe_vectors:get(Sch, To-1),
- hipe_vectors:set(Sch, To-1, {{cycle, C+1}, Node});
-update_schedule(From, To, Sch) ->
- Temp = hipe_vectors:get(Sch, From-1),
- Sch1 = hipe_vectors:set(Sch, From-1, hipe_vectors:get(Sch, From)),
- update_schedule(From + 1, To, hipe_vectors:set(Sch1, From, Temp)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : single_depend
-%% Argument : N - Index of the delayslot candidate
-%% M - Index of the node that N possibly has a single
-%% depend to.
-%% DAG - The dependence graph
-%% Returns : true if no other nodes than N os depending on N
-%% Description : Checks that no other nodes than M depends on N and that the
-%% latency between them is zero or 1.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-single_depend(N, M, DAG) ->
- Deps = hipe_vectors:get(DAG, N-1),
- single_depend(M, Deps).
-
-single_depend(_N, []) -> true;
-single_depend(N, [{0, N}]) -> true;
-single_depend(N, [{1, N}]) -> true;
-single_depend(_N, [{_Lat, _}|_]) -> false.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : get_index
-%% Argument : Sch - schedule
-%% N - index in schedule
-%% Returns : Index - index of the node
-%% Description : Returns the index of the node on position N in the schedule.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-get_index(Sch, N) ->
- {{cycle, _C}, {node, Index}} = hipe_vectors:get(Sch,N-1),
- Index.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : get_instr
-%% Argument : IxBlk - indexed block
-%% N - index in block
-%% Returns : Instr
-%% Description : Returns the instr on position N in the indexed block.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-get_instr(IxBlk, N) ->
- {_, Instr} = hipe_vectors:get(IxBlk, N-1),
- Instr.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : get_instr
-%% Argument : Sch - schedule
-%% IxBlk - indexed block
-%% N - index in schedule
-%% Returns : Instr
-%% Description : Returns the instr on position N in the schedule.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-get_instr(Sch, IxBlk, N) ->
- {{cycle, _C}, {node, Index}} = hipe_vectors:get(Sch, N-1),
- {_, Instr} = hipe_vectors:get(IxBlk, Index-1),
- Instr.
-
-separate_block(Sch,IxBlk) ->
- sep_comments([{C,lookup_instr(IxBlk,N)} || {{cycle,C},{node,N}} <- Sch]).
-
-sep_comments([]) -> [];
-sep_comments([{C,I}|Xs]) ->
- [hipe_sparc:comment_create({cycle,C}), I | sep_comments(Xs,C)].
-
-sep_comments([], _) -> [];
-sep_comments([{C1,I}|Xs], C0) ->
- if
- C1 > C0 ->
- [hipe_sparc:comment_create({cycle,C1}),I|sep_comments(Xs,C1)];
- true ->
- [I|sep_comments(Xs, C0)]
- end.
-
-finalize_block(Sch, IxBlk) ->
- ?debug5("Sch: ~p~nIxBlk: ~p~n",[Sch,IxBlk]),
- finalize_block(1, hipe_vectors:size(Sch), 1, Sch, IxBlk, []).
-
-finalize_block(N, End, _C, Sch, IxBlk, _Instrs) when N =:= End - 1 ->
- NextLast = get_instr(Sch, IxBlk, N),
- Last = get_instr(Sch, IxBlk, End),
- ?debug5("NextLast: ~p~nLast: ~p~n",[NextLast,Last]),
- case hipe_sparc:is_any_branch(Last) of
- true -> % Couldn't fill delayslot ==> add NOP
- [NextLast , hipe_sparc:nop_create(), Last];
- false -> % Last is a delayslot-filler ==> change order...
- [Last, NextLast]
- end;
-finalize_block(N, End, C0, Sch, IxBlk, Instrs) ->
- {{cycle, _C1}, {node, _M}} = hipe_vectors:get(Sch, N-1),
- Instr = get_instr(Sch, IxBlk, N),
- ?debug5("Instr: ~p~n~n",[Instr]),
- [Instr | finalize_block(N + 1, End, C0, Sch, IxBlk, Instrs)].
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : bb
-%% Argument : IxBlk - indexed block
-%% DAG - {Dag, Preds} where Dag is dependence graph and
-%% Preds is number of predecessors for each node.
-%% Returns : Sch
-%% Description : Initializes earliest-list, ready-list, priorities, resources
-%% and so on, and calls the cycle_sched which does the scheduling
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-bb(IxBlk,DAG) ->
- bb(length(IxBlk), IxBlk, DAG).
-
-bb(N,IxBlk,{DAG, Preds}) ->
- Earliest = init_earliest(N),
- BigArray = N*10, % "nothing" is this big :-)
- Ready = hipe_schedule_prio:init_ready(BigArray,Preds),
- I_res = init_instr_resources(N, IxBlk),
-
- Prio = hipe_schedule_prio:init_instr_prio(N,DAG),
- Rsrc = init_resources(BigArray),
- ?debug4("I_res: ~n~p~nPrio: ~n~p~nRsrc: ~n~p~n", [I_res,Prio,Rsrc]),
- ?debug('cycle 1~n',[]),
- Sch = empty_schedule(),
- cycle_sched(1,Ready,DAG,Preds,Earliest,Rsrc,I_res,Prio,Sch,N,IxBlk).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cycle_sched
-%% Argument : - C is current cycle, 1 or more.
-%% - Ready is an array (Cycle -> [Node])
-%% yielding the collection of nodes ready to be
-%% scheduled in a cycle.
-%% - DAG is an array (Instr -> [{Latency,Instr}])
-%% represents the dependence DAG.
-%% - Preds is an array (Instr -> NumPreds)
-%% counts the number of predecessors
-%% (0 preds = ready to be scheduled).
-%% - Earl is an array (Instr -> EarliestCycle)
-%% holds the earliest cycle an instruction can be scheduled.
-%% - Rsrc is a 'resource ADT' that handles scheduler resource
-%% management checks whether instruction can be scheduled
-%% this cycle without a stall.
-%% - I_res is an array (Instr -> Required_resources)
-%% holds the resources required to schedule an instruction.
-%% - Sch is the representation of the schedule current schedule.
-%% - N is the number of nodes remaining to be scheduled
-%% tells us when to stop the scheduler.
-%% - IxBlk is the indexed block with instrs
-%% Returns : present schedule
-%% Description : Scheduler main loop.
-%% Pick next ready node in priority order for cycle C until
-%% none remain.
-%% * check each node if it can be scheduled w/o stalling
-%% * if so, schedule it
-%% * otherwise, bump the node to the next cycle
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cycle_sched(C,Ready,DAG,Preds,Earl,Rsrc,I_res,Prio,Sch,N,IxBlk) ->
- case hipe_schedule_prio:next_ready(C,Ready,Prio,IxBlk,DAG,Preds,Earl) of
-% case hipe_schedule_prio:next_ready(C,Ready,Prio,IxBlk) of
- {next,I,Ready1} ->
- ?debug('try ~p~n==> ready = ~p~n',[I, Ready1]),
- case resources_available(C,I,Rsrc,I_res) of
- {yes,NewRsrc} ->
- ?debug(' scheduled~n==> Rscrs = ~p~n',[NewRsrc]),
- NewSch = add_to_schedule(I,C,Sch),
- {ReadyNs,NewDAG,NewPreds,NewEarl} =
- delete_node(C,I,DAG,Preds,Earl),
- ?debug("NewPreds : ~p~n",[Preds]),
- ?debug(' ReadyNs: ~p~n',[ReadyNs]),
- NewReady = hipe_schedule_prio:add_ready_nodes(ReadyNs,
- Ready1),
- ?debug(' New ready: ~p~n',[NewReady]),
- cycle_sched(C,NewReady,NewDAG,NewPreds,NewEarl,
- NewRsrc,I_res,Prio,NewSch,N-1, IxBlk);
- no ->
- ?debug(' resource conflict~n',[]),
- NewReady = hipe_schedule_prio:insert_node(C+1,I,Ready1),
- cycle_sched(C,NewReady,DAG,Preds,Earl,Rsrc,
- I_res,Prio,Sch,N,IxBlk)
- end;
- none -> % schedule next cycle if some node remains
- if
- N > 0 ->
- ?debug('cycle ~p~n',[C+1]),
- cycle_sched(C+1,Ready,DAG,Preds,Earl,
- advance_cycle(Rsrc),
- I_res,Prio,Sch,N, IxBlk);
- true ->
- present_schedule(Sch)
- end
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : init_earliest
-%% Argument : N - number of instrs
-%% Returns :
-%% Description :
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-init_earliest(N) ->
- hipe_vectors:new(N,1).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% Schedule is kept reversed until the end.
-
--define(present_node(I,Cycle),{{cycle,Cycle},{node,I}}).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : empty_schedule
-%% Description : Returns an empty schedule.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-empty_schedule() -> [].
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_to_schedule
-%% Argument : I - instr
-%% Cycle - cycle when I was placed
-%% Sch - schedule
-%% Description : Adds instr to schedule
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_to_schedule(I,Cycle,Sch) ->
- [?present_node(I,Cycle)|Sch].
-
-present_schedule(Sch) -> lists:reverse(Sch).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% Interface to resource manager:
-%%
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : init_resources
-%% Description : Yields a 'big enough' array mapping (Cycle -> Resources);
-%% this array is called Rsrc below.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-init_resources(S) ->
- hipe_target_machine:init_resources(S).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : init_instr_resources
-%% Argument : Nodes - a list of the instructions
-%%               N - the number of nodes
-%% Description : Returns a vector (NodeID -> Resource_requirements)
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-init_instr_resources(N,Nodes) ->
- hipe_target_machine:init_instr_resources(N,Nodes).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : resources_available
-%% Argument : Cycle - the current cycle
-%% I - the current instruction (index = NodeID)
-%% Rsrc - a map (Cycle -> Resources)
-%% I_res - maps (NodeID -> Resource_requirements)
-%% Description : returns {yes,NewResTab} | no
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-resources_available(Cycle,I,Rsrc,I_res) ->
- hipe_target_machine:resources_available(Cycle,I,Rsrc,I_res).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : advance_cycle
-%% Argument : Rsrc - resources
-%% Description : Returns an empty resource state for the next cycle
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-advance_cycle(Rsrc) ->
- hipe_target_machine:advance_cycle(Rsrc).
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : delete_node
-%% Argument : Cycle - current cycle
-%% I - index of instr
-%% DAG - dependence dag
-%% Preds - array with number of predecessors for nodes
-%% Earl - array with earliest-times for nodes
-%% Returns : {ReadyNs,NewDAG,NewPreds,NewEarl}
-%% Description : Deletes node I and updates earliest times for the rest.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-delete_node(Cycle,I,DAG,Preds,Earl) ->
- Succ = hipe_vectors:get(DAG,I-1),
- NewDAG = hipe_vectors:set(DAG,I-1,scheduled), % provides debug 'support'
- {ReadyNs,NewPreds,NewEarl} = update_earliest(Succ,Cycle,Preds,Earl,[]),
- ?debug('earliest after ~p: ~p~n',[I,[{Ix+1,V} || {Ix,V} <- hipe_vectors:list(NewEarl)]]),
- {ReadyNs,NewDAG,NewPreds,NewEarl}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : update_earliest
-%% Argument : Succ - successor list
-%% Cycle - current cycle
-%% Preds - predecessors
-%% Earl - earliest times for nodes
-%%               Ready - accumulator of newly ready nodes as {Earliest, Node}
-%% Returns : {Ready,Preds,Earl}
-%% Description : Updates the earliest times for nodes and updates number of
-%% predecessors for nodes
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-update_earliest([],_Cycle,Preds,Earl,Ready) ->
- {Ready,Preds,Earl};
-update_earliest([{Lat,N}|Xs],Cycle,Preds,Earl,Ready) ->
- Old_earl = hipe_vectors:get(Earl,N-1),
- New_earl = erlang:max(Old_earl,Cycle+Lat),
- NewEarl = hipe_vectors:set(Earl,N-1,New_earl),
- Num_preds = hipe_vectors:get(Preds,N-1),
- NewPreds = hipe_vectors:set(Preds,N-1,Num_preds-1),
- if
- Num_preds =:= 0 ->
- ?debug('inconsistent DAG~n',[]),
- exit({update_earliest,N});
- Num_preds =:= 1 ->
- NewReady = [{New_earl,N}|Ready],
- NewPreds2 = hipe_vectors:set(NewPreds,N-1,0),
- update_earliest(Xs,Cycle,NewPreds2,NewEarl,NewReady);
- is_integer(Num_preds), Num_preds > 1 ->
- update_earliest(Xs,Cycle,NewPreds,NewEarl,Ready)
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% Collect instruction dependences.
-%%
-%% Three forms:
-%% - data/register
-%% * insert RAW, WAR, WAW dependences
-%% - memory
-%% * stores serialize memory references
-%% * alias analysis may allow loads to bypass stores
-%% - control
-%% * unsafe operations are 'trapped' between branches
-%% * branches are ordered
-%%
-%% returns { [{Index,Instr}], DepDAG }
-%% DepDAG is defined below.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : deps
-%% Argument : BB - Basic block
-%% Returns : {IxBB,DAG} - indexed block and dependence graph. DAG consists
-%% of both Dag and Preds, where Preds is number
-%% of predecessors for nodes.
-%% Description : Collect instruction dependences.
-%%
-%% Three forms:
-%% - data/register
-%% * insert RAW, WAR, WAW dependences
-%% - memory
-%% * stores serialize memory references
-%% * alias analysis may allow loads to bypass stores
-%% - control
-%% * unsafe operations are 'trapped' between branches
-%% * branches are ordered
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-deps(IxBB) ->
- N = length(IxBB),
- DAG = empty_dag(N), % The DAG contains both dependence-arcs and
-                        % number of predecessors...
- {_DepTab,DAG1} = dd(IxBB, DAG),
- DAG2 = md(IxBB, DAG1),
- cd(IxBB, DAG2).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : empty_dag
-%% Argument : N - number of nodes
-%% Returns : empty DAG
-%% Description : DAG consists of dependence graph and predecessors
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-empty_dag(N) ->
- {hipe_vectors:new(N, []), hipe_vectors:new(N, 0)}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : indexed_bb
-%% Argument : BB - basic block
-%% Returns : [{N, Instr}]
-%% Description : Assigns an index to every instr of the block and removes
-%%               comments. NOPs are also removed, because if both the
-%%               sparc_schedule and sparc_post_schedule options are used,
-%%               the first pass may add NOPs before the branch, and these
-%%               must be removed before the second scheduling pass.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-indexed_bb(BB) ->
- indexed_bb(BB,1).
-
-indexed_bb([],_N) -> [];
-indexed_bb([X|Xs],N) ->
- case X of
- #comment{} ->
- indexed_bb(Xs,N);
- #nop{} ->
- indexed_bb(Xs,N);
- _Other ->
- [{N,X}|indexed_bb(Xs,N+1)]
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : dep_arc
-%% Argument : N - Current node
-%% Lat - Latency from current node to M
-%% M - The dependent node
-%% DAG - The dependence graph. Consists of both DAG and
-%%                     predecessors
-%% Returns : A new DAG with the arc added and the number of predecessors
-%%           for M increased.
-%% Description : Adds a new arc to the graph; if an older arc already goes
-%%               from N to M, it is replaced by {max(OldLat, NewLat), M}.
-%%               The number of predecessors for node M is increased.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-dep_arc(N, Lat, M, {Dag,Preds}) ->
- OldDeps = hipe_vectors:get(Dag, N-1),
- %% io:format("{OldDeps} = {~p}~n",[OldDeps]),
- {NewDeps, Status} = add_arc(Lat, M, OldDeps),
- %% io:format("{NewDeps, Status} = {~p, ~p}~n",[NewDeps, Status]),
- NewDag = hipe_vectors:set(Dag, N-1, NewDeps),
- NewPreds = case Status of
- added -> % just increase preds if new arc was added
- OldPreds = hipe_vectors:get(Preds, M-1),
- hipe_vectors:set(Preds, M-1, OldPreds + 1);
- non_added ->
- Preds
- end,
- {NewDag, NewPreds}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_arc
-%% Argument : Lat - The latency from current node to To.
-%% To - The instr-id of the node which the dependence goes to
-%%               Arcs - The dependencies that are already in the dep-graph
-%% Returns : {Arcs, added | non_added} - the arc list, sorted by To
-%% Description : A new arc that is added is sorted in the right place, and if
-%% there is already an arc between nodes A and B, the one with
-%% the greatest latency is chosen.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_arc(Lat, To, []) -> {[{Lat, To}], added};
-add_arc(Lat1, To, [{Lat2, To} | Arcs]) ->
- {[{erlang:max(Lat1, Lat2), To} | Arcs], non_added};
-add_arc(Lat1,To1, [{Lat2, To2} | Arcs]) when To1 < To2 ->
- {[{Lat1, To1}, {Lat2, To2} | Arcs], added};
-add_arc(Lat1, To1, [{Lat2, To2} | Arcs]) ->
- {Arcs1, Status} = add_arc(Lat1, To1, Arcs),
- {[{Lat2, To2} | Arcs1], Status}.
-
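A minimal, self-contained sketch (not part of this patch or the original module) that copies the add_arc/3 clauses above into a throwaway module, so the merge rule can be exercised in isolation; the arc lists in demo/0 are made up.

-module(add_arc_sketch).
-export([demo/0]).

%% Copied (modulo whitespace) from add_arc/3 above, for illustration only.
add_arc(Lat, To, []) -> {[{Lat, To}], added};
add_arc(Lat1, To, [{Lat2, To} | Arcs]) ->
  {[{erlang:max(Lat1, Lat2), To} | Arcs], non_added};
add_arc(Lat1, To1, [{Lat2, To2} | Arcs]) when To1 < To2 ->
  {[{Lat1, To1}, {Lat2, To2} | Arcs], added};
add_arc(Lat1, To1, [{Lat2, To2} | Arcs]) ->
  {Arcs1, Status} = add_arc(Lat1, To1, Arcs),
  {[{Lat2, To2} | Arcs1], Status}.

demo() ->
  %% A new target is inserted in To-order and reported as 'added' ...
  {[{1,3}, {2,5}, {4,7}], added} = add_arc(2, 5, [{1,3}, {4,7}]),
  %% ... while an arc to an existing target keeps the larger latency.
  {[{1,3}, {4,7}], non_added} = add_arc(2, 7, [{1,3}, {4,7}]),
  ok.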
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% The register/data dependence DAG of a block is represented
-%% as a mapping (Variable -> {NextWriter,NextReaders})
-%% where NextWriter is a pair {Ix,Type}
-%% and NextReaders is a list of pairs {Ix,Type}.
-%%
-%% Type is used to determine latencies of operations; on the UltraSparc,
-%% latencies of arcs (n -> m) are determined by both n and m. (E.g., if
-%% n is an integer op and m is a store, then latency is 0; if m is an
-%% integer op, it's 1.)
-
-dd([],DAG) -> { empty_deptab(), DAG };
-dd([{N,I}|Is],DAG0) ->
- {DepTab,DAG1} = dd(Is,DAG0),
- add_deps(N,I,DepTab,DAG1).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_deps
-%% Argument : N - current node
-%% Instr - current instr
-%% DepTab - hashtable with {next-writer, next-readers} for reg
-%% DAG - dependence graph
-%% Returns : {DepTab, DAG} - with new values
-%% Description : Adds dependencies for node N to the graph. The registers that
-%% node N defines and uses are used for computing the
-%% dependencies to the following nodes.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_deps(N,Instr,DepTab,DAG) ->
- {Ds,Us} = def_use(Instr),
- Type = dd_type(Instr),
- {DepTab1,DAG1} = add_write_deps(Ds,N,Type,DepTab,DAG),
- add_read_deps(Us,N,Type,DepTab1,DAG1).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Instructions are classified into symbolic categories,
-%% which are subsequently used to determine operation latencies
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-dd_type(Instr) ->
- case Instr of
- #b{} -> branch;
- %% #br{} -> branch;
- #call_link{} -> branch;
- #jmp_link{} -> branch;
- #jmp{} -> branch;
- #goto{} -> branch;
- #load{} -> load;
- #store{} -> store;
- #alu{} -> alu;
- #move{} -> alu;
- #multimove{} ->
- Src = hipe_sparc:multimove_src(Instr),
- Lat = round(length(Src)/2),
- {mmove,Lat};
- #sethi{} -> alu;
- #alu_cc{} -> alu_cc;
- %% #cmov_cc{} -> cmov_cc;
- %% #cmov_r{} -> alu;
- #load_atom{} -> alu;
- #load_address{} -> alu;
- #pseudo_enter{} -> pseudo;
- #pseudo_pop{} -> pseudo;
- #pseudo_return{} -> pseudo;
- #pseudo_spill{} -> pseudo;
- #pseudo_unspill{} -> pseudo
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_write_deps
-%% Argument : Defs - registers that node N defines.
-%% N - current node
-%% Ty - the type of current instr
-%% DepTab - Dependence-table
-%% DAG - The dependence graph.
-%% Returns : {DepTab,DAG} - with new values
-%% Description : Adds dependencies to the graph for nodes that depend on the
-%%               registers that N defines.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_write_deps([],_N,_Ty,DepTab,DAG) -> {DepTab,DAG};
-add_write_deps([D|Ds],N,Ty,DepTab,DAG) ->
- {NewDepTab,NewDAG} = add_write_dep(D,N,Ty,DepTab,DAG),
- add_write_deps(Ds,N,Ty,NewDepTab,NewDAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_write_dep
-%% Description : Updates the dependence table with N as next writer, and
-%% updates the DAG with the dependencies from N to subsequent
-%% nodes.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_write_dep(X,N,Ty,DepTab,DAG) ->
- {NxtWriter,NxtReaders} = lookup(X,DepTab),
- NewDepTab = writer(X,N,Ty,DepTab),
- NewDAG = write_deps(N,Ty,NxtWriter,NxtReaders,DAG),
- {NewDepTab, NewDAG}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : write_deps
-%% Argument : Instr - Current instr
-%% Ty - Type of current instr
-%%               NxtWriter - The node that is the next writer of the register
-%%                           that Instr defines.
-%% NxtReaders - The nodes that are subsequent readers of the
-%% register that N defines.
-%% DAG - The dependence graph
-%% Returns : a new DAG with the new dependence arcs added (via raw_deps)
-%% Description : If a next writer exists, a WAW arc from Instr to it is
-%%               added; raw_deps is then called to add the read-after-write
-%%               arcs to the subsequent readers.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-write_deps(Instr,Ty,NxtWriter,NxtReaders,DAG) ->
- DAG1 = case NxtWriter of
- none ->
- DAG;
- {Instr,_} ->
- DAG;
- {Wr,WrTy} ->
- dep_arc(Instr,
- hipe_target_machine:waw_latency(Ty,WrTy),
- Wr, DAG)
- end,
- raw_deps(Instr,Ty,NxtReaders,DAG1).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : raw_deps
-%% Argument : Instr - current instr
-%% Type - type of instr
-%% Readers - subsequent readers
-%% DAG - dependence graph
-%% Returns : DAG - A new DAG with read-after-write dependencies added
-%% Description : Updates the DAG with the dependence-arcs from Instr to the
-%% subsequent readers, with the appropriate latencies.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-raw_deps(_Instr,_Type,[],DAG) -> DAG;
-raw_deps(Instr,Ty,[{Rd,RdTy}|Xs],DAG) ->
- raw_deps(Instr,Ty,Xs,
- dep_arc(Instr,hipe_target_machine:raw_latency(Ty,RdTy),
- Rd,DAG)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_read_deps
-%% Argument : Uses - The registers that node N uses.
-%% N - Index of the current node.
-%% Ty - Type of current node.
-%% DepTab - Dependence table
-%% DAG - Dependence graph
-%% Returns : {DepTab, DAG} - with updated values.
-%% Description : Adds the dependencies that arise from the registers that N
-%%               uses, i.e., WAR arcs from N to the next writer of each such
-%%               register.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_read_deps([],_N,_Ty,DepTab,DAG) -> {DepTab,DAG};
-add_read_deps([U|Us],N,Ty,DepTab,DAG) ->
- {NewDepTab,NewDAG} = add_read_dep(U,N,Ty,DepTab,DAG),
- add_read_deps(Us,N,Ty,NewDepTab,NewDAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_read_dep
-%% Argument : X - Used register
-%% N - Index of checked instr
-%% Ty - Type of checked instr
-%% DepTab - Hashtable with {next-writer, next-readers}
-%% DAG - Dependence graph
-%% Returns : {DepTab, DAG} - with updated values
-%% Description : Looks up what the next-writer/next-readers are, and adjusts
-%% the table with current node as new reader. Finally
-%% read-dependencies are added to the DAG.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-add_read_dep(X,N,Ty,DepTab,DAG) ->
- {NxtWriter,_NxtReaders} = lookup(X,DepTab),
- NewDepTab = reader(X,N,Ty,DepTab),
- NewDAG = read_deps(N,Ty,NxtWriter,DAG),
- {NewDepTab, NewDAG}.
-
-% If NxtWriter is 'none', then this var is not written subsequently
-% Add WAR from Instr to NxtWriter (if it exists)
-% *** UNFINISHED ***
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : read_deps
-%% Argument : N - Index of current node
-%% Ty - Type of current node
-%% Writer - tuple {NextWriter, WrType} where NextWriter is the
-%% subsequent instr that writes this register next time,
-%% and WrType is the type of that instr.
-%% DAG - The dependence graph
-%% Returns : DAG
-%% Description : Returns a new DAG if a next-writer exists, otherwise the old
-%% DAG is returned.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-read_deps(_Instr,_Ty,none,DAG) ->
- DAG;
-read_deps(_Instr,_Ty,{_Instr,_},DAG) ->
- DAG;
-read_deps(Instr,Ty,{NxtWr,NxtWrTy},DAG) ->
- dep_arc(Instr,hipe_target_machine:war_latency(Ty,NxtWrTy),NxtWr,
- DAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : empty_deptab
-%% Description : Creates an empty dependence table (hash-table)
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-empty_deptab() ->
- gb_trees:empty().
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : lookup
-%% Argument : X - key (register)
-%% DepTab - dependence table
-%% Returns : {NextWriter, NextReaders}
-%% Description : Returns next writer and a list of following readers on
-%% register X.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-lookup(X, DepTab) ->
- case gb_trees:lookup(X, DepTab) of
- none ->
- {none, []};
-    {value, {_W, _Rs} = Val} ->
-      Val
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : writer
-%% Argument : X - key (register)
-%% N - index of writer
-%% Ty - type of writer
-%% DepTab - dependence table to be updated
-%% Returns : DepTab - new dependence table
-%% Description : Sets N to be the next writer on X
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-writer(X, N, Ty, DepTab) ->
- gb_trees:enter(X, {{N, Ty}, []}, DepTab).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : reader
-%% Argument : X - key (register)
-%% N - index of reader
-%% Ty - type of reader
-%% DepTab - dependence table to be updated
-%% Returns : DepTab - new dependence table
-%% Description : Adds N to the dependence table as a reader.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-reader(X,N,Ty,DepTab) ->
- {W,Rs} = lookup(X,DepTab),
- gb_trees:enter(X,{W,[{N,Ty}|Rs]},DepTab).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% The following version of md/2 separates heap- and stack operations,
-%% which allows for greater reordering.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : md
-%% Argument : IxBB - indexed block
-%% DAG - dependence graph
-%% Returns : DAG - new dependence graph
-%% Description : Adds arcs for load/store dependencies to the DAG.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-md(IxBB, DAG) ->
- md(IxBB,empty_md_state(),DAG).
-
-md([],_,DAG) -> DAG;
-md([{N,I}|Is],St,DAG) ->
- case md_type(I) of
- other ->
- md(Is,St,DAG);
- {st,T} ->
- { WAW_nodes, WAR_nodes, NewSt } = st_overlap(N,T,St),
- md(Is,NewSt,
- md_war_deps(WAR_nodes,N,md_waw_deps(WAW_nodes,N,DAG)));
- {ld,T} ->
- { RAW_nodes, NewSt } = ld_overlap(N,T,St),
- md(Is,NewSt,
- md_raw_deps(RAW_nodes,N,DAG))
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : md_war_deps
-%% Argument : WAR_nodes - write-after-read nodes depending on N
-%% N - index of current instr
-%% DAG - dependence graph
-%% Returns : DAG - updated DAG
-%% Description : Adds arcs for write-after-read dependencies for N
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-md_war_deps([],_,DAG) -> DAG;
-md_war_deps([M|Ms],N,DAG) ->
- md_war_deps(Ms,N,dep_arc(M,hipe_target_machine:m_war_latency(),N,DAG)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : md_waw_deps
-%% Argument : WAW_nodes - write-after-write nodes depending on N
-%% N - index of current instr
-%% DAG - dependence graph
-%% Returns : DAG - updated DAG
-%% Description : Adds arcs for write-after-write dependencies for N
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-md_waw_deps([],_,DAG) -> DAG;
-md_waw_deps([M|Ms],N,DAG) ->
- md_waw_deps(Ms,N,dep_arc(M,hipe_target_machine:m_waw_latency(),N,DAG)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : md_raw_deps
-%% Argument : RAW_nodes - read-after-write nodes depending on N
-%% N - index of current instr
-%% DAG - dependence graph
-%% Returns : DAG - updated DAG
-%% Description : Adds arcs for read-after-write dependencies for N
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-md_raw_deps([],_,DAG) -> DAG;
-md_raw_deps([M|Ms],N,DAG) ->
- md_raw_deps(Ms,N,dep_arc(M,hipe_target_machine:m_raw_latency(),N,DAG)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : empty_md_state
-%% Description : Returns an empty memory-dependence state, i.e., a 4-tuple of
-%%               lists {StackStores, HeapStores, StackLoads, HeapLoads}
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-empty_md_state() -> {[], [], [], []}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : md_type
-%% Argument : I - instr
-%% Description : Maps the instr to a simplified type, telling whether it is a
-%%               load or a store, and whether it refers to the stack or heap.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-md_type(I) ->
- case I of
- #load{} ->
- Sp = hipe_sparc_registers:stack_pointer(),
- Src = hipe_sparc:load_src(I),
- N = hipe_sparc:reg_nr(Src),
- Off = hipe_sparc:load_off(I),
- if
- N =:= Sp -> % operation on stack
- {ld,{sp,Off}};
- true ->
- {ld,{hp,Src,Off}}
- end;
- #store{} ->
- Sp = hipe_sparc_registers:stack_pointer(),
- Dst = hipe_sparc:store_dest(I),
- N = hipe_sparc:reg_nr(Dst),
- Off = hipe_sparc:store_off(I),
- if
- N =:= Sp ->
- {st,{sp,Off}};
- true ->
- {st,{hp,Dst,Off}}
- end;
- _ ->
- other
- end.
-
-%% Given a memory operation and a 'memory op state',
-%% overlap(N,MemOp,State) returns { Preceding_Dependent_Ops, NewState },
-%% where Preceding_Dependent_Ops is either a pair { WAW_deps, WAR_deps }
-%% (for stores) or a list of RAW_deps (for loads).
-%%
-%% NOTES:
-%% Note that Erlang's semantics ("heap stores never overwrite existing data")
-%% means we can be quite free in reordering stores to the heap.
-%% Ld/St to the stack are simply handled by their offsets; since we do not
-%% rename the stack pointer, this is sufficient.
-%% *** We assume all memory ops have uniform size = 4 ***
-%%
-%% NOTES:
-%% The method mentioned above has now been changed because the assumption that
-%% "heap stores never overwrite existing data" caused a bug when the
-%% process-pointer was treated the same way as the heap. We were also told
-%% that the semantics may change in the future, so it is safer to treat
-%% heap stores/loads the same way as stack ones.
-%% A future improvement could be an alias analysis that gives more freedom
-%% when reordering memory operations.
-%%
-%% Alias state:
-%% { [StackOp], [HeapOp], [StackOp], [HeapOp] }
-%% where StackOp = {InstrID, Offset}
-%% HeapOp = {InstrID, Reg, Offset}
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : st_overlap
-%% Argument : N - Index of current node
-%% Type - {sp,Off} or {hp,Dst,Off}, store on stack or heap
-%% State - { [StackStrs], [HeapStrs], [StackLds], [HeapLds] }
-%% where StackStrs/StackLds = {InstrID, Offset}
-%% and HeapStrs/HeapLds = {InstrID, Reg, Offset}
-%% Returns : { DepStrs, DepLds, State } -
-%% where DepStrs/DepLds = [NodeId]
-%% and State is the new state
-%% Description : Adds dependencies for overlapping stores.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-st_overlap(N, {sp, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
- {DepSt, IndepSt_Sp} = st_sp_dep(St_Sp, Off),
- {DepLd, IndepLd_Sp} = ld_sp_dep(Ld_Sp, Off),
- {DepSt, DepLd, {[{N, Off}|IndepSt_Sp], St_Hp, IndepLd_Sp, Ld_Hp}};
-st_overlap(N, {hp, Dst, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
- DstOff = {Dst, Off},
- {DepSt,_IndepSt_Hp} = st_hp_dep(St_Hp, DstOff),
- {DepLd, IndepLd_Hp} = ld_hp_dep(Ld_Hp, DstOff),
- {DepSt, DepLd, {St_Sp, [{N, Dst, Off}|St_Hp], Ld_Sp, IndepLd_Hp}}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : ld_overlap
-%% Argument : N - Index of current node
-%%               Type - {sp,Off} or {hp,Src,Off}, load from stack or heap
-%% State - { [StackStrs], [HeapStrs], [StackLds], [HeapLds] }
-%% where StackStrs/StackLds = {InstrID, Offset}
-%% and HeapStrs/HeapLds = {InstrID, Reg, Offset}
-%% Returns : { DepStrs, State }
-%% Description : Adds dependencies for overlapping loads
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-ld_overlap(N, {sp, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
- DepSt = sp_dep_only(St_Sp, Off),
- {DepSt, {St_Sp, St_Hp, [{N, Off}|Ld_Sp], Ld_Hp}};
-ld_overlap(N, {hp, Src, Off}, {St_Sp, St_Hp, Ld_Sp, Ld_Hp}) ->
- DepSt = hp_dep_only(St_Hp, Src, Off),
- {DepSt, {St_Sp, St_Hp, Ld_Sp, [{N, Src, Off}|Ld_Hp]}}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : st_sp_dep
-%% Description : Partitions earlier stack stores into dependent/independent ones
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-st_sp_dep(Stores, Off) ->
- sp_dep(Stores, Off, [], []).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : ld_sp_dep
-%% Description : Partitions earlier stack loads into dependent/independent ones
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-ld_sp_dep(Loads, Off) ->
- sp_dep(Loads, Off, [], []).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : st_hp_dep
-%% Description : Partitions earlier heap stores into dependent/independent ones
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-st_hp_dep(Stores, {_Reg, _Off} = RegOff) ->
- hp_dep(Stores, RegOff, [], []).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : ld_hp_dep
-%% Description : Partitions earlier heap loads into dependent/independent ones
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-ld_hp_dep(Loads, {_Reg, _Off} = RegOff) ->
- hp_dep(Loads, RegOff, [], []).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : sp_dep
-%% Description : Returns {Dependent, Independent}: the earlier stack accesses
-%%               that conflict with the given offset and those that do not
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-sp_dep([], _Off, Dep, Indep) -> {Dep, Indep};
-sp_dep([{N,Off}|Xs], Off, Dep, Indep) ->
- sp_dep(Xs, Off, [N|Dep], Indep);
-sp_dep([X|Xs], Off, Dep, Indep) ->
- sp_dep(Xs, Off, Dep, [X|Indep]).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : hp_dep
-%% Description : Returns {Dependent, Independent}: the earlier heap accesses
-%%               that may conflict with the given access and those that
-%%               provably do not (same register, different offset)
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-hp_dep([], {_Reg,_Off}, Dep, Indep) -> {Dep,Indep};
-hp_dep([{N,Reg,Off1}|Xs], {Reg,Off}, Dep, Indep) when Off1 =/= Off ->
- hp_dep(Xs, {Reg,Off}, Dep, [{N,Reg,Off1}|Indep]);
-hp_dep([{N,_,_}|Xs], {Reg,Off}, Dep, Indep) ->
- hp_dep(Xs, {Reg,Off}, [N|Dep], Indep).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : sp_dep_only
-%% Description : Returns the earlier stack stores at the same offset, i.e.,
-%%               the ones the current load depends on
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-sp_dep_only(Stores, Off) ->
- [N || {N,Off0} <- Stores, Off =:= Off0].
-
-%% Dependences from heap stores to heap loads.
-%% *** UNFINISHED ***
-%% - but works
-%% This is somewhat subtle:
-%% - a heap load can only bypass a heap store if we KNOW it won't
-%% load the stored value
-%% - unfortunately, we do not know the relationships between registers
-%% at this point, so we can't say that store(p+4) is independent of
-%% load(q+0).
-%% (OR CAN WE? A bit closer reasoning might show that it's possible?)
-%% - We can ONLY say that st(p+c) and ld(p+c') are independent when c /= c'
-%%
-%% (As said before, it might be possible to lighten this restriction?)
-
-hp_dep_only([], _Reg, _Off) -> [];
-hp_dep_only([{_N,Reg,Off_1}|Xs], Reg, Off) when Off_1 =/= Off ->
- hp_dep_only(Xs, Reg, Off);
-hp_dep_only([{N,_,_}|Xs], Reg, Off) ->
- [N|hp_dep_only(Xs, Reg, Off)].
-
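A minimal, self-contained sketch (not part of this patch or the original module) that copies hp_dep_only/3 into a throwaway module to illustrate the conservative rule above: a heap load must depend on every earlier heap store except one to the same register at a different offset. Node ids, registers and offsets in demo/0 are made up.

-module(hp_dep_sketch).
-export([demo/0]).

%% Copied (modulo variable names) from hp_dep_only/3 above.
hp_dep_only([], _Reg, _Off) -> [];
hp_dep_only([{_N, Reg, Off1} | Xs], Reg, Off) when Off1 =/= Off ->
  hp_dep_only(Xs, Reg, Off);
hp_dep_only([{N, _, _} | Xs], Reg, Off) ->
  [N | hp_dep_only(Xs, Reg, Off)].

demo() ->
  EarlierStores = [{1, r1, 0},   % same reg, same offset  -> dependent
                   {2, r1, 4},   % same reg, other offset -> provably disjoint
                   {3, r2, 0}],  % other reg: may alias    -> dependent
  [1, 3] = hp_dep_only(EarlierStores, r1, 0),
  ok.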
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Control dependences:
-%% - add dependences so that
-%% * branches are performed in order
-%% * unsafe operations are 'fenced in' by surrounding branches
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd
-%% Argument : IxBB - indexed block
-%% DAG - dependence graph
-%% Returns : DAG - new dependence graph
-%% Description : Adds conditional dependencies to the DAG
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd(IxBB,DAG) ->
- cd(IxBB, DAG, none, [], []).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd
-%% Argument : IxBB - indexed block
-%% DAG - dependence graph
-%% PrevBr - previous branch
-%%               PrevUnsafe - previous unsafe instrs (mem-ops)
-%%               PrevOthers - previous other instrs, used to "fix" preceding
-%%                            instrs so they don't bypass a branch.
-%% Returns : DAG - new dependence graph
-%% Description : Adds conditional dependencies to the graph.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd([], DAG, _PrevBr, _PrevUnsafe, _PrevOthers) ->
- DAG;
-cd([{N,I}|Xs], DAG, PrevBr, PrevUnsafe, PrevOthers) ->
- case cd_type(I) of
- {branch,Ty} ->
- DAG1 = cd_branch_to_other_deps(N, PrevOthers, DAG),
- NewDAG = cd_branch_deps(PrevBr, PrevUnsafe, N, Ty, DAG1),
- cd(Xs,NewDAG,{N,Ty},[],[]);
- {unsafe,Ty} ->
- NewDAG = cd_unsafe_deps(PrevBr,N,Ty,DAG),
- cd(Xs, NewDAG, PrevBr, [{N,Ty}|PrevUnsafe], PrevOthers);
- {other,_Ty} ->
- cd(Xs, DAG, PrevBr, PrevUnsafe, [N|PrevOthers])
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd_branch_to_other_deps
-%% Argument : N - index of branch
-%%               Ms - indexes of preceding "other" instrs
-%% DAG - dependence graph
-%% Returns : DAG - new graph
-%% Description : Adds zero-latency arcs so that the preceding "other" instrs
-%%               cannot be moved past the branch
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd_branch_to_other_deps(_, [], DAG) ->
- DAG;
-cd_branch_to_other_deps(N, [M | Ms], DAG) ->
- cd_branch_to_other_deps(N, Ms, dep_arc(M, zero_latency(), N, DAG)).
-
-%% Is the operation a branch, an unspeculable op or something else?
-
-%% Returns
-%% {branch,BranchType}
-%% {unsafe,OpType}
-%% {other,OpType}
-
-%% *** UNFINISHED ***
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd_type
-%% Argument : I - instr
-%% Description : Maps instrs to a simpler type.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd_type(I) ->
- case I of
- #goto{} ->
- {branch,uncond};
- #br{} ->
- {branch,'cond'};
- #b{} ->
- {branch,'cond'};
- #call_link{} ->
- {branch,call};
- #jmp_link{} ->
- {branch,call};
- #jmp{} ->
- {branch,call};
- #load{} ->
- {unsafe,load};
- #store{} ->
-      {unsafe,store};
- T ->
- {other,T}
- end.
-
-%% add dependences to keep order of branches + unspeculable ops:
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd_branch_deps
-%% Argument : PrevBr - preceding branch
-%%            PrevUnsafe - preceding unsafe ops, e.g., mem-ops
-%% N - current id.
-%% Ty - type of current instr
-%% DAG - dependence graph
-%% Returns : DAG - new DAG
-%% Description : Adds arcs between branches and calls deps_to_unsafe that adds
-%% arcs between branches and unsafe ops.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd_branch_deps(PrevBr, PrevUnsafe, N, Ty, DAG) ->
- DAG1 = case PrevBr of
- none ->
- DAG;
- {Br,BrTy} ->
- dep_arc(Br,
- hipe_target_machine:br_br_latency(BrTy,Ty),
- N, DAG)
- end,
- deps_to_unsafe(PrevUnsafe, N, Ty, DAG1).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : deps_to_unsafe
-%% Description : Adds dependence arcs from the preceding unsafe ops to the branch
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-deps_to_unsafe([], _, _, DAG) -> DAG;
-deps_to_unsafe([{M,UTy}|Us], N, Ty, DAG) ->
- deps_to_unsafe(Us,N,Ty,
- dep_arc(M, hipe_target_machine:unsafe_to_br_latency(UTy,Ty),
- N, DAG)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cd_unsafe_deps
-%% Description : Adds a dependence arc from the preceding branch to the unsafe op
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-cd_unsafe_deps(none, _, _, DAG) ->
- DAG;
-cd_unsafe_deps({Br,BrTy}, N, Ty, DAG) ->
- dep_arc(Br, hipe_target_machine:br_to_unsafe_latency(BrTy, Ty), N, DAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : def_use
-%% Argument : Instr
-%% Description : Returns the registers that Instr defines and uses, as two lists
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-def_use(Instr) ->
- {hipe_sparc:defines(Instr), hipe_sparc:uses(Instr)}.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : move_or_alu
-%% Description : True if the instruction is a move or an alu; false otherwise
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-move_or_alu(#move{}) -> true;
-move_or_alu(#alu{}) -> true;
-move_or_alu(_) -> false.
-
-%% Debugging stuff below %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
--ifdef(debug1).
-debug1_stuff(Blk, DAG, IxBlk, Sch, X) ->
- io:format("Blk: ~p~n",[Blk]),
- io:format("DAG: ~n~p~n~p",[DAG,IxBlk]),
- io:format("~n"),
- print_instrs(IxBlk),
- print_sch(Sch, IxBlk),
- print_instrs2(X).
-
-print_instrs([]) ->
- io:format("~n");
-print_instrs([{N,Instr} | Instrs]) ->
- io:format("(~p): ",[N]),
- hipe_sparc_pp:pp_instr(Instr),
- io:format("~p~n",[element(1,Instr)]),
- print_instrs(Instrs).
-
-print_instrs2([]) ->
- io:format("~n");
-print_instrs2([Instr | Instrs]) ->
- hipe_sparc_pp:pp_instr(Instr),
- print_instrs2(Instrs).
-
-print_sch([],_) -> io:format("~n");
-print_sch([{{cycle,Cycle},{node,I}} | Rest], IxBlk) ->
- io:format("{C~p, N~p} ",[Cycle,I]),
- print_node(I, IxBlk),
- print_sch(Rest, IxBlk).
-
-print_node(_, []) ->
- io:format("~n");
-print_node(I, [{I, Instr} | _]) ->
- hipe_sparc_pp:pp_instr(Instr);
-print_node(I, [_ | IxBlk]) ->
- print_node(I, IxBlk).
--else.
-debug1_stuff(_Blk, _DAG, _IxBlk, _Sch, _X) ->
- ok.
--endif.
diff --git a/lib/hipe/opt/hipe_schedule_prio.erl b/lib/hipe/opt/hipe_schedule_prio.erl
deleted file mode 100644
index 339bb82aab..0000000000
--- a/lib/hipe/opt/hipe_schedule_prio.erl
+++ /dev/null
@@ -1,53 +0,0 @@
-%% -*- erlang-indent-level: 2 -*-
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% PRIORITY HANDLING AND PRIORITY CALCULATION
-%%
-%% Handling of ready nodes and priorities.
-%% - at present, all nodes have the same priority and so on.
-%%
-%% *** UNFINISHED ***
-%% - should compute a static priority estimate
-%% - should dynamically modify priorities + possibly insert NOPs
-%% (e.g., to separate branches, etc.)
-%% - thus, ought to be passed the current schedule and/or resources as well
-
--module(hipe_schedule_prio).
--export([init_ready/2,
- init_instr_prio/2,
- %% initial_ready_set/4,
- next_ready/7,
- add_ready_nodes/2,
- insert_node/3
- ]).
-
-init_ready(Size,Preds) ->
- hipe_ultra_prio:init_ready(Size,Preds).
-
-init_instr_prio(N,DAG) ->
- hipe_ultra_prio:init_instr_prio(N,DAG).
-
-%% initial_ready_set(M,N,Preds,Ready) ->
-%% hipe_ultra_prio:initial_ready_set(M,N,Preds,Ready).
-
-next_ready(C,Ready,Prio,Nodes,DAG,Preds,Earl) ->
- hipe_ultra_prio:next_ready(C,Ready,Prio,Nodes,DAG,Preds,Earl).
-
-add_ready_nodes(NodeLst,Ready) ->
- hipe_ultra_prio:add_ready_nodes(NodeLst,Ready).
-
-insert_node(C,I,Ready) ->
- hipe_ultra_prio:insert_node(C,I,Ready).
diff --git a/lib/hipe/opt/hipe_target_machine.erl b/lib/hipe/opt/hipe_target_machine.erl
deleted file mode 100644
index 75993cb95e..0000000000
--- a/lib/hipe/opt/hipe_target_machine.erl
+++ /dev/null
@@ -1,87 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% INTERFACE TO TARGET MACHINE MODEL
-%%
-%% Interfaces the instruction scheduler to the (resource) machine model.
-
--module(hipe_target_machine).
--export([init_resources/1,
- init_instr_resources/2,
- resources_available/4,
- advance_cycle/1
- ]).
--export([raw_latency/2,
- war_latency/2,
- waw_latency/2,
- %% m_raw_latency/2,
- %% m_war_latency/2,
- %% m_waw_latency/2,
- m_raw_latency/0,
- m_war_latency/0,
- m_waw_latency/0,
- br_to_unsafe_latency/2,
- unsafe_to_br_latency/2,
- br_br_latency/2
- ]).
-
--define(target,hipe_ultra_mod2).
-
-init_resources(X) ->
- ?target:init_resources(X).
-
-init_instr_resources(X,Y) ->
- ?target:init_instr_resources(X,Y).
-
-resources_available(X,Y,Z,W) ->
- ?target:resources_available(X,Y,Z,W).
-
-advance_cycle(X) ->
- ?target:advance_cycle(X).
-
-raw_latency(From,To) ->
- ?target:raw_latency(From,To).
-
-war_latency(From,To) ->
- ?target:war_latency(From,To).
-
-waw_latency(From,To) ->
- ?target:waw_latency(From,To).
-
-%% m_raw_latency(From,To) ->
-%% ?target:m_raw_latency(From,To).
-
-%% m_war_latency(From,To) ->
-%% ?target:m_war_latency(From,To).
-
-%% m_waw_latency(From,To) ->
-%% ?target:m_waw_latency(From,To).
-
-m_raw_latency() ->
- ?target:m_raw_latency().
-
-m_war_latency() ->
- ?target:m_war_latency().
-
-m_waw_latency() ->
- ?target:m_waw_latency().
-
-br_to_unsafe_latency(Br,U) ->
- ?target:br_to_unsafe_latency(Br,U).
-
-unsafe_to_br_latency(U,Br) ->
- ?target:unsafe_to_br_latency(U,Br).
-
-br_br_latency(Br1,Br2) ->
- ?target:br_br_latency(Br1,Br2).
diff --git a/lib/hipe/opt/hipe_ultra_mod2.erl b/lib/hipe/opt/hipe_ultra_mod2.erl
deleted file mode 100644
index cec9c56a1e..0000000000
--- a/lib/hipe/opt/hipe_ultra_mod2.erl
+++ /dev/null
@@ -1,233 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% ULTRASPARC MACHINE MODEL
-%%
-%% This module is used by the scheduler.
-%% The following interface is used:
-%% ...
-%%
-%% NOTES:
-%% - the machine model is simple (on the verge of simplistic)
-%% * all FUs are pipelined => model only one cycle at a time
-%% * instruction latencies are mostly 1
-%% * floating point is left for later (I _think_ it works, but ...)
-%% - conservative: instructions that require multiple resources are
-%% modelled as 'single'; instead, they could reserve IEU+BR or whatever
-%% - possibly inefficient: I think machine state model could be turned into
-%% a bitvector.
-
--module(hipe_ultra_mod2).
--export([init_resources/1,
- init_instr_resources/2,
- resources_available/4,
- advance_cycle/1
- ]).
--export([raw_latency/2,
- war_latency/2,
- waw_latency/2,
- %% m_raw_latency/2,
- %% m_war_latency/2,
- %% m_waw_latency/2,
- m_raw_latency/0,
- m_war_latency/0,
- m_waw_latency/0,
- br_to_unsafe_latency/2,
- unsafe_to_br_latency/2,
- br_br_latency/2
- ]).
-
--include("../sparc/hipe_sparc.hrl").
-
--define(debug(Str,Args),ok).
-%-define(debug(Str,Args),io:format(Str,Args)).
-
--define(debug_ultra(Str,Args),ok).
-%-define(debug_ultra(Str,Args),io:format(Str,Args)).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% Straightforward and somewhat simplistic model for UltraSparc:
-%% - only one cycle at a time is modelled
-%% - resources are simplified:
-%% * ieu0, ieu1, ieu, mem, br, single
-%% * per-cycle state = done | { I0, I1, NumI, X, Mem, Br }
-%% * unoptimized representation (could be bit vector)
-
-init_resources(_Size) ->
- ?debug_ultra('init res ~p~n',[_Size]),
- empty_state().
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-init_instr_resources(N,Nodes) ->
- ultra_instr_rsrcs(Nodes,hipe_vectors:new(N, '')).
-
-ultra_instr_rsrcs([],I_res) -> I_res;
-ultra_instr_rsrcs([N|Ns],I_res) ->
- ultra_instr_rsrcs(Ns,ultra_instr_type(N,I_res)).
-
-ultra_instr_type({N,I},I_res) ->
- hipe_vectors:set(I_res,N-1,instr_type(I)).
-
-instr_type(I) ->
- case I of
- #move{} ->
- ieu;
- #multimove{} -> %% TODO: expand multimoves before scheduling
- ieu;
- #alu{} ->
- case hipe_sparc:alu_operator(I) of
- '>>' -> ieu0;
- '<<' -> ieu0;
- _ -> ieu
- end;
- #alu_cc{} ->
- ieu1;
- #sethi{} ->
- ieu;
- #load{} ->
- mem;
- #store{} ->
- mem;
- #b{} ->
- br;
- #br{} ->
- br;
- #goto{} ->
- br;
- #jmp_link{} -> % imprecise; should be mem+br?
- single;
- #jmp{} -> % imprecise
- br;
- #call_link{} -> % imprecise; should be mem+br?
- single;
- #cmov_cc{} -> % imprecise
- single;
- #cmov_r{} -> % imprecise
- single;
- #load_atom{} -> % should be resolved to sethi/or
- single;
- #load_address{} -> % should be resolved to sethi/or
- single;
- #load_word_index{} -> % should be resolved to sethi/or
- single;
- %% uncommon types:
- #label{} ->
- none;
- #nop{} ->
- none;
- #comment{} ->
- none;
- _ ->
- exit({ultrasparc_instr_type,{cant_schedule,I}})
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-resources_available(_Cycle, I, Rsrc, I_res) ->
- res_avail(instruction_resource(I_res, I), Rsrc).
-
-instruction_resource(I_res, I) ->
- hipe_vectors:get(I_res, I-1).
-
-%% The following function checks resource availability.
-%% * all function units are assumed to be fully pipelined, so only
-%% one cycle at a time is modelled.
-%% * for IEU0 and IEU1, these must precede all generic IEU instructions
-%% (handled by X bit)
-%% * at most 2 integer instructions can issue in a cycle
-%% * mem is straightforward
-%% * br closes the cycle (= returns done).
-%% * single requires an entirely empty state and closes the cycle
-
-res_avail(ieu0, { free, I1, NumI, free, Mem, Br })
- when is_integer(NumI), NumI < 2 ->
- { yes, { occ, I1, NumI+1, free, Mem, Br }};
-res_avail(ieu1, { _I0, free, NumI, free, Mem, Br })
- when is_integer(NumI), NumI < 2 ->
- { yes, { free, occ, NumI+1, free, Mem, Br }};
-res_avail(ieu, { I0, I1, NumI, _X, Mem, Br })
- when is_integer(NumI), NumI < 2 ->
- { yes, { I0, I1, NumI+1, occ, Mem, Br }};
-res_avail(mem, { I0, I1, NumI, X, free, Br }) ->
- { yes, { I0, I1, NumI, X, occ, Br }};
-res_avail(br, { _I0, _I1, _NumI, _X, _Mem, free }) ->
- { yes, done };
-res_avail(single, { free, free, 0, free, free, free }) ->
- { yes, done };
-res_avail(_, _) ->
- no.
-
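A minimal, self-contained sketch (not part of this patch or the original module) that copies the per-cycle state and the res_avail/2 clauses above into a throwaway module and walks one cycle: two integer ops dual-issue, a third is refused, a memory op still fits, and a branch closes the cycle. The issue order in demo/0 is made up.

-module(ultra_cycle_sketch).
-export([demo/0]).

empty_state() -> {free, free, 0, free, free, free}.

%% Copied (modulo whitespace) from res_avail/2 above.
res_avail(ieu0, {free, I1, NumI, free, Mem, Br}) when is_integer(NumI), NumI < 2 ->
  {yes, {occ, I1, NumI+1, free, Mem, Br}};
res_avail(ieu1, {_I0, free, NumI, free, Mem, Br}) when is_integer(NumI), NumI < 2 ->
  {yes, {free, occ, NumI+1, free, Mem, Br}};
res_avail(ieu, {I0, I1, NumI, _X, Mem, Br}) when is_integer(NumI), NumI < 2 ->
  {yes, {I0, I1, NumI+1, occ, Mem, Br}};
res_avail(mem, {I0, I1, NumI, X, free, Br}) ->
  {yes, {I0, I1, NumI, X, occ, Br}};
res_avail(br, {_I0, _I1, _NumI, _X, _Mem, free}) ->
  {yes, done};
res_avail(single, {free, free, 0, free, free, free}) ->
  {yes, done};
res_avail(_, _) ->
  no.

demo() ->
  S0 = empty_state(),
  {yes, S1} = res_avail(ieu0, S0),  % first integer op issues on IEU0
  {yes, S2} = res_avail(ieu, S1),   % second integer op dual-issues
  no = res_avail(ieu, S2),          % at most two integer ops per cycle
  {yes, S3} = res_avail(mem, S2),   % one memory op still fits
  {yes, done} = res_avail(br, S3),  % the branch closes the cycle
  ok.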
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-advance_cycle(_Rsrc) ->
- empty_state().
-
-empty_state() -> { free, free, 0, free, free, free }.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Latencies are taken from UltraSparc hardware manual
-%%
-%% *** UNFINISHED ***
-%% more precisely, they are taken from my memory of the US-manual
-%% at the moment.
-%%
-%% Note: all ld/st are assumed to hit in the L1 cache (D-cache),
-%% which is sort of imprecise.
-
-raw_latency(alu, store) -> 0;
-raw_latency(load, _) -> 2; % only if load is L1 hit
-raw_latency(alu_cc, b) -> 0;
-raw_latency(_I0, _I1) ->
- 1.
-
-war_latency(_I0, _I1) ->
- 0.
-
-waw_latency(_I0, _I1) ->
- 1.
-
-%% *** UNFINISHED ***
-%% At present, all load/stores are assumed to hit in the L1 cache,
-%% which isn't really satisfying.
-
-%% m_raw_latency(_St, _Ld) ->
-%% 1.
-%%
-%% m_war_latency(_Ld, _St) ->
-%% 1.
-%%
-%% m_waw_latency(_St1, _St2) ->
-%% 1.
-
-%% Use these for 'default latencies' = do not permit reordering.
-
-m_raw_latency() ->
- 1.
-
-m_war_latency() ->
- 1.
-
-m_waw_latency() ->
- 1.
-
-br_to_unsafe_latency(_BrTy, _UTy) ->
- 0.
-
-unsafe_to_br_latency(_UTy, _BrTy) ->
- 0.
-
-br_br_latency(_BrTy1, _BrTy2) ->
- 0.
diff --git a/lib/hipe/opt/hipe_ultra_prio.erl b/lib/hipe/opt/hipe_ultra_prio.erl
deleted file mode 100644
index 6dd240a33a..0000000000
--- a/lib/hipe/opt/hipe_ultra_prio.erl
+++ /dev/null
@@ -1,298 +0,0 @@
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% PRIORITY HANDLING AND PRIORITY CALCULATION
-%%
-%% Handling of ready nodes and priorities.
-%% Priorities are mainly from the critical path. More priorities are added.
-%% * One version adds priorities depending only on the instr, so that for
-%%   example loads get higher priority than stores and are ordered by
-%%   register and offset for better cache performance.
-%% * The other version gives higher priority to a node that adds more new
-%%   nodes to the ready list. This is perhaps not implemented very
-%%   efficiently, but it was added too late for a smarter solution.
-%% One of the two versions is commented out.
-
--module(hipe_ultra_prio).
--export([init_ready/2,
- init_instr_prio/2,
- %% initial_ready_set/4,
- next_ready/7,
- add_ready_nodes/2,
- insert_node/3
- ]).
-
--include("../sparc/hipe_sparc.hrl").
-
-% At first, only nodes with no predecessors are selected.
-% - if R is empty, there is an error (unless BB itself is empty)
-
-%% Arguments : Size - size of ready-array
-%% Preds - array with number of predecessors for each node
-%% Returns : An array with list of ready-nodes for each cycle.
-
-init_ready(Size, Preds) ->
- P = hipe_vectors:size(Preds),
- Ready = hipe_vectors:new(Size, []),
- R = initial_ready_set(1, P, Preds, []),
- hipe_vectors:set(Ready, 0, R).
-
-init_instr_prio(N, DAG) ->
- critical_path(N, DAG).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : initial_ready_set
-%% Argument : M - current node-index
-%% N - where to stop
-%% Preds - array with number of predecessors for each node
-%% Ready - list with ready-nodes
-%% Returns : Ready - list with ready-nodes
-%% Description : Finds all nodes with no predecessors and adds them to ready.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-initial_ready_set(M, N, Preds, Ready) ->
- if
- M > N ->
- Ready;
- true ->
- case hipe_vectors:get(Preds, M-1) of
- 0 ->
- initial_ready_set(M+1, N, Preds, [M|Ready]);
- V when is_integer(V), V > 0 ->
- initial_ready_set(M+1, N, Preds, Ready)
- end
- end.
-
-%% The following handles the nodes ready to schedule:
-%% 1. select the ready queue of given cycle
-%% 2. if queue empty, return none
-%% 3. otherwise, remove entry with highest priority
-%% and return {next,Highest_Prio,NewReady}
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : next_ready
-%% Argument : C - current cycle
-%% Ready - array with ready nodes
-%% Prio - array with cpath-priorities for all nodes
-%%               Nodes - indexed list [{N, Instr}]
-%%               DAG, Preds, Earl - dependence info passed on to the priority
-%%                                  computation
-%% Returns : none / {next,Highest_Prio,NewReady}
-%% Description : 1. select the ready queue of given cycle
-%% 2. if queue empty, return none
-%% 3. otherwise, remove entry with highest priority
-%% and return {next,Highest_Prio,NewReady} where Highest_Prio
-%% = Id of instr and NewReady = updated ready-array.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-next_ready(C, Ready, Prio, Nodes, DAG, Preds, Earl) ->
- Curr = hipe_vectors:get(Ready, C-1),
- case Curr of
- [] ->
- none;
- Instrs ->
- {BestI,RestIs} =
- get_best_instr(Instrs, Prio, Nodes, DAG, Preds, Earl, C),
- {next,BestI,hipe_vectors:set(Ready,C-1,RestIs)}
- end.
-
-% next_ready(C,Ready,Prio,Nodes) ->
-% Curr = hipe_vectors:get(Ready,C-1),
-% case Curr of
-% [] ->
-% none;
-% Instrs ->
-% {BestInstr,RestInstrs} = get_best_instr(Instrs, Prio, Nodes),
-% {next,BestInstr,hipe_vectors:set(Ready,C-1,RestInstrs)}
-% end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : get_best_instr
-%% Argument : Instrs - list of node-id's
-%% Prio - array with cpath-priorities for the nodes
-%% Nodes - indexed list [{Id, Instr}]
-%% Returns : {BestSoFar, Rest} - Id of best instr and the rest of id's
-%% Description : Returns the id of the instr that is the best choice.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-get_best_instr([Instr|Instrs], Prio, Nodes, DAG, Preds, Earl, C) ->
- get_best_instr(Instrs, [], Instr, Prio, Nodes, DAG, Preds, Earl, C).
-
-get_best_instr([], Rest, BestSoFar, _Prio, _Nodes, _DAG, _Preds, _Earl, _C) ->
- {BestSoFar, Rest};
-get_best_instr([Instr|Instrs], PassedInstrs, BestSoFar, Prio, Nodes,
- DAG, Preds, Earl, C) ->
- case better(Instr, BestSoFar, Prio, Nodes, DAG, Preds, Earl, C) of
- true ->
- get_best_instr(Instrs, [BestSoFar|PassedInstrs],
- Instr, Prio, Nodes, DAG, Preds, Earl, C);
- false ->
- get_best_instr(Instrs, [Instr|PassedInstrs], BestSoFar, Prio,
- Nodes, DAG, Preds, Earl, C)
- end.
-
-% get_best_instr([Instr|Instrs], Prio, Nodes) ->
-% get_best_instr(Instrs, [], Instr, Prio, Nodes).
-
-% get_best_instr([], Rest, BestSoFar, Prio, Nodes) -> {BestSoFar, Rest};
-% get_best_instr([Instr|Instrs], PassedInstrs, BestSoFar, Prio, Nodes) ->
-% case better(Instr, BestSoFar, Prio, Nodes) of
-% true ->
-% get_best_instr(Instrs, [BestSoFar|PassedInstrs],
-% Instr, Prio, Nodes);
-% false ->
-% get_best_instr(Instrs, [Instr|PassedInstrs],BestSoFar, Prio, Nodes)
-% end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : better
-%% Argument : Instr1 - Id of instr 1
-%% Instr2 - Id of instr 2
-%% Prio - array with cpath-priorities for the nodes
-%% Nodes - indexed list [{Id, Instr}]
-%% Returns : true if Instr1 has higher priority than Instr2
-%% Description : Checks if Instr1 is a better choice than Instr2 for scheduling
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-better(Instr1, Instr2, Prio, Nodes, DAG, Preds, Earl, C) ->
- better_hlp(priority(Instr1, Prio, Nodes, DAG, Preds, Earl, C),
- priority(Instr2, Prio, Nodes, DAG, Preds, Earl, C)).
-
-better_hlp([], []) -> false;
-better_hlp([], [_|_]) -> false;
-better_hlp([_|_], []) -> true;
-better_hlp([X|Xs], [Y|Ys]) -> (X > Y) or ((X =:= Y) and better_hlp(Xs,Ys)).
-
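A minimal, self-contained sketch (not part of this patch or the original module) that copies better_hlp/2 into a throwaway module to show the lexicographic ordering it imposes on priority lists; the lists in demo/0 are made up.

-module(prio_cmp_sketch).
-export([demo/0]).

%% Copied (modulo whitespace) from better_hlp/2 above.
better_hlp([], []) -> false;
better_hlp([], [_|_]) -> false;
better_hlp([_|_], []) -> true;
better_hlp([X|Xs], [Y|Ys]) -> (X > Y) or ((X =:= Y) and better_hlp(Xs, Ys)).

demo() ->
  true  = better_hlp([3,2,1], [3,1,5]),  % equal heads, then 2 > 1 decides
  false = better_hlp([3,2,1], [3,2,1]),  % identical lists are not "better"
  true  = better_hlp([3,2,1], [3,2]),    % on a tied prefix, the longer list wins
  ok.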
-%%
-%% Returns the instr corresponding to id
-%%
-get_instr(InstrId, [{InstrId,Instr}|_]) -> Instr;
-get_instr(InstrId, [_|Xs]) -> get_instr(InstrId, Xs).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : priority
-%% Argument : InstrId - Id
-%% Prio - array with cpath-priorities for the nodes
-%% Nodes - indexed list [{Id, Instr}]
-%% Returns : PrioList - list of priorities [MostSignificant, LessSign, ...]
-%% Description : Returns a list of priorities where the first element is the
-%%               cpath-priority, the second is the number of nodes that become
-%%               ready when the instr is scheduled, and the rest depend on the
-%%               kind of instr. Used to order loads/stores sequentially;
-%%               further criteria can be added if needed.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-priority(InstrId, Prio, Nodes, DAG, Preds, Earl, C) ->
- {ReadyNodes,_,_,_} = hipe_schedule:delete_node(C,InstrId,DAG,Preds,Earl),
- Instr = get_instr(InstrId, Nodes),
- Prio1 = hipe_vectors:get(Prio, InstrId-1),
- Prio2 = length(ReadyNodes),
- PrioRest =
- case Instr of
- #load_atom{} ->
- [3];
- #move{} ->
- [3];
- #load{} ->
- Src = hipe_sparc:load_src(Instr),
- Off = hipe_sparc:load_off(Instr),
- case hipe_sparc:is_reg(Off) of
- false -> [3,
- -(hipe_sparc:reg_nr(Src)),
- -(hipe_sparc:imm_value(Off))];
- true -> [1]
- end;
- #store{} ->
- Src = hipe_sparc:store_dest(Instr),
- Off = hipe_sparc:store_off(Instr),
- case hipe_sparc:is_reg(Off) of
- false -> [2,
- -(hipe_sparc:reg_nr(Src)),
- -(hipe_sparc:imm_value(Off))];
- true -> [1]
- end;
- _ -> [0]
- end,
- [Prio1,Prio2|PrioRest].
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : add_ready_nodes
-%% Argument : Nodes - list of [{Cycle,Id}]
-%% Ready - array of ready nodes for all cycles
-%% Returns : NewReady - updated ready-array
-%% Description : Gets a list of instrs and adds them to the ready-array
-%% to the corresponding cycle.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-add_ready_nodes([], Ready) -> Ready;
-add_ready_nodes([{C,I}|Xs], Ready) ->
- add_ready_nodes(Xs, insert_node(C, I, Ready)).
-
-insert_node(C, I, Ready) ->
- Old = hipe_vectors:get(Ready, C-1),
- hipe_vectors:set(Ready, C-1, [I|Old]).
-
-%%
-%% Computes the latency for the "most expensive" way through the graph
-%% for all nodes. Returns an array of priorities for all nodes.
-%%
-critical_path(N, DAG) ->
- critical_path(1, N, DAG, hipe_vectors:new(N, -1)).
-
-critical_path(M, N, DAG, Prio) ->
- if
- M > N ->
- Prio;
- true ->
- critical_path(M+1, N, DAG, cpath(M, DAG, Prio))
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% Function : cpath
-%% Argument : M - current node id
-%% DAG - the dependence graph
-%% Prio - array of priorities for all nodes
-%% Returns : Prio - updated prio array
-%% Description : If the node has prio -1 it has not yet been visited, so its
-%%               priority is computed as the max over its successors of
-%%               (successor priority + latency); otherwise Prio is returned.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-cpath(M, DAG, Prio) ->
- InitPrio = hipe_vectors:get(Prio, M-1),
- if
- InitPrio =:= -1 ->
- cpath_node(M, DAG, Prio);
- true ->
- Prio
- end.
-
-cpath_node(N, DAG, Prio) ->
- SuccL = dag_succ(DAG, N),
- {Max, NewPrio} = cpath_succ(SuccL, DAG, Prio),
- hipe_vectors:set(NewPrio, N-1, Max).
-
-cpath_succ(SuccL, DAG, Prio) ->
- cpath_succ(SuccL, DAG, Prio, 0).
-
-%% performs an unnecessary lookup of priority of Succ, but that might
-%% not be such a big deal
-
-cpath_succ([], _DAG, Prio, NodePrio) -> {NodePrio,Prio};
-cpath_succ([{Lat,Succ}|Xs], DAG, Prio, NodePrio) ->
- NewPrio = cpath(Succ, DAG, Prio),
- NewNodePrio = erlang:max(hipe_vectors:get(NewPrio, Succ - 1) + Lat, NodePrio),
- cpath_succ(Xs, DAG, NewPrio, NewNodePrio).
-
-dag_succ(DAG, N) when is_integer(N) ->
- hipe_vectors:get(DAG, N-1).
-
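A minimal, self-contained sketch (not part of this patch or the deleted modules) of the same longest-path priority idea, using a plain map from node id to its successor list [{Latency, Succ}] instead of hipe_vectors and without the memoization that the Prio vector provides; the DAG in demo/0 is made up.

-module(cpath_sketch).
-export([demo/0]).

%% Longest path from N to any leaf, where each arc contributes its latency.
cpath(N, DAG) ->
  lists:max([0 | [Lat + cpath(S, DAG) || {Lat, S} <- maps:get(N, DAG)]]).

demo() ->
  %% Arcs: 1 -(lat 1)-> 2, 2 -(lat 2)-> 3, 1 -(lat 1)-> 3.
  DAG = #{1 => [{1,2}, {1,3}], 2 => [{2,3}], 3 => []},
  3 = cpath(1, DAG),  % critical path 1 -> 2 -> 3 gives 1 + 2
  2 = cpath(2, DAG),
  0 = cpath(3, DAG),
  ok.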
diff --git a/lib/hipe/test/Makefile b/lib/hipe/test/Makefile
index 544888719f..efeb0887ab 100644
--- a/lib/hipe/test/Makefile
+++ b/lib/hipe/test/Makefile
@@ -7,7 +7,8 @@ include $(ERL_TOP)/make/$(TARGET)/otp.mk
MODULES= \
hipe_SUITE \
- opt_verify_SUITE
+ opt_verify_SUITE \
+ erl_types_SUITE
# .erl files for these modules are automatically generated
GEN_MODULES= \
diff --git a/lib/hipe/test/erl_types_SUITE.erl b/lib/hipe/test/erl_types_SUITE.erl
new file mode 100644
index 0000000000..7d7c144b69
--- /dev/null
+++ b/lib/hipe/test/erl_types_SUITE.erl
@@ -0,0 +1,197 @@
+%% -*- erlang-indent-level: 4 -*-
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+-module(erl_types_SUITE).
+
+-export([all/0,
+ consistency_and_to_string/1]).
+
+%% Simplify calls into erl_types and avoid importing the entire module.
+-define(M, erl_types).
+
+-include_lib("common_test/include/ct.hrl").
+
+all() ->
+ [consistency_and_to_string].
+
+consistency_and_to_string(_Config) ->
+ %% Check consistency of types
+ Atom1 = ?M:t_atom(),
+ Atom2 = ?M:t_atom(foo),
+ Atom3 = ?M:t_atom(bar),
+ true = ?M:t_is_atom(Atom2),
+
+ True = ?M:t_atom(true),
+ False = ?M:t_atom(false),
+ Bool = ?M:t_boolean(),
+ true = ?M:t_is_boolean(True),
+ true = ?M:t_is_boolean(Bool),
+ false = ?M:t_is_boolean(Atom1),
+
+ Binary = ?M:t_binary(),
+ true = ?M:t_is_binary(Binary),
+
+ Bitstr = ?M:t_bitstr(),
+ true = ?M:t_is_bitstr(Bitstr),
+
+ Bitstr1 = ?M:t_bitstr(7, 3),
+ true = ?M:t_is_bitstr(Bitstr1),
+ false = ?M:t_is_binary(Bitstr1),
+
+ Bitstr2 = ?M:t_bitstr(16, 8),
+ true = ?M:t_is_bitstr(Bitstr2),
+ true = ?M:t_is_binary(Bitstr2),
+
+ BitStr816 = ?M:t_bitstr(8,16),
+ BitStr816 = ?M:t_subtract(?M:t_bitstr(4, 12), ?M:t_bitstr(8, 12)),
+
+ Int1 = ?M:t_integer(),
+ Int2 = ?M:t_integer(1),
+ Int3 = ?M:t_integer(16#ffffffff),
+ true = ?M:t_is_integer(Int2),
+ true = ?M:t_is_byte(Int2),
+ false = ?M:t_is_byte(Int3),
+ false = ?M:t_is_byte(?M:t_from_range(-1, 1)),
+ true = ?M:t_is_byte(?M:t_from_range(1, 255)),
+
+ Tuple1 = ?M:t_tuple(),
+ Tuple2 = ?M:t_tuple(3),
+ Tuple3 = ?M:t_tuple([Atom1, Int1]),
+ Tuple4 = ?M:t_tuple([Tuple1, Tuple2]),
+ Tuple5 = ?M:t_tuple([Tuple3, Tuple4]),
+ Tuple6 = ?M:t_limit(Tuple5, 2),
+ Tuple7 = ?M:t_limit(Tuple5, 3),
+ true = ?M:t_is_tuple(Tuple1),
+
+ Port = ?M:t_port(),
+ Pid = ?M:t_pid(),
+ Ref = ?M:t_reference(),
+ Identifier = ?M:t_identifier(),
+ false = ?M:t_is_reference(Port),
+ true = ?M:t_is_identifier(Port),
+
+ Function1 = ?M:t_fun(),
+ Function2 = ?M:t_fun(Pid),
+ Function3 = ?M:t_fun([], Pid),
+ Function4 = ?M:t_fun([Port, Pid], Pid),
+ Function5 = ?M:t_fun([Pid, Atom1], Int2),
+ true = ?M:t_is_fun(Function3),
+
+ List1 = ?M:t_list(),
+ List2 = ?M:t_list(?M:t_boolean()),
+ List3 = ?M:t_cons(?M:t_boolean(), List2),
+ List4 = ?M:t_cons(?M:t_boolean(), ?M:t_atom()),
+ List5 = ?M:t_cons(?M:t_boolean(), ?M:t_nil()),
+ List6 = ?M:t_cons_tl(List5),
+ List7 = ?M:t_sup(List4, List5),
+ List8 = ?M:t_inf(List7, ?M:t_list()),
+ List9 = ?M:t_cons(),
+ List10 = ?M:t_cons_tl(List9),
+ true = ?M:t_is_boolean(?M:t_cons_hd(List5)),
+ true = ?M:t_is_list(List5),
+ false = ?M:t_is_list(List4),
+
+ Product1 = ?M:t_product([Atom1, Atom2]),
+ Product2 = ?M:t_product([Atom3, Atom1]),
+ Product3 = ?M:t_product([Atom3, Atom2]),
+
+ Union1 = ?M:t_sup(Atom2, Atom3),
+ Union2 = ?M:t_sup(Tuple2, Tuple3),
+ Union3 = ?M:t_sup(Int2, Atom3),
+ Union4 = ?M:t_sup(Port, Pid),
+ Union5 = ?M:t_sup(Union4, Int1),
+ Union6 = ?M:t_sup(Function1, Function2),
+ Union7 = ?M:t_sup(Function4, Function5),
+ Union8 = ?M:t_sup(True, False),
+ true = ?M:t_is_boolean(Union8),
+ Union9 = ?M:t_sup(Int2, ?M:t_integer(2)),
+ true = ?M:t_is_byte(Union9),
+ Union10 = ?M:t_sup(?M:t_tuple([?M:t_atom(true), ?M:t_any()]),
+ ?M:t_tuple([?M:t_atom(false), ?M:t_any()])),
+
+ Any = ?M:t_any(),
+ Any = ?M:t_sup(Product3, Function5),
+
+ Atom3 = ?M:t_inf(Union3, Atom1),
+ Union2 = ?M:t_inf(Union2, Tuple1),
+ Int2 = ?M:t_inf(Int1, Union3),
+ Union4 = ?M:t_inf(Union4, Identifier),
+ Port = ?M:t_inf(Union5, Port),
+ Function4 = ?M:t_inf(Union7, Function4),
+ None = ?M:t_none(),
+ None = ?M:t_inf(Product2, Atom1),
+ Product3 = ?M:t_inf(Product1, Product2),
+ Function5 = ?M:t_inf(Union7, Function5),
+ true = ?M:t_is_byte(?M:t_inf(Union9, ?M:t_number())),
+ true = ?M:t_is_char(?M:t_inf(Union9, ?M:t_number())),
+
+ RecDict = #{{record, foo} => {{?FILE, ?LINE}, [{2, [{bar, [], ?M:t_any()},
+ {baz, [], ?M:t_any()}]}]}},
+ Record1 = ?M:t_from_term({foo, [1,2], {1,2,3}}),
+
+ %% Check string representations
+ "atom()" = ?M:t_to_string(Atom1),
+ "'foo'" = ?M:t_to_string(Atom2),
+ "'bar'" = ?M:t_to_string(Atom3),
+
+ "binary()" = ?M:t_to_string(Binary),
+
+ "integer()" = ?M:t_to_string(Int1),
+ "1" = ?M:t_to_string(Int2),
+
+ "tuple()" = ?M:t_to_string(Tuple1),
+ "{_,_,_}" = ?M:t_to_string(Tuple2),
+ "{atom(),integer()}" = ?M:t_to_string(Tuple3),
+ "{tuple(),{_,_,_}}" = ?M:t_to_string(Tuple4),
+ "{{atom(),integer()},{tuple(),{_,_,_}}}" = ?M:t_to_string(Tuple5),
+ "{{_,_},{_,_}}" = ?M:t_to_string(Tuple6),
+ "{{atom(),integer()},{tuple(),{_,_,_}}}" = ?M:t_to_string(Tuple7),
+
+ "reference()" = ?M:t_to_string(Ref),
+ "port()" = ?M:t_to_string(Port),
+ "pid()" = ?M:t_to_string(Pid),
+ "identifier()" = ?M:t_to_string(Identifier),
+
+ "[any()]" = ?M:t_to_string(List1),
+ "[boolean()]" = ?M:t_to_string(List2),
+ "[boolean(),...]" = ?M:t_to_string(List3),
+ "nonempty_improper_list(boolean(),atom())" = ?M:t_to_string(List4),
+ "[boolean(),...]" = ?M:t_to_string(List5),
+ "[boolean()]" = ?M:t_to_string(List6),
+ "nonempty_maybe_improper_list(boolean(),atom() | [])" = ?M:t_to_string(List7),
+ "[boolean(),...]" = ?M:t_to_string(List8),
+ "nonempty_maybe_improper_list()" = ?M:t_to_string(List9),
+ "any()" = ?M:t_to_string(List10),
+
+ "fun()" = ?M:t_to_string(Function1),
+ "fun((...) -> pid())" = ?M:t_to_string(Function2),
+ "fun(() -> pid())" = ?M:t_to_string(Function3),
+ "fun((port(),pid()) -> pid())" = ?M:t_to_string(Function4),
+ "fun((pid(),atom()) -> 1)" = ?M:t_to_string(Function5),
+
+ "<atom(),'foo'>" = ?M:t_to_string(Product1),
+ "<'bar',atom()>" = ?M:t_to_string(Product2),
+
+ "#foo{bar::[1 | 2,...],baz::{1,2,3}}" = ?M:t_to_string(Record1, RecDict),
+
+ "'bar' | 'foo'" = ?M:t_to_string(Union1),
+ "{atom(),integer()} | {_,_,_}" = ?M:t_to_string(Union2),
+ "'bar' | 1" = ?M:t_to_string(Union3),
+ "pid() | port()" = ?M:t_to_string(Union4),
+ "pid() | port() | integer()" = ?M:t_to_string(Union5),
+ "fun()" = ?M:t_to_string(Union6),
+ "fun((pid() | port(),atom() | pid()) -> pid() | 1)" = ?M:t_to_string(Union7),
+ "boolean()" = ?M:t_to_string(Union8),
+ "{'false',_} | {'true',_}" = ?M:t_to_string(Union10),
+ "{'true',integer()}" = ?M:t_to_string(?M:t_inf(Union10, ?M:t_tuple([?M:t_atom(true), ?M:t_integer()]))).
diff --git a/lib/inets/doc/src/notes.xml b/lib/inets/doc/src/notes.xml
index 10dd26322c..1e22809b3a 100644
--- a/lib/inets/doc/src/notes.xml
+++ b/lib/inets/doc/src/notes.xml
@@ -33,7 +33,26 @@
<file>notes.xml</file>
</header>
- <section><title>Inets 6.5.1</title>
+ <section><title>Inets 6.5.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ inets: httpd - Gracefully handle bad headers</p>
+ <p>
+ The option max_headers operated on the individual header
+ length instead of the total length of all headers. Also
+ headers with empty keys are now discarded.</p>
+ <p>
+ Own Id: OTP-15092</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Inets 6.5.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
<list>
diff --git a/lib/inets/src/http_lib/http_request.erl b/lib/inets/src/http_lib/http_request.erl
index f68b233e10..8ca1542164 100644
--- a/lib/inets/src/http_lib/http_request.erl
+++ b/lib/inets/src/http_lib/http_request.erl
@@ -27,10 +27,12 @@
key_value(KeyValueStr) ->
case lists:splitwith(fun($:) -> false; (_) -> true end, KeyValueStr) of
- {Key, [$: | Value]} ->
+ {Key, [$: | Value]} when Key =/= [] ->
{http_util:to_lower(string:strip(Key)), string:strip(Value)};
{_, []} ->
- undefined
+ undefined;
+ _ ->
+ undefined
end.
%%-------------------------------------------------------------------------
%% headers(HeaderList, #http_request_h{}) -> #http_request_h{}
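A sketch of how the guarded clause behaves after this change (assuming string:strip/1
trims the surrounding blanks, as in OTP): a header line whose key is empty now falls
through to the new catch-all clause and is reported as undefined, so the caller can skip it.

    1> http_request:key_value("host: example.com").
    {"host","example.com"}
    2> http_request:key_value(": value-without-a-key").
    undefined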
diff --git a/lib/inets/src/http_server/httpd_request.erl b/lib/inets/src/http_server/httpd_request.erl
index 007d272323..e513eb8a3a 100644
--- a/lib/inets/src/http_server/httpd_request.erl
+++ b/lib/inets/src/http_server/httpd_request.erl
@@ -259,17 +259,17 @@ parse_headers(<<?LF, Octet, Rest/binary>>, Header, Headers, Current, Max,
 %% If ?CR is missing, see RFC2616 section 19.3
parse_headers(<<?CR,?LF, Octet, Rest/binary>>, Header, Headers, Current, Max,
Options, Result);
-parse_headers(<<?CR,?LF, Octet, Rest/binary>>, Header, Headers, _, Max,
+parse_headers(<<?CR,?LF, Octet, Rest/binary>>, Header, Headers, Current, Max,
Options, Result) ->
case http_request:key_value(lists:reverse(Header)) of
undefined -> %% Skip headers with missing :
parse_headers(Rest, [Octet], Headers,
- 0, Max, Options, Result);
+ Current, Max, Options, Result);
NewHeader ->
case check_header(NewHeader, Options) of
ok ->
parse_headers(Rest, [Octet], [NewHeader | Headers],
- 0, Max, Options, Result);
+ Current, Max, Options, Result);
{error, Reason} ->
HttpVersion = lists:nth(3, lists:reverse(Result)),
{error, Reason, HttpVersion}
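With Current threaded through the recursion instead of being reset to 0 for every header,
the max_headers option now bounds the accumulated size of all request headers. A
configuration sketch (port, paths and limit are placeholder values) for an httpd instance
with an explicit limit:

    inets:start(),
    {ok, _Pid} = inets:start(httpd, [{port,          8080},
                                     {server_name,   "localhost"},
                                     {server_root,   "/tmp"},
                                     {document_root, "/tmp"},
                                     {max_headers,   10240}]).   %% total header bytes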
diff --git a/lib/inets/src/http_server/mod_esi.erl b/lib/inets/src/http_server/mod_esi.erl
index 3206d957d9..b49b3a7093 100644
--- a/lib/inets/src/http_server/mod_esi.erl
+++ b/lib/inets/src/http_server/mod_esi.erl
@@ -561,7 +561,7 @@ eval(#mod{method = Method} = ModData, ESIBody, Modules)
end.
generate_webpage(ESIBody) ->
- (catch lib:eval_str(string:concat(ESIBody,". "))).
+ (catch eval_str(string:concat(ESIBody,". "))).
is_authorized(_ESIBody, [all]) ->
true;
@@ -573,3 +573,45 @@ is_authorized(ESIBody, Modules) ->
nomatch ->
false
end.
+
+%% eval_str(InStr) -> {ok, OutStr} | {error, ErrStr}
+%% InStr must represent a body
+%% Note: If InStr is a binary it has to be a Latin-1 string.
+%% If you have a UTF-8 encoded binary you have to call
+%% unicode:characters_to_list/1 before the call to eval_str().
+
+-define(result(F,D), lists:flatten(io_lib:format(F, D))).
+
+-spec eval_str(string()) ->
+ {'ok', string()} | {'error', string()}.
+
+eval_str(Str) when is_list(Str) ->
+ case erl_scan:tokens([], Str, 0) of
+ {more, _} ->
+ {error, "Incomplete form (missing .<cr>)??"};
+ {done, {ok, Toks, _}, Rest} ->
+ case all_white(Rest) of
+ true ->
+ case erl_parse:parse_exprs(Toks) of
+ {ok, Exprs} ->
+ case catch erl_eval:exprs(Exprs, erl_eval:new_bindings()) of
+ {value, Val, _} ->
+ {ok, Val};
+ Other ->
+ {error, ?result("*** eval: ~p", [Other])}
+ end;
+ {error, {_Line, Mod, Args}} ->
+ Msg = ?result("*** ~ts",[Mod:format_error(Args)]),
+ {error, Msg}
+ end;
+ false ->
+ {error, ?result("Non-white space found after "
+ "end-of-form :~ts", [Rest])}
+ end
+ end.
+
+all_white([$\s|T]) -> all_white(T);
+all_white([$\n|T]) -> all_white(T);
+all_white([$\t|T]) -> all_white(T);
+all_white([]) -> true;
+all_white(_) -> false.
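A sketch of what the inlined helper evaluates to; eval_str/1 is internal to mod_esi, so
the calls below assume it were exported for illustration only:

    eval_str("1 + 2. ")    %% -> {ok, 3}
    eval_str("1 + 2")      %% -> {error, "Incomplete form (missing .<cr>)??"}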
diff --git a/lib/inets/vsn.mk b/lib/inets/vsn.mk
index 3a489357ff..9bbcd06914 100644
--- a/lib/inets/vsn.mk
+++ b/lib/inets/vsn.mk
@@ -19,6 +19,6 @@
# %CopyrightEnd%
APPLICATION = inets
-INETS_VSN = 6.5.1
+INETS_VSN = 6.5.2
PRE_VSN =
APP_VSN = "$(APPLICATION)-$(INETS_VSN)$(PRE_VSN)"
diff --git a/lib/kernel/doc/src/Makefile b/lib/kernel/doc/src/Makefile
index 82869d7b15..29dc73a523 100644
--- a/lib/kernel/doc/src/Makefile
+++ b/lib/kernel/doc/src/Makefile
@@ -42,6 +42,7 @@ XML_REF3_FILES = application.xml \
disk_log.xml \
erl_boot_server.xml \
erl_ddll.xml \
+ erl_epmd.xml \
erl_prim_loader_stub.xml \
erlang_stub.xml \
error_handler.xml \
diff --git a/lib/kernel/doc/src/config.xml b/lib/kernel/doc/src/config.xml
index fdb2d29f63..8850c1736b 100644
--- a/lib/kernel/doc/src/config.xml
+++ b/lib/kernel/doc/src/config.xml
@@ -37,10 +37,10 @@
data in the system configuration file <c>Name.config</c>.</p>
<p>Configuration parameter values in the configuration file
override the values in the application resource files (see
- <seealso marker="app"><c>app(4)</c></seealso>.
+ <seealso marker="app"><c>app(4)</c></seealso>).
The values in the configuration file can be
overridden by command-line flags (see
- <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>.</p>
+ <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>).</p>
<p>The value of a configuration parameter is retrieved by calling
<c>application:get_env/1,2</c>.</p>
</description>
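A sketch of the precedence described above, using a hypothetical application and
parameter: the value from the configuration file overrides the .app file, and the
command-line flag overrides both.

    %% sys.config
    [{my_app, [{log_level, info}]}].

    %% started as: erl -config sys -my_app log_level error
    1> application:get_env(my_app, log_level).
    {ok,error}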
diff --git a/lib/kernel/doc/src/erl_epmd.xml b/lib/kernel/doc/src/erl_epmd.xml
new file mode 100644
index 0000000000..8b076cd2d7
--- /dev/null
+++ b/lib/kernel/doc/src/erl_epmd.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>erl_epmd</title>
+ <prepared>Timmo Verlaan</prepared>
+ <docno>1</docno>
+ <date>2018-02-19</date>
+ <rev>A</rev>
+ </header>
+ <module>erl_epmd</module>
+ <modulesummary>
+ Erlang interface towards epmd
+ </modulesummary>
+ <description>
+ <p>This module communicates with the EPMD daemon, see <seealso
+ marker="erts:epmd">epmd</seealso>. To implement your own epmd module, please
+ see <seealso marker="erts:alt_disco">ERTS User's Guide: How to Implement an
+ Alternative Service Discovery for Erlang Distribution</seealso>.</p>
+ </description>
+
+ <funcs>
+ <func>
+ <name name="start_link" arity="0"/>
+ <fsummary>Callback for erl_distribution supervisor.</fsummary>
+ <desc>
+ <p>This function is invoked as this module is added as a child of the
+ <c>erl_distribution</c> supervisor.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="register_node" arity="2"/>
+ <name name="register_node" arity="3"/>
+ <fsummary>Registers the node with <c>epmd</c>.</fsummary>
+ <desc>
+ <p>Registers the node with <c>epmd</c> and tells epmd what port will be
+ used for the current node. It returns a creation number. This number is
+ incremented on each register to help with identifying if a node is
+ reconnecting to epmd.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="port_please" arity="2"/>
+ <name name="port_please" arity="3"/>
+ <fsummary>Returns the port number for a given node.</fsummary>
+ <desc>
+ <p>Requests the distribution port for the given node of an EPMD
+ instance. Together with the port it returns a distribution protocol
+ version which has been 5 since Erlang/OTP R6.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="address_please" arity="3"/>
+ <fsummary>Returns address and port.</fsummary>
+ <desc>
+ <p>Called by the distribution module. Resolves the <c>Host</c> to an IP
+ address.</p>
+ <p>An alternative epmd module may also return the port and the
+ distribution protocol version.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="names" arity="1"/>
+ <fsummary>Names of Erlang nodes at a host.</fsummary>
+ <desc>
+ <p>Called by <seealso marker="net_adm"><c>net_adm:names/0</c></seealso>.
+ <c>Host</c> defaults to the localhost. Returns the names and associated
+ port numbers of the Erlang nodes that <c>epmd</c> registered at the
+ specified host. Returns <c>{error, address}</c> if <c>epmd</c> is not
+ operational.</p>
+ <p><em>Example:</em></p>
+ <pre>
+(arne@dunn)1> <input>erl_epmd:names(localhost).</input>
+{ok,[{"arne",40262}]}</pre>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
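Continuing the shell session in the names/1 example above (assuming the node arne is
still registered with epmd), the registered distribution port and protocol version can be
queried with:

    (arne@dunn)2> erl_epmd:port_please("arne", "localhost").
    {port,40262,5}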
diff --git a/lib/kernel/doc/src/error_logger.xml b/lib/kernel/doc/src/error_logger.xml
index cb6165c73e..f418aa5bbe 100644
--- a/lib/kernel/doc/src/error_logger.xml
+++ b/lib/kernel/doc/src/error_logger.xml
@@ -33,44 +33,35 @@
<description>
<note>
- <p>In OTP-21, a new API for logging was added to Erlang/OTP. The
+ <p>In Erlang/OTP 21.0, a new API for logging was added. The
old <c>error_logger</c> module can still be used by legacy
- code, but new code should use the new API instead.</p>
+ code, but log events are redirected to the new Logger API. New
+ code should use the Logger API directly.</p>
+ <p><c>error_logger</c> is no longer started by default, but is
+ automatically started when an event handler is added
+ with <c>error_logger:add_report_handler/1,2</c>. The <c>error_logger</c>
+ module is then also added as a handler to the new logger.</p>
<p>See <seealso marker="logger"><c>logger(3)</c></seealso> and
the <seealso marker="logger_chapter">Logging</seealso> chapter
- in the user's guide for more information.</p>
+ in the User's Guide for more information.</p>
</note>
<p>The Erlang <em>error logger</em> is an event manager (see
<seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso> and
<seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>),
- registered as <c>error_logger</c>. Errors, warnings, and info events
- are sent to the error logger from the Erlang runtime system and
- the different Erlang/OTP applications. The events are, by default,
- logged to the terminal. Notice that an event from a process <c>P</c> is
- logged at the node of the group leader of <c>P</c>. This means
- that log output is directed to the node from which a process was
- created, which not necessarily is the same node as where it is
- executing.</p>
- <p>Initially, <c>error_logger</c> has only a primitive event
- handler, which buffers and prints the raw event messages. During
- system startup, the Kernel application replaces this with a
- <em>standard event handler</em>, by default one that writes
- nicely formatted output to the terminal. Kernel can also be
- configured so that events are logged to a file instead, or not logged at all,
- see <seealso marker="kernel_app"><c>kernel(6)</c></seealso>.</p>
- <p>Also the SASL application, if started, adds its own event
- handler, which by default writes supervisor, crash, and progress
- reports to the terminal. See
- <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso>.</p>
- <p>It is recommended that user-defined applications report
- errors through the error logger to get uniform reports.
- User-defined event handlers can be added to handle application-specific
- events, see
- <seealso marker="#add_report_handler/1"><c>add_report_handler/1,2</c></seealso>.
- Also, a useful event handler is provided in STDLIB for multi-file
- logging of events, see
- <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>.</p>
+ registered as <c>error_logger</c>.</p>
+ <p>Error logger is no longer started by default, but is
+ automatically started when an event handler is added
+ with <seealso marker="#add_report_handler/1">
+ <c>add_report_handler/1,2</c></seealso>. The <c>error_logger</c>
+ module is then also added as a handler to the new logger,
+ causing log events to be forwarded from logger to error logger,
+ and consequently to all installed error logger event
+ handlers.</p>
+ <p>User-defined event handlers can be added to handle application-specific
+ events.</p>
+ <p>Existing event handlers provided by STDLIB and SASL are still
+ available, but are no longer used by OTP.</p>
<p>Warning events were introduced in Erlang/OTP R9C and are enabled
by default as from Erlang/OTP 18.0. To retain backwards compatibility
with existing user-defined event handlers, the warning events can be
@@ -99,6 +90,9 @@
The function returns <c>ok</c> if successful.</p>
<p>The event handler must be able to handle the events in this module, see
section <seealso marker="#events">Events</seealso>.</p>
+ <p>The first time this function is called,
+ <c>error_logger</c> is added as a Logger handler, and
+ the <c>error_logger</c> process is started.</p>
</desc>
</func>
<func>
@@ -108,37 +102,40 @@
<p>Deletes an event handler from the error logger by calling
<c>gen_event:delete_handler(error_logger, <anno>Handler</anno>, [])</c>,
see <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>.</p>
+ <p>If no more event handlers exist after the deletion,
+ <c>error_logger</c> is removed as a Logger handler, and
+ the <c>error_logger</c> process is stopped.</p>
</desc>
</func>
<func>
<name name="error_msg" arity="1"/>
<name name="error_msg" arity="2"/>
<name name="format" arity="2"/>
- <fsummary>Send a standard error event to the error logger.</fsummary>
+ <fsummary>Log a standard error event.</fsummary>
<desc>
- <p>Sends a standard error event to the error logger.
- The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments
- are the same as the arguments of
+ <p>Log a standard error event. The <c><anno>Format</anno></c>
+ and <c><anno>Data</anno></c> arguments are the same as the
+ arguments of
<seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso>
- in STDLIB.
- The event is handled by the standard event handler.</p>
+ in STDLIB.</p>
+ <p>Error logger forwards the event to Logger, including
+ metadata that allows backwards compatibility with legacy
+ error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+ <p>These functions are kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_ERROR</c></seealso> macro or
+ <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso>
+ instead.</p>
<p><em>Example:</em></p>
<pre>
-1> <input>error_logger:error_msg("An error occurred in ~p~n", [a_module]).</input>
-
-=ERROR REPORT==== 11-Aug-2005::14:03:19 ===
+1> <input>error_logger:error_msg("An error occurred in ~p", [a_module]).</input>
+=ERROR REPORT==== 22-May-2018::11:18:43.376917 ===
An error occurred in a_module
ok</pre>
<warning>
- <p>If called with bad arguments, this function can crash
- the standard event handler, meaning no further events are
- logged. When in doubt, use
- <seealso marker="#error_report/1"><c>error_report/1</c></seealso>
- instead.</p>
- </warning>
- <warning>
<p>If the Unicode translation modifier (<c>t</c>) is used in
- the format string, all error handlers must ensure that the
+ the format string, all event handlers must ensure that the
formatted output is correctly encoded for the I/O
device.</p>
</warning>
@@ -146,79 +143,101 @@ ok</pre>
</func>
<func>
<name name="error_report" arity="1"/>
- <fsummary>Send a standard error report event to the error logger.</fsummary>
+ <fsummary>Log a standard error event.</fsummary>
<desc>
- <p>Sends a standard error report event to the error logger.
- The event is handled by the standard event handler.</p>
+ <p>Log a standard error event. Error logger forwards the event
+ to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+ <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_ERROR</c></seealso> macro or
+ <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso>
+ instead.</p>
<p><em>Example:</em></p>
<pre>
2> <input>error_logger:error_report([{tag1,data1},a_term,{tag2,data}]).</input>
-
-=ERROR REPORT==== 11-Aug-2005::13:45:41 ===
+=ERROR REPORT==== 22-May-2018::11:24:23.699306 ===
tag1: data1
a_term
tag2: data
ok
3> <input>error_logger:error_report("Serious error in my module").</input>
-
-=ERROR REPORT==== 11-Aug-2005::13:45:49 ===
+=ERROR REPORT==== 22-May-2018::11:24:45.972445 ===
Serious error in my module
ok</pre>
</desc>
</func>
<func>
<name name="error_report" arity="2"/>
- <fsummary>Send a user-defined error report event to the error logger.</fsummary>
+ <fsummary>Log a user-defined error event.</fsummary>
<desc>
- <p>Sends a user-defined error report event to the error logger.
- An event handler to handle the event is supposed to have been
- added. The event is ignored by the standard event handler.</p>
+ <p>Log a user-defined error event. Error logger forwards the
+ event to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>Error logger also adds a <c>domain</c> field with
+ value <c>[<anno>Type</anno>]</c> to this event's metadata,
+ causing the filters of the default Logger handler to discard
+ the event. A different Logger handler, or an error logger
+ event handler, must be added to handle this event.</p>
<p>It is recommended that <c><anno>Report</anno></c> follows the same
structure as for
<seealso marker="#error_report/1"><c>error_report/1</c></seealso>.</p>
+ <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_ERROR</c></seealso> macro or
+ <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
<name name="get_format_depth" arity="0"/>
<fsummary>Get the value of the Kernel application variable
- <c>logger_format_depth</c>.</fsummary>
+ <c>error_logger_format_depth</c>.</fsummary>
<desc>
<p>Returns <c>max(10, Depth)</c>, where <c>Depth</c> is the
- value of
- <seealso marker="kernel_app#logger_format_depth">
- logger_format_depth</seealso>
+ value of <c>error_logger_format_depth</c>
in the Kernel application, if Depth is an integer. Otherwise,
<c>unlimited</c> is returned.</p>
- <p>For backwards compatibility, the value
- of <c>error_logger_format_depth</c> is used
- if <c>logger_format_depth</c> is not set.</p>
+ <note>
+ <p>The <c>error_logger_format_depth</c> variable
+ is <seealso marker="kernel_app#deprecated-configuration-parameters">
+ deprecated</seealso> since
+ the <seealso marker="logger">Logger API</seealso> was
+ introduced in Erlang/OTP 21.0. The variable, and this
+ function, are kept for backwards compatibility since they
+ still might be used by legacy report handlers.</p>
+ </note>
</desc>
</func>
<func>
<name name="info_msg" arity="1"/>
<name name="info_msg" arity="2"/>
- <fsummary>Send a standard information event to the error logger.</fsummary>
+ <fsummary>Log a standard information event.</fsummary>
<desc>
- <p>Sends a standard information event to the error logger.
- The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments
- are the same as the arguments of
+ <p>Log a standard information event. The <c><anno>Format</anno></c>
+ and <c><anno>Data</anno></c> arguments are the same as the
+ arguments of
<seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso>
- in STDLIB. The event is handled by the standard event handler.</p>
+ in STDLIB.</p>
+ <p>Error logger forwards the event to Logger, including
+ metadata that allows backwards compatibility with legacy
+ error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+ <p>These functions are kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_INFO</c></seealso> macro or
+ <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso>
+ instead.</p>
<p><em>Example:</em></p>
<pre>
-1> <input>error_logger:info_msg("Something happened in ~p~n", [a_module]).</input>
-
-=INFO REPORT==== 11-Aug-2005::14:06:15 ===
+1> <input>error_logger:info_msg("Something happened in ~p", [a_module]).</input>
+=INFO REPORT==== 22-May-2018::12:03:32.612462 ===
Something happened in a_module
ok</pre>
<warning>
- <p>If called with bad arguments, this function can crash
- the standard event handler, meaning no further events are
- logged. When in doubt, use <c>info_report/1</c> instead.</p>
- </warning>
- <warning>
<p>If the Unicode translation modifier (<c>t</c>) is used in
- the format string, all error handlers must ensure that the
+ the format string, all event handlers must ensure that the
formatted output is correctly encoded for the I/O
device.</p>
</warning>
@@ -226,37 +245,52 @@ ok</pre>
</func>
<func>
<name name="info_report" arity="1"/>
- <fsummary>Send a standard information report event to the error logger.</fsummary>
+ <fsummary>Log a standard information event.</fsummary>
<desc>
- <p>Sends a standard information report event to the error
- logger. The event is handled by the standard event handler.</p>
+ <p>Log a standard information event. Error logger forwards the
+ event to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+ <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_INFO</c></seealso> macro or
+ <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso>
+ instead.</p>
<p><em>Example:</em></p>
<pre>
2> <input>error_logger:info_report([{tag1,data1},a_term,{tag2,data}]).</input>
-
-=INFO REPORT==== 11-Aug-2005::13:55:09 ===
+=INFO REPORT==== 22-May-2018::12:06:35.994440 ===
tag1: data1
a_term
tag2: data
ok
3> <input>error_logger:info_report("Something strange happened").</input>
-
-=INFO REPORT==== 11-Aug-2005::13:55:36 ===
+=INFO REPORT==== 22-May-2018::12:06:49.066872 ===
Something strange happened
ok</pre>
</desc>
</func>
<func>
<name name="info_report" arity="2"/>
- <fsummary>Send a user-defined information report event to the error logger.</fsummary>
+ <fsummary>Log a user-defined information event.</fsummary>
<desc>
- <p>Sends a user-defined information report event to the error
- logger. An event handler to handle the event is supposed to
- have been added. The event is ignored by the standard event
- handler.</p>
+ <p>Log a user-defined information event. Error logger forwards
+ the event to Logger, including metadata that allows
+ backwards compatibility with legacy error logger event
+ handlers.</p>
+ <p>Error logger also adds a <c>domain</c> field with
+ value <c>[<anno>Type</anno>]</c> to this event's metadata,
+ causing the filters of the default Logger handler to discard
+ the event. A different Logger handler, or an error logger
+ event handler, must be added to handle this event.</p>
<p>It is recommended that <c><anno>Report</anno></c> follows the same
structure as for
<seealso marker="#info_report/1"><c>info_report/1</c></seealso>.</p>
+ <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_INFO</c></seealso> macro or
+ <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
@@ -355,24 +389,27 @@ ok</pre>
<func>
<name name="warning_msg" arity="1"/>
<name name="warning_msg" arity="2"/>
- <fsummary>Send a standard warning event to the error logger.</fsummary>
+ <fsummary>Log a standard warning event.</fsummary>
<desc>
- <p>Sends a standard warning event to the error logger.
- The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments
- are the same as the arguments of
+ <p>Log a standard warning event. The <c><anno>Format</anno></c>
+ and <c><anno>Data</anno></c> arguments are the same as the
+ arguments of
<seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso>
- in STDLIB.
- The event is handled by the standard event handler. It is tagged
- as an error, warning, or info, see
+ in STDLIB.</p>
+ <p>Error logger forwards the event to Logger, including
+ metadata that allows backwards compatibility with legacy
+ error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler. The log
+ level can be changed to error or info, see
<seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p>
- <warning>
- <p>If called with bad arguments, this function can crash
- the standard event handler, meaning no further events are
- logged. When in doubt, use <c>warning_report/1</c> instead.</p>
- </warning>
+ <p>These functions are kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_WARNING</c></seealso> macro or
+ <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso>
+ instead.</p>
<warning>
<p>If the Unicode translation modifier (<c>t</c>) is used in
- the format string, all error handlers must ensure that the
+ the format string, all event handlers must ensure that the
formatted output is correctly encoded for the I/O
device.</p>
</warning>
@@ -380,24 +417,43 @@ ok</pre>
</func>
<func>
<name name="warning_report" arity="1"/>
- <fsummary>Send a standard warning report event to the error logger.</fsummary>
+ <fsummary>Log a standard warning event.</fsummary>
<desc>
- <p>Sends a standard warning report event to the error logger.
- The event is handled by the standard event handler. It is
- tagged as an error, warning, or info, see
+ <p>Log a standard warning event. Error logger forwards the event
+ to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler. The log
+ level can be changed to error or info, see
<seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p>
+ <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_WARNING</c></seealso> macro or
+ <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
<name name="warning_report" arity="2"/>
- <fsummary>Send a user-defined warning report event to the error logger.</fsummary>
+ <fsummary>Log a user-defined warning event.</fsummary>
<desc>
- <p>Sends a user-defined warning report event to the error
- logger. An event handler to handle the event is supposed to
- have been added. The event is ignored by the standard event
- handler. It is tagged as an error, warning, or info,
- depending on the value of
+ <p>Log a user-defined warning event. Error logger forwards the
+ event to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>Error logger also adds a <c>domain</c> field with
+ value <c>[<anno>Type</anno>]</c> to this event's metadata,
+ causing the filters of the default Logger handler to discard
+ the event. A different Logger handler, or an error logger
+ event handler, must be added to handle this event.</p>
+ <p>The log level can be changed to error or info, see
<seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p>
+ <p>It is recommended that <c><anno>Report</anno></c> follows the same
+ structure as for
+ <seealso marker="#warning_report/1"><c>warning_report/1</c></seealso>.</p>
+ <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_WARNING</c></seealso> macro or
+ <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso>
+ instead.</p>
</desc>
</func>
</funcs>
@@ -461,8 +517,9 @@ ok</pre>
<section>
<title>See Also</title>
<p><seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>,
- <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>
- <seealso marker="kernel_app"><c>kernel(6)</c></seealso>
+ <seealso marker="kernel:logger"><c>logger(3)</c></seealso>,
+ <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>,
+ <seealso marker="kernel_app"><c>kernel(6)</c></seealso>,
<seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p>
</section>
</erlref>
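A sketch of the replacement recommended throughout this page; handle_failure is a
hypothetical caller, and the include path is the standard one for the Logger macros:

    -include_lib("kernel/include/logger.hrl").

    handle_failure(Reason) ->
        %% preferred over error_logger:error_msg/2 in new code
        ?LOG_ERROR("An error occurred in ~p: ~p", [?MODULE, Reason]),
        logger:error("An error occurred in ~p: ~p", [?MODULE, Reason]).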
diff --git a/lib/kernel/doc/src/inet.xml b/lib/kernel/doc/src/inet.xml
index 9552332948..e6a7962c5a 100644
--- a/lib/kernel/doc/src/inet.xml
+++ b/lib/kernel/doc/src/inet.xml
@@ -1149,7 +1149,7 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp</code>
<seealso marker="gen_tcp#recv/2"><c>gen_tcp:recv/2</c></seealso>
gets <c>{error, closed}</c>. In active
mode, the controlling process receives a
- <c>{tcp_close, Socket}</c> message, indicating that the
+ <c>{tcp_closed, Socket}</c> message, indicating that the
peer has closed the connection.</p>
<p>Setting this option to <c>true</c> allows you to
distinguish between a connection that was closed normally,
diff --git a/lib/kernel/doc/src/introduction_chapter.xml b/lib/kernel/doc/src/introduction_chapter.xml
index 6e6990ddda..2eadc70abf 100644
--- a/lib/kernel/doc/src/introduction_chapter.xml
+++ b/lib/kernel/doc/src/introduction_chapter.xml
@@ -46,7 +46,6 @@
<item>Start, stop, supervision, configuration, and distribution of applications</item>
<item>Code loading</item>
<item>Logging</item>
- <item>Error logging</item>
<item>Global name service</item>
<item>Supervision of Erlang/OTP</item>
<item>Communication with sockets</item>
diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml
index 554d675383..e2a6d30249 100644
--- a/lib/kernel/doc/src/kernel_app.xml
+++ b/lib/kernel/doc/src/kernel_app.xml
@@ -42,7 +42,6 @@
<item>Start, stop, supervision, configuration, and distribution of applications</item>
<item>Code loading</item>
<item>Logging</item>
- <item>Error logging</item>
<item>Global name service</item>
<item>Supervision of Erlang/OTP</item>
<item>Communication with sockets</item>
@@ -122,21 +121,6 @@
application. For more information about configuration parameters,
see file <seealso marker="app"><c>app(4)</c></seealso>.</p>
<taglist>
- <tag><c>browser_cmd = string() | {M,F,A}</c></tag>
- <item>
- <p>When pressing the <em>Help</em> button in a tool such as Debugger,
- the help text (an HTML file <c>File</c>) is by default
- displayed in a Netscape browser, which is required to be
- operational. This parameter can be used to change the command for
- how to display the help text if another browser than Netscape
- is preferred, or if another platform than Unix or Windows is
- used.</p>
- <p>If set to a string <c>Command</c>, the command
- <c>"Command File"</c> is evaluated using
- <seealso marker="os#cmd/1"><c>os:cmd/1</c></seealso>.</p>
- <p>If set to a module-function-args tuple, <c>{M,F,A}</c>,
- the call <c>apply(M,F,[File|A])</c> is evaluated.</p>
- </item>
<tag><c>distributed = [Distrib]</c></tag>
<item>
<p>Specifies which applications that are distributed and on which
@@ -180,148 +164,59 @@
<p>Permissions are described in
<seealso marker="application#permit/2"><c>application:permit/2</c></seealso>.</p>
</item>
- <tag><c>logger_dest = Value</c></tag>
+ <tag><marker id="logger"/><c>logger = [Config]</c></tag>
<item>
- <p><c>Value</c> is one of:</p>
- <taglist>
- <tag><c>tty</c></tag>
- <item><p>Installs the standard handler, <seealso marker="logger_std_h">
- <c>logger_std_h(3)</c></seealso>, with <c>type</c> set
- to <c>standard_io</c>. This is the default
- option.</p></item>
- <tag><c>{file, FileName}</c></tag>
- <item><p>Installs the standard handler, <seealso marker="logger_std_h">
- <c>logger_std_h(3)</c></seealso>, with <c>type</c> set
- to <c>{file, FileName}</c>, where <c>FileName</c>
- is a string. The file is opened with encoding UTF-8.</p></item>
- <tag><c>{disk_log, FileName}</c></tag>
- <item><p>Installs the disk_log handler, <seealso marker="logger_disk_log_h">
- <c>logger_disk_log_h(3)</c></seealso>, with <c>file</c> set
- to <c>FileName</c> (a string), and possibly other disk_log
- parameters set by the environment variables
- <c>logger_disk_log_type</c>, <c>logger_disk_log_maxfiles</c> and
- <c>logger_disk_log_maxbytes</c>,
- see <seealso marker="#disk_log_vars">below</seealso>. The
- file is opened with encoding UTF-8.</p></item>
- <tag><c>false</c></tag>
- <item>
- <p>No standard handler is installed, but
- the initial, primitive handler is kept, printing
- raw event messages to <c>tty</c>.</p>
- </item>
- <tag><c>silent</c></tag>
- <item>
- <p>No standard handler is started, and the initial,
- primitive handler is removed.</p>
- </item>
- </taglist>
+ <p>Specifies how <seealso marker="logger"><c>logger</c></seealso> should be
+ configured.</p>
+ <p>For more details and examples, see the <seealso marker="logger_chapter#logger">
+ Configuration</seealso> section in the <seealso marker="logger_chapter">
+ Logger User's Guide</seealso>.
+ </p>
</item>
- <tag><c>logger_level = Level</c></tag>
+ <tag><marker id="logger_level"/><c>logger_level = Level</c></tag>
<item>
- <p><c>Value = emergency | alert | critical | error | warning |
+ <p><c>Level = emergency | alert | critical | error | warning |
notice | info | debug</c></p>
<p>This parameter specifies which log levels to log. The
specified level, and all levels that are more severe, will
be logged.</p>
- <p>This configuration parameter is used both for the global
- logger level, and for the standard handler started by
- the Kernel application (see <c>logger_dest</c> variable above).</p>
- <p>The default value is <c>info</c></p>
- </item>
- <tag><marker id="disk_log_vars"/>
- <c>logger_disk_log_type = halt | wrap</c></tag>
- <item/>
- <tag><c>logger_disk_log_maxfiles = integer()</c></tag>
- <item/>
- <tag><c>logger_disk_log_maxbytes = integer()</c></tag>
- <item>
- <p>If <c>logger_dest</c> is set to {disk_log,File}, then these
- parameters specify the configuration to use when opening the
- disk log file. They specify the type of disk log, the
- maximum number of files (if the type is wrap) and the
- maximum size of each file, respectively.</p>
- <p>The default values are:</p>
- <code>
-logger_disk_log_type = wrap
-logger_disk_log_maxfiles = 10
-logger_disk_log_maxbytes = 1048576</code>
+ <p>The default value is <c>info</c>.</p>
+ <p>To change the global log level at run-time, use
+ <seealso marker="logger#set_logger_config/2">
+ <c>logger:set_logger_config(level, error)</c></seealso>.</p>
</item>
<tag><marker id="logger_sasl_compatible"/>
<c>logger_sasl_compatible = boolean()</c></tag>
<item>
- <p>If this parameter is set to true, then the logger handler
- started by kernel will not log any progress-, crash-, or
- supervisor reports. If the SASL application is starated,
+ <p>If this parameter is set to true, then the <c>default</c> logger handler
+ will not log any progress-, crash-, or supervisor reports.
+ If the SASL application is started,
these log events will be sent to a second handler instance
- named sasl_h, according to values of the SASL environment
- variables <c>sasl_error_logger</c>
+ named <c>sasl</c>, according to values of the SASL
+ configuration parameter <c>sasl_error_logger</c>
and <c>sasl_errlog_type</c>, see
- <seealso marker="sasl:sasl_app#configuration">SASL(6)
+ <seealso marker="sasl:sasl_app#deprecated_error_logger_config">sasl(6)
</seealso></p>
- <p>The default value is <c>false</c></p>
+ <p>The default value is <c>false</c>.</p>
<p>See chapter <seealso marker="logger_chapter#compatibility">Backwards
compatibility with error_logger</seealso> for more
information about handling of the so called SASL reports.</p>
+ <note><p>This configuration option only affects the <c>default</c>
+ and <c>sasl</c> handlers. Any other handlers are unaffected.</p></note>
</item>
- <tag><marker id="logger_log_progress"/>
- <c>logger_log_progress = boolean()</c></tag>
+ <tag><marker id="logger_progress_reports"/>
+ <c>logger_progress_reports = stop | log</c></tag>
<item>
<p>If <c>logger_sasl_compatible = false</c>,
- then <c>logger_log_progress</c> specifies if progress
+ then <c>logger_progress_reports</c> specifies if progress
reports from <c>supervisor</c>
- and <c>application_controller</c> shall be logged or
- not.</p>
- <p>If <c>logger_sasl_compatible = false</c>,
- then <c>logger_log_progress</c> is ignored.</p>
- </item>
- <tag><marker id="logger_format_depth"/>
- <c>logger_format_depth = Depth</c></tag>
- <item>
- <p>Can be used to limit the size of the
- formatted output from the logger handlers.</p>
-
- <note><p>This configuration parameter was introduced in OTP 18.1
- and is experimental. Based on user feedback, it
- can be changed or improved in future releases, for example,
- to gain better control over how to limit the size of the
- formatted output. We have no plans to remove this
- new feature entirely, unless it turns out to be
- useless.</p></note>
-
- <p><c>Depth</c> is a positive integer representing the maximum
- depth to which terms are printed by the logger
- handlers included in OTP. This
- configuration parameter is used by the default formatter,
- <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>,
- unless the formatter's <c>depth</c> parameter is explicitly set.
- (If you have implemented your own formatter, this configuration
- parameter has no effect on that.)</p>
-
- <p><c>Depth</c> is used as follows: Format strings
- received by the formatter are rewritten.
- The format controls <c>~p</c> and <c>~w</c> are replaced with
- <c>~P</c> and <c>~W</c>, respectively, and <c>Depth</c> is
- used as the depth parameter. For details, see
- <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso>
- in STDLIB.</p>
-
- <note><p>A reasonable starting value for <c>Depth</c> is
- <c>30</c>. We recommend to test crashing various processes in your
- application, examine the logs from the crashes, and then
- increase or decrease the value.</p></note>
- </item>
- <tag><c>logger_max_size = integer() | unlimited</c></tag>
- <item>
- <p>This parameter specifies the maximum size (bytes) each
- log event can have when printed by the standard logger
- handler. If the resulting string after formatting an event
- is bigger than this, it will be truncated before printed
- to the handler's destination.</p>
- </item>
- <tag><c>logger_utc = boolean()</c></tag>
- <item>
- <p>If set to <c>true</c>, the default formatter will display
- all dates in Universal Coordinated Time.</p>
+ and <c>application_controller</c> shall be logged by the
+ default logger.</p>
+ <p>If <c>logger_sasl_compatible = true</c>,
+ then <c>logger_progress_reports</c> is ignored.</p>
+ <p>The default value is <c>stop</c>.</p>
+ <note><p>This configuration option only affects the <c>default</c>
+ and <c>sasl</c> handlers. Any other handlers are unaffected.</p></note>
</item>
<tag><c>global_groups = [GroupTuple]</c></tag>
<item>
@@ -375,7 +270,7 @@ logger_disk_log_maxbytes = 1048576</code>
<tag><c>inet_parse_error_log = silent</c></tag>
<item>
<p>If set, no
- <c>error_logger</c> messages are generated when erroneous
+ <c>logger</c> messages are generated when erroneous
lines are found and skipped in the various Inet configuration
files.</p>
</item>
@@ -586,18 +481,29 @@ MaxT = TickTime + TickTime / 4</code>
<section>
<title>Deprecated Configuration Parameters</title>
- <p>In OTP-21, a new API for logging was added to Erlang/OTP. The
+ <p>In Erlang/OTP 21.0, a new API for logging was added. The
old <c>error_logger</c> event manager, and event handlers
- running on this manager, will still work, but they are not used
+ running on this manager, still work, but they are no longer used
by default.</p>
- <p>The following application environment variables can still be
- set, but they will only be used if the corresponding new logger
- variables are not set.</p>
+ <p>The following application configuration parameters can still be
+ set, but they are only used if the corresponding configuration
+ parameters for Logger are not set.</p>
<taglist>
<tag><c>error_logger</c></tag>
- <item>Replaced by <c>logger_dest</c></item>
+ <item>Replaced by setting the type of the default
+ <seealso marker="logger_std_h#type"><c>logger_std_h</c></seealso>
+ to the same value. Example:
+ <code type="none">
+erl -kernel logger '[{handler,default,logger_std_h,#{logger_std_h=>#{type=>{file,"/tmp/erlang.log"}}}}]'
+ </code>
+ </item>
<tag><c>error_logger_format_depth</c></tag>
- <item>Replaced by <c>logger_format_depth</c></item>
+ <item>Replaced by setting the <seealso marker="logger_formatter#depth"><c>depth</c></seealso>
+ parameter of the default handlers formatter. Example:
+ <code type="none">
+erl -kernel logger '[{handler,default,logger_std_h,#{formatter=>{logger_formatter,#{legacy_header=>true,template=>[{logger_formatter,header},"\n",msg,"\n"],depth=>10}}}]'
+ </code>
+ </item>
</taglist>
<p>See <seealso marker="logger_chapter#compatibility">Backwards
compatibility with error_logger</seealso> for more
@@ -612,12 +518,12 @@ MaxT = TickTime + TickTime / 4</code>
<seealso marker="disk_log"><c>disk_log(3)</c></seealso>,
<seealso marker="erl_boot_server"><c>erl_boot_server(3)</c></seealso>,
<seealso marker="erl_ddll"><c>erl_ddll(3)</c></seealso>,
- <seealso marker="error_logger"><c>error_logger(3)</c></seealso>,
<seealso marker="file"><c>file(3)</c></seealso>,
<seealso marker="global"><c>global(3)</c></seealso>,
<seealso marker="global_group"><c>global_group(3)</c></seealso>,
<seealso marker="heart"><c>heart(3)</c></seealso>,
<seealso marker="inet"><c>inet(3)</c></seealso>,
+ <seealso marker="logger"><c>logger(3)</c></seealso>,
<seealso marker="net_kernel"><c>net_kernel(3)</c></seealso>,
<seealso marker="os"><c>os(3)</c></seealso>,
<seealso marker="pg2"><c>pg2(3)</c></seealso>,
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
index 66e6e5c689..911eb158da 100644
--- a/lib/kernel/doc/src/logger.xml
+++ b/lib/kernel/doc/src/logger.xml
@@ -33,10 +33,50 @@
<file>logger.xml</file>
</header>
<module>logger</module>
- <modulesummary>API module for the logger application.</modulesummary>
+ <modulesummary>API module for logging in Erlang/OTP.</modulesummary>
<description>
-
+ <p>
+ This module is the main API for logging in Erlang/OTP. It
+ contains functions that allow applications to use a single log
+ API and the system to manage those log events independently. Use
+ the <seealso marker="#emergency-1">API functions</seealso> or the log
+ <seealso marker="#macros">macros</seealso> to log events. For instance,
+ to log a new error event:</p>
+ <code>
+?LOG_ERROR("error happened because: ~p",[Reason]). %% With macro
+logger:error("error happened because: ~p",[Reason]). %% Without macro
+ </code>
+ <p>This log event is then sent to the configured log handlers, which
+ by default means that it is printed to the console. If you want
+ your system's logs to be printed to a file instead of the console, you
+ must configure the default handler to do so. The simplest way is
+ to include the following in your <seealso marker="config"><c>sys.config</c></seealso>.</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler,default,logger_std_h,
+ #{logger_std_h=>#{type=>{file,"path/to/file.log"}}}}]}]}].
+ </code>
+ <p>
+ For more information about:
+ </p>
+ <list type="bulleted">
+ <item>how to use the API,
+ see <seealso marker="logger_chapter">the User's Guide</seealso>.</item>
+ <item>how to configure Logger,
+ see the <seealso marker="logger_chapter#configuration">Configuration</seealso>
+ section in the User's Guide.</item>
+ <item>the convenience macros in logger.hrl,
+ see <seealso marker="#macros">the macro section</seealso>.</item>
+ <item>what the builtin formatter can do,
+ see <seealso marker="logger_formatter">logger_formatter</seealso>.</item>
+ <item>what the builtin handlers can do,
+ see <seealso marker="logger_std_h">logger_std_h</seealso> and
+ <seealso marker="logger_disk_log_h">logger_disk_log_h</seealso>.</item>
+ <item>what builtin filters are available,
+ see <seealso marker="logger_filters">logger_filters</seealso>.</item>
+ </list>
</description>
<datatypes>
@@ -47,7 +87,7 @@
</desc>
</datatype>
<datatype>
- <name name="log"/>
+ <name name="log_event"/>
<desc>
<p></p>
</desc>
@@ -67,37 +107,119 @@
<datatype>
<name name="metadata"/>
<desc>
- <p>Metadata associated with the message to be logged.</p>
+ <p>Metadata for the log event.</p>
+ <p>Logger adds the following metadata to each log event:</p>
+ <list>
+ <item><c>pid => self()</c></item>
+ <item><c>gl => group_leader()</c></item>
+ <item><c>time => erlang:system_time(microsecond)</c></item>
+ </list>
+ <p>When a log macro is used, Logger also inserts location
+ information:</p>
+ <list>
+ <item><c>mfa => {?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}</c></item>
+ <item><c>file => ?FILE</c></item>
+ <item><c>line => ?LINE</c></item>
+ </list>
+ <p>You can add custom metadata, either by specifying a map as
+ the last parameter to any of the log macros or the API
+ functions, or by setting process metadata
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso>
+ or <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ <p>Logger merges all the metadata maps before forwarding the
+ log event to the handlers. If the same keys occur, values
+ from the log call overwrite process metadata, which in turn
+ overwrites values set by Logger.</p>
+ <p>The following custom metadata keys have special meaning:</p>
+ <taglist>
+ <tag><c>domain</c></tag>
+ <item>
+ <p>The value associated with this key is used by filters
+ for grouping log events originating from, for example,
+ specific functional
+ areas. See <seealso marker="logger_filters#domain-2">
+ <c>logger_filters:domain/2</c></seealso>
+ for a description of how this field can be used.</p>
+ </item>
+ <tag><c>report_cb</c></tag>
+ <item>
+ <p>If the log message is specified as
+ a <seealso marker="#type-report"><c>report()</c></seealso>,
+ the <c>report_cb</c> key can be associated with a fun
+ (report callback) that converts the report to a format
+ string and arguments. See
+ section <seealso marker="logger_chapter#log_message">Log
+ Message</seealso> in the User's Guide for more
+ information about report callbacks.</p>
+ </item>
+ </taglist>
</desc>
</datatype>
<datatype>
<name name="config"/>
<desc>
- <p></p>
+ <p>Configuration data for the logger part of Logger, or for a handler.</p>
+ <p>The following default values apply:</p>
+ <list>
+ <item><c>level => info</c></item>
+ <item><c>filter_default => log</c></item>
+ <item><c>filters => []</c></item>
+ <item><c>formatter => {logger_formatter,DefaultFormatterConfig}</c></item>
+ </list>
+ <p>See the <seealso marker="logger_formatter#type-config">
+ <c>logger_formatter(3)</c></seealso> manual page for
+ information about the default configuration for this
+ formatter.</p>
</desc>
</datatype>
<datatype>
<name name="handler_id"/>
<desc>
- <p></p>
+ <p>A unique identifier for a handler instance.</p>
</desc>
</datatype>
<datatype>
<name name="filter_id"/>
<desc>
- <p></p>
+ <p>A unique identifier for a filter.</p>
</desc>
</datatype>
<datatype>
<name name="filter"/>
<desc>
- <p></p>
+ <p>A filter which can be installed for the logger part of
+ Logger, or for a handler.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_arg"/>
+ <desc>
+ <p>The second argument to the filter fun.</p>
</desc>
</datatype>
<datatype>
<name name="filter_return"/>
<desc>
- <p></p>
+ <p>The return value from the filter fun.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="timestamp"/>
+ <desc>
+ <p>A timestamp produced
+ with <seealso marker="erts:erlang#system_time-1">
+ <c>erlang:system_time(microsecond)</c></seealso>.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="formatter_config"/>
+ <desc>
+ <p>Configuration data for the
+ formatter. See <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso>
+ for an example of a formatter implementation.</p>
</desc>
</datatype>
</datatypes>
@@ -125,15 +247,11 @@
<item><c>?LOG_DEBUG(FunOrFormat,Args[,Metadata])</c></item>
</list>
- <p>All macros expand to a call to logger, where <c>Level</c> is
- taken from the macro name, and the following metadata is added,
- or merged with the given <c>Metadata</c>:</p>
-
- <code>
-#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
- file=>?FILE,
- line=>?LINE}
- </code>
+ <p>All macros expand to a call to Logger, where <c>Level</c> is
+ taken from the macro name, and location data is added to the
+ metadata. See the description of
+ the <seealso marker="#type-metadata"><c>metadata()</c></seealso>
+ type for more information about the location data.</p>
<p>The call is wrapped in a case statement and will be evaluated
only if <c>Level</c> is equal to or below the configured log
@@ -251,23 +369,26 @@
<func>
<name name="get_logger_config" arity="0"/>
- <fsummary>Lookup the current configuration for logger.</fsummary>
+ <fsummary>Look up the current configuration for the logger part
+ of Logger.</fsummary>
<desc>
- <p>Lookup the current configuration for logger.</p>
+ <p>Look up the current configuration for the logger part of
+ Logger.</p>
</desc>
</func>
<func>
<name name="get_handler_config" arity="1"/>
- <fsummary>Lookup the current configuration for the given handler.</fsummary>
+ <fsummary>Look up the current configuration for the given
+ handler.</fsummary>
<desc>
- <p>Lookup the current configuration for the given handler.</p>
+ <p>Look up the current configuration for the given handler.</p>
</desc>
</func>
<func>
<name name="i" arity="0"/>
- <fsummary>Get information about all logger configurations</fsummary>
+ <fsummary>Get all Logger configurations</fsummary>
<desc>
<p>Same as <seealso marker="#i/1"><c>logger:i(term)</c></seealso></p>
</desc>
@@ -277,39 +398,42 @@
<name name="i" arity="1" clause_i="1"/>
<name name="i" arity="1" clause_i="2"/>
<name name="i" arity="1" clause_i="3"/>
- <fsummary>Get information about all logger configurations</fsummary>
+ <fsummary>Get all Logger configurations</fsummary>
<desc>
- <p>The <c>logger:i/1</c> function can be used to get all
- current logger configuration. The way that the information
- is returned depends on the <c><anno>Action</anno></c></p>
+ <p>Display or return all current Logger configurations.</p>
<taglist>
- <tag>string</tag>
- <item>Return the pretty printed current logger configuration
- as iodata.</item>
- <tag>term</tag>
- <item>Return the current logger configuration as a term. The
- format of this term may change inbetween releases. For a
- stable format use <seealso marker="#get_handler_config/1">
+ <tag><c><anno>Action</anno> = string</c></tag>
+ <item>
+ <p>Return the pretty printed current Logger configuration
+ as iodata.</p>
+ </item>
+ <tag><c><anno>Action</anno> = term</c></tag>
+ <item>
+ <p>Return the current Logger configuration as a term. The
+ format of this term may change between releases. For a
+ stable format use <seealso marker="#get_handler_config/1">
<c>logger:get_handler_config/1</c></seealso>
- and <seealso marker="#get_logger_config/0">
- <c>logger:get_logger_config/0</c></seealso>.
- The same as calling <c>logger:i()</c>.</item>
- <tag>print</tag>
- <item>Pretty print all the current logger configuration to
- standard out. Example:
- <code><![CDATA[1> logger:i().
+ and <seealso marker="#get_logger_config/0">
+ <c>logger:get_logger_config/0</c></seealso>.</p>
+ <p>The same as calling <c>logger:i()</c>.</p>
+ </item>
+ <tag><c><anno>Action</anno> = print</c></tag>
+ <item>
+ <p>Pretty print all the current Logger configuration to
+ standard out. Example:</p>
+ <code><![CDATA[1> logger:i(print).
Current logger configuration:
Level: info
- FilterDefault: log
+ Filter Default: log
Filters:
Handlers:
- Id: logger_std_h
+ Id: default
Module: logger_std_h
Level: info
Formatter:
Module: logger_formatter
- Config: #{template => [{logger_formatter,header},"\n",msg,"\n"],
- legacy_header => true}
+ Config: #{legacy_header => true,single_line => false,
+ template => [{logger_formatter,header},"\n",msg,"\n"]}
Filter Default: stop
Filters:
Id: stop_progress
@@ -320,10 +444,10 @@ Current logger configuration:
Config: stop
Id: domain
Fun: fun logger_filters:domain/2
- Config: {log,prefix_of,[beam,erlang,otp,sasl]}
+ Config: {log,super,[beam,erlang,otp,sasl]}
Id: no_domain
Fun: fun logger_filters:domain/2
- Config: {log,no_domain,[]}
+ Config: {log,undefined,[]}
Handler Config:
logger_std_h: #{type => standard_io}
Level set per module:
@@ -336,9 +460,42 @@ Current logger configuration:
<func>
<name name="add_logger_filter" arity="2"/>
- <fsummary>Add a filter to the logger.</fsummary>
+ <fsummary>Add a filter to the logger part of Logger.</fsummary>
<desc>
- <p>Add a filter to the logger.</p>
+ <p>Add a filter to the logger part of Logger.</p>
+ <p>The filter fun is called with the log event as the first
+	  parameter, and the specified <c>filter_arg()</c> as the
+ second parameter.</p>
+ <p>The return value of the fun specifies if a log event is to
+ be discarded or forwarded to the handlers:</p>
+ <taglist>
+ <tag><c>log_event()</c></tag>
+ <item>
+ <p>The filter <em>passed</em>. The next logger filter, if
+ any, is applied. If no more logger filters exist, the
+ log event is forwarded to the handler part of Logger,
+ where handler filters are applied.</p>
+ </item>
+ <tag><c>stop</c></tag>
+ <item>
+ <p>The filter <em>did not pass</em>, and the log event is
+ immediately discarded.</p>
+ </item>
+ <tag><c>ignore</c></tag>
+ <item>
+ <p>The filter has no knowledge of the log event. The next
+ logger filter, if any, is applied. If no more logger
+ filters exist, the value of the <c>filter_default</c>
+ configuration parameter for the logger part specifies if
+ the log event shall be discarded or forwarded to the
+ handler part.</p>
+ </item>
+ </taglist>
+ <p>See section <seealso marker="logger_chapter#filters">
+ Filters</seealso> in the User's Guide for more information
+ about filters.</p>
+ <p>Some built-in filters exist. These are defined
+ in <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p>
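+	<p>For illustration, a logger filter that discards all events
+	  tagged with a hypothetical domain <c>my_noisy_app</c>, and
+	  ignores everything else, could be added like this (the filter
+	  id is an example):</p>
+	<code>
+logger:add_logger_filter(drop_noisy,
+    {fun(#{meta := #{domain := [my_noisy_app | _]}}, _Extra) -> stop;
+        (_LogEvent, _Extra) -> ignore
+     end, []}).
+	</code>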
</desc>
</func>
@@ -347,14 +504,48 @@ Current logger configuration:
<fsummary>Add a filter to the specified handler.</fsummary>
<desc>
<p>Add a filter to the specified handler.</p>
+ <p>The filter fun is called with the log event as the first
+	  parameter, and the specified <c>filter_arg()</c> as the
+ second parameter.</p>
+ <p>The return value of the fun specifies if a log event is to
+ be discarded or forwarded to the handler callback:</p>
+ <taglist>
+ <tag><c>log_event()</c></tag>
+ <item>
+ <p>The filter <em>passed</em>. The next handler filter, if
+ any, is applied. If no more filters exist for this
+ handler, the log event is forwarded to the handler
+ callback.</p>
+ </item>
+ <tag><c>stop</c></tag>
+ <item>
+ <p>The filter <em>did not pass</em>, and the log event is
+ immediately discarded.</p>
+ </item>
+ <tag><c>ignore</c></tag>
+ <item>
+ <p>The filter has no knowledge of the log event. The next
+ handler filter, if any, is applied. If no more filters
+ exist for this handler, the value of
+ the <c>filter_default</c> configuration parameter for
+ the handler specifies if the log event shall be
+ discarded or forwarded to the handler callback.</p>
+ </item>
+ </taglist>
+ <p>See
+ section <seealso marker="logger_chapter#filters">Filters</seealso>
+ in the User's Guide for more information about filters.</p>
+ <p>Some built-in filters exist. These are defined in
+ <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p>
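+	<p>For illustration, a filter attached to a handler with the
+	  example id <c>default</c>, passing only events tagged with a
+	  hypothetical domain <c>my_app</c> and ignoring all other
+	  events, could be added like this:</p>
+	<code>
+logger:add_handler_filter(default, my_app_only,
+    {fun(#{meta := #{domain := [my_app | _]}} = LogEvent, _Extra) -> LogEvent;
+        (_LogEvent, _Extra) -> ignore
+     end, []}).
+	</code>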
</desc>
</func>
<func>
<name name="remove_logger_filter" arity="1"/>
- <fsummary>Remove a filter from the logger.</fsummary>
+ <fsummary>Remove a filter from the logger part of Logger.</fsummary>
<desc>
- <p>Remove the filter with the specified identity from the logger.</p>
+ <p>Remove the filter identified
+ by <c><anno>FilterId</anno></c> from the logger part of Logger.</p>
</desc>
</func>
@@ -362,7 +553,9 @@ Current logger configuration:
<name name="remove_handler_filter" arity="2"/>
<fsummary>Remove a filter from the specified handler.</fsummary>
<desc>
- <p>Remove the filter with the specified identity from the given handler.</p>
+ <p>Remove the filter identified
+ by <c><anno>FilterId</anno></c> from the handler identified
+ by <c><anno>HandlerId</anno></c>.</p>
</desc>
</func>
@@ -371,6 +564,9 @@ Current logger configuration:
<fsummary>Add a handler with the given configuration.</fsummary>
<desc>
<p>Add a handler with the given configuration.</p>
+ <p><c><anno>HandlerId</anno></c> is a unique identifier which
+ must be used in all subsequent calls referring to this
+ handler.</p>
</desc>
</func>
@@ -378,7 +574,7 @@ Current logger configuration:
<name name="remove_handler" arity="1"/>
<fsummary>Remove the handler with the specified identity.</fsummary>
<desc>
- <p>Remove the handler with the specified identity.</p>
+ <p>Remove the handler identified by <c><anno>HandlerId</anno></c>.</p>
</desc>
</func>
@@ -386,15 +582,43 @@ Current logger configuration:
<name name="set_module_level" arity="2"/>
<fsummary>Set the log level for the specified module.</fsummary>
<desc>
- <p>Set the log level for the specified module.</p>
- <p>To change the logging level globally, use
- <seealso marker="#set_logger_config/2"><c>logger:set_logger_config(level, Level)</c></seealso>.
- </p>
+ <p>Set the log level for the
+ specified <c><anno>Module</anno></c>.</p>
+ <p>The log level for a module overrides the global log level
+ of Logger for log events originating from the module in
+ question. Notice, however, that it does not override the
+ level configuration for any handler.</p>
+ <p>For example: Assume that the global log level for Logger
+ is <c>info</c>, and there is one handler, <c>h1</c>, with
+ level <c>info</c> and one handler, <c>h2</c>, with
+ level <c>debug</c>.</p>
+ <p>With this configuration, no debug messages will be logged,
+ since they are all stopped by the global log level.</p>
+ <p>If the level for <c>mymodule</c> is now set
+ to <c>debug</c>, then debug events from this module will be
+ logged by the handler <c>h2</c>, but not by
+ handler <c>h1</c>.</p>
+ <p>Debug events from other modules are still not logged.</p>
+ <p>To change the global log level for Logger, use
+ <seealso marker="#set_logger_config/2">
+ <c>logger:set_logger_config(level,Level)</c></seealso>.</p>
+ <p>To change the log level for a handler, use
+ <seealso marker="#set_handler_config/3">
+ <c>logger:set_handler_config(HandlerId,level,Level)</c></seealso>.</p>
+ <note>
+ <p>The originating module for a log event is only detected
+ if <c>mfa=>{Module,Function,Arity}</c> exists in the
+ metadata. When log macros are used, this association is
+ automatically added to all log events. If an API function
+ is called directly, without using a macro, the logging
+ client must explicitly add this information if module
+ levels shall have any effect.</p>
+ </note>
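+	<p>Continuing the example above, the module level could be set
+	  like this:</p>
+	<code>
+logger:set_module_level(mymodule, debug).
+	</code>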
</desc>
</func>
<func>
- <name name="reset_module_level" arity="1"/>
+ <name name="unset_module_level" arity="1"/>
<fsummary>Remove a module specific log setting.</fsummary>
<desc>
<p>Remove a module specific log setting. After this, the
@@ -403,22 +627,179 @@ Current logger configuration:
</func>
<func>
+ <name name="add_handlers" arity="1" clause_i="1"/>
+ <fsummary>Set up log handlers from the application's
+ configuration parameters.</fsummary>
+ <desc>
+ <p>Reads the application configuration parameter <c>logger</c> and
+ calls <c>logger:add_handlers/1</c> with its contents.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_handlers" arity="1" clause_i="2"/>
+ <fsummary>Setup logger handlers.</fsummary>
+ <type name="config_handler"/>
+ <desc>
+ <p>This function should be used by custom Logger handlers to make
+ configuration consistent no matter which handler the system uses.
+ Normal usage is to add a call to <c>logger:add_handlers/1</c>
+ just after the processes that the handler needs are started,
+ and pass the application's <c>logger</c> configuration as the argument.
+ For example:</p>
+ <code>
+-behaviour(application).
+start(_, []) ->
+ case supervisor:start_link({local, my_sup}, my_sup, []) of
+ {ok, Pid} ->
+ ok = logger:add_handlers(my_app),
+ {ok, Pid, []};
+ Error -> Error
+ end.</code>
+ <p>This reads the <c>logger</c> configuration parameter from
+	  the <c>my_app</c> application and starts the configured
+ handlers. The contents of the configuration use the same
+ rules as the
+ <seealso marker="logger_chapter#handler-configuration">logger handler configuration</seealso>.
+ </p>
+ <p>If the handler is meant to replace the default handler, the Kernel's
+	  default handler has to be disabled before the new handler is added.
+ A <c>sys.config</c> file that disables the Kernel handler and adds
+ a custom handler could look like this:</p>
+ <code>
+[{kernel,
+ [{logger,
+ %% Disable the default Kernel handler
+ [{handler,default,undefined}]}]},
+ {my_app,
+ [{logger,
+ %% Enable this handler as the default
+ [{handler,default,my_handler,#{}}]}]}].
+ </code>
+ </desc>
+ </func>
+
+ <func>
<name name="set_logger_config" arity="1"/>
+ <fsummary>Set configuration data for the logger part of Logger.</fsummary>
+ <desc>
+ <p>Set configuration data for the logger part of Logger. This
+ overwrites the current logger configuration.</p>
+ <p>To modify the existing configuration,
+ use <seealso marker="#update_logger_config-1">
+ <c>update_logger_config/1</c></seealso>, or, if a more
+ complex merge is needed, read the current configuration
+ with <seealso marker="#get_logger_config-0"><c>get_logger_config/0</c>
+ </seealso>, then do the merge before writing the new
+ configuration back with this function.</p>
+ <p>If a key is removed compared to the current configuration,
+ the default value is used.</p>
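+	<p>For illustration, the following call overwrites the whole
+	  logger configuration with explicit values; any key left out
+	  falls back to its default:</p>
+	<code>
+logger:set_logger_config(#{level => info,
+                           filter_default => log,
+                           filters => []}).
+	</code>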
+ </desc>
+ </func>
+
+ <func>
<name name="set_logger_config" arity="2"/>
- <fsummary>Add or update configuration data for the logger.</fsummary>
+ <fsummary>Add or update configuration data for the logger part
+ of Logger.</fsummary>
<desc>
- <p>Add or update configuration data for the logger.</p>
+ <p>Add or update configuration data for the logger part of
+ Logger. If the given <c><anno>Key</anno></c> already exists,
+ its associated value will be changed
+ to <c><anno>Value</anno></c>. If it does not exist, it will
+ be added.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_logger_config" arity="1"/>
+ <fsummary>Update configuration data for the logger part of
+ Logger.</fsummary>
+ <desc>
+ <p>Update configuration data for the logger part of
+ Logger. This function behaves as if it was implemented as
+ follows:</p>
+ <code type="erl">
+{ok,Old} = logger:get_logger_config(),
+logger:set_logger_config(maps:merge(Old,Config)).
+ </code>
+ <p>To overwrite the existing configuration without any merge,
+ use <seealso marker="#set_logger_config-1"><c>set_logger_config/1</c>
+ </seealso>.</p>
</desc>
</func>
<func>
<name name="set_handler_config" arity="2"/>
+ <fsummary>Set configuration data for the specified handler.</fsummary>
+ <desc>
+ <p>Set configuration data for the specified handler. This
+ overwrites the current handler configuration.</p>
+ <p>To modify the existing configuration,
+ use <seealso marker="#update_handler_config-2">
+ <c>update_handler_config/2</c></seealso>, or, if a more
+ complex merge is needed, read the current configuration
+ with <seealso marker="#get_handler_config-1"><c>get_handler_config/1</c>
+ </seealso>, then do the merge before writing the new
+ configuration back with this function.</p>
+ <p>If a key is removed compared to the current configuration,
+ and the key is known by Logger, the default value is used. If
+ it is a custom key, then it is up to the handler
+ implementation if the value is removed or a default value is
+ inserted.</p>
+ </desc>
+ </func>
+
+ <func>
<name name="set_handler_config" arity="3"/>
<fsummary>Add or update configuration data for the specified
handler.</fsummary>
<desc>
<p>Add or update configuration data for the specified
- handler.</p>
+ handler. If the given <c><anno>Key</anno></c> already
+ exists, its associated value will be changed
+ to <c><anno>Value</anno></c>. If it does not exist, it will
+ be added.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_handler_config" arity="2"/>
+ <fsummary>Update configuration data for the specified handler.</fsummary>
+ <desc>
+ <p>Update configuration data for the specified handler. This function
+ behaves as if it was implemented as follows:</p>
+ <code type="erl">
+{ok,{_,Old}} = logger:get_handler_config(HandlerId),
+logger:set_handler_config(HandlerId,maps:merge(Old,Config)).
+ </code>
+ <p>To overwrite the existing configuration without any merge,
+ use <seealso marker="#set_handler_config-2"><c>set_handler_config/2</c>
+ </seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_formatter_config" arity="2"/>
+ <fsummary>Update the formatter configuration for the specified handler.</fsummary>
+ <desc>
+ <p>Update the formatter configuration for the specified handler.</p>
+ <p>The new configuration is merged with the existing formatter
+ configuration.</p>
+ <p>To overwrite the existing configuration without any merge,
+ use <seealso marker="#set_handler_config-3">
+ <c>set_handler_config(HandlerId,formatter,
+ {FormatterModule,FormatterConfig})</c></seealso>.</p>
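+	<p>For example, to turn on single line formatting for a handler
+	  with the example id <c>default</c>, without touching the rest
+	  of its formatter configuration:</p>
+	<code>
+logger:update_formatter_config(default, #{single_line => true}).
+	</code>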
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_formatter_config" arity="3"/>
+ <fsummary>Update the formatter configuration for the specified handler.</fsummary>
+ <desc>
+ <p>Update the formatter configuration for the specified handler.</p>
+ <p>This is equivalent
+ to <br/><seealso marker="#update_formatter_config-2">
+ <c>update_formatter_config(<anno>HandlerId</anno>,#{<anno>Key</anno>=><anno>Value</anno>})</c></seealso></p>
</desc>
</func>
@@ -437,17 +818,37 @@ Current logger configuration:
<name name="set_process_metadata" arity="1"/>
<fsummary>Set metadata to use when logging from current process.</fsummary>
<desc>
- <p>Set metadata which <c>logger</c> automatically inserts it
- in all log events produced on the current
- process. Subsequent calls will overwrite previous data set
- by this function.</p>
- <p>When logging, location data produced by the log macros,
- and/or metadata given as argument to the log call (API
- function or macro), will be merged with the process
- metadata. If the same keys occur, values from the metadata
- argument to the log call will overwrite values in the
- process metadata, which in turn will overwrite values from
- the location data.</p>
+ <p>Set metadata which Logger shall automatically insert in
+ all log events produced on the current process.</p>
+ <p>Location data produced by the log macros, and/or metadata
+ given as argument to the log call (API function or macro),
+ are merged with the process metadata. If the same keys
+ occur, values from the metadata argument to the log call
+ overwrite values from the process metadata, which in turn
+ overwrite values from the location data.</p>
+	<p>Subsequent calls to this function overwrite any previously
+	  set data. To update existing data instead of overwriting it,
+ see <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
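+	<p>For illustration, a server process could attach an
+	  identifier to all log events issued on it (the key and the
+	  bound variable are examples only):</p>
+	<code>
+logger:set_process_metadata(#{my_server_id => Id}).
+	</code>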
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_process_metadata" arity="1"/>
+ <fsummary>Set or update metadata to use when logging from
+ current process.</fsummary>
+ <desc>
+	<p>Set or update metadata to use when logging from the current
+	  process.</p>
+ <p>If process metadata exists for the current process, this
+ function behaves as if it was implemented as follows:</p>
+ <code type="erl">
+logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)).
+ </code>
+ <p>If no process metadata exists, the function behaves as
+ <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c>
+ </seealso>.</p>
</desc>
</func>
@@ -457,7 +858,9 @@ Current logger configuration:
<desc>
<p>Retrieve data set
with <seealso marker="#set_process_metadata-1">
- <c>set_process_metadata/1</c></seealso>.</p>
+ <c>set_process_metadata/1</c></seealso> or
+ <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
</desc>
</func>
@@ -467,12 +870,181 @@ Current logger configuration:
<desc>
<p>Delete data set
with <seealso marker="#set_process_metadata-1">
- <c>set_process_metadata/1</c></seealso>.</p>
+ <c>set_process_metadata/1</c></seealso> or
+ <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="format_report" arity="1"/>
+ <fsummary>Convert a log message on report form to {Format,Args}.</fsummary>
+ <desc>
+ <p>Convert a log message on report form to <c>{Format,Args}</c>.</p>
+ <p>This is the default report callback used
+ by <seealso marker="logger_formatter">
+ <c>logger_formatter</c></seealso> when no custom report
+ callback is found.</p>
+ <p>The function produces lines of <c>Key: Value</c> from
+ key-value lists. Strings are printed with <c>~ts</c> and
+ other terms with <c>~tp</c>.</p>
+ <p>If the <c><anno>Report</anno></c> is a map, it is
+ converted to a key-value list before formatting as such.</p>
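+	<p>A minimal usage sketch, with example report keys:</p>
+	<code>
+{Format, Args} = logger:format_report(#{user => "joe", action => login}),
+io:format(Format, Args).
+	</code>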
+ </desc>
+ </func>
+
+ </funcs>
+
+ <section>
+ <marker id="handler_callback_functions"/>
+ <title>Handler Callback Functions</title>
+ <p>The following functions are to be exported from a handler
+ callback module.</p>
+ </section>
+
+ <funcs>
+ <func>
+ <name>HModule:adding_handler(Config1) -> {ok,Config2} | {error,Reason}</name>
+ <fsummary>An instance of this handler is about to be added.</fsummary>
+ <type>
+ <v>Config1 = Config2 =
+ <seealso marker="#type-config">config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+	<p>The function is called when a new handler is about to be
+ added, and the purpose is to verify the configuration and
+ initiate all resources needed by the handler.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config1</c>.</p>
+ <p>If everything succeeds, the callback function can add
+ possible default values or internal state values to the
+ configuration, and return the adjusted map
+ in <c>{ok,Config2}</c>.</p>
+ <p>If the configuration is faulty, or if the initiation fails,
+ the callback function must return <c>{error,Reason}</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>HModule:changing_config(Config1,Config2) -> {ok,Config3} | {error,Reason}</name>
+ <fsummary>The configuration for this handler is about to change.</fsummary>
+ <type>
+ <v>Config1 = Config2 = Config3 =
+ <seealso marker="#type-config">config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called when the configuration for a handler
+ is about to change, and the purpose is to verify and act on
+ the new configuration.</p>
+ <p><c>Config1</c> is the existing configuration
+ and <c>Config2</c> is the new configuration.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config1</c>.</p>
+ <p>If everything succeeds, the callback function must return a
+ possibly adjusted configuration in <c>{ok,Config3}</c>.</p>
+ <p>If the configuration is faulty, the callback function must
+ return <c>{error,Reason}</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>HModule:log(LogEvent,Config) -> void()</name>
+ <fsummary>Log the given log event.</fsummary>
+ <type>
+ <v>LogEvent =
+ <seealso marker="#type-log_event">log_event()</seealso></v>
+ <v>Config =
+ <seealso marker="#type-config">config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is mandatory.</p>
+ <p>The function is called when all global filters and all
+ handler filters for the handler in question have passed for
+ the given log event.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config</c>.</p>
+ <p>The handler must log the event.</p>
+ <p>The return value from this function is ignored by
+ Logger.</p>
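+	<p>A minimal sketch of such a callback, which runs the
+	  configured formatter and prints the result, could look like
+	  this (this is not the implementation of any built-in
+	  handler):</p>
+	<code>
+log(LogEvent, #{formatter := {FModule, FConfig}}) ->
+    io:put_chars(FModule:format(LogEvent, FConfig)).
+	</code>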
+ </desc>
+ </func>
+
+ <func>
+ <name>HModule:removing_handler(Config) -> ok</name>
+ <fsummary>The given handler is about to be removed.</fsummary>
+ <type>
+ <v>Config =
+ <seealso marker="#type-config">config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called when a handler is about to be
+ removed, and the purpose is to release all resources used by
+ the handler.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config</c>.</p>
+ <p>The return value is ignored by Logger.</p>
</desc>
</func>
</funcs>
+ <section>
+ <marker id="formatter_callback_functions"/>
+ <title>Formatter Callback Functions</title>
+ <p>The following functions are to be exported from a formatter
+ callback module.</p>
+ </section>
+
+ <funcs>
+ <func>
+ <name>FModule:check_config(FConfig) -> ok | {error,term()}</name>
+ <fsummary>Validate the given formatter configuration.</fsummary>
+ <type>
+ <v>FConfig =
+ <seealso marker="#type-formatter_config">formatter_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+	<p>The function is called by Logger when the formatter
+ configuration is set or modified. The formatter must
+ validate the given configuration and return <c>ok</c> if it
+ is correct, and <c>{error,term()}</c> if it is faulty.</p>
+ <p>See <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso>
+ for an example implementation. <c>logger_formatter</c> is the
+ default formatter used by Logger.</p>
+ </desc>
+ </func>
+ <func>
+ <name>FModule:format(LogEvent,FConfig) -> FormattedLogEntry</name>
+ <fsummary>Format the given log event.</fsummary>
+ <type>
+ <v>LogEvent =
+ <seealso marker="#type-log_event">log_event()</seealso></v>
+ <v>FConfig =
+ <seealso marker="#type-formatter_config">formatter_config()</seealso></v>
+ <v>FormattedLogEntry =
+ <seealso marker="unicode#type-chardata">unicode:chardata()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is mandatory.</p>
+ <p>The function can be called by a log handler to convert a
+ log event term to a printable string. The returned value
+ can, for example, be printed as a log entry to the console
+ or a file using <seealso marker="stdlib:io#put_chars-1">
+ <c>io:put_chars/1,2</c></seealso>.</p>
+ <p>See <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso>
+ for an example implementation. <c>logger_formatter</c> is the
+ default formatter used by Logger.</p>
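+	<p>A minimal sketch of a formatter callback, assuming the three
+	  message forms described for log events, could look like this
+	  (this is not <c>logger_formatter</c> itself):</p>
+	<code>
+format(#{level := Level, msg := Msg, meta := _Meta}, _FConfig) ->
+    Text = case Msg of
+               {string, String} -> String;
+               {report, Report} -> io_lib:format("~tp", [Report]);
+               {Format, Args}   -> io_lib:format(Format, Args)
+           end,
+    [io_lib:format("~p: ", [Level]), Text, "\n"].
+	</code>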
+ </desc>
+ </func>
+ </funcs>
</erlref>
diff --git a/lib/kernel/doc/src/logger_arch.png b/lib/kernel/doc/src/logger_arch.png
index 727609a6ef..901122193a 100644
--- a/lib/kernel/doc/src/logger_arch.png
+++ b/lib/kernel/doc/src/logger_arch.png
Binary files differ
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
index 0bc3b37476..a3eec7bd4b 100644
--- a/lib/kernel/doc/src/logger_chapter.xml
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -30,382 +30,673 @@
<file>logger_chapter.xml</file>
</header>
+ <p>Erlang/OTP 21.0 provides a new standard API for logging
+ through <c>Logger</c>, which is part of the Kernel
+ application. Logger consists of the API for issuing log events,
+ and a customizable backend where log handlers, filters and
+ formatters can be plugged in.</p>
+ <p>By default, the Kernel application installs one log handler at
+ system start. This handler is named <c>default</c>. It receives
+ and processes standard log events produced by the Erlang runtime
+ system, standard behaviours and different Erlang/OTP
+ applications. The log events are by default written to the
+ terminal.</p>
+ <p>You can also configure the system so that the default handler
+ prints log events to a single file, or to a set of wrap logs
+ via <seealso marker="disk_log"><c>disk_log</c></seealso>.</p>
+  <p>By configuration, you can also modify or disable the default
+    handler, replace it with a custom handler, and install additional
+ handlers.</p>
+
<section>
<title>Overview</title>
- <p>Erlang/OTP provides a standard API for logging. The backend of
- this API can be used as is, or it can be customized to suite
- specific needs.</p>
- <p>It consists of two parts - the <em>logger</em> part and the
- <em>handler</em> part. The logger will forward log events to one
- or more handler(s).</p>
+ <p>A <em>log event</em> consists of a <em>log level</em>, the
+ <em>message</em> to be logged, and <em>metadata</em>.</p>
+ <p>The Logger backend forwards log events from the API, first
+ through a set of <em>global filters</em>, then through a set
+ of <em>handler filters</em> for each log handler.</p>
+ <p>Each filter set consists of a <em>log level check</em>,
+ followed by zero or more <em>filter functions</em>.</p>
+    <p>The following figure shows a conceptual overview of Logger. The
+ figure shows two log handlers, but any number of handlers can be
+ installed.</p>
<image file="logger_arch.png">
- <icaption>Conceptual overview</icaption>
+ <icaption>Conceptual Overview</icaption>
</image>
- <p><em>Filters</em> can be added to the logger and to each
- handler. The filters decide if an event is to be forwarded or
- not, and they can also modify all parts of the log event.</p>
-
- <p>A <em>formatter</em> can be set for each handler. The formatter
- does the final formatting of the log event, including the log
- message itself, and possibly a timestamp, header and other
- metadata.</p>
-
- <p>In accordance with the Syslog protocol, RFC-5424, eight
- severity levels can be specified:</p>
-
- <table align="left">
- <row>
- <cell><strong>Level</strong></cell>
- <cell align="center"><strong>Integer</strong></cell>
- <cell><strong>Description</strong></cell>
- </row>
- <row>
- <cell>emergency</cell>
- <cell align="center">0</cell>
- <cell>system is unusable</cell>
- </row>
- <row>
- <cell>alert</cell>
- <cell align="center">1</cell>
- <cell>action must be taken immediately</cell>
- </row>
- <row>
- <cell>critical</cell>
- <cell align="center">2</cell>
- <cell>critical contidions</cell>
- </row>
- <row>
- <cell>error</cell>
- <cell align="center">3</cell>
- <cell>error conditions</cell>
- </row>
- <row>
- <cell>warning</cell>
- <cell align="center">4</cell>
- <cell>warning conditions</cell>
- </row>
- <row>
- <cell>notice</cell>
- <cell align="center">5</cell>
- <cell>normal but significant conditions</cell>
- </row>
- <row>
- <cell>info</cell>
- <cell align="center">6</cell>
- <cell>informational messages</cell>
- </row>
- <row>
- <cell>debug</cell>
- <cell align="center">7</cell>
- <cell>debug-level messages</cell>
- </row>
- <tcaption>Severity levels</tcaption>
- </table>
-
- <p>A log event is allowed by Logger if the integer value of
- its <c>Level</c> is less than or equal to the currently
- configured log level. The log level can be configured globally,
- or to allow more verbose logging from a specific part of the
- system, per module.</p>
-
+ <p>Log levels are expressed as atoms. Internally in Logger, the
+ atoms are mapped to integer values, and a log event passes the
+ log level check if the integer value of its log level is less
+ than or equal to the currently configured log level. That is,
+      the check passes if the event is equally or more severe than the
+ configured level. See section <seealso marker="#log_level">Log
+ Level</seealso> for a listing and description of all log
+ levels.</p>
+ <p>The global log level can be overridden by a log level
+      configured per module. This allows, for instance, more verbose
+      logging from a specific part of the system.</p>
+ <p>Filter functions can be used for more sophisticated filtering
+ than the log level check provides. A filter function can stop or
+ pass a log event, based on any of the event's contents. It can
+      also modify all parts of the log event. See
+ section <seealso marker="#filters">Filters</seealso> for more
+ details.</p>
+ <p>If a log event passes through all global filters and all
+ handler filters for a specific handler, Logger forwards the event
+ to the handler callback. The handler formats and prints the
+ event to its destination. See
+ section <seealso marker="#handlers">Handlers</seealso> for
+ more details.</p>
+ <p>Everything up to and including the call to the handler
+ callbacks is executed on the client process, that is, the
+ process where the log event was issued. It is up to the handler
+ implementation if other processes are involved or not.</p>
+ <p>The handlers are called in sequence, and the order is not
+ defined.</p>
+ </section>
+ <section>
+ <title>Logger API</title>
+ <p>The API for logging consists of a set
+ of <seealso marker="logger#macros">macros</seealso>, and a set
+ of functions on the form <c>logger:Level/1,2,3</c>, which are
+ all shortcuts
+ for <seealso marker="logger#log-2">
+ <c>logger:log(Level,Arg1[,Arg2[,Arg3]])</c></seealso>.</p>
+ <p>The difference between using the macros and the exported
+ functions is that macros add location (originator) information
+      to the metadata, and perform lazy evaluation by wrapping the
+ logger call in a case statement, so it is only evaluated if the
+ log level of the event passes the global log level check.</p>
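+    <p>For example, the following two calls are equivalent
+      (<c>File</c> and <c>Reason</c> are assumed to be bound):</p>
+    <code>
+logger:error("could not open ~ts: ~p", [File, Reason]),
+logger:log(error, "could not open ~ts: ~p", [File, Reason]).
+    </code>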
<section>
- <title>Customizable parts</title>
-
+ <marker id="log_level"/>
+ <title>Log Level</title>
+      <p>The log level indicates the severity of an event. In
+ accordance with the Syslog protocol, RFC-5424, eight log
+ levels can be specified. The following table lists all
+ possible log levels by name (atom), integer value, and
+ description:</p>
+
+ <table align="left">
+ <row>
+ <cell><strong>Level</strong></cell>
+ <cell align="center"><strong>Integer</strong></cell>
+ <cell><strong>Description</strong></cell>
+ </row>
+ <row>
+ <cell>emergency</cell>
+ <cell align="center">0</cell>
+ <cell>system is unusable</cell>
+ </row>
+ <row>
+ <cell>alert</cell>
+ <cell align="center">1</cell>
+ <cell>action must be taken immediately</cell>
+ </row>
+ <row>
+ <cell>critical</cell>
+ <cell align="center">2</cell>
+	  <cell>critical conditions</cell>
+ </row>
+ <row>
+ <cell>error</cell>
+ <cell align="center">3</cell>
+ <cell>error conditions</cell>
+ </row>
+ <row>
+ <cell>warning</cell>
+ <cell align="center">4</cell>
+ <cell>warning conditions</cell>
+ </row>
+ <row>
+ <cell>notice</cell>
+ <cell align="center">5</cell>
+ <cell>normal but significant conditions</cell>
+ </row>
+ <row>
+ <cell>info</cell>
+ <cell align="center">6</cell>
+ <cell>informational messages</cell>
+ </row>
+ <row>
+ <cell>debug</cell>
+ <cell align="center">7</cell>
+ <cell>debug-level messages</cell>
+ </row>
+ <tcaption>Log Levels</tcaption>
+ </table>
+ <p>Notice that the integer value is only used internally in
+ Logger. In the API, you must always use the atom. To compare
+ the severity of two log levels,
+ use <seealso marker="logger#compare_levels-2">
+ <c>logger:compare_levels/2</c></seealso>.</p>
+ </section>
+ <section>
+ <marker id="log_message"/>
+ <title>Log Message</title>
+ <p>The log message contains the information to be logged. The
+ message can consist of a format string and arguments (given as
+ two separate parameters in the Logger API), a string or a
+ report. The latter, which is either a map or a key-value list,
+ can be accompanied by a report callback specified in the log
+ event's <seealso marker="#metadata">metadata</seealso>. The
+ report callback is a convenience function that
+ the <seealso marker="#formatters">formatter</seealso> can use
+ to convert the report to a format string and arguments. The
+ formatter can also use its own conversion function, if no
+ callback is provided, or if a customized formatting is
+ desired.</p>
+ <p>Example, format string and arguments:</p>
+ <code>logger:error("The file does not exist: ~ts",[Filename])</code>
+ <p>Example, string:</p>
+ <code>logger:notice("Something strange happened!")</code>
+ <p>Example, report, and metadata with report callback:</p>
+ <code>
+logger:debug(#{got => connection_request, id => Id, state => State},
+ #{report_cb => fun(R) -> {"~p",[R]} end})</code>
+ <p>The log message can also be provided through a fun for lazy
+ evaluation. The fun is only evaluated if the global log level
+ check passes, and is therefore recommended if it is expensive
+ to generate the message. The lazy fun must return a string, a
+ report, or a tuple with format string and arguments.</p>
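+      <p>A sketch of such a lazy call, where the fun returns a format
+	string and arguments; <c>gather_stats/1</c> and <c>Conn</c>
+	are assumptions made for the example:</p>
+      <code>
+logger:debug(fun(C) -> {"connection stats: ~p", [gather_stats(C)]} end,
+             Conn)
+      </code>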
+ </section>
+ <section>
+ <title>Metadata</title>
+ <p>Metadata contains additional data associated with a log
+ message. Logger inserts some metadata fields by default, and
+ the client can add custom metadata in two different ways:</p>
<taglist>
- <tag><marker id="Handler"/>Handler</tag>
+ <tag>Set process metadata</tag>
<item>
- <p>A handler is defined as a module exporting the following
- function:</p>
-
- <code>log(Log, Config) -> ok</code>
-
- <p>A handler is called by the logger backend after filtering on
- logger level and on handler level for the handler which is
- about to be called. The function call is done on the client
- process, and it is up to the handler implementation if other
- processes are to be involved or not.</p>
-
- <p>Multiple instances of the same handler can be
- added. Configuration is per instance.</p>
-
+ <p>Process metadata is set and updated
+ with <seealso marker="logger#set_process_metadata-1">
+ <c>logger:set_process_metadata/1</c></seealso>
+ and <seealso marker="logger#update_process_metadata-1">
+	    <c>logger:update_process_metadata/1</c></seealso>,
+ respectively. This metadata applies to the process on
+ which these calls are made, and Logger adds the metadata
+ to all log events issued on that process.</p>
</item>
-
- <tag><marker id="Filter"/>Filter</tag>
+	<tag>Add metadata to a specific log event</tag>
<item>
- <p>Filters can be set on the logger or on a handler. Logger
- filters are applied first, and if passed, the handler filters
- for each handler are applied. The handler plugin is only
- called if all handler filters for the handler in question also
- pass.</p>
-
- <p>A filter is specified as:</p>
-
- <code>{fun((Log,Extra) -> Log | stop | ignore), Extra}</code>
-
- <p>The configuration parameter <c>filter_default</c>
- specifies the behavior if all filters return <c>ignore</c>.
- <c>filter_default</c> is by default set to <c>log</c>.</p>
-
- <p>The <c>Extra</c> parameter may contain any data that the
- filter needs.</p>
+	  <p>Metadata associated with one specific log event is given
+ as the last parameter to the log macro or Logger API
+ function when the event is issued. For example:</p>
+ <code>?LOG_ERROR("Connection closed",#{context => server})</code>
</item>
-
- <tag><marker id="Formatter"/>Formatter</tag>
- <item>
- <p>A formatter is defined as a module exporting the following
- function:</p>
-
- <code>format(Log,Extra) -> string()</code>
-
- <p>The formatter plugin is called by each handler, and the
- returned string can be printed to the handler's destination
- (stdout, file, ...).</p>
- </item>
-
</taglist>
+ <p>See the description of
+ the <seealso marker="logger#type-metadata">
+ <c>logger:metadata()</c></seealso> type for information
+ about which default keys Logger inserts, and how the different
+ metadata maps are merged.</p>
</section>
+ </section>
+ <section>
+ <marker id="filter"/>
+ <title>Filters</title>
+ <p>Filters can be global, or attached to a specific
+ handler. Logger calls the global filters first, and if they all
+ pass, it calls the handler filters for each handler. Logger
+ calls the handler callback only if all filters attached to the
+ handler in question also pass.</p>
+ <p>A filter is defined as:</p>
+ <pre>{FilterFun, Extra}</pre>
+ <p>where <c>FilterFun</c> is a function of arity 2,
+ and <c>Extra</c> is any term. When applying the filter, Logger
+ calls the function with the log event as the first argument,
+ and the value of <c>Extra</c> as the second
+ argument. See <seealso marker="logger#type-filter">
+ <c>logger:filter()</c></seealso> for type definitions.</p>
+ <p>The filter function can return <c>stop</c>, <c>ignore</c> or
+ the (possibly modified) log event.</p>
+ <p>If <c>stop</c> is returned, the log event is immediately
+ discarded. If the filter is global, no handler filters or
+ callbacks are called. If it is a handler filter, the
+ corresponding handler callback is not called, but the log event
+ is forwarded to filters attached to the next handler, if
+ any.</p>
+ <p>If the log event is returned, the next filter function is
+ called with the returned value as the first argument. That is,
+ if a filter function modifies the log event, the next filter
+ function receives the modified event. The value returned from
+ the last filter function is the value that the handler callback
+ receives.</p>
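+    <p>For illustration, a filter fun that never stops an event, but
+      adds a tag (an example key) to its metadata, could look like
+      this:</p>
+    <pre>
+{fun(LogEvent = #{meta := Meta}, Extra) ->
+         LogEvent#{meta => Meta#{tag => Extra}}
+ end,
+ my_tag}</pre>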
+ <p>If the filter function returns <c>ignore</c>, it means that it
+ did not recognize the log event, and thus leaves to other
+ filters to decide the event's destiny.</p>
+ <p>The configuration
+ option <seealso marker="#filter_default"><c>filter_default</c></seealso>
+ specifies the behaviour if all filter functions
+ return <c>ignore</c>, or if no filters
+ exist. <c>filter_default</c> is by default set to <c>log</c>,
+ meaning that if all existing filters ignore a log event, Logger
+ forwards the event to the handler
+ callback. If <c>filter_default</c> is set to <c>stop</c>, Logger
+ discards such events.</p>
+ <p>Global filters are added
+ with <seealso marker="logger#add_logger_filter-2">
+ <c>logger:add_logger_filter/2</c></seealso>
+ and removed
+ with <seealso marker="logger#remove_logger_filter-1">
+ <c>logger:remove_logger_filter/1</c></seealso>. They can also
+ be added at system start via the Kernel configuration
+ parameter <seealso marker="#logger"><c>logger</c></seealso>.</p>
+ <p>Handler filters are added
+ with <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>
+ and removed
+ with <seealso marker="logger#remove_handler_filter-2">
+ <c>logger:remove_handler_filter/2</c></seealso>. They can also
+ be specified directly in the configuration when adding a handler
+ with <seealso marker="logger#add_handler/3">
+ <c>logger:add_handler/3</c></seealso>
+ or via the Kernel configuration
+ parameter <seealso marker="#logger"><c>logger</c></seealso>.</p>
+
+ <p>To see which filters are currently installed in the system,
+ use <seealso marker="logger#i-0"><c>logger:i/0</c></seealso>,
+ or <seealso marker="logger#get_logger_config-0">
+ <c>logger:get_logger_config/0</c></seealso>
+ and <seealso marker="logger#get_handler_config-1">
+ <c>logger:get_handler_config/1</c></seealso>. Filters are
+ listed in the order they are applied, that is, the first
+ filter in the list is applied first, and so on.</p>
+
+ <p>For convenience, the following built-in filters exist:</p>
- <section>
- <title>Built-in handlers</title>
-
- <taglist>
- <tag><c>logger_std_h</c></tag>
+ <taglist>
+ <tag><seealso marker="logger_filters#domain-2">
+ <c>logger_filters:domain/2</c></seealso></tag>
<item>
- <p>This is the default handler used by OTP. Multiple instances
- can be started, and each instance will write log events to a
- given destination, console or file. Filters can be used for
- selecting which event to send to which handler instance.</p>
+ <p>Provides a way of filtering log events based on a
+ <c>domain</c> field in <c>Metadata</c>.</p>
</item>
-
- <tag><c>logger_disk_log_h</c></tag>
+ <tag><seealso marker="logger_filters#level-2">
+ <c>logger_filters:level/2</c></seealso></tag>
<item>
- <p>This handler behaves much like logger_std_h, except it uses
- <seealso marker="disk_log"><c>disk_log</c></seealso> as its
- destination.</p>
+ <p>Provides a way of filtering log events based on the log
+ level.</p>
</item>
-
- <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag>
+ <tag><seealso marker="logger_filters#progress-2">
+ <c>logger_filters:progress/2</c></seealso></tag>
<item>
- <p>This handler is to be used for backwards compatibility
- only. It is not started by default, but will be automatically
- started the first time an event handler is added
- with <seealso marker="error_logger#add_report_handler-1">
- <c>error_logger:add_report_handler/1,2</c></seealso>.</p>
-
- <p>No built-in event handlers exist.</p>
+ <p>Stops or allows progress reports from <c>supervisor</c>
+ and <c>application_controller</c>.</p>
</item>
- </taglist>
- </section>
-
- <section>
- <title>Built-in filters</title>
-
- <taglist>
- <tag><c>logger_filters:domain/2</c></tag>
+ <tag><seealso marker="logger_filters#remote_gl-2">
+ <c>logger_filters:remote_gl/2</c></seealso></tag>
<item>
- <p>This filter provides a way of filtering log events based on a
- <c>domain</c> field <c>Metadata</c>. See
- <seealso marker="logger_filters#domain-2">
- <c>logger_filters:domain/2</c></seealso></p>
+ <p>Stops or allows log events originating from a process
+ that has its group leader on a remote node.</p>
</item>
+ </taglist>
+ </section>
- <tag><c>logger_filters:level/2</c></tag>
- <item>
- <p>This filter provides a way of filtering log events based
- on the log level. See <seealso marker="logger_filters#domain-2">
- <c>logger_filters:domain/2</c></seealso></p>
- </item>
+ <section>
+ <marker id="handlers"/>
+ <title>Handlers</title>
+ <p>A handler is defined as a module exporting at least the
+ following function:</p>
+
+ <pre><seealso marker="logger#HModule:log-2">log(LogEvent, Config) -> void()</seealso></pre>
+
+ <p>This function is called when a log event has passed through all
+ global filters, and all handler filters attached to the handler
+ in question. The function call is executed on the client
+ process, and it is up to the handler implementation if other
+ processes are involved or not.</p>
+
+ <p>Logger allows adding multiple instances of a handler
+ callback. That is, if a callback module implementation allows
+ it, you can add multiple handler instances using the same
+ callback module. The different instances are identified by
+ unique handler identities.</p>
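+
+    <p>For example, a second instance of <c>logger_std_h</c> that
+      only logs errors to a file could be added like this (handler id
+      and file name are examples only):</p>
+    <code>
+logger:add_handler(my_error_log, logger_std_h,
+                   #{level => error,
+                     logger_std_h => #{type => {file, "log/error.log"}}}).
+    </code>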
+
+ <p>In addition to the mandatory callback function <c>log/2</c>, a
+ handler module can export the optional callback
+ functions <c>adding_handler/1</c>, <c>changing_config/2</c>
+ and <c>removing_handler/1</c>. See
+ section <seealso marker="logger#handler_callback_functions">Handler
+ Callback Functions</seealso> in the logger(3) manual for more
+      information about these functions.</p>
+
+ <p>The following built-in handlers exist:</p>
- <tag><c>logger_filters:progress/2</c></tag>
- <item>
- <p>This filter matches all progress reports
- from <c>supervisor</c> and <c>application_controller</c>.
- See <seealso marker="logger_filters#progress/2">
- <c>logger_filters:progress/2</c></seealso></p>
- </item>
+ <taglist>
+ <tag><c>logger_std_h</c></tag>
+ <item>
+ <p>This is the default handler used by OTP. Multiple instances
+ can be started, and each instance will write log events to a
+ given destination, terminal or file.</p>
+ </item>
- <tag><c>logger_filters:remote_gl/2</c></tag>
- <item>
- <p>This filter matches all events originating from a process
- that has its group leader on a remote node.
- See <seealso marker="logger_filters#remote_gl/2">
- <c>logger_filters:remote_gl/2</c></seealso></p>
- </item>
- </taglist>
- </section>
+ <tag><c>logger_disk_log_h</c></tag>
+ <item>
+ <p>This handler behaves much like <c>logger_std_h</c>, except it uses
+ <seealso marker="disk_log"><c>disk_log</c></seealso> as its
+ destination.</p>
+ </item>
- <section>
- <title>Default formatter</title>
+ <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag>
+ <item>
+ <p>This handler is provided for backwards compatibility
+ only. It is not started by default, but will be
+ automatically started the first time an <c>error_logger</c>
+ event handler is added
+ with <seealso marker="error_logger#add_report_handler-1">
+ <c>error_logger:add_report_handler/1,2</c></seealso>.</p>
+
+ <p>The old <c>error_logger</c> event handlers in STDLIB and
+ SASL still exist, but they are not added by Erlang/OTP 21.0
+ or later.</p>
+ </item>
+ </taglist>
+ </section>
- <p>The default formatter is <c>logger_formatter</c>.
- See <seealso marker="logger_formatter#format-2">
- <c>logger_formatter:format/2</c></seealso>.</p>
- </section>
+ <section>
+ <marker id="formatters"/>
+ <title>Formatters</title>
+ <p>A formatter can be used by the handler implementation to do the
+ final formatting of a log event, before printing to the
+ handler's destination. The handler callback receives the
+ formatter information as part of the handler configuration,
+ which is passed as the second argument
+ to <seealso marker="logger#HModule:log-2">
+ <c>HModule:log/2</c></seealso>.</p>
+    <p>The formatter information consists of a formatter
+      module, <c>FModule</c>, and its
+ configuration, <c>FConfig</c>. <c>FModule</c> must export the
+ following function, which can be called by the handler:</p>
+ <pre><seealso marker="logger#FModule:format-2">format(LogEvent,FConfig)
+ -> FormattedLogEntry</seealso></pre>
+ <p>The formatter information for a handler is set as a part of its
+ configuration when the handler is added. It can also be changed
+ during runtime
+ with <seealso marker="logger#set_handler_config-3">
+ <c>logger:set_handler_config(HandlerId,formatter,{FModule,FConfig})</c>
+ </seealso>, which overwrites the current formatter information,
+ or with <seealso marker="logger#update_formatter_config-2">
+ <c>logger:update_formatter_config/2,3</c></seealso>, which
+ only modifies the formatter configuration.</p>
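+    <p>For example, to switch the handler with the example
+      id <c>default</c> to single line formatting at runtime:</p>
+    <code>
+logger:set_handler_config(default, formatter,
+                          {logger_formatter, #{single_line => true}}).
+    </code>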
+ <p>If the formatter module exports the optional callback
+ function <seealso marker="logger#FModule:check_config-1">
+ <c>check_config(FConfig)</c></seealso>, Logger calls this
+ function when the formatter information is set or modified, to
+ verify the validity of the formatter configuration.</p>
+ <p>If no formatter information is specified for a handler, Logger
+ uses <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso> as default.</p>
</section>
<section>
<title>Configuration</title>
+ <p>Logger can be configured either when the system starts through
+ <seealso marker="config">configuration parameters</seealso>,
+ or at run-time by using the <seealso marker="logger">logger(3)</seealso>
+ API. The recommended approach is to do the initial configuration in
+ the <c>sys.config</c> file and then use the API when some configuration
+ has to be changed at runtime, such as the log level.</p>
+
<section>
- <title>Application environment variables</title>
- <p>See <seealso marker="kernel_app#configuration">Kernel(6)</seealso> for
- information about the application environment variables that can
- be used for configuring logger.</p>
+ <title>Kernel Configuration Parameters</title>
+ <p>Logger is best configured by using the configuration parameters
+ of Kernel. There are four possible configuration parameters:
+ <seealso marker="#logger"><c>logger</c></seealso>,
+ <seealso marker="kernel_app#logger_level"><c>logger_level</c></seealso>,
+ <seealso marker="kernel_app#logger_sasl_compatible"><c>logger_sasl_compatible</c></seealso> and
+ <seealso marker="kernel_app#logger_progress_reports"><c>logger_progress_reports</c></seealso>.
+ <c>logger_level</c>, <c>logger_sasl_compatible</c> and <c>logger_progress_reports</c> are described in the
+ <seealso marker="kernel_app#configuration">Kernel Configuration</seealso>,
+ while <c>logger</c> is described below.</p>
+
+ <marker id="logger"/>
+ <p><em>logger</em></p>
+ <p>The application configuration parameter <c>logger</c> is used to configure
+	three different Logger aspects: handlers, logger filters, and module levels.
+ The configuration is a list containing tagged tuples that look like this:</p>
+ <taglist>
+ <tag><c>DisableHandler = {handler,default,undefined}</c></tag>
+ <item>
+ <p>Disable the default handler. This allows another application
+ to add its own default handler. See <seealso marker="logger#add_handlers/1">
+ <c>logger:add_handlers/1</c></seealso> for more details.</p>
+ <p>Only one entry of this option is allowed.</p></item>
+ <tag><c>AddHandler = {handler,HandlerId,Module,HandlerConfig}</c></tag>
+ <item>
+	  <p>Add a handler as if <seealso marker="logger#add_handler/3">
+ <c>logger:add_handler(HandlerId,Module,HandlerConfig)</c></seealso> is
+ called.</p>
+ <p>It is allowed to have multiple entries of this option.</p></item>
+	<tag><c>Filters = {filters, FilterDefault, [Filter]}</c><br/>
+ <c>FilterDefault = log | stop</c><br/>
+ <c>Filter = {FilterId, {FilterFun, FilterConfig}}</c></tag>
+ <item>
+ <p>Add the specified <seealso marker="logger#add_logger_filter/2">
+ logger filters</seealso>.</p>
+ <p>Only one entry of this option is allowed.</p></item>
+ <tag><c>ModuleLevel = {module_level, Level, [Module]}</c></tag>
+ <item>
+ <p>This option configures <seealso marker="logger#set_module_level/2">
+ module log level</seealso>.</p>
+ <p>It is allowed to have multiple entries of this option.</p></item>
+ </taglist>
+ <p>Examples:</p>
+ <list>
+ <item>
+	  <p>Output logs into the file &quot;log/erlang.log&quot;</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{ logger_std_h => #{ type => {file,"log/erlang.log"}}}}]}]}].
+ </code>
+ </item>
+ <item>
+ <p>Output logs in single line format</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{ formatter => { logger_formatter,#{ single_line => true}}}}]}]}].
+ </code>
+ </item>
+ <item>
+ <p>Add the pid to each log event</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{ formatter => { logger_formatter,
+ #{ template => [time," ",pid," ",msg,"\n"]}}
+ }}]}]}].
+ </code>
+ </item>
+ <item>
+ <p>Use a different file for debug logging</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{ level => error,
+ logger_std_h => #{ type => {file, "log/erlang.log"}}}},
+ {handler, info, logger_std_h,
+ #{ level => debug,
+ logger_std_h => #{ type => {file, "log/debug.log"}}}}
+ ]}]}].
+ </code>
+ </item>
+ </list>
</section>
<section>
- <title>Logger configuration</title>
+ <title>Global Logger Configuration</title>
<taglist>
- <tag><c>level</c></tag>
+ <tag><c>level = </c><seealso marker="logger#type-level">
+ <c>logger:level()</c></seealso></tag>
<item>
- <p>Specifies the severity level to log.</p>
+ <p>Specifies the global log level to log.</p>
+ <p>See section <seealso marker="#log_level">Log
+ Level</seealso> for a listing and description of
+ possible log levels.</p>
+ <p>The initial value of this option is set by the Kernel
+ configuration
+ parameter <seealso marker="kernel_app#logger_level">
+ <c>logger_level</c></seealso>. It can be changed during
+ runtime
+ with <seealso marker="logger#set_logger_config-2">
+ <c>logger:set_logger_config(level,NewLevel)</c></seealso>.</p>
</item>
- <tag><c>filters</c></tag>
+ <tag><c>filters = [{</c><seealso marker="logger#type-filter_id">
+ <c>logger:filter_id()</c></seealso><c>,</c>
+ <seealso marker="logger#type-filter">
+ <c>logger:filter()</c></seealso><c>}]</c></tag>
<item>
- <p>Logger filters are added or removed with
+ <p>Global filters are added and removed with
<seealso marker="logger#add_logger_filter-2">
<c>logger:add_logger_filter/2</c></seealso> and
<seealso marker="logger#remove_logger_filter-1">
<c>logger:remove_logger_filter/1</c></seealso>,
respectively.</p>
- <p>See <seealso marker="#Filter">Filter</seealso> for more
- information.</p>
- <p>By default, no filters exist.</p>
+ <p>See section <seealso marker="#filters">Filters</seealso>
+ for more information.</p>
+ <p>Default is <c>[]</c>, that is, no filters exist.</p>
</item>
- <tag><c>filter_default = log | stop</c></tag>
+ <tag><marker id="filter_default"/><c>filter_default = log | stop</c></tag>
<item>
<p>Specifies what to do with an event if all filters
- return <c>ignore</c>.</p>
+ return <c>ignore</c>, or if no filters exist.</p>
+ <p>See section <seealso marker="#filters">Filters</seealso>
+ for more information about how this option is used.</p>
<p>Default is <c>log</c>.</p>
</item>
- <tag><c>handlers</c></tag>
- <item>
- <p>Handlers are added or removed with
- <seealso marker="logger#add_handler-3">
- <c>logger:add_handler/3</c></seealso> and
- <seealso marker="logger#remove_handler-1">
- <c>logger:remove_handler/1</c></seealso>,
- respectively.</p>
- <p>See <seealso marker="#Handler">Handler</seealso> for more
- information.</p>
- </item>
</taglist>
</section>
<section>
<marker id="handler_configuration"/>
- <title>Handler configuration</title>
+ <title>Handler Configuration</title>
<taglist>
- <tag><c>level</c></tag>
+ <tag><c>level = </c><seealso marker="logger#type-level">
+ <c>logger:level()</c></seealso></tag>
<item>
- <p>Specifies the severity level to log.</p>
+ <p>Specifies the log level which the handler logs.</p>
+ <p>See section <seealso marker="#log_level">Log
+ Level</seealso> for a listing and description of
+ possible log levels.</p>
+ <p>The log level can be specified when adding the handler,
+ or changed during runtime with, for
+ instance, <seealso marker="logger#set_handler_config/3">
+ <c>logger:set_handler_config/3</c></seealso>.</p>
+ <p>Default is <c>info</c>.</p>
</item>
- <tag><c>filters</c></tag>
+ <tag><c>filters = [{</c><seealso marker="logger#type-filter_id">
+ <c>logger:filter_id()</c></seealso><c>,</c>
+ <seealso marker="logger#type-filter">
+ <c>logger:filter()</c></seealso><c>}]</c></tag>
<item>
<p>Handler filters can be specified when adding the handler,
- or added or removed later with
+ or added or removed during runtime with
<seealso marker="logger#add_handler_filter-3">
<c>logger:add_handler_filter/3</c></seealso> and
<seealso marker="logger#remove_handler_filter-2">
<c>logger:remove_handler_filter/2</c></seealso>,
respectively.</p>
- <p>See <seealso marker="#Filter">Filter</seealso> for more
+ <p>See <seealso marker="#filters">Filters</seealso> for more
information.</p>
- <p>By default, no filters exist.</p>
+ <p>Default is <c>[]</c>, that is, no filters exist.</p>
</item>
- <tag><c>filter_default = log | stop</c></tag>
+	<tag><c>filter_default = log | stop</c></tag>
<item>
<p>Specifies what to do with an event if all filters
- return <c>ignore</c>.</p>
+ return <c>ignore</c>, or if no filters exist.</p>
+ <p>See section <seealso marker="#filters">Filters</seealso>
+ for more information about how this option is used.</p>
<p>Default is <c>log</c>.</p>
</item>
- <tag><c>depth = pos_integer() | unlimited</c></tag>
- <item>
- <p>Specifies if the depth of terms in the log events shall
- be limited by using control characters <c>~P</c>
- and <c>~W</c> instead of <c>~p</c> and <c>~w</c>,
- respectively. See
- <seealso marker="stdlib:io#format-1"><c>io:format</c></seealso>.</p>
- </item>
- <tag><c>max_size = pos_integer() | unlimited</c></tag>
+ <tag><c>formatter = {module(),</c><seealso marker="logger#type-formatter_config">
+ <c>logger:formatter_config()</c></seealso><c>}</c></tag>
<item>
- <p>Specifies if the size of a log event shall be limited by
- truncating the formatted string.</p>
- </item>
- <tag><c>formatter = {Module::module(),Extra::term()}</c></tag>
- <item>
- <p>See <seealso marker="#Formatter">Formatter</seealso> for more
+ <p>The formatter which the handler can use for converting
+ the log event term to a printable string.</p>
+ <p>See <seealso marker="#formatters">Formatters</seealso> for more
information.</p>
- <p>The default module is <seealso marker="logger_formatter">
- <c>logger_formatter</c></seealso>, and <c>Extra</c> is
- it's configuration map.</p>
+        <p>Default
+          is <c>{logger_formatter,DefaultFormatterConfig}</c>. See
+          the <seealso marker="logger_formatter">
+          <c>logger_formatter(3)</c></seealso>
+          manual page for information about this formatter and its
+          default configuration.</p>
</item>
+ <tag><c>HandlerConfig, atom() = term()</c></tag>
+ <item>
+ <p>Any keys not listed above are considered to be handler
+ specific configuration. The configuration of the Kernel
+ handlers can be found in
+ the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>
+ and
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c>
+ </seealso> manual pages.</p>
+ </item>
</taglist>
- <p>Note that <c>level</c> and <c>filters</c> are obeyed by
+ <p>Notice that <c>level</c> and <c>filters</c> are obeyed by
Logger itself before forwarding the log events to each
- handler, while <c>depth</c>, <c>max_size</c>
- and <c>formatter</c> are left to the handler
- implementation. All Logger's built-in handlers do apply these
- configuration parameters before printing.</p>
+      handler, while <c>formatter</c> and all handler specific
+ options are left to the handler implementation.</p>
+ <p>All Logger's built-in handlers will call the given formatter
+ before printing.</p>
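+    <p>As an illustration only, a handler can be added with explicit
+      values for some of these configuration parameters. The handler
+      id <c>my_handler</c> and the file name below are just
+      examples:</p>
+<code>
+logger:add_handler(my_handler, logger_std_h,
+                   #{level => error,
+                     filter_default => log,
+                     formatter => {logger_formatter, #{single_line => false}},
+                     logger_std_h => #{type => {file,"./error.log"}}}).
+</code>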
</section>
</section>
<section>
<marker id="compatibility"/>
- <title>Backwards compatibility with error_logger</title>
- <p>Logger provides backwards compatibility with the old
+ <title>Backwards Compatibility with error_logger</title>
+ <p>Logger provides backwards compatibility with
<c>error_logger</c> in the following ways:</p>
<taglist>
- <tag>Legacy event handlers</tag>
- <item>
- <p>To use event handlers written for <c>error_logger</c>, just
- add your event handler with</p>
- <code>
-error_logger:add_report_handler/1,2.
- </code>
- <p>This will automatically start the <c>error_logger</c>
- event manager, and add <c>error_logger</c> as a
- handler to <c>logger</c>, with configuration</p>
-<code>
-#{level=>info,
- filter_default=>log,
- filters=>[]}.
-</code>
- <p>Note that this handler will ignore events that do not
- originate from the old <c>error_logger</c> API, or from
- within OTP. This means that if your code uses the logger API
- for logging, then your log events will be discarded by this
- handler.</p>
- <p>Also note that <c>error_logger</c> is not overload
- protected.</p>
- </item>
- <tag>Logger API</tag>
+ <tag>API for Logging</tag>
<item>
- <p>The old <c>error_logger</c> API still exists, but should
- only be used by legacy code. It will be removed in a later
+ <p>The <c>error_logger</c> API still exists, but should only
+ be used by legacy code. It will be removed in a later
release.</p>
+ <p>Calls
+ to <seealso marker="error_logger#error_report-1">
+ <c>error_logger:error_report/1,2</c></seealso>,
+ <seealso marker="error_logger#error_msg-1">
+ <c>error_logger:error_msg/1,2</c></seealso>, and
+ corresponding functions for warning and info messages, are
+ all forwarded to Logger as calls
+ to <seealso marker="logger#log-3">
+ <c>logger:log(Level,Report,Metadata)</c></seealso>.</p>
+        <p><c>Level = error | warning | info</c> is taken
+          from the function name. <c>Report</c> contains the actual
+          log message, and <c>Metadata</c> contains additional
+          information which can be used for creating backwards
+          compatible events for legacy <c>error_logger</c> event
+          handlers. See
+          section <seealso marker="#legacy_event_handlers">Legacy
+          Event Handlers</seealso>.</p>
</item>
- <tag>Output format</tag>
+ <tag>Output Format</tag>
<item>
<p>To get log events on the same format as produced
by <c>error_logger_tty_h</c> and <c>error_logger_file_h</c>,
use the default formatter, <c>logger_formatter</c>, with
- configuration parameter <c>legacy_header=>true</c>. This is
+ configuration parameter <c>legacy_header => true</c>. This is
also the default.</p>
</item>
- <tag>Default format of log events from OTP</tag>
+ <tag>Default Format of Log Events from OTP</tag>
<item>
<p>By default, all log events originating from within OTP,
except the former so called "SASL reports", look the same as
before.</p>
</item>
- <tag>SASL reports</tag>
+ <tag><marker id="sasl_reports"/>SASL Reports</tag>
<item>
<p>By SASL reports we mean supervisor reports, crash reports
and progress reports.</p>
@@ -415,95 +706,134 @@ error_logger:add_report_handler/1,2.
named <c>sasl_report_tty_h</c>
and <c>sasl_report_file_h</c>.</p>
<p>The destination of these log events were configured by
- environment variables for the SASL application.</p>
+ <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL
+ configuration parameters</seealso>.</p>
<p>Due to the specific event handlers, the output format
slightly differed from other log events.</p>
- <p>As of OTP-21, the concept of SASL reports is removed,
- meaning that the default behavior is as follows:</p>
+ <p>As of Erlang/OTP 21.0, the concept of SASL reports is
+ removed, meaning that the default behaviour is as
+ follows:</p>
<list>
- <item>Supervisor reports, crash reports and progress reports
+ <item>Supervisor reports, crash reports, and progress reports
are no longer connected to the SASL application.</item>
<item>Supervisor reports and crash reports are logged by
default.</item>
<item>Progress reports are not logged by default, but can be
- enabled with the kernel environment
- variable <c>logger_log_progress</c>.</item>
+ enabled with the Kernel configuration
+ parameter <seealso marker="kernel_app#logger_progress_reports">
+ <c>logger_progress_reports</c></seealso>.</item>
<item>The output format is the same for all log
events.</item>
</list>
- <p>If the old behavior is preferred, the kernel environment
- variable <c>logger_sasl_compatible</c> can be set
- to <c>true</c>. The old SASL environment variables can then
- be used as before, and the SASL reports will only be printed
- if the SASL application is running - through a second log
- handler named <c>sasl_h</c>.</p>
+          <p>If the old behaviour is preferred, the Kernel configuration
+            parameter <seealso marker="kernel_app#logger_sasl_compatible">
+ <c>logger_sasl_compatible</c></seealso> can be set
+ to <c>true</c>. The
+ <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL
+ configuration parameters</seealso> can then be used as
+ before, and the SASL reports will only be printed if the
+ SASL application is running, through a second log handler
+ named <c>sasl</c>.</p>
<p>All SASL reports have a metadata
- field <c>domain=>[beam,erlang,otp,sasl]</c>, which can be
- used, for example, by filters to to stop or allow the
- events.</p>
+ field <c>domain => [beam,erlang,otp,sasl]</c>, which can be
+ used, for example, by filters to stop or allow the
+ log events.</p>
+ <p>See the <seealso marker="sasl:error_logging">SASL User's
+ Guide</seealso> for more information about the old SASL
+ error logging functionality.</p>
+ </item>
+ <tag><marker id="legacy_event_handlers"/>Legacy Event Handlers</tag>
+ <item>
+ <p>To use event handlers written for <c>error_logger</c>, just
+ add your event handler with</p>
+ <code>
+error_logger:add_report_handler/1,2.
+ </code>
+ <p>This will automatically start the <c>error_logger</c>
+ event manager, and add <c>error_logger</c> as a
+ handler to <c>logger</c>, with configuration</p>
+<code>
+#{level => info,
+ filter_default => log,
+ filters => []}.
+</code>
+ <p>Notice that this handler will ignore events that do not
+ originate from the <c>error_logger</c> API, or from within
+ OTP. This means that if your code uses the Logger API for
+ logging, then your log events will be discarded by this
+ handler.</p>
+ <p>Also notice that <c>error_logger</c> is not overload
+ protected.</p>
</item>
</taglist>
</section>
<section>
- <title>Error handling</title>
+ <title>Error Handling</title>
<p>Log data is expected to be either a format string and
- arguments, a string (unicode:chardata), or a report (map or
+ arguments, a string
+ (<seealso marker="stdlib:unicode#type-chardata">
+ <c>unicode:chardata()</c></seealso>), or a report (map or
key-value list) which can be converted to a format string and
- arguments by the handler. A default report callback should be
- included in the log event's metadata, which can be used for
- converting the report to a format string and arguments. The
- handler might also do a custom conversion if the default format
- is not desired.</p>
- <p><c>logger</c> does, to a certain extent, check its input data
+ arguments by the handler. If a report is given, a default report
+ callback can be included in the log event's metadata. The
+ handler can use this callback for converting the report to a
+ format string and arguments. If the format obtained by the
+ provided callback is not desired, or if there is no provided
+ callback, the handler must do a custom conversion.</p>
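+    <p>As a sketch only, where the report keys and the callback are
+      made up for the example, a report and a report callback can be
+      passed to Logger like this:</p>
+<code>
+logger:log(error,
+           #{name => my_name, reason => "It crashed"},
+           #{report_cb => fun(#{name := N, reason := R}) ->
+                                  {"~p failed: ~s", [N, R]}
+                          end}).
+</code>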
+ <p>Logger does, to a certain extent, check its input data
before forwarding a log event to the handlers, but it does not
evaluate conversion funs or check the validity of format strings
and arguments. This means that any filter or handler must be
careful when formatting the data of a log event, making sure
that it does not crash due to bad input data or faulty
callbacks.</p>
- <p>If a filter or handler still crashes, logger will remove the
- filter or handler in question from the configuration, and then
- print a short error message on the console. A debug event
- containing the crash reason and other details is also issued,
- and can be seen if a handler is installed which logs on debug
- level.</p>
+ <p>If a filter or handler still crashes, Logger will remove the
+ filter or handler in question from the configuration, and print
+ a short error message to the terminal. A debug event containing
+ the crash reason and other details is also issued, and can be
+ seen if a handler logging debug events is installed.</p>
</section>
<section>
<title>Example: add a handler to log debug events to file</title>
- <p>When starting an erlang node, the default behavior is that all
+ <p>When starting an Erlang node, the default behaviour is that all
log events with level info and above are logged to the
- console. In order to also log debug events, you can either
+ terminal. In order to also log debug events, you can either
change the global log level to <c>debug</c> or add a separate
handler to take care of this. In this example we will add a new
handler which prints the debug events to a separate file.</p>
- <p>First, we add an instance of logger_std_h with
+ <p>First, we add an instance of <c>logger_std_h</c> with
type <c>{file,File}</c>, and we set the handler's level
to <c>debug</c>:</p>
<pre>
-1> <input>Config = #{level=>debug,logger_std_h=>#{type=>{file,"./debug.log"}}}.</input>
+1> <input>Config = #{level => debug, logger_std_h => #{type => {file,"./debug.log"}}}.</input>
#{logger_std_h => #{type => {file,"./debug.log"}},
level => debug}
2> <input>logger:add_handler(debug_handler,logger_std_h,Config).</input>
ok</pre>
- <p>By default, the handler receives all events, so we need to add a filter
- to stop all non-debug events:</p>
+ <p>By default, the handler receives all events
+ (<c>filter_default=log</c>, see
+ section <seealso marker="#filters">Filters</seealso> for more
+ details), so we need to add a filter to stop all non-debug
+ events. The built-in
+ filter <seealso marker="logger_filters#level-2">
+ <c>logger_filters:level/2</c></seealso>
+ is used for this:</p>
<pre>
-3> <input>Fun = fun(#{level:=debug}=Log,_) -> Log; (_,_) -> stop end.</input>
-#Fun&lt;erl_eval.12.98642416>
-4> <input>logger:add_handler_filter(debug_handler,allow_debug,{Fun,[]}).</input>
+3> <input>logger:add_handler_filter(debug_handler,stop_non_debug,
+ {fun logger_filters:level/2,{stop,neq,debug}}).</input>
ok</pre>
- <p>And finally, we need to make sure that the logger itself allows
+ <p>And finally, we need to make sure that Logger itself allows
debug events. This can either be done by setting the global
- logger level:</p>
+ log level:</p>
<pre>
-5> <input>logger:set_logger_config(level,debug).</input>
+4> <input>logger:set_logger_config(level,debug).</input>
ok</pre>
<p>Or by allowing debug events from one or a few modules only:</p>
<pre>
-6> <input>logger:set_module_level(mymodule,debug).</input>
+5> <input>logger:set_module_level(mymodule,debug).</input>
ok</pre>
</section>
@@ -512,130 +842,112 @@ ok</pre>
<title>Example: implement a handler</title>
<p>The only requirement that a handler MUST fulfill is to export
the following function:</p>
- <code>log(logger:log(),logger:config()) ->ok</code>
- <p>It may also implement the following callbacks:</p>
+ <code>log(logger:log_event(),logger:config()) -> ok</code>
+ <p>It can optionally also implement the following callbacks:</p>
<code>
-adding_handler(logger:handler_id(),logger:config()) -> {ok,logger:config()} | {error,term()}
-removing_handler(logger:handler_id()) -> ok
-changing_config(logger:handler_id(),logger:config(),logger:config()) -> {ok,logger:config()} | {error,term()}
+adding_handler(logger:config()) -> {ok,logger:config()} | {error,term()}
+removing_handler(logger:config()) -> ok
+changing_config(logger:config(),logger:config()) -> {ok,logger:config()} | {error,term()}
</code>
- <p>When logger:add_handler(Id,Module,Config) is called, logger
- will first call Module:adding_handler(Id,Config), and if it
- returns {ok,NewConfig} the NewConfig is written to the
- configuration database. After this, the handler may receive log
- events as calls to Module:log/2.</p>
+    <p>When <c>logger:add_handler(Id,HModule,Config)</c> is called,
+ Logger first calls <c>HModule:adding_handler(Config)</c>. If
+ this function returns <c>{ok,NewConfig}</c>, Logger
+ writes <c>NewConfig</c> to the configuration database, and
+ the <c>logger:add_handler/3</c> call returns. After this, the
+ handler is installed and must be ready to receive log events as
+ calls to <c>HModule:log/2</c>.</p>
<p>A handler can be removed by calling
- logger:remove_handler(Id). logger will call
- Module:removing_handler(Id), and then remove the handler's
- configuration from the configuration database.</p>
- <p>When logger:set_handler_config is called, logger calls
- Module:changing_config(Id,OldConfig,NewConfig). If this function
- returns ok, the NewConfig is written to the configuration
- database.</p>
-
- <p>A simple handler which prints to the console could be
- implemented as follows:</p>
+ <c>logger:remove_handler(Id)</c>. Logger calls
+ <c>HModule:removing_handler(Config)</c>, and removes the
+ handler's configuration from the configuration database.</p>
+ <p>When <c>logger:set_handler_config/2,3</c>
+ or <c>logger:update_handler_config/2</c> is called, Logger
+ calls <c>HModule:changing_config(OldConfig,NewConfig)</c>. If
+ this function returns <c>{ok,NewConfig}</c>, Logger
+ writes <c>NewConfig</c> to the configuration database.</p>
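+    <p>As an illustration only, a minimal <c>changing_config/2</c>
+      callback could carry over state created
+      by <c>adding_handler/1</c>, here assuming
+      the <c>myhandler_fd</c> key used by the file-based example
+      below:</p>
+<code>
+changing_config(OldConfig, NewConfig) ->
+    %% Keep the already opened file descriptor so that a
+    %% reconfiguration does not lose it.
+    Fd = maps:get(myhandler_fd, OldConfig),
+    {ok, NewConfig#{myhandler_fd => Fd}}.
+</code>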
+
+ <p>A simple handler that prints to the terminal can be implemented
+ as follows:</p>
<code>
-module(myhandler).
-export([log/2]).
-log(#{msg:={report,R}},_) ->
- io:format("~p~n",[R]);
-log(#{msg:={string,S}},_) ->
- io:put_chars(S);
-log(#{msg:={F,A}},_) ->
- io:format(F,A).
+log(LogEvent,#{formatter:={FModule,FConfig}}) ->
+ io:put_chars(FModule:format(LogEvent,FConfig)).
</code>
<p>A simple handler which prints to file could be implemented like
this:</p>
<code>
-module(myhandler).
--export([adding_handler/2, removing_handler/1, log/2]).
+-export([adding_handler/1, removing_handler/1, log/2]).
-export([init/1, handle_call/3, handle_cast/2, terminate/2]).
-adding_handler(Id,Config) ->
+adding_handler(#{myhandler_file:=File}=Config) ->
{ok,Fd} = file:open(File,[append,{encoding,utf8}]),
- {ok,Config#{myhandler_fd=>Fd}}.
+ {ok,Config#{myhandler_fd => Fd}}.
-removing_handler(Id,#{myhandler_fd:=Fd}) ->
+removing_handler(#{myhandler_fd:=Fd}) ->
_ = file:close(Fd),
ok.
-log(#{msg:={report,R}},#{myhandler_fd:=Fd}) ->
- io:format(Fd,"~p~n",[R]);
-log(#{msg:={string,S}},#{myhandler_fd:=Fd}) ->
- io:put_chars(Fd,S);
-log(#{msg:={F,A}},#{myhandler_fd:=Fd}) ->
- io:format(Fd,F,A).
+log(LogEvent,#{myhandler_fd:=Fd,formatter:={FModule,FConfig}}) ->
+ io:put_chars(Fd,FModule:format(LogEvent,FConfig)).
</code>
- <p>Note that none of the above handlers have any overload
+ <note><p>The above handlers do not have any overload
protection, and all log events are printed directly from the
- client process. Neither do the handlers use the formatter or
- in any way add time or other metadata to the printed events.</p>
-
- <p>For examples of overload protection, please refer to the
- implementation
- of <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>
- and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c>
+ client process.</p></note>
+
+ <p>For information and examples of overload protection, please
+ refer to
+ section <seealso marker="#overload_protection">Protecting the
+ Handler from Overload</seealso>, and the implementation
+ of <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c>
</seealso>.</p>
<p>Below is a simpler example of a handler which logs through one
- single process, and uses the default formatter to gain a common
- look of the log events.</p>
- <p>It also uses the metadata field <c>report_cb</c>, if it exists,
- to print reports in the way the event issuer suggests. The
- formatter will normally do this, but if the handler either has
- an own default (as in this example) or if the
- given <c>report_cb</c> should not be used at all, then the
- handler must take care of this itself.</p>
+ single process.</p>
<code>
-module(myhandler).
--export([adding_handler/2, removing_handler/1, log/2]).
+-export([adding_handler/1, removing_handler/1, log/2]).
-export([init/1, handle_call/3, handle_cast/2, terminate/2]).
-adding_handler(Id,Config) ->
+adding_handler(Config) ->
{ok,Pid} = gen_server:start(?MODULE,Config),
- {ok,Config#{myhandler_pid=>Pid}}.
+ {ok,Config#{myhandler_pid => Pid}}.
-removing_handler(Id,#{myhandler_pid:=Pid}) ->
+removing_handler(#{myhandler_pid:=Pid}) ->
gen_server:stop(Pid).
-log(Log,#{myhandler_pid:=Pid} = Config) ->
- gen_server:cast(Pid,{log,Log,Config}).
+log(LogEvent,#{myhandler_pid:=Pid} = Config) ->
+ gen_server:cast(Pid,{log,LogEvent,Config}).
init(#{myhandler_file:=File}) ->
{ok,Fd} = file:open(File,[append,{encoding,utf8}]),
- {ok,#{file=>File,fd=>Fd}}.
+ {ok,#{file => File, fd => Fd}}.
handle_call(_,_,State) ->
{reply,{error,bad_request},State}.
-handle_cast({log,Log,Config},#{fd:=Fd} = State) ->
- do_log(Fd,Log,Config),
+handle_cast({log,LogEvent,Config},#{fd:=Fd} = State) ->
+ do_log(Fd,LogEvent,Config),
{noreply,State}.
terminate(Reason,#{fd:=Fd}) ->
_ = file:close(Fd),
ok.
-do_log(Fd,#{msg:={report,R}} = Log, Config) ->
- Fun = maps:get(report_cb,Config,fun my_report_cb/1,
- {F,A} = Fun(R),
- do_log(Fd,Log#{msg=>{F,A},Config);
-do_log(Fd,Log,#{formatter:={FModule,FConfig}}) ->
- String = FModule:format(Log,FConfig),
+do_log(Fd,LogEvent,#{formatter:={FModule,FConfig}}) ->
+ String = FModule:format(LogEvent,FConfig),
io:put_chars(Fd,String).
-
-my_report_cb(R) ->
- {"~p",[R]}.
</code>
</section>
<section>
<marker id="overload_protection"/>
- <title>Protecting the handler from overload</title>
+ <title>Protecting the Handler from Overload</title>
<p>In order for the built-in handlers to survive, and stay responsive,
during periods of high load (i.e. when huge numbers of incoming
log requests must be handled), a mechanism for overload protection
@@ -646,7 +958,7 @@ my_report_cb(R) ->
as follows:</p>
<section>
- <title>Message queue length</title>
+ <title>Message Queue Length</title>
<p>The handler process keeps track of the length of its message
queue and reacts in different ways depending on the current status.
The purpose is to keep the handler in, or (as quickly as possible),
@@ -663,7 +975,7 @@ my_report_cb(R) ->
and as long as the length of the message queue is lower, all log
requests are handled asynchronously. This simply means that the
process sending the log request (by calling a log function in the
- logger API) does not wait for a response from the handler but
+ Logger API) does not wait for a response from the handler but
continues executing immediately after the request (i.e. it will not
be affected by the time it takes the handler to print to the log
device). If the message queue grows larger than this value, however,
@@ -697,10 +1009,20 @@ my_report_cb(R) ->
</item>
</taglist>
- <p>For the overload protection algorithm to work properly, it is a
- requirement that:</p>
+ <p>For the overload protection algorithm to work properly, it is
+ required that:</p>
+
+ <p><c>toggle_sync_qlen =&lt; drop_new_reqs_qlen =&lt; flush_reqs_qlen</c></p>
+
+ <p>and that:</p>
+
+ <p><c>drop_new_reqs_qlen &gt; 1</c></p>
- <p><c>toggle_sync_qlen &lt; drop_new_reqs_qlen &lt; flush_reqs_qlen</c></p>
+  <p>If <c>toggle_sync_qlen</c> is set to <c>0</c>, the handler handles all
+  requests synchronously. Setting <c>toggle_sync_qlen</c> to the same value
+  as <c>drop_new_reqs_qlen</c> disables the synchronous mode. Likewise, setting
+  <c>drop_new_reqs_qlen</c> to the same value as <c>flush_reqs_qlen</c>
+  disables the drop mode.</p>
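+
+  <p>As a sketch only, where the handler id <c>my_standard_h</c> and
+  the values are just examples, the synchronous mode of a standard
+  handler instance can be disabled by setting <c>toggle_sync_qlen</c>
+  equal to <c>drop_new_reqs_qlen</c>:</p>
+<code>
+logger:add_handler(my_standard_h, logger_std_h,
+                   #{logger_std_h => #{toggle_sync_qlen => 100,
+                                       drop_new_reqs_qlen => 100,
+                                       flush_reqs_qlen => 1000}}).
+</code>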
<p>During high load scenarios, the length of the handler message queue
rarely grows in a linear and predictable way. Instead, whenever the
@@ -731,14 +1053,14 @@ logger:add_handler(my_standard_h, logger_std_h,
</section>
<section>
- <title>Controlling bursts of log requests</title>
+ <title>Controlling Bursts of Log Requests</title>
<p>A potential problem with large bursts of log requests, is that log files
may get full or wrapped too quickly (in the latter case overwriting
previously logged data that could be of great importance). For this reason,
both built-in handlers offer the possibility to set a maximum level of how
many requests to process with a certain time frame. With this burst control
feature enabled, the handler will take care of bursts of log requests
- without choking log files, or the console, with massive amounts of
+ without choking log files, or the terminal, with massive amounts of
printouts. These are the configuration parameters:</p>
<taglist>
@@ -772,7 +1094,7 @@ logger:add_handler(my_disk_log_h, logger_disk_log_h,
</section>
<section>
- <title>Terminating a large handler</title>
+ <title>Terminating a Large Handler</title>
<p>A handler process may grow large even if it can manage peaks of high load
without crashing. The overload protection mechanism includes user configurable
levels for a maximum allowed message queue length and maximum allowed memory
@@ -809,7 +1131,14 @@ logger:add_handler(my_disk_log_h, logger_disk_log_h,
<section>
<title>See Also</title>
- <p><seealso marker="error_logger"><c>error_logger(3)</c></seealso>,
- <seealso marker="sasl:sasl_app"><c>SASL(6)</c></seealso></p>
+ <p>
+ <seealso marker="disk_log"><c>disk_log(3)</c></seealso>,
+ <seealso marker="error_logger"><c>error_logger(3)</c></seealso>,
+ <seealso marker="logger"><c>logger(3)</c></seealso>,
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso>,
+ <seealso marker="logger_filters"><c>logger_filters(3)</c></seealso>,
+ <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>,
+ <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>,
+ <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p>
</section>
</chapter>
diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml
index 90cc4fec30..20b49b8ca0 100644
--- a/lib/kernel/doc/src/logger_disk_log_h.xml
+++ b/lib/kernel/doc/src/logger_disk_log_h.xml
@@ -33,21 +33,21 @@
<file>logger_disk_log_h.xml</file>
</header>
<module>logger_disk_log_h</module>
- <modulesummary>A disk_log based handler for the Logger
- application.</modulesummary>
+  <modulesummary>A disk_log based handler for Logger.</modulesummary>
<description>
- <p>This is a handler for the Logger application that offers circular
- (wrapped) logs by using the disk_log application. Multiple instances
- of this handler can be added to logger, and each instance will print to
+ <p>This is a handler for Logger that offers circular
+ (wrapped) logs by using <seealso marker="disk_log"><c>disk_log</c></seealso>.
+ Multiple instances
+ of this handler can be added to Logger, and each instance prints to
its own disk_log file, created with the name and settings specified in
the handler configuration.</p>
<p>The default standard handler,
<seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, can be
- replaced by a disk_log handler at startup of the kernel application.
+ replaced by a disk_log handler at startup of the Kernel application.
See an example of this below.</p>
<p>The handler has an overload protection mechanism that will keep the handler
- process and the kernel application alive during a high load of log
+ process and the Kernel application alive during a high load of log
requests. How this feature works, and how to modify the configuration,
is described in the
<seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
@@ -121,11 +121,11 @@ logger:add_handler(my_disk_log_h, logger_disk_log_h,
#{filesync_repeat_interval => 1000}}).
</code>
<p>In order to use the disk_log handler instead of the default standard
- handler when starting en Erlang node, use the kernel configuration parameter
- <seealso marker="kernel_app#configuration"><c>logger_dest</c></seealso> with
- value <c>{disk_log,FileName}</c>. Example:</p>
+ handler when starting an Erlang node, change the Kernel default logger to
+ use disk_log. Example:</p>
<code type="none">
-erl -kernel logger_dest '{disk_log,"./system_disk_log"}'
+erl -kernel logger '[{handler,default,logger_disk_log_h,
+ #{ disk_log_opts => #{ file => "./system_disk_log"}}}]'
</code>
</description>
@@ -141,6 +141,12 @@ erl -kernel logger_dest '{disk_log,"./system_disk_log"}'
</funcs>
+ <section>
+ <title>See Also</title>
+ <p><seealso marker="logger"><c>logger(3)</c></seealso></p>
+ <p><seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso></p>
+ <p><seealso marker="disk_log"><c>disk_log(3)</c></seealso></p>
+ </section>
</erlref>
diff --git a/lib/kernel/doc/src/logger_filters.xml b/lib/kernel/doc/src/logger_filters.xml
index d742391e35..f92181ea3f 100644
--- a/lib/kernel/doc/src/logger_filters.xml
+++ b/lib/kernel/doc/src/logger_filters.xml
@@ -33,72 +33,119 @@
<file>logger_filters.xml</file>
</header>
<module>logger_filters</module>
- <modulesummary>Filters to use with logger.</modulesummary>
+ <modulesummary>Filters to use with Logger.</modulesummary>
<description>
- <p>Filters to use with logger. All functions exported from this
- module can be used as logger or handler
+ <p>All functions exported from this module can be used as logger
+ or handler
filters. See <seealso marker="logger#add_logger_filter-2">
<c>logger:add_logger_filter/2</c></seealso>
and <seealso marker="logger#add_handler_filter-3">
- <c>logger:add_handler_filter/3</c></seealso>
- for more information about how filters are added.</p>
+ <c>logger:add_handler_filter/3</c></seealso> for more information
+ about how filters are added.</p>
+ <p>Filters are removed with <seealso marker="logger#remove_logger_filter-1">
+ <c>logger:remove_logger_filter/1</c></seealso>
+ and <seealso marker="logger#remove_handler_filter-2">
+ <c>logger:remove_handler_filter/2</c></seealso>.</p>
</description>
<funcs>
<func>
<name name="domain" arity="2"/>
- <fsummary>Filter log events based on the domain field in metadata.</fsummary>
+ <fsummary>Filter log events based on the domain field in
+ metadata.</fsummary>
<desc>
- <p>This filter provides a way of filtering log events based on a
- <c>domain</c> field <c>Metadata</c>.</p>
-
- <p>The <c><anno>Extra</anno></c> parameter is specified when
- adding the filter
- via <seealso marker="logger#add_logger_filter-2">
- <c>logger:add_logger_filter/2</c></seealso>
- or <seealso marker="logger#add_handler_filter-3">
- <c>logger:add_handler_filter/3</c></seealso>.</p>
-
- <p>The filter compares the value of the <c>domain</c> field
- in the log event's metadata (<c>Domain</c>)
- to <c><anno>MatchDomain</anno></c> as follows:</p>
-
- <taglist>
- <tag><c><anno>Compare</anno> = starts_with</c></tag>
- <item><p>The filter matches if <c>MatchDomain</c> is a prefix
- of <c>Domain</c>.</p></item>
- <tag><c><anno>Compare</anno> = prefix_of</c></tag>
- <item><p>The filter matches if <c>Domain</c> is a prefix
- of <c>MatchDomain</c>.</p></item>
- <tag><c><anno>Compare</anno> = equals</c></tag>
- <item><p>The filter matches if <c>Domain</c> is equal
- to <c>MatchDomain</c>.</p></item>
- <tag><c><anno>Compare</anno> = no_domain</c></tag>
- <item><p>The filter matches if there is no domain field in
- metadata. In this case <c><anno>MatchDomain</anno></c> shall
- be <c>[]</c>.</p></item>
- </taglist>
-
- <p>If the filter matches and <c><anno>Action</anno> =
- log</c>, the log event is allowed. If the filter matches
- and <c><anno>Action</anno> = stop</c>, the log event is
- stopped.</p>
-
- <p>If the filter does not match, it returns <c>ignore</c>,
- meaning that other filters, or the value of the
- configuration parameter <c>filter_default</c>, will decide
- if the event is allowed or not.</p>
-
- <p>Log events that do not contain any domain field, will
- only match when <c><anno>Compare</anno> = no_domain</c>.</p>
-
- <p>Example: stop all events with
- domain <c>[beam,erlang,otp,sasl|_]</c></p>
-
- <code>
+ <p>This filter provides a way of filtering log events based on a
+ <c>domain</c> field in <c>Metadata</c>. This field is
+ optional, and the purpose of using it is to group log events
+ from, for example, a specific functional area. This allows
+ filtering or other specialized treatment in a Logger
+ handler.</p>
+
+ <p>A domain field must be a list of atoms, creating smaller
+ and more specialized domains as the list grows longer. The
+          biggest domain is <c>[]</c>, which comprises all
+ possible domains.</p>
+
+ <p>For example, consider the following domains:</p>
+ <pre>
+D1 = [beam,erlang,otp]
+D2 = [beam,erlang,otp,sasl]</pre>
+
+        <p><c>D1</c> is the bigger of the two, and is said to be a
+          super-domain of <c>D2</c>. <c>D2</c> is a
+          sub-domain of <c>D1</c>. Both <c>D1</c> and <c>D2</c> are
+          sub-domains of <c>[]</c>.</p>
+
+        <p>The above domains are used for logs originating from
+          Erlang/OTP. <c>D1</c> specifies that the log event comes from
+          Erlang/OTP in general, and <c>D2</c> indicates that the log event
+          is a so-called <seealso marker="logger_chapter#sasl_reports">SASL
+          report</seealso>.</p>
+
+ <p>The <c><anno>Extra</anno></c> parameter to
+ the <c>domain/2</c> function is specified when adding the
+ filter via <seealso marker="logger#add_logger_filter-2">
+ <c>logger:add_logger_filter/2</c></seealso>
+ or <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>.</p>
+
+ <p>The filter compares the value of the <c>domain</c> field
+ in the log event's metadata (<c>Domain</c>)
+ to <c><anno>MatchDomain</anno></c> as follows:</p>
+
+ <taglist>
+ <tag><c><anno>Compare</anno> = sub</c></tag>
+ <item>
+ <p>The filter matches if <c>Domain</c> is equal to or
+ a sub-domain of <c>MatchDomain</c>, that is,
+ if <c>MatchDomain</c> is a prefix of <c>Domain</c>.</p>
+ </item>
+ <tag><c><anno>Compare</anno> = super</c></tag>
+ <item>
+ <p>The filter matches if <c>Domain</c> is equal to or a
+ super-domain of <c>MatchDomain</c>, that is,
+ if <c>Domain</c> is a prefix of <c>MatchDomain</c>.</p>
+ </item>
+ <tag><c><anno>Compare</anno> = equal</c></tag>
+ <item>
+ <p>The filter matches if <c>Domain</c> is equal
+ to <c>MatchDomain</c>.</p>
+ </item>
+ <tag><c><anno>Compare</anno> = not_equal</c></tag>
+ <item>
+ <p>The filter matches if <c>Domain</c> is not equal
+ to <c>MatchDomain</c>, or if there is no domain field in
+ metadata.</p>
+ </item>
+ <tag><c><anno>Compare</anno> = undefined</c></tag>
+ <item><p>The filter matches if there is no domain field in
+ metadata. In this case <c><anno>MatchDomain</anno></c>
+ must be set to <c>[]</c>.</p>
+ </item>
+ </taglist>
+
+ <p>If the filter matches and <c><anno>Action</anno> = log</c>,
+ the log event is allowed. If the filter matches
+ and <c><anno>Action</anno> = stop</c>, the log event is
+ stopped.</p>
+
+ <p>If the filter does not match, it returns <c>ignore</c>,
+ meaning that other filters, or the value of the
+ configuration parameter <c>filter_default</c>, decide if the
+ event is allowed or not.</p>
+
+      <p>Log events that do not contain any domain field match only
+ when <c><anno>Compare</anno> = undefined</c>
+ or <c><anno>Compare</anno> = not_equal</c>.</p>
+
+ <p>Example: stop all events with
+ domain <c>[beam,erlang,otp,sasl|_]</c></p>
+
+ <code>
logger:set_handler_config(h1,filter_default,log). % this is the default
-Filter = {fun logger_filters:domain/2,{stop,starts_with,[beam,erlang,otp,sasl]}}.
+Filter = {fun logger_filters:domain/2,{stop,sub,[beam,erlang,otp,sasl]}}.
logger:add_handler_filter(h1,no_sasl,Filter).
ok</code>
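+
+      <p>A complementary sketch, where the handler id <c>h2</c> is
+        hypothetical: allow only events whose domain is equal to or a
+        sub-domain of <c>[beam,erlang,otp]</c>, and stop everything
+        else.</p>
+
+      <code>
+logger:set_handler_config(h2,filter_default,stop). % stop events that no filter matches
+Filter = {fun logger_filters:domain/2,{log,sub,[beam,erlang,otp]}}.
+logger:add_handler_filter(h2,only_otp,Filter).
+ok</code>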
</desc>
diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml
index 6a17e3641f..02f89b26be 100644
--- a/lib/kernel/doc/src/logger_formatter.xml
+++ b/lib/kernel/doc/src/logger_formatter.xml
@@ -33,123 +33,280 @@
<file>logger_formatter.xml</file>
</header>
<module>logger_formatter</module>
- <modulesummary>Default formatter for the Logger application.</modulesummary>
+ <modulesummary>Default formatter for Logger.</modulesummary>
<description>
- <p>Default formatter for the Logger application.</p>
+ <p>Each log handler has a configured formatter specified as a
+ module and a configuration term. The purpose of the formatter is
+ to translate the log events to a final printable string
+ (<seealso marker="stdlib:unicode#type-chardata"><c>unicode:chardata()</c>
+ </seealso>) which can be written to the output
+ device of the handler.</p>
+ <p><c>logger_formatter</c> is the default formatter used by
+ Logger.</p>
</description>
+
<datatypes>
<datatype>
+ <name name="config"/>
+ <desc>
+ <p>The configuration term for <c>logger_formatter</c> is a
+ <seealso marker="stdlib:maps">map</seealso>, and the
+ following keys can be set as configuration parameters:</p>
+ <taglist>
+ <tag><c>chars_limit = pos_integer() | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the value of the option
+ with the same name to be used when calling
+ <seealso marker="stdlib:io_lib#format-3">
+ <c>io_lib:format/3</c></seealso>.
+ This value limits the total number of characters printed
+ for each log event. Notice that this is a soft limit. For a
+ hard truncation limit, see option <c>max_size</c>.</p>
+ <p>Defaults to <c>unlimited</c>.</p>
+ <note>
+ <p><c>chars_limit</c> has no effect on log messages on
+ string form. These are expected to be short, but can
+ still be truncated by the <c>max_size</c>
+ parameter.</p>
+ </note>
+ </item>
+ <tag><marker id="depth"/><c>depth = pos_integer() | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the maximum depth to
+ which terms shall be printed by this formatter. Format
+ strings passed to this formatter are rewritten. The
+ format controls ~p and ~w are replaced with ~P and ~W,
+ respectively, and the value is used as the depth
+ parameter. For details, see
+ <seealso marker="stdlib:io#format-2">io:format/2,3</seealso>
+ in STDLIB.</p>
+ <p>Defaults to <c>unlimited</c>.</p>
+ <note>
+ <p><c>depth</c> has no effect on log messages on string
+ form. These are expected to be short, but can still be
+ truncated by the <c>max_size</c> parameter.</p>
+ </note>
+ </item>
+ <tag><c>legacy_header = boolean()</c></tag>
+ <item>
+          <p>If set to <c>true</c>, a header field is added to
+ logger_formatter's part of <c>Metadata</c>. The value of
+ this field is a string similar to the header created by
+ the old <c>error_logger</c> event handlers. It can be
+ included in the log event by adding the
+ tuple <c>{logger_formatter,header}</c> to the
+ template. See the description of
+ the <seealso marker="#type-template"><c>template()</c></seealso>
+ type for more information.</p>
+ <p>Defaults to <c>false</c>.</p>
+ </item>
+ <tag><c>max_size = pos_integer() | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the absolute maximum size a
+ string returned from this formatter can have. If the
+ formatted string is longer, after possibly being limited
+ by <c>chars_limit</c> or <c>depth</c>, it is truncated.</p>
+ <p>Defaults to <c>unlimited</c>.</p>
+ </item>
+ <tag><c>report_cb = fun((</c><seealso marker="logger#type-report"><c>logger:report()</c></seealso><c>) -> {</c><seealso marker="stdlib:io#type-format"><c>io:format()</c></seealso><c>,[term()]})</c></tag>
+ <item>
+ <p>A report callback is used by the formatter to transform
+ log messages on report form to a format string and
+ arguments. The report callback can be specified in the
+            metadata for the log event. If no report callback exists
+ in metadata, <c>logger_formatter</c> will
+ use <seealso marker="logger#format_report-1">
+ <c>logger:format_report/1</c></seealso> as default
+ callback.</p>
+ <p>If this configuration parameter is set, it replaces
+ both the default report callback, and any report
+ callback found in metadata. That is, all reports are
+ converted by this configured function.</p>
+ <p>The value must be a function with arity 1,
+          <p>The value must be a function with arity 1,
+            returning <c>{Format,Args}</c>, and it will be called
+            with the report as its only argument.</p>
+ <tag><c>single_line = boolean()</c></tag>
+ <item>
+ <p>If set to <c>true</c>, all newlines in the message are
+ replaced with <c>", "</c>, and whitespaces following
+ directly after newlines are removed. Note that newlines
+ added by the <c>template</c> parameter are not replaced.</p>
+ <p>Defaults to <c>true</c>.</p>
+ </item>
+ <tag><c>template = </c><seealso marker="#type-template"><c>template()</c></seealso></tag>
+ <item>
+ <p>The template describes how the formatted string is
+ composed by combining different data values from the log
+ event. See the description of
+ the <seealso marker="#type-template"><c>template()</c></seealso>
+ type for more information about this.</p>
+ </item>
+ <tag><c>time_designator = byte()</c></tag>
+ <item>
+ <p>Timestamps are formatted according to RFC3339, and the
+ time designator is the character used as date and time
+ separator.</p>
+ <p>Defaults to <c>$T</c>.</p>
+ <p>The value of this parameter is used as
+ the <c>time_designator</c> option
+ to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2">
+              <c>calendar:system_time_to_rfc3339/2</c></seealso>.</p>
+ </item>
+ <tag><c>time_offset = integer() | [byte()]</c></tag>
+ <item>
+ <p>The time offset, either a string or an integer, to be
+ used when formatting the timestamp.</p>
+ <p>An empty string is interpreted as local time. The
+ values <c>"Z"</c>, <c>"z"</c> or <c>0</c> are
+            interpreted as Coordinated Universal Time (UTC).</p>
+ <p>Strings, other than <c>"Z"</c>, <c>"z"</c>,
+            or <c>""</c>, must be of the form <c>±[hh]:[mm]</c>, for
+ example <c>"-02:00"</c> or <c>"+00:00"</c>.</p>
+ <p>Integers must be in microseconds, meaning that the
+ offset <c>7200000000</c> is equivalent
+ to <c>"+02:00"</c>.</p>
+ <p>Defaults to an empty string, meaning that timestamps
+ are displayed in local time. However, for backwards
+ compatibility, if the SASL configuration
+ parameter <seealso marker="sasl:sasl_app#utc_log">
+ <c>utc_log</c></seealso><c>=true</c>, the default is
+ changed to <c>"Z"</c>, meaning that timestamps are displayed
+ in UTC.</p>
+ <p>The value of this parameter is used as
+ the <c>offset</c> option
+ to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2">
+              <c>calendar:system_time_to_rfc3339/2</c></seealso>.</p>
+ </item>
+ </taglist>
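+        <p>A sketch of how a handler's formatter configuration can be
+          changed at runtime; the handler id <c>default</c> refers to
+          the standard handler started by Kernel:</p>
+<code>
+logger:set_handler_config(default, formatter,
+    {logger_formatter, #{legacy_header => false,
+                         single_line => true,
+                         time_offset => "Z"}}).
+</code>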
+ </desc>
+ </datatype>
+ <datatype>
<name name="template"/>
<desc>
+ <p>The template is a list of atoms, tuples and strings. The
+ atoms <c>level</c> or <c>msg</c>, are treated as
+ placeholders for the severity level and the log message,
+ respectively. Other atoms or tuples are interpreted as
+        placeholders for metadata, where atoms are expected to match
+        top-level keys, and tuples represent paths to sub-keys when
+        the metadata is a nested map. For example, the
+ tuple <c>{key1,key2}</c> is replaced by the value of
+ the <c>key2</c> field in the nested map below. The
+ atom <c>key1</c> on its own is replaced by the complete
+ value of the <c>key1</c> field. The values are converted to
+ strings.</p>
+
+<code>
+#{key1=>#{key2=>my_value,
+ ...}
+ ...}</code>
+
+ <p>Strings in the template are printed literally.</p>
+      <p>The default value for the <c>template</c> configuration
+        parameter depends on the values of the <c>legacy_header</c>
+        and <c>single_line</c> configuration parameters as
+        follows.</p>
+
+ <p>The log event used in the examples is:</p>
+ <code>
+?LOG_ERROR("name: ~p~nexit_reason: ~p",[my_name,"It crashed"])</code>
+
+ <taglist>
+ <tag><c>legacy_header=true, single_line=false</c></tag>
+ <item>
+ <p>Default
+ template: <c>[{logger_formatter,header},"\n",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code type="none">
+=ERROR REPORT==== 17-May-2018::18:30:19.453447 ===
+name: my_name
+exit_reason: "It crashed"</code>
+
+          <p>Notice that all eight levels can occur in the heading,
+            not only <c>ERROR</c>, <c>WARNING</c>, or <c>INFO</c> as the
+            old <c>error_logger</c> produced, and that microseconds are
+            added at the end of the timestamp.</p>
+ </item>
+
+ <tag><c>legacy_header=true, single_line=true</c></tag>
+ <item>
+ <p>Default
+ template: <c>[{logger_formatter,header},"\n",msg,"\n"]</c></p>
+
+ <p>Notice that the template is here the same as
+ for <c>single_line=false</c>, but the resulting log entry
+ differs in that there is only one line after the
+ heading:</p>
+ <code type="none">
+=ERROR REPORT==== 17-May-2018::18:31:06.952665 ===
+name: my_name, exit_reason: "It crashed"</code>
+ </item>
+
+ <tag><c>legacy_header=false, single_line=true</c></tag>
+ <item>
+ <p>Default template: <c>[time," ",level,": ",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code type="none">
+2018-05-17T18:31:31.152864+02:00 error: name: my_name, exit_reason: "It crashed"</code>
+ </item>
+
+ <tag><c>legacy_header=false, single_line=false</c></tag>
+ <item>
+ <p>Default template: <c>[time," ",level,":\n",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code type="none">
+2018-05-17T18:32:20.105422+02:00 error:
+name: my_name
+exit_reason: "It crashed"</code>
+ </item>
+ </taglist>
</desc>
</datatype>
</datatypes>
<funcs>
<func>
+ <name name="check_config" arity="1"/>
+ <fsummary>Validates the given formatter configuration.</fsummary>
+ <desc>
+ <p>This callback function is called by Logger when the
+ formatter configuration for a handler is set or modified. It
+ returns <c>ok</c> if the configuration is valid,
+ and <c>{error,term()}</c> if it is faulty.</p>
+ </desc>
+ </func>
+ <func>
<name name="format" arity="2"/>
<fsummary>Formats the given message.</fsummary>
<desc>
- <p>Formats the given message.</p>
- <p>The template is a list of atoms, tuples and strings. Atoms
- can be <c>level</c> or <c>msg</c>, which are placeholders
- for the severity level and the log message,
- repectively. Tuples are interpreted as placeholders for
- metadata. Each element in the tuple must be an atom which
- matches a key in the nested metadata map, e.g. the
- tuple <c>{key1,key2}</c> will be replaced by the value of
- the key2 field in this nested map (the value vill be
- converted to a string):</p>
-
-<code>
-#{key1=>#{key2=>my_value,
- ...},
- ...}</code>
-
-
- <p> Strings are printed literally.</p>
-
- <p><c>depth</c> is a positive integer representing the maximum
- depth to which terms shall be printed by this
- formatter. Format strings passed to this formatter are
- rewritten. The format controls ~p and ~w are replaced with
- ~P and ~W, respectively, and the value is used as the depth
- parameter. For details, see
- <seealso marker="stdlib:io#format-2">io:format/2,3</seealso>
- in STDLIB.</p>
-
- <p><c>chars_limit</c> is a positive integer representing the
- value of the option with the same name to be used when calling
- <seealso marker="stdlib:io#format-3">io:format/3</seealso>. This
- value limits the total number of characters printed bu the
- formatter. Notes that this is a soft limit. For a hard
- truncation limit, see option <c>max_size</c>.</p>
-
- <p><c>max_size</c> is a positive integer representing the
- maximum size a string returned from this formatter can
- have. If the formatted string is longer, after possibly
- being limited by <c>depth</c> and/or <c>chars_limit</c>, it
- will be truncated.</p>
-
- <p><c>utc</c> is a boolean. If set to true, all dates are
- displayed in Universal Coordinated Time. Default
- is <c>false</c>.</p>
-
- <p><c>report_cb</c> must be a function with arity 1,
- returning <c>{Format,Args}</c>. This function will replace
- any <c>report_cb</c> found in metadata.</p>
-
- <p>If <c>single_line=true</c>, all newlines in the message are
- replaced with <c>", "</c>, and whitespaces following directly
- after newlines are removed. Note that newlines added by the
- formatter template are not replaced.</p>
-
- <p>If <c>legacy_header=true</c> a header field is added to
- logger_formatter's part of <c>Metadata</c>. The value of
- this field is a string similar to the header created by the
- old <c>error_logger</c> event handlers. It can be included
- in the log event by adding the
- tuple <c>{logger_formatter,header}</c> to the template.</p>
-
- <p>The default template when <c>legacy_header=true</c> is</p>
-
- <code>[{logger_formatter,header},"\n",msg,"\n"]</code>
-
- <p>which will cause log entries like this:</p>
-
- <code>=ERROR REPORT==== 29-Dec-2017::13:30:51.245123 ===
- process: &lt;0.74.0&gt;
- exit_reason: "Something went wrong"</code>
-
- <p>Note that all eight levels might occur here, not
- only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c>. And also
- that micro seconds are added at the end of the
- timestamp.</p>
-
- <p>The default template when <c>single_line=true</c> is</p>
-
- <code>[time," ",level,": ",msg,"\n"]</code>
-
- <p>which will cause log entries like this:</p>
-
- <code>2017-12-29 13:31:49.640317 error: process: &lt;0.74.0&gt;, exit_reason: "Something went wrong"</code>
-
- <p>The default template when both <c>legacy_header</c> and
- <c>single_line</c> are set to false is:</p>
-
- <code>[time," ",level,":\n",msg,"\n"]</code>
-
- <p>which will cause log entries like this:</p>
-
- <code>2017-12-29 13:32:25.191925 error:
- process: &lt;0.74.0&gt;
- exit_reason: "Something went wrong"</code>
-
+      <p>This is the formatter callback function to be called from
+ handlers. The log event is processed as follows:</p>
+ <list>
+ <item>If the message is on report form, it is converted to
+ <c>{Format,Args}</c> by calling the report
+ callback.</item>
+ <item>The size is limited according to the values of
+ configuration parameters <c>chars_limit</c>
+ and <c>depth</c>. Notice that this does not apply to
+ messages on string form.</item>
+ <item>The full log entry is composed according to
+ the <c>template</c>.</item>
+ <item>If the final string is too long, it is truncated
+ according to the value of configuration
+ parameter <c>max_size</c>.</item>
+ </list>
</desc>
</func>
-
</funcs>
</erlref>
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
index fe9b9ca5a9..a4f2848037 100644
--- a/lib/kernel/doc/src/logger_std_h.xml
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -33,17 +33,17 @@
<file>logger_std_h.xml</file>
</header>
<module>logger_std_h</module>
- <modulesummary>Default handler for the Logger application.</modulesummary>
+ <modulesummary>Default handler for Logger.</modulesummary>
<description>
- <p>This is the default handler for the Logger
- application. Multiple instances of this handler can be added to
- logger, and each instance will print logs to <c>standard_io</c>,
+ <p>This is the default handler for Logger.
+ Multiple instances of this handler can be added to
+ Logger, and each instance will print logs to <c>standard_io</c>,
<c>standard_error</c> or to file. The default instance that starts
- with kernel is named <c>logger_std_h</c> - which is the name to be used
+ with Kernel is named <c>default</c> - which is the name to be used
for reconfiguration.</p>
<p>The handler has an overload protection mechanism that will keep the handler
- process and the kernel application alive during a high load of log
+ process and the Kernel application alive during a high load of log
requests. How this feature works, and how to modify the configuration,
is described in the
<seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
@@ -57,7 +57,7 @@
are stored in a sub map with the key <c>logger_std_h</c>. The following
keys and values may be specified:</p>
<taglist>
- <tag><c>type</c></tag>
+ <tag><marker id="type"/><c>type</c></tag>
<item>
<p>This will have the value <c>standard_io</c>, <c>standard_error</c>,
<c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>,
@@ -104,12 +104,11 @@ logger:add_handler(my_standard_h, logger_std_h,
filesync_repeat_interval => 1000}}).
</code>
<p>In order to configure the default handler (that starts initially with
- the kernel application) to log to file instead of <c>standard_io</c>,
- use the kernel configuration parameter
- <seealso marker="kernel_app#configuration"><c>logger_dest</c></seealso> with
- value <c>{file,FileName}</c>. Example:</p>
+ the Kernel application) to log to file instead of <c>standard_io</c>,
+ change the Kernel default logger to use a file. Example:</p>
<code type="none">
-erl -kernel logger_dest '{file,"./erl.log"}'
+erl -kernel logger '[{handler,default,logger_std_h,
+ #{ logger_std_h => #{ type => {file,"./log.log"}}}}]'
</code>
<p>An example of how to replace the standard handler with a disk_log handler
at startup can be found in the manual of
@@ -128,6 +127,11 @@ erl -kernel logger_dest '{file,"./erl.log"}'
</funcs>
+ <section>
+ <title>See Also</title>
+ <p><seealso marker="logger"><c>logger(3)</c></seealso></p>
+ <p><seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso></p>
+ </section>
</erlref>
diff --git a/lib/kernel/doc/src/ref_man.xml b/lib/kernel/doc/src/ref_man.xml
index c06914d23d..b6c2714664 100644
--- a/lib/kernel/doc/src/ref_man.xml
+++ b/lib/kernel/doc/src/ref_man.xml
@@ -32,12 +32,15 @@
</description>
<xi:include href="kernel_app.xml"/>
+ <xi:include href="app.xml"/>
<xi:include href="application.xml"/>
<xi:include href="auth.xml"/>
<xi:include href="code.xml"/>
+ <xi:include href="config.xml"/>
<xi:include href="disk_log.xml"/>
<xi:include href="erl_boot_server.xml"/>
<xi:include href="erl_ddll.xml"/>
+ <xi:include href="erl_epmd.xml"/>
<xi:include href="erl_prim_loader_stub.xml"/>
<xi:include href="erlang_stub.xml"/>
<xi:include href="error_handler.xml"/>
@@ -66,6 +69,4 @@
<xi:include href="user.xml"/>
<xi:include href="wrap_log_reader.xml"/>
<xi:include href="zlib_stub.xml"/>
- <xi:include href="app.xml"/>
- <xi:include href="config.xml"/>
</application>
diff --git a/lib/kernel/doc/src/specs.xml b/lib/kernel/doc/src/specs.xml
index bcc422930e..b8c25ca53b 100644
--- a/lib/kernel/doc/src/specs.xml
+++ b/lib/kernel/doc/src/specs.xml
@@ -6,6 +6,7 @@
<xi:include href="../specs/specs_disk_log.xml"/>
<xi:include href="../specs/specs_erl_boot_server.xml"/>
<xi:include href="../specs/specs_erl_ddll.xml"/>
+ <xi:include href="../specs/specs_erl_epmd.xml"/>
<xi:include href="../specs/specs_erl_prim_loader_stub.xml"/>
<xi:include href="../specs/specs_erlang_stub.xml"/>
<xi:include href="../specs/specs_error_handler.xml"/>
diff --git a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile
index 702845512c..f265fdd272 100644
--- a/lib/kernel/src/Makefile
+++ b/lib/kernel/src/Makefile
@@ -118,7 +118,7 @@ MODULES = \
logger_filters \
logger_formatter \
logger_server \
- logger_simple \
+ logger_simple_h \
logger_sup \
net \
net_adm \
@@ -146,7 +146,7 @@ HRL_FILES= ../include/file.hrl ../include/inet.hrl ../include/inet_sctp.hrl \
../include/net_address.hrl ../include/logger.hrl
INTERNAL_HRL_FILES= application_master.hrl disk_log.hrl \
- erl_epmd.hrl hipe_ext_format.hrl \
+ erl_epmd.hrl file_int.hrl hipe_ext_format.hrl \
inet_dns.hrl inet_res.hrl \
inet_boot.hrl inet_config.hrl inet_int.hrl \
inet_dns_record_adts.hrl \
@@ -279,7 +279,7 @@ $(EBIN)/logger_disk_log_h.beam: logger_h_common.hrl logger_internal.hrl ../inclu
$(EBIN)/logger_filters.beam: logger_internal.hrl ../include/logger.hrl
$(EBIN)/logger_formatter.beam: logger_internal.hrl ../include/logger.hrl
$(EBIN)/logger_server.beam: logger_internal.hrl ../include/logger.hrl
-$(EBIN)/logger_simple.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_simple_h.beam: logger_internal.hrl ../include/logger.hrl
$(EBIN)/logger_std_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl
$(EBIN)/logger_h_common.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl
$(EBIN)/net_kernel.beam: ../include/net_address.hrl
diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl
index b9cb722575..ff5df667b5 100644
--- a/lib/kernel/src/application_controller.erl
+++ b/lib/kernel/src/application_controller.erl
@@ -1272,9 +1272,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) ->
NewEnv = merge_app_env(ApplEnv, ConfEnv),
CmdLineEnv = get_cmd_env(Name),
NewEnv2 = merge_app_env(NewEnv, CmdLineEnv),
- NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2,
- {included_applications, IncApps}),
- add_env(Name, NewEnv3),
+ add_env(Name, NewEnv2),
Appl = #appl{name = Name, descr = Descr, id = Id, vsn = Vsn,
appl_data = ApplData, inc_apps = IncApps, apps = Apps},
ets:insert(ac_tab, {{loaded, Name}, Appl}),
@@ -1292,7 +1290,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) ->
{ok, NewS}.
unload(AppName, S) ->
- {ok, IncApps} = get_env(AppName, included_applications),
+ {ok, IncApps} = get_key(AppName, included_applications),
del_env(AppName),
ets:delete(ac_tab, {loaded, AppName}),
foldl(fun(App, S1) ->
@@ -1583,13 +1581,9 @@ do_change_appl({ok, {ApplData, Env, IncApps, Descr, Id, Vsn, Apps}},
CmdLineEnv = get_cmd_env(AppName),
NewEnv2 = merge_app_env(NewEnv1, CmdLineEnv),
- %% included_apps is made into an env parameter as well
- NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2,
- {included_applications, IncApps}),
-
%% Update ets table with new application env
del_env(AppName),
- add_env(AppName, NewEnv3),
+ add_env(AppName, NewEnv2),
OldAppl#appl{appl_data=ApplData,
descr=Descr,
diff --git a/lib/kernel/src/code.erl b/lib/kernel/src/code.erl
index f143a49d2f..7faef93609 100644
--- a/lib/kernel/src/code.erl
+++ b/lib/kernel/src/code.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -630,7 +630,7 @@ do_par_recv(N, Good, Bad) ->
call(Req) ->
code_server:call(Req).
--spec start_link() -> {'ok', pid()} | {'error', 'crash'}.
+-spec start_link() -> {'ok', pid()}.
start_link() ->
do_start().
diff --git a/lib/kernel/src/code_server.erl b/lib/kernel/src/code_server.erl
index bbfa2a995d..1a7677295b 100644
--- a/lib/kernel/src/code_server.erl
+++ b/lib/kernel/src/code_server.erl
@@ -1437,7 +1437,7 @@ error_msg(Format, Args) ->
logger ! {log,error,Format,Args,
#{pid=>self(),
gl=>group_leader(),
- time=>erlang:monotonic_time(microsecond),
+ time=>erlang:system_time(microsecond),
error_logger=>#{tag=>error}}},
ok.
@@ -1446,7 +1446,7 @@ info_msg(Format, Args) ->
logger ! {log,info,Format,Args,
#{pid=>self(),
gl=>group_leader(),
- time=>erlang:monotonic_time(microsecond),
+ time=>erlang:system_time(microsecond),
error_logger=>#{tag=>info_msg}}},
ok.
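The switch from monotonic to system time above is what lets downstream formatters turn the 'time' metadata into a wall-clock timestamp; monotonic time is an arbitrary per-node offset. A small illustration of what the new value supports (shell sketch, not part of the patch):

    %% System time in microseconds converts directly to an RFC 3339 string,
    %% which logger_formatter relies on further down in this diff.
    SysTime = erlang:system_time(microsecond),
    calendar:system_time_to_rfc3339(SysTime, [{unit, microsecond}]).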
diff --git a/lib/kernel/src/erl_epmd.erl b/lib/kernel/src/erl_epmd.erl
index f96bc88913..9a0939972d 100644
--- a/lib/kernel/src/erl_epmd.erl
+++ b/lib/kernel/src/erl_epmd.erl
@@ -29,10 +29,20 @@
-define(port_please_failure2(Term), noop).
-endif.
+-ifndef(erlang_daemon_port).
+-define(erlang_daemon_port, 4369).
+-endif.
+-ifndef(epmd_dist_high).
+-define(epmd_dist_high, 4370).
+-endif.
+-ifndef(epmd_dist_low).
+-define(epmd_dist_low, 4370).
+-endif.
+
%% External exports
-export([start/0, start_link/0, stop/0, port_please/2,
port_please/3, names/0, names/1,
- register_node/2, register_node/3, open/0, open/1, open/2]).
+ register_node/2, register_node/3, address_please/3, open/0, open/1, open/2]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
@@ -53,7 +63,7 @@
start() ->
gen_server:start({local, erl_epmd}, ?MODULE, [], []).
-
+-spec start_link() -> {ok, pid()} | ignore | {error,term()}.
start_link() ->
gen_server:start_link({local, erl_epmd}, ?MODULE, [], []).
@@ -66,9 +76,22 @@ stop() ->
%% return {port, P, Version} | noport
%%
+-spec port_please(Name, Host) -> {ok, Port, Version} | noport when
+ Name :: string(),
+ Host :: inet:ip_address(),
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer().
+
port_please(Node, Host) ->
port_please(Node, Host, infinity).
+-spec port_please(Name, Host, Timeout) -> {ok, Port, Version} | noport when
+ Name :: string(),
+ Host :: inet:ip_address(),
+ Timeout :: non_neg_integer() | infinity,
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer().
+
port_please(Node,HostName, Timeout) when is_atom(HostName) ->
port_please1(Node,atom_to_list(HostName), Timeout);
port_please(Node,HostName, Timeout) when is_list(HostName) ->
@@ -92,10 +115,21 @@ port_please1(Node,HostName, Timeout) ->
Else
end.
+-spec names() -> {ok, [{Name, Port}]} | {error, Reason} when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Reason :: address | file:posix().
+
names() ->
{ok, H} = inet:gethostname(),
names(H).
+-spec names(Host) -> {ok, [{Name, Port}]} | {error, Reason} when
+ Host :: atom() | string() | inet:ip_address(),
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Reason :: address | file:posix().
+
names(HostName) when is_atom(HostName); is_list(HostName) ->
case inet:gethostbyname(HostName) of
{ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} ->
@@ -106,9 +140,22 @@ names(HostName) when is_atom(HostName); is_list(HostName) ->
names(EpmdAddr) ->
get_names(EpmdAddr).
+-spec register_node(Name, Port) -> Result when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Creation :: non_neg_integer(),
+ Result :: {ok, Creation} | {error, already_registered} | term().
register_node(Name, PortNo) ->
- register_node(Name, PortNo, inet).
+ register_node(Name, PortNo, inet).
+
+-spec register_node(Name, Port, Driver) -> Result when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Driver :: inet_tcp | inet6_tcp | inet | inet6,
+ Creation :: non_neg_integer(),
+ Result :: {ok, Creation} | {error, already_registered} | term().
+
register_node(Name, PortNo, inet_tcp) ->
register_node(Name, PortNo, inet);
register_node(Name, PortNo, inet6_tcp) ->
@@ -116,6 +163,17 @@ register_node(Name, PortNo, inet6_tcp) ->
register_node(Name, PortNo, Family) ->
gen_server:call(erl_epmd, {register, Name, PortNo, Family}, infinity).
+-spec address_please(Name, Host, AddressFamily) -> Success | {error, term()} when
+ Name :: string(),
+ Host :: string() | inet:ip_address(),
+ AddressFamily :: inet | inet6,
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer(),
+ Success :: {ok, inet:ip_address()} | {ok, inet:ip_address(), Port, Version}.
+
+address_please(_Name, Host, AddressFamily) ->
+ inet:getaddr(Host, AddressFamily).
+
%%%----------------------------------------------------------------------
%%% Callback functions from gen_server
%%%----------------------------------------------------------------------
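address_please/3 above is the hook that lets an alternative discovery module bypass EPMD: if it returns {ok, Ip, Port, Version}, inet_tcp_dist (later in this diff) skips the port_please lookup. A hypothetical minimal callback module, selected with "erl -epmd_module my_epmd" (module name, port and version are assumptions):

    -module(my_epmd).
    -export([address_please/3]).

    %% Resolve the host ourselves and hand back a fixed distribution port
    %% and protocol version, so no EPMD round-trip is needed.
    address_please(_Name, Host, AddressFamily) ->
        case inet:getaddr(Host, AddressFamily) of
            {ok, Ip} -> {ok, Ip, 12345, 5};   %% port/version are illustrative
            Error    -> Error
        end.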
diff --git a/lib/kernel/src/erl_signal_handler.erl b/lib/kernel/src/erl_signal_handler.erl
index 22f235d4e4..b76c2a217a 100644
--- a/lib/kernel/src/erl_signal_handler.erl
+++ b/lib/kernel/src/erl_signal_handler.erl
@@ -19,12 +19,21 @@
-module(erl_signal_handler).
-behaviour(gen_event).
--export([init/1, format_status/2,
+-export([start/0, init/1, format_status/2,
handle_event/2, handle_call/2, handle_info/2,
terminate/2, code_change/3]).
-record(state,{}).
+start() ->
+ %% add signal handler
+ case whereis(erl_signal_server) of
+ %% in case of minimal mode
+ undefined -> ok;
+ _ ->
+ gen_event:add_handler(erl_signal_server, erl_signal_handler, [])
+ end.
+
init(_Args) ->
{ok, #state{}}.
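Since erl_signal_server is a plain gen_event manager, user code can still swap in its own signal handler after the default one is added by start/0 above; a sketch under that assumption (module name is illustrative):

    -module(my_signal_handler).
    -behaviour(gen_event).
    -export([init/1, handle_event/2, handle_call/2, handle_info/2,
             terminate/2, code_change/3]).

    init(_Args) -> {ok, #{}}.

    %% React to SIGUSR1; ignore everything else.
    %% (os:set_signal/2 controls which signals are delivered as events.)
    handle_event(sigusr1, State) ->
        io:format("got SIGUSR1~n", []),
        {ok, State};
    handle_event(_Other, State) ->
        {ok, State}.

    handle_call(_Request, State) -> {ok, ok, State}.
    handle_info(_Info, State)    -> {ok, State}.
    terminate(_Arg, _State)      -> ok.
    code_change(_OldVsn, State, _Extra) -> {ok, State}.

    %% Usage (assumption: replacing the default handler):
    %%   gen_event:swap_sup_handler(erl_signal_server,
    %%                              {erl_signal_handler, []},
    %%                              {my_signal_handler, []}).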
diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl
index 0706220a94..b3957d0c7e 100644
--- a/lib/kernel/src/error_logger.erl
+++ b/lib/kernel/src/error_logger.erl
@@ -32,7 +32,7 @@
which_report_handlers/0]).
%% logger callbacks
--export([adding_handler/2, removing_handler/1, log/2]).
+-export([adding_handler/1, removing_handler/1, log/2]).
-export([get_format_depth/0, limit_term/1]).
@@ -101,9 +101,9 @@ stop() ->
%%%-----------------------------------------------------------------
%%% Callbacks for logger
--spec adding_handler(logger:handler_id(),logger:config()) ->
+-spec adding_handler(logger:config()) ->
{ok,logger:config()} | {error,term()}.
-adding_handler(?MODULE,Config) ->
+adding_handler(#{id:=?MODULE}=Config) ->
case start() of
ok ->
{ok,Config};
@@ -111,12 +111,12 @@ adding_handler(?MODULE,Config) ->
Error
end.
--spec removing_handler(logger:handler_id()) -> ok.
-removing_handler(?MODULE) ->
+-spec removing_handler(logger:config()) -> ok.
+removing_handler(#{id:=?MODULE}) ->
stop(),
ok.
--spec log(logger:log(),logger:config()) -> ok.
+-spec log(logger:log_event(),logger:config()) -> ok.
log(#{level:=Level,msg:=Msg,meta:=Meta},_Config) ->
do_log(Level,Msg,Meta).
@@ -529,18 +529,38 @@ logfile(filename) ->
Flag :: boolean().
tty(true) ->
- case lists:member(error_logger_tty_h, which_report_handlers()) of
- false ->
- add_report_handler(error_logger_tty_h, []);
- true ->
- ignore
- end,
+ _ = case lists:member(error_logger_tty_h, which_report_handlers()) of
+ false ->
+ case logger:get_handler_config(default) of
+ {ok,{logger_std_h,#{logger_std_h:=#{type:=standard_io}}}} ->
+ logger:remove_handler_filter(default,
+ error_logger_tty_false);
+ _ ->
+ logger:add_handler(error_logger_tty_true,logger_std_h,
+ #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS(
+ [beam,erlang,otp]),
+ formatter=>{?DEFAULT_FORMATTER,
+ ?DEFAULT_FORMAT_CONFIG},
+ logger_std_h=>#{type=>standard_io}})
+ end;
+ true ->
+ ok
+ end,
ok;
tty(false) ->
- delete_report_handler(error_logger_tty_h).
+ delete_report_handler(error_logger_tty_h),
+ _ = logger:remove_handler(error_logger_tty_true),
+ _ = case logger:get_handler_config(default) of
+ {ok,{logger_std_h,#{logger_std_h:=#{type:=standard_io}}}} ->
+ logger:add_handler_filter(default,error_logger_tty_false,
+ {fun(_,_) -> stop end, ok});
+ _ ->
+ ok
+ end,
+ ok.
%%%-----------------------------------------------------------------
-
-spec limit_term(term()) -> term().
limit_term(Term) ->
@@ -552,4 +572,9 @@ limit_term(Term) ->
-spec get_format_depth() -> 'unlimited' | pos_integer().
get_format_depth() ->
- logger:get_format_depth().
+ case application:get_env(kernel, error_logger_format_depth) of
+ {ok, Depth} when is_integer(Depth) ->
+ max(10, Depth);
+ undefined ->
+ unlimited
+ end.
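The arity changes above reflect the new handler callback contract: the config map now carries its own id, so adding_handler/1 and removing_handler/1 take only the map. A minimal custom handler written against that contract (module and handler names are assumptions):

    -module(my_console_h).
    -export([adding_handler/1, removing_handler/1, log/2]).

    adding_handler(#{id := _Id} = Config) ->
        {ok, Config}.                        %% accept the configuration as-is

    removing_handler(#{id := _Id}) ->
        ok.

    log(#{level := Level, msg := Msg, meta := _Meta}, _Config) ->
        io:format("~p: ~tp~n", [Level, Msg]).

    %% Attach it with: logger:add_handler(my_console, my_console_h, #{}).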
diff --git a/lib/kernel/src/file.erl b/lib/kernel/src/file.erl
index 57d8fc7a15..1d4e37196c 100644
--- a/lib/kernel/src/file.erl
+++ b/lib/kernel/src/file.erl
@@ -69,7 +69,7 @@
%% Types that can be used from other modules -- alphabetically ordered.
-export_type([date_time/0, fd/0, file_info/0, filename/0, filename_all/0,
- io_device/0, name/0, name_all/0, posix/0]).
+ io_device/0, mode/0, name/0, name_all/0, posix/0]).
%%% Includes and defines
-include("file_int.hrl").
diff --git a/lib/kernel/src/file_server.erl b/lib/kernel/src/file_server.erl
index ecc1ffbdd6..29eaa23375 100644
--- a/lib/kernel/src/file_server.erl
+++ b/lib/kernel/src/file_server.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -73,7 +73,7 @@ stop() ->
%% {stop, Reason}
%%----------------------------------------------------------------------
--spec init([]) -> {'ok', state()} | {'stop', term()}.
+-spec init([]) -> {'ok', state()}.
init([]) ->
process_flag(trap_exit, true),
@@ -225,7 +225,7 @@ handle_cast(Msg, State) ->
%%----------------------------------------------------------------------
-spec handle_info(term(), state()) ->
- {'noreply', state()} | {'stop', 'normal', state()}.
+ {'noreply', state()}.
handle_info({'EXIT', Pid, _Reason}, State) when is_pid(Pid) ->
ets:delete(?FILE_IO_SERVER_TABLE, Pid),
diff --git a/lib/kernel/src/gen_sctp.erl b/lib/kernel/src/gen_sctp.erl
index a6aa0edd15..3526df3600 100644
--- a/lib/kernel/src/gen_sctp.erl
+++ b/lib/kernel/src/gen_sctp.erl
@@ -118,6 +118,8 @@ open() ->
| inet:address_family()
| {port,Port}
| {type,SockType}
+ | {netns, file:filename_all()}
+ | {bind_to_device, binary()}
| option(),
IP :: inet:ip_address() | any | loopback,
Port :: inet:port_number(),
diff --git a/lib/kernel/src/gen_tcp.erl b/lib/kernel/src/gen_tcp.erl
index ac61dbc792..253c63528f 100644
--- a/lib/kernel/src/gen_tcp.erl
+++ b/lib/kernel/src/gen_tcp.erl
@@ -102,6 +102,8 @@
inet:address_family() |
{port, inet:port_number()} |
{tcp_module, module()} |
+ {netns, file:filename_all()} |
+ {bind_to_device, binary()} |
option().
-type listen_option() ::
{ip, inet:socket_address()} |
@@ -111,6 +113,8 @@
{port, inet:port_number()} |
{backlog, B :: non_neg_integer()} |
{tcp_module, module()} |
+ {netns, file:filename_all()} |
+ {bind_to_device, binary()} |
option().
-type socket() :: port().
diff --git a/lib/kernel/src/gen_udp.erl b/lib/kernel/src/gen_udp.erl
index 3121544719..9ab58011ec 100644
--- a/lib/kernel/src/gen_udp.erl
+++ b/lib/kernel/src/gen_udp.erl
@@ -97,6 +97,8 @@ open(Port) ->
| {ifaddr, inet:socket_address()}
| inet:address_family()
| {port, inet:port_number()}
+ | {netns, file:filename_all()}
+ | {bind_to_device, binary()}
| option(),
Socket :: socket(),
Reason :: inet:posix().
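The additions to gen_sctp, gen_tcp and gen_udp above document open/connect/listen options that inet already accepts. Example use (namespace path and device name are assumptions, and both options require suitable privileges on Linux):

    %% Open a UDP socket inside a network namespace, bound to a device:
    {ok, Socket} = gen_udp:open(0, [{netns, "/var/run/netns/ns1"},
                                    {bind_to_device, <<"eth1">>}]),
    ok = gen_udp:close(Socket).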
diff --git a/lib/kernel/src/hipe_unified_loader.erl b/lib/kernel/src/hipe_unified_loader.erl
index fd06f0f7d8..5704cc79c2 100644
--- a/lib/kernel/src/hipe_unified_loader.erl
+++ b/lib/kernel/src/hipe_unified_loader.erl
@@ -453,7 +453,7 @@ make_beam_stub(Mod, LoaderState, MD5, Beam, FunDefs, ClosuresToPatch) ->
%%========================================================================
%% Patching
%% @spec patch(refs(), BaseAddress::integer(), ConstAndZone::term(),
-%% FunDefs::term(), TrampolineMap::term()) -> 'ok'.
+%% FunDefs::term(), TrampolineMap::term()) -> 'ok'
%% @type refs()=[{RefType::integer(), Reflist::reflist()} | refs()]
%%
%% @type reflist()= [{Data::term(), Offsets::offests()}|reflist()]
diff --git a/lib/kernel/src/inet_tcp_dist.erl b/lib/kernel/src/inet_tcp_dist.erl
index e3fdb1bb22..b4b50899f7 100644
--- a/lib/kernel/src/inet_tcp_dist.erl
+++ b/lib/kernel/src/inet_tcp_dist.erl
@@ -283,73 +283,22 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
?trace("~p~n",[{inet_tcp_dist,self(),setup,Node}]),
[Name, Address] = splitnode(Driver, Node, LongOrShortNames),
AddressFamily = Driver:family(),
- case inet:getaddr(Address, AddressFamily) of
+ ErlEpmd = net_kernel:epmd_module(),
+ {ARMod, ARFun} = get_address_resolver(ErlEpmd),
+ Timer = dist_util:start_timer(SetupTime),
+ case ARMod:ARFun(Name, Address, AddressFamily) of
+ {ok, Ip, TcpPort, Version} ->
+ ?trace("address_please(~p) -> version ~p~n",
+ [Node,Version]),
+ do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer);
{ok, Ip} ->
- Timer = dist_util:start_timer(SetupTime),
- ErlEpmd = net_kernel:epmd_module(),
case ErlEpmd:port_please(Name, Ip) of
{port, TcpPort, Version} ->
?trace("port_please(~p) -> version ~p~n",
[Node,Version]),
- dist_util:reset_timer(Timer),
- case
- Driver:connect(
- Ip, TcpPort,
- connect_options([{active, false}, {packet, 2}]))
- of
- {ok, Socket} ->
- HSData = #hs_data{
- kernel_pid = Kernel,
- other_node = Node,
- this_node = MyNode,
- socket = Socket,
- timer = Timer,
- this_flags = 0,
- other_version = Version,
- f_send = fun Driver:send/2,
- f_recv = fun Driver:recv/3,
- f_setopts_pre_nodeup =
- fun(S) ->
- inet:setopts
- (S,
- [{active, false},
- {packet, 4},
- nodelay()])
- end,
- f_setopts_post_nodeup =
- fun(S) ->
- inet:setopts
- (S,
- [{active, true},
- {deliver, port},
- {packet, 4},
- nodelay()])
- end,
-
- f_getll = fun inet:getll/1,
- f_address =
- fun(_,_) ->
- #net_address{
- address = {Ip,TcpPort},
- host = Address,
- protocol = tcp,
- family = AddressFamily}
- end,
- mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end,
- mf_getstat = fun ?MODULE:getstat/1,
- request_type = Type,
- mf_setopts = fun ?MODULE:setopts/2,
- mf_getopts = fun ?MODULE:getopts/2
- },
- dist_util:handshake_we_started(HSData);
- _ ->
- %% Other Node may have closed since
- %% port_please !
- ?trace("other node (~p) "
- "closed since port_please.~n",
- [Node]),
- ?shutdown(Node)
- end;
+ do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer);
_ ->
?trace("port_please (~p) "
"failed.~n", [Node]),
@@ -361,6 +310,71 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
?shutdown(Node)
end.
+%%
+%% Actual setup of connection
+%%
+do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer) ->
+ dist_util:reset_timer(Timer),
+ case
+ Driver:connect(
+ Ip, TcpPort,
+ connect_options([{active, false}, {packet, 2}]))
+ of
+ {ok, Socket} ->
+ HSData = #hs_data{
+ kernel_pid = Kernel,
+ other_node = Node,
+ this_node = MyNode,
+ socket = Socket,
+ timer = Timer,
+ this_flags = 0,
+ other_version = Version,
+ f_send = fun Driver:send/2,
+ f_recv = fun Driver:recv/3,
+ f_setopts_pre_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, false},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_setopts_post_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, true},
+ {deliver, port},
+ {packet, 4},
+ nodelay()])
+ end,
+
+ f_getll = fun inet:getll/1,
+ f_address =
+ fun(_,_) ->
+ #net_address{
+ address = {Ip,TcpPort},
+ host = Address,
+ protocol = tcp,
+ family = AddressFamily}
+ end,
+ mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end,
+ mf_getstat = fun ?MODULE:getstat/1,
+ request_type = Type,
+ mf_setopts = fun ?MODULE:setopts/2,
+ mf_getopts = fun ?MODULE:getopts/2
+ },
+ dist_util:handshake_we_started(HSData);
+ _ ->
+ %% Other Node may have closed since
+ %% discovery !
+ ?trace("other node (~p) "
+ "closed since discovery (port_please).~n",
+ [Node]),
+ ?shutdown(Node)
+ end.
+
connect_options(Opts) ->
case application:get_env(kernel, inet_dist_connect_options) of
{ok,ConnectOpts} ->
@@ -430,6 +444,16 @@ get_tcp_address(Driver, Socket) ->
}.
%% ------------------------------------------------------------
+%% Determine if EPMD module supports address resolving. Default
+%% is to use inet:getaddr/2.
+%% ------------------------------------------------------------
+get_address_resolver(EpmdModule) ->
+ case erlang:function_exported(EpmdModule, address_please, 3) of
+ true -> {EpmdModule, address_please};
+ _ -> {inet, getaddr}
+ end.
+
+%% ------------------------------------------------------------
%% Do only accept new connection attempts from nodes at our
%% own LAN, if the check_ip environment parameter is true.
%% ------------------------------------------------------------
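get_address_resolver/1 above is a plain feature probe; the same check can be run in a shell on a distributed node to see which resolver the setup will use (a sketch, not part of the patch):

    Epmd = net_kernel:epmd_module(),
    case erlang:function_exported(Epmd, address_please, 3) of
        true  -> {Epmd, address_please};
        false -> {inet, getaddr}
    end.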
diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src
index afffcd156e..d873178f55 100644
--- a/lib/kernel/src/kernel.app.src
+++ b/lib/kernel/src/kernel.app.src
@@ -68,7 +68,7 @@
logger_formatter,
logger_h_common,
logger_server,
- logger_simple,
+ logger_simple_h,
logger_std_h,
logger_sup,
net,
@@ -140,7 +140,10 @@
inet_db,
pg2]},
{applications, []},
- {env, []},
+ {env, [{logger_level, info},
+ {logger_sasl_compatible, false},
+ {logger_progress_reports, stop}
+ ]},
{mod, {kernel, []}},
{runtime_dependencies, ["erts-10.0", "stdlib-3.5", "sasl-3.0"]}
]
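The new kernel defaults above can be overridden per release through sys.config; an example fragment (the chosen values are only for illustration):

    %% sys.config
    [{kernel,
      [{logger_level, debug},
       {logger_sasl_compatible, false},
       {logger_progress_reports, log}]}].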
diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl
index 20aa47f602..b0e8c00bbf 100644
--- a/lib/kernel/src/kernel.erl
+++ b/lib/kernel/src/kernel.erl
@@ -30,23 +30,13 @@
%%% Callback functions for the kernel application.
%%%-----------------------------------------------------------------
start(_, []) ->
+ %% Setup the logger and configure the kernel logger environment
+ ok = logger:internal_init_logger(),
case supervisor:start_link({local, kernel_sup}, kernel, []) of
{ok, Pid} ->
- %% add signal handler
- case whereis(erl_signal_server) of
- %% in case of minimal mode
- undefined -> ok;
- _ ->
- ok = gen_event:add_handler(erl_signal_server, erl_signal_handler, [])
- end,
- %% add error handler
- case logger:setup_standard_handler() of
- ok -> {ok, Pid, []};
- Error ->
- %% Not necessary since the node will crash anyway:
- exit(Pid, shutdown),
- Error
- end;
+ ok = erl_signal_handler:start(),
+ ok = logger:add_handlers(kernel),
+ {ok, Pid, []};
Error -> Error
end.
@@ -153,7 +143,7 @@ init([]) ->
case init:get_argument(mode) of
{ok, [["minimal"]]} ->
{ok, {SupFlags,
- [Code, File, StdError, User, Config, RefC, SafeSup, LoggerSup]}};
+ [Code, File, StdError, User, LoggerSup, Config, RefC, SafeSup]}};
_ ->
Rpc = #{id => rex,
start => {rpc, start_link, []},
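kernel:start/2 now configures logger first and starts the handlers declared in its own environment once the supervisor is up. Other applications can follow the same pattern with logger:add_handlers/1; a sketch under the assumption that my_app declares handlers under its 'logger' environment key (my_app and my_app_sup are hypothetical):

    -module(my_app).
    -behaviour(application).
    -export([start/2, stop/1]).

    start(_Type, _Args) ->
        {ok, Pid} = my_app_sup:start_link(),   %% hypothetical supervisor
        ok = logger:add_handlers(my_app),      %% start handlers from my_app's env
        {ok, Pid}.

    stop(_State) ->
        ok.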
diff --git a/lib/kernel/src/kernel_config.erl b/lib/kernel/src/kernel_config.erl
index 535083ef27..c5ff1887c2 100644
--- a/lib/kernel/src/kernel_config.erl
+++ b/lib/kernel/src/kernel_config.erl
@@ -30,11 +30,8 @@
%%%-----------------------------------------------------------------
%%% This module implements a process that configures the kernel
%%% application.
-%%% Its purpose is that in the init phase add an error_logger
-%%% and when it dies (when the kernel application dies) deleting the
-%%% previously installed error_logger.
-%%% Also, this process waits for other nodes at startup, if
-%%% specified.
+%%% Its purpose is to wait for other nodes at startup in the init phase,
+%%% if specified.
%%%-----------------------------------------------------------------
start_link() -> gen_server:start_link(kernel_config, [], []).
diff --git a/lib/kernel/src/kernel_refc.erl b/lib/kernel/src/kernel_refc.erl
index 05076dc885..8e04ff99d8 100644
--- a/lib/kernel/src/kernel_refc.erl
+++ b/lib/kernel/src/kernel_refc.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2017. All Rights Reserved.
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -44,7 +44,7 @@ scheduler_wall_time(Bool) ->
%% Callback functions from gen_server
%%-----------------------------------------------------------------
--spec init([]) -> {'ok', map()} | {'stop', term()}.
+-spec init([]) -> {'ok', map()}.
init([]) ->
resource(scheduler_wall_time, false),
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl
index 943ef8c2d1..3beb3102fa 100644
--- a/lib/kernel/src/logger.erl
+++ b/lib/kernel/src/logger.erl
@@ -37,18 +37,22 @@
-export([add_handler/3, remove_handler/1,
add_logger_filter/2, add_handler_filter/3,
remove_logger_filter/1, remove_handler_filter/2,
- set_module_level/2, reset_module_level/1,
+ set_module_level/2, unset_module_level/1,
set_logger_config/1, set_logger_config/2,
set_handler_config/2, set_handler_config/3,
- get_logger_config/0, get_handler_config/1]).
+ update_logger_config/1, update_handler_config/2,
+ update_formatter_config/2, update_formatter_config/3,
+ get_logger_config/0, get_handler_config/1,
+ add_handlers/1]).
+
+%% Private configuration
+-export([internal_init_logger/0]).
%% Misc
-export([compare_levels/2]).
--export([set_process_metadata/1, unset_process_metadata/0,
- get_process_metadata/0]).
+-export([set_process_metadata/1, update_process_metadata/1,
+ unset_process_metadata/0, get_process_metadata/0]).
-export([i/0, i/1]).
--export([setup_standard_handler/0, replace_simple_handler/3]).
--export([limit_term/1, get_format_depth/0, get_max_size/0, get_utc_config/0]).
%% Basic report formatting
-export([format_report/1, format_otp_report/1]).
@@ -60,27 +64,49 @@
%%%-----------------------------------------------------------------
%%% Types
--type log() :: #{level=>level(),
- msg=>{io:format(),[term()]} |
- {report,report()} |
- {string,unicode:chardata()},
- meta=>metadata()}.
+-type log_event() :: #{level:=level(),
+ msg:={io:format(),[term()]} |
+ {report,report()} |
+ {string,unicode:chardata()},
+ meta:=metadata()}.
-type level() :: emergency | alert | critical | error |
warning | notice | info | debug.
-type report() :: map() | [{atom(),term()}].
-type msg_fun() :: fun((term()) -> {io:format(),[term()]} |
report() |
unicode:chardata()).
--type metadata() :: map().
-
+-type metadata() :: #{pid => pid(),
+ gl => pid(),
+ time => timestamp(),
+ mfa => {module(),atom(),non_neg_integer()},
+ file => file:filename(),
+ line => non_neg_integer(),
+ domain => [atom()],
+ report_cb => fun((report()) -> {io:format(),[term()]}),
+ atom() => term()}.
+-type location() :: #{mfa := {module(),atom(),non_neg_integer()},
+ file := file:filename(),
+ line := non_neg_integer()}.
-type handler_id() :: atom().
-type filter_id() :: atom().
--type filter() :: {fun((log(),term()) -> filter_return()),term()}.
--type filter_return() :: stop | ignore | log().
--type config() :: map().
-
--export_type([log/0,level/0,report/0,msg_fun/0,metadata/0,config/0,handler_id/0,
- filter_id/0,filter/0,filter_return/0]).
+-type filter() :: {fun((log_event(),filter_arg()) ->
+ filter_return()),filter_arg()}.
+-type filter_arg() :: term().
+-type filter_return() :: stop | ignore | log_event().
+-type config() :: #{id => handler_id(),
+ level => level(),
+ filter_default => log | stop,
+ filters => [{filter_id(),filter()}],
+ formatter => {module(),formatter_config()},
+ atom() => term()}.
+-type timestamp() :: integer().
+-type formatter_config() :: #{atom() => term()}.
+
+-type config_handler() :: {handler, handler_id(), module(), config()}.
+
+-export_type([log_event/0,level/0,report/0,msg_fun/0,metadata/0,config/0,
+ handler_id/0,filter_id/0,filter/0,filter_arg/0,filter_return/0,
+ config_handler/0,formatter_config/0]).
%%%-----------------------------------------------------------------
%%% API
@@ -185,24 +211,24 @@ allow(Level,Module) when ?IS_LEVEL(Level), is_atom(Module) ->
-spec macro_log(Location,Level,StringOrReport) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
StringOrReport :: unicode:chardata() | report().
macro_log(Location,Level,StringOrReport) ->
log_allowed(Location,Level,StringOrReport,#{}).
-spec macro_log(Location,Level,StringOrReport,Meta) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
StringOrReport :: unicode:chardata() | report(),
Meta :: metadata();
(Location,Level,Format,Args) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
Format :: io:format(),
Args ::[term()];
(Location,Level,Fun,FunArgs) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
Fun :: msg_fun(),
FunArgs :: term().
@@ -213,13 +239,13 @@ macro_log(Location,Level,FunOrFormat,Args) ->
log_allowed(Location,Level,{FunOrFormat,Args},#{}).
-spec macro_log(Location,Level,Format,Args,Meta) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
Format :: io:format(),
Args ::[term()],
Meta :: metadata();
(Location,Level,Fun,FunArgs,Meta) -> ok when
- Location :: map(),
+ Location :: location(),
Level :: level(),
Fun :: msg_fun(),
FunArgs :: term(),
@@ -343,10 +369,22 @@ set_handler_config(HandlerId,Key,Value) ->
set_handler_config(HandlerId,Config) ->
logger_server:set_config(HandlerId,Config).
+-spec update_logger_config(Config) -> ok | {error,term()} when
+ Config :: config().
+update_logger_config(Config) ->
+ logger_server:update_config(logger,Config).
+
+-spec update_handler_config(HandlerId,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Config :: config().
+update_handler_config(HandlerId,Config) ->
+ logger_server:update_config(HandlerId,Config).
+
-spec get_logger_config() -> {ok,Config} when
Config :: config().
get_logger_config() ->
- logger_config:get(?LOGGER_TABLE,logger).
+ {ok,Config} = logger_config:get(?LOGGER_TABLE,logger),
+ {ok,maps:remove(handlers,Config)}.
-spec get_handler_config(HandlerId) -> {ok,{Module,Config}} | {error,term()} when
HandlerId :: handler_id(),
@@ -355,16 +393,31 @@ get_logger_config() ->
get_handler_config(HandlerId) ->
logger_config:get(?LOGGER_TABLE,HandlerId).
+-spec update_formatter_config(HandlerId,FormatterConfig) ->
+ ok | {error,term()} when
+ HandlerId :: config(),
+ FormatterConfig :: formatter_config().
+update_formatter_config(HandlerId,FormatterConfig) ->
+ logger_server:update_formatter_config(HandlerId,FormatterConfig).
+
+-spec update_formatter_config(HandlerId,Key,Value) ->
+ ok | {error,term()} when
+ HandlerId :: config(),
+ Key :: atom(),
+ Value :: term().
+update_formatter_config(HandlerId,Key,Value) ->
+ logger_server:update_formatter_config(HandlerId,#{Key=>Value}).
+
-spec set_module_level(Module,Level) -> ok | {error,term()} when
Module :: module(),
Level :: level().
set_module_level(Module,Level) ->
logger_server:set_module_level(Module,Level).
--spec reset_module_level(Module) -> ok | {error,term()} when
+-spec unset_module_level(Module) -> ok | {error,term()} when
Module :: module().
-reset_module_level(Module) ->
- logger_server:reset_module_level(Module).
+unset_module_level(Module) ->
+ logger_server:unset_module_level(Module).
%%%-----------------------------------------------------------------
%%% Misc
@@ -390,6 +443,19 @@ set_process_metadata(Meta) when is_map(Meta) ->
set_process_metadata(Meta) ->
erlang:error(badarg,[Meta]).
+-spec update_process_metadata(Meta) -> ok when
+ Meta :: metadata().
+update_process_metadata(Meta) when is_map(Meta) ->
+ case get_process_metadata() of
+ undefined ->
+ set_process_metadata(Meta);
+ Meta0 when is_map(Meta0) ->
+ set_process_metadata(maps:merge(Meta0,Meta)),
+ ok
+ end;
+update_process_metadata(Meta) ->
+ erlang:error(badarg,[Meta]).
+
-spec get_process_metadata() -> Meta | undefined when
Meta :: metadata().
get_process_metadata() ->
@@ -414,8 +480,9 @@ i() ->
i(_Action = print) ->
io:put_chars(i(string));
i(_Action = string) ->
- #{logger := #{level := Level, handlers := Handlers,
- filters := Filters, filter_default := FilterDefault},
+ #{logger := #{level := Level,
+ filters := Filters,
+ filter_default := FilterDefault},
handlers := HandlerConfigs,
module_levels := Modules} = i(term),
[io_lib:format("Current logger configuration:~n", []),
@@ -424,16 +491,15 @@ i(_Action = string) ->
io_lib:format(" Filters: ~n", []),
print_filters(4, Filters),
io_lib:format(" Handlers: ~n", []),
- print_handlers([C || {Id, _, _} = C <- HandlerConfigs,
- lists:member(Id, Handlers)]),
+ print_handlers(HandlerConfigs),
io_lib:format(" Level set per module: ~n", []),
print_module_levels(Modules)
];
i(_Action = term) ->
{Logger, Handlers, Modules} = logger_config:get(tid()),
- #{logger=>Logger,
- handlers=>Handlers,
- module_levels=>Modules}.
+ #{logger=>maps:remove(handlers,Logger),
+ handlers=>lists:keysort(1,Handlers),
+ module_levels=>lists:keysort(1,Modules)}.
print_filters(Indent, {Id, {Fun, Config}}) ->
io_lib:format("~sId: ~p~n"
@@ -477,204 +543,209 @@ print_module_levels({Module,Level}) ->
print_module_levels(ModuleLevels) ->
lists:map(fun print_module_levels/1, ModuleLevels).
--spec setup_standard_handler() -> ok | {error,term()}.
-setup_standard_handler() ->
- case get_logger_type() of
- {ok,silent} ->
- Level = get_logger_level(),
- ok = set_logger_config(level,Level),
- remove_handler(logger_simple);
- {ok,Type} ->
- Level = get_logger_level(),
- ok = set_logger_config(level,Level),
- Filters = get_logger_filters(),
- setup_standard_handler(Type,#{level=>Level,
- filter_default=>stop,
- filters=>Filters});
- Error ->
- Error
+-spec internal_init_logger() -> ok | {error,term()}.
+%% This function is responsible for configuring the logger.
+%% This is done before add_handlers because we want the
+%% logger settings to take effect before the kernel supervisor
+%% tree is started.
+internal_init_logger() ->
+ try
+ ok = logger:set_logger_config(level, get_logger_level()),
+ ok = logger:set_logger_config(filter_default, get_logger_filter_default()),
+
+ [case logger:add_logger_filter(Id, Filter) of
+ ok -> ok;
+ {error, Reason} -> throw(Reason)
+ end || {Id, Filter} <- get_logger_filters()],
+
+ _ = [[case logger:set_module_level(Module, Level) of
+ ok -> ok;
+ {error, Reason} -> throw(Reason)
+ end || Module <- Modules]
+ || {module_level, Level, Modules} <- get_logger_env()],
+
+ case logger:set_handler_config(simple,filters,
+ get_default_handler_filters()) of
+ ok -> ok;
+ {error,{not_found,simple}} -> ok
+ end,
+
+ init_kernel_handlers()
+ catch throw:Reason ->
+ ?LOG_ERROR("Invalid logger config: ~p", [Reason]),
+ {error, {bad_config, {kernel, Reason}}}
end.
--spec setup_standard_handler(Type,Config) -> ok | {error,term()} when
- Type :: tty | standard_io | standard_error | {file,File} |
- {file,File,Modes} | {disk_log,LogOpts} | false,
- File :: file:filename(),
- Modes :: [term()], % [file:mode()], or more specific?
- Config :: config(),
- LogOpts :: map().
-setup_standard_handler(false,#{level:=Level,filters:=Filters}) ->
- case set_handler_config(logger_simple,level,Level) of
- ok ->
- set_handler_config(logger_simple,filters,Filters);
- Error ->
- Error
- end;
-setup_standard_handler(Type,Config) ->
- {Module,TypeConfig} = get_type_config(Type),
- replace_simple_handler(?STANDARD_HANDLER,
- Module,
- maps:merge(Config,TypeConfig)).
-
--spec replace_simple_handler(Id,Module,Config) -> ok | {error,term()} when
- Id :: handler_id(),
- Module :: module(),
- Config :: config().
-replace_simple_handler(Id,Module,Config) ->
- _ = code:ensure_loaded(Module),
- DoBuffer = erlang:function_exported(Module,swap_buffer,2),
- case add_handler(Id,Module,Config#{wait_for_buffer=>DoBuffer}) of
- ok ->
- if DoBuffer ->
- {ok,Buffered} = logger_simple:get_buffer(),
- _ = remove_handler(logger_simple),
- Module:swap_buffer(?STANDARD_HANDLER,Buffered);
- true ->
- _ = remove_handler(logger_simple),
- ok
- end,
- ok;
- Error ->
- Error
+-spec init_kernel_handlers() -> ok | {error,term()}.
+%% Set up the kernel environment variables correctly.
+%% The actual handlers are started by a call to add_handlers.
+init_kernel_handlers() ->
+ try
+ case get_logger_type() of
+ {ok,silent} ->
+ ok = logger:remove_handler(simple);
+ {ok,false} ->
+ ok;
+ {ok,Type} ->
+ init_default_config(Type)
+ end
+ catch throw:Reason ->
+ ?LOG_ERROR("Invalid default handler config: ~p", [Reason]),
+ {error, {bad_config, {kernel, Reason}}}
+ end.
+
+-spec add_handlers(Application) -> ok | {error,term()} when
+ Application :: atom();
+ (HandlerConfig) -> ok | {error,term()} when
+ HandlerConfig :: [config_handler()].
+%% This function is responsible for resolving the handler config
+%% and then starting the correct handlers. This is done after the
+%% kernel supervisor tree has been started as it needs the logger_sup.
+add_handlers(App) when is_atom(App) ->
+ add_handlers(application:get_env(App, logger, []));
+add_handlers(HandlerConfig) ->
+ try
+ check_logger_config(HandlerConfig),
+ DefaultAdded =
+ lists:foldl(
+ fun({handler, default = Id, Module, Config}, _)
+ when not is_map_key(filters, Config) ->
+ %% The default handler should have a couple of extra filters
+ %% set on it by default.
+ DefConfig = #{ filter_default => stop,
+ filters => get_default_handler_filters()},
+ setup_handler(Id, Module, maps:merge(DefConfig,Config)),
+ true;
+ ({handler, Id, Module, Config}, Default) ->
+ setup_handler(Id, Module, Config),
+ Default orelse Id == default;
+ (_, Default) -> Default
+ end, false, HandlerConfig),
+        %% If a default handler was added, we try to remove the simple handler.
+        %% If the simple handler exists, it will replay its buffered log events
+        %% to the handler(s) added in the fold above.
+ _ = [case logger:remove_handler(simple) of
+ ok -> ok;
+ {error,{not_found,simple}} -> ok
+ end || DefaultAdded],
+ ok
+ catch throw:Reason ->
+ ?LOG_ERROR("Invalid logger handler config: ~p", [Reason]),
+ {error, {bad_config, {handler, Reason}}}
+ end.
+
+setup_handler(Id, Module, Config) ->
+ case logger:add_handler(Id, Module, Config) of
+ ok -> ok;
+ {error, Reason} -> throw(Reason)
end.
+check_logger_config(_) ->
+ ok.
+
+-spec get_logger_type() -> {ok, standard_io | false | silent |
+ {file, file:name_all()} |
+ {file, file:name_all(), [file:mode()]}}.
get_logger_type() ->
- Type0 =
- case application:get_env(kernel, logger_dest) of
- undefined ->
- application:get_env(kernel, error_logger);
- T ->
- T
- end,
- case Type0 of
+ case application:get_env(kernel, error_logger) of
{ok, tty} ->
- {ok, tty};
+ {ok, standard_io};
{ok, {file, File}} when is_list(File) ->
{ok, {file, File}};
{ok, {file, File, Modes}} when is_list(File), is_list(Modes) ->
{ok, {file, File, Modes}};
- {ok, {disk_log, File}} when is_list(File) ->
- {ok, {disk_log, get_disk_log_config(File)}};
{ok, false} ->
{ok, false};
{ok, silent} ->
{ok, silent};
undefined ->
- {ok, tty}; % default value
+ case lists:member({handler,default,undefined}, get_logger_env()) of
+ true ->
+ {ok, false};
+ false ->
+ {ok, standard_io} % default value
+ end;
{ok, Bad} ->
- {error,{bad_config, {kernel, {logger_dest, Bad}}}}
+ throw({error_logger, Bad})
end.
-get_disk_log_config(File) ->
- Config1 =
- case application:get_env(kernel,logger_disk_log_maxfiles) of
- undefined -> #{};
- {ok,MF} -> #{max_no_files=>MF}
- end,
- Config2 =
- case application:get_env(kernel,logger_disk_log_maxbytes) of
- undefined -> Config1;
- {ok,MB} -> Config1#{max_no_bytes=>MB}
- end,
- Config3 =
- case application:get_env(kernel,logger_disk_log_type) of
- undefined -> Config2;
- {ok,T} -> Config1#{type=>T}
- end,
- Config3#{file=>File}.
-
get_logger_level() ->
- case application:get_env(kernel,logger_level) of
- undefined -> info;
- {ok,Level} when ?IS_LEVEL(Level) -> Level
+ case application:get_env(kernel,logger_level,info) of
+ Level when ?IS_LEVEL(Level) ->
+ Level;
+ Level ->
+ throw({logger_level, Level})
+ end.
+
+get_logger_filter_default() ->
+ case lists:keyfind(filters,1,get_logger_env()) of
+ {filters,Default,_} ->
+ Default;
+ false ->
+ log
end.
get_logger_filters() ->
+ lists:foldl(
+ fun({filters, _, Filters}, _Acc) ->
+ Filters;
+ (_, Acc) ->
+ Acc
+ end, [], get_logger_env()).
+
+%% This function looks at the kernel logger environment
+%% and updates it so that the correct logger is configured
+init_default_config(Type) when Type==standard_io;
+ Type==standard_error;
+ element(1,Type)==file ->
+ Env = get_logger_env(),
+ DefaultFormatter = #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}},
+ DefaultConfig = DefaultFormatter#{logger_std_h=>#{type=>Type}},
+ NewLoggerEnv =
+ case lists:keyfind(default, 2, Env) of
+ {handler, default, Module, Config} ->
+ lists:map(
+ fun({handler, default, logger_std_h, _}) ->
+ %% Only want to add the logger_std_h config
+ %% if not configured by user AND the default
+ %% handler is still the logger_std_h.
+ {handler, default, Module, maps:merge(DefaultConfig,Config)};
+ ({handler, default, logger_disk_log_h, _}) ->
+ %% Add default formatter. The point of this
+ %% is to get the expected formatter config
+ %% for the default handler, since this
+ %% differs from the default values that
+ %% logger_formatter itself adds.
+ {handler, default, logger_disk_log_h, maps:merge(DefaultFormatter,Config)};
+ (Other) ->
+ Other
+ end, Env);
+ _ ->
+ %% Nothing has been configured, use default
+ [{handler, default, logger_std_h, DefaultConfig} | Env]
+ end,
+ application:set_env(kernel, logger, NewLoggerEnv, [{timeout,infinity}]);
+init_default_config(Type) ->
+ throw({illegal_logger_type,Type}).
+
+get_default_handler_filters() ->
case application:get_env(kernel, logger_sasl_compatible, false) of
true ->
?DEFAULT_HANDLER_FILTERS([beam,erlang,otp]);
false ->
Extra =
- case application:get_env(kernel, logger_log_progress, false) of
- true ->
+ case application:get_env(kernel, logger_progress_reports, stop) of
+ log ->
[];
- false ->
+ stop ->
[{stop_progress,
{fun logger_filters:progress/2,stop}}]
end,
Extra ++ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp,sasl])
end.
-get_type_config({disk_log,LogOpts}) ->
- {logger_disk_log_h,#{disk_log_opts=>LogOpts}};
-get_type_config(tty) ->
- %% This is only for backwards compatibility with error_logger and
- %% old kernel and sasl environment variables
- get_type_config(standard_io);
-get_type_config(Type) when Type==standard_io;
- Type==standard_error;
- element(1,Type)==file ->
- {logger_std_h,#{logger_std_h=>#{type=>Type}}};
-get_type_config(Type) ->
- {error,{illegal_logger_type,Type}}.
-
-%%%-----------------------------------------------------------------
--spec limit_term(term()) -> term().
-
-limit_term(Term) ->
- try get_format_depth() of
- unlimited -> Term;
- D -> io_lib:limit_term(Term, D)
- catch error:badarg ->
- %% This could happen during system termination, after
- %% application_controller process is dead.
- unlimited
- end.
-
--spec get_format_depth() -> 'unlimited' | pos_integer().
-
-get_format_depth() ->
- Depth =
- case application:get_env(kernel, logger_format_depth) of
- {ok, D} when is_integer(D) ->
- D;
- undefined ->
- case application:get_env(kernel, error_logger_format_depth) of
- {ok, D} when is_integer(D) ->
- D;
- undefined ->
- unlimited
- end
- end,
- max(10, Depth).
-
--spec get_max_size() -> 'unlimited' | pos_integer().
-
-get_max_size() ->
- case application:get_env(kernel, logger_max_size) of
- {ok, Size} when is_integer(Size) ->
- max(50, Size);
- undefined ->
- unlimited
- end.
-
--spec get_utc_config() -> boolean().
-
-get_utc_config() ->
- %% Kernel's logger_utc configuration overrides SASL utc_log, which
- %% in turn overrides stdlib config - in order to have uniform
- %% timestamps in log messages
- case application:get_env(kernel, logger_utc) of
- {ok, Val} -> Val;
- undefined ->
- case application:get_env(sasl, utc_log) of
- {ok, Val} -> Val;
- undefined ->
- case application:get_env(stdlib, utc_log) of
- {ok, Val} -> Val;
- undefined -> false
- end
- end
- end.
+get_logger_env() ->
+ application:get_env(kernel, logger, []).
%%%-----------------------------------------------------------------
%%% Internal
@@ -699,7 +770,7 @@ do_log_1(Level,Msg,Meta) ->
end.
-spec log_allowed(Location,Level,Msg,Meta) -> ok when
- Location :: map(),
+ Location :: location() | #{},
Level :: level(),
Msg :: {msg_fun(),term()} |
{io:format(),[term()]} |
@@ -792,7 +863,7 @@ proc_meta() ->
default(pid) -> self();
default(gl) -> group_leader();
-default(time) -> erlang:monotonic_time(microsecond).
+default(time) -> erlang:system_time(microsecond).
%% Remove everything upto and including this module from the stacktrace
filter_stacktrace(Module,[{Module,_,_,_}|_]) ->
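Among the API additions above, update_process_metadata/1 merges into the existing process metadata rather than replacing it; a quick shell sketch (the metadata keys are arbitrary examples):

    ok = logger:set_process_metadata(#{domain => [my_app], user_id => 42}),
    ok = logger:update_process_metadata(#{user_id => 43, request_id => <<"abc">>}),
    #{user_id := 43, request_id := <<"abc">>, domain := [my_app]} =
        logger:get_process_metadata().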
diff --git a/lib/kernel/src/logger_backend.erl b/lib/kernel/src/logger_backend.erl
index d9f5aa6faf..b3cf7d67dd 100644
--- a/lib/kernel/src/logger_backend.erl
+++ b/lib/kernel/src/logger_backend.erl
@@ -58,7 +58,7 @@ call_handlers(#{level:=Level}=Log,[Id|Handlers],Tid) ->
debug,
[{logger,removed_failing_handler},
{handler,{Id,Module}},
- {log,Log1},
+ {log_event,Log1},
{config,Config1},
{reason,{C,R,filter_stacktrace(S)}}]);
{error,{not_found,_}} ->
@@ -122,7 +122,7 @@ handle_filter_failed({Id,_}=Filter,Owner,Log,Reason) ->
[{logger,removed_failing_filter},
{filter,Filter},
{owner,Owner},
- {log,Log},
+ {log_event,Log},
{reason,Reason}]);
_ ->
ok
diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl
index 799aea9617..1d35c2e068 100644
--- a/lib/kernel/src/logger_config.erl
+++ b/lib/kernel/src/logger_config.erl
@@ -24,14 +24,14 @@
allow/2,allow/3,
get/2, get/3, get/1,
create/3, create/4, set/3,
- set_module_level/3,reset_module_level/2,
+ set_module_level/3,unset_module_level/2,
cache_module_level/2,
level_to_int/1]).
-include("logger_internal.hrl").
new(Name) ->
- _ = ets:new(Name,[set,protected,named_table]),
+ _ = ets:new(Name,[set,protected,named_table,{write_concurrency,true}]),
ets:whereis(Name).
delete(Tid,Id) ->
@@ -109,7 +109,7 @@ set_module_level(Tid,Module,Level) ->
ets:insert(Tid,{Module,level_to_int(Level)}),
ok.
-reset_module_level(Tid,Module) ->
+unset_module_level(Tid,Module) ->
ets:delete(Tid,Module), % should possibley overwrite instead of delete?
ok.
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
index 3b71f936d8..773aa75bc6 100644
--- a/lib/kernel/src/logger_disk_log_h.erl
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -34,8 +34,8 @@
%% logger callbacks
-export([log/2,
- adding_handler/2, removing_handler/1,
- changing_config/3, swap_buffer/2]).
+ adding_handler/1, removing_handler/1,
+ changing_config/2, swap_buffer/2]).
%%%===================================================================
%%% API
@@ -108,8 +108,8 @@ reset(Name) ->
%%%-----------------------------------------------------------------
%%% Handler being added
-adding_handler(Name, Config) ->
- case check_config(adding, Name, Config) of
+adding_handler(#{id:=Name}=Config) ->
+ case check_config(adding, Config) of
{ok, Config1} ->
%% create initial handler state by merging defaults with config
HConfig = maps:get(?MODULE, Config1, #{}),
@@ -136,10 +136,9 @@ adding_handler(Name, Config) ->
%%%-----------------------------------------------------------------
%%% Updating handler config
-changing_config(Name,
- OldConfig=#{id:=Id, disk_log_opts:=DLOpts},
- NewConfig=#{id:=Id, disk_log_opts:=DLOpts}) ->
- case check_config(changing, Name, NewConfig) of
+changing_config(OldConfig=#{id:=Name, disk_log_opts:=DLOpts},
+ NewConfig=#{id:=Name, disk_log_opts:=DLOpts}) ->
+ case check_config(changing, NewConfig) of
Result = {ok,NewConfig1} ->
try gen_server:call(Name, {change_config,OldConfig,NewConfig1},
?DEFAULT_CALL_TIMEOUT) of
@@ -151,12 +150,10 @@ changing_config(Name,
Error ->
Error
end;
-changing_config(_Name, OldConfig, NewConfig) ->
+changing_config(OldConfig, NewConfig) ->
{error,{illegal_config_change,OldConfig,NewConfig}}.
-check_config(adding, Name, Config0) ->
- %% Merge in defaults on top level
- Config = maps:merge(#{id => Name}, Config0),
+check_config(adding, #{id:=Name}=Config) ->
%% Merge in defaults on handler level
LogOpts0 = maps:get(disk_log_opts, Config, #{}),
LogOpts = merge_default_logopts(Name, LogOpts0),
@@ -173,7 +170,7 @@ check_config(adding, Name, Config0) ->
Error ->
Error
end;
-check_config(changing, _Name, Config) ->
+check_config(changing, Config) ->
MyConfig = maps:get(?MODULE, Config, #{}),
case check_my_config(maps:to_list(MyConfig)) of
ok -> {ok,Config};
@@ -223,7 +220,7 @@ check_my_config([]) ->
%%%-----------------------------------------------------------------
%%% Handler being removed
-removing_handler(Name) ->
+removing_handler(#{id:=Name}) ->
stop(Name).
%%%-----------------------------------------------------------------
@@ -238,15 +235,15 @@ swap_buffer(Name,Buffer) ->
%%%-----------------------------------------------------------------
%%% Log a string or report
--spec log(Log, Config) -> ok | dropped when
- Log :: logger:log(),
+-spec log(LogEvent, Config) -> ok | dropped when
+ LogEvent :: logger:log_event(),
Config :: logger:config().
-log(Log,Config=#{id:=Name}) ->
+log(LogEvent,Config=#{id:=Name}) ->
%% if the handler has crashed, we must drop this request
%% and hope the handler restarts so we can try again
true = is_pid(whereis(Name)),
- Bin = logger_h_common:log_to_binary(Log,Config),
+ Bin = logger_h_common:log_to_binary(LogEvent,Config),
logger_h_common:call_cast_or_drop(Name, Bin).
@@ -278,10 +275,11 @@ init([Name, Config = #{disk_log_opts := LogOpts},
last_log_ts => T0,
burst_win_ts => T0,
burst_msg_count => 0,
+ last_op => sync,
prev_log_result => ok,
prev_sync_result => ok,
prev_disk_log_info => undefined}),
- gen_server:cast(self(), {repeated_disk_log_sync,T0}),
+ gen_server:cast(self(), repeated_disk_log_sync),
enter_loop(Config, State1);
Error ->
logger_h_common:error_notify({open_disk_log,Name,Error}),
@@ -316,8 +314,7 @@ handle_call(disk_log_sync, _From, State = #{id := Name}) ->
{reply, Result, State1};
handle_call({change_config,_OldConfig,NewConfig}, _From,
- State = #{filesync_repeat_interval := FSyncInt0,
- last_log_ts := LastLogTS}) ->
+ State = #{filesync_repeat_interval := FSyncInt0}) ->
HConfig = maps:get(?MODULE, NewConfig, #{}),
State1 = #{toggle_sync_qlen := TSQL,
drop_new_reqs_qlen := DNRQL,
@@ -338,9 +335,8 @@ handle_call({change_config,_OldConfig,NewConfig}, _From,
_ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
State,
undefined)),
- _ = gen_server:cast(self(), {repeated_disk_log_sync,
- LastLogTS})
- end,
+ _ = gen_server:cast(self(), repeated_disk_log_sync)
+ end,
{reply, ok, State1};
false ->
{reply, {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}, State}
@@ -370,24 +366,23 @@ handle_cast({log, Bin}, State) ->
%% clause gets called repeatedly by the handler. In order to
%% guarantee that a filesync *always* happens after the last log
%% request, the repeat operation must be active!
-handle_cast({repeated_disk_log_sync,LastLogTS0},
+handle_cast(repeated_disk_log_sync,
State = #{id := Name,
filesync_repeat_interval := FSyncInt,
- last_log_ts := LastLogTS1}) ->
+ last_op := LastOp}) ->
State1 =
if is_integer(FSyncInt) ->
%% only do filesync if something has been
%% written since last time we checked
- NewState = if LastLogTS1 == LastLogTS0 ->
+ NewState = if LastOp == sync ->
State;
true ->
disk_log_sync(Name, State)
end,
{ok,TRef} =
timer:apply_after(FSyncInt, gen_server,cast,
- [self(),
- {repeated_disk_log_sync,LastLogTS1}]),
- NewState#{rep_sync_tref => TRef};
+ [self(),repeated_disk_log_sync]),
+ NewState#{rep_sync_tref => TRef, last_op => sync};
true ->
State
end,
@@ -649,10 +644,9 @@ close_disk_log(Name, _) ->
ok.
disk_log_write(Name, Bin, State) ->
- Result =
case ?disk_log_blog(Name, Bin) of
ok ->
- ok;
+ State#{prev_log_result => ok, last_op => write};
LogError ->
_ = case maps:get(prev_log_result, State) of
LogError ->
@@ -664,29 +658,26 @@ disk_log_write(Name, Bin, State) ->
LogOpts,
LogError})
end,
- LogError
- end,
- State#{prev_log_result => Result}.
+ State#{prev_log_result => LogError}
+ end.
disk_log_sync(Name, State) ->
- Result =
- case ?disk_log_sync(Name) of
- ok ->
- ok;
- SyncError ->
- _ = case maps:get(prev_sync_result, State) of
- SyncError ->
- %% don't report same error twice
- ok;
- _ ->
- LogOpts = maps:get(log_opts, State),
- logger_h_common:error_notify({Name,sync,
- LogOpts,
- SyncError})
- end,
- SyncError
- end,
- State#{prev_sync_result => Result}.
+ case ?disk_log_sync(Name) of
+ ok ->
+ State#{prev_sync_result => ok, last_op => sync};
+ SyncError ->
+ _ = case maps:get(prev_sync_result, State) of
+ SyncError ->
+ %% don't report same error twice
+ ok;
+ _ ->
+ LogOpts = maps:get(log_opts, State),
+ logger_h_common:error_notify({Name,sync,
+ LogOpts,
+ SyncError})
+ end,
+ State#{prev_sync_result => SyncError}
+ end.
error_notify_new(Info,Info, _Term) ->
ok;
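For reference, the handler reworked above is attached through the normal logger API with its disk_log options under the disk_log_opts key; an illustrative call (file name and limits are assumptions):

    ok = logger:add_handler(my_disk_log, logger_disk_log_h,
                            #{disk_log_opts => #{file => "log/my_app.log",
                                                 type => wrap,
                                                 max_no_files => 4,
                                                 max_no_bytes => 1000000}}).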
diff --git a/lib/kernel/src/logger_filters.erl b/lib/kernel/src/logger_filters.erl
index 85928f0fd6..7359b3b4b7 100644
--- a/lib/kernel/src/logger_filters.erl
+++ b/lib/kernel/src/logger_filters.erl
@@ -27,30 +27,31 @@
-include("logger_internal.hrl").
-define(IS_ACTION(A), (A==log orelse A==stop)).
--spec domain(Log,Extra) -> logger:filter_return() when
- Log :: logger:log(),
+-spec domain(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
Extra :: {Action,Compare,MatchDomain},
Action :: log | stop,
- Compare :: prefix_of | starts_with | equals | no_domain,
+ Compare :: super | sub | equal | not_equal | undefined,
MatchDomain :: list(atom()).
-domain(#{meta:=Meta}=Log,{Action,Compare,MatchDomain})
+domain(#{meta:=Meta}=LogEvent,{Action,Compare,MatchDomain})
when ?IS_ACTION(Action) andalso
- (Compare==prefix_of orelse
- Compare==starts_with orelse
- Compare==equals orelse
- Compare==no_domain) andalso
+ (Compare==super orelse
+ Compare==sub orelse
+ Compare==equal orelse
+ Compare==not_equal orelse
+ Compare==undefined) andalso
is_list(MatchDomain) ->
- filter_domain(Compare,Meta,MatchDomain,on_match(Action,Log));
-domain(Log,Extra) ->
- erlang:error(badarg,[Log,Extra]).
+ filter_domain(Compare,Meta,MatchDomain,on_match(Action,LogEvent));
+domain(LogEvent,Extra) ->
+ erlang:error(badarg,[LogEvent,Extra]).
--spec level(Log,Extra) -> logger:filter_return() when
- Log :: logger:log(),
+-spec level(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
Extra :: {Action,Operator,MatchLevel},
Action :: log | stop,
Operator :: neq | eq | lt | gt | lteq | gteq,
MatchLevel :: logger:level().
-level(#{level:=L1}=Log,{Action,Op,L2})
+level(#{level:=L1}=LogEvent,{Action,Op,L2})
when ?IS_ACTION(Action) andalso
(Op==neq orelse
Op==eq orelse
@@ -59,37 +60,40 @@ level(#{level:=L1}=Log,{Action,Op,L2})
Op==lteq orelse
Op==gteq) andalso
?IS_LEVEL(L2) ->
- filter_level(Op,L1,L2,on_match(Action,Log));
-level(Log,Extra) ->
- erlang:error(badarg,[Log,Extra]).
+ filter_level(Op,L1,L2,on_match(Action,LogEvent));
+level(LogEvent,Extra) ->
+ erlang:error(badarg,[LogEvent,Extra]).
--spec progress(Log,Extra) -> logger:filter_return() when
- Log :: logger:log(),
+-spec progress(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
Extra :: log | stop.
-progress(Log,Action) when ?IS_ACTION(Action) ->
- filter_progress(Log,on_match(Action,Log));
-progress(Log,Action) ->
- erlang:error(badarg,[Log,Action]).
+progress(LogEvent,Action) when ?IS_ACTION(Action) ->
+ filter_progress(LogEvent,on_match(Action,LogEvent));
+progress(LogEvent,Action) ->
+ erlang:error(badarg,[LogEvent,Action]).
--spec remote_gl(Log,Extra) -> logger:filter_return() when
- Log :: logger:log(),
+-spec remote_gl(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
Extra :: log | stop.
-remote_gl(Log,Action) when ?IS_ACTION(Action) ->
- filter_remote_gl(Log,on_match(Action,Log));
-remote_gl(Log,Action) ->
- erlang:error(badarg,[Log,Action]).
+remote_gl(LogEvent,Action) when ?IS_ACTION(Action) ->
+ filter_remote_gl(LogEvent,on_match(Action,LogEvent));
+remote_gl(LogEvent,Action) ->
+ erlang:error(badarg,[LogEvent,Action]).
%%%-----------------------------------------------------------------
%%% Internal
-filter_domain(prefix_of,#{domain:=Domain},MatchDomain,OnMatch) ->
+filter_domain(super,#{domain:=Domain},MatchDomain,OnMatch) ->
is_prefix(Domain,MatchDomain,OnMatch);
-filter_domain(starts_with,#{domain:=Domain},MatchDomain,OnMatch) ->
+filter_domain(sub,#{domain:=Domain},MatchDomain,OnMatch) ->
is_prefix(MatchDomain,Domain,OnMatch);
-filter_domain(equals,#{domain:=Domain},Domain,OnMatch) ->
+filter_domain(equal,#{domain:=Domain},Domain,OnMatch) ->
OnMatch;
-filter_domain(Action,Meta,_,OnMatch) ->
+filter_domain(not_equal,#{domain:=Domain},MatchDomain,OnMatch)
+ when Domain=/=MatchDomain ->
+ OnMatch;
+filter_domain(Compare,Meta,_,OnMatch) ->
case maps:is_key(domain,Meta) of
- false when Action==no_domain -> OnMatch;
+ false when Compare==undefined; Compare==not_equal -> OnMatch;
_ -> ignore
end.
@@ -119,5 +123,5 @@ filter_remote_gl(#{meta:=#{gl:=GL}},OnMatch) when node(GL)=/=node() ->
filter_remote_gl(_,_) ->
ignore.
-on_match(log,Log) -> Log;
+on_match(log,LogEvent) -> LogEvent;
on_match(stop,_) -> stop.
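With the comparison atoms renamed above, a domain filter is installed like this (handler id and match domain follow the defaults used elsewhere in this diff; together with filter_default => stop in the handler config it lets through only events whose domain lies under the match domain):

    ok = logger:add_handler_filter(default, otp_domain,
                                   {fun logger_filters:domain/2,
                                    {log, sub, [beam, erlang, otp, sasl]}}).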
diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl
index 386e7832e2..4d727b3da0 100644
--- a/lib/kernel/src/logger_formatter.erl
+++ b/lib/kernel/src/logger_formatter.erl
@@ -20,26 +20,28 @@
-module(logger_formatter).
-export([format/2]).
+-export([check_config/1]).
-include("logger_internal.hrl").
%%%-----------------------------------------------------------------
%%% Types
+-type config() :: #{chars_limit=>pos_integer()| unlimited,
+ depth=>pos_integer() | unlimited,
+ legacy_header=>boolean(),
+ max_size=>pos_integer() | unlimited,
+ report_cb=>fun((logger:report()) -> {io:format(),[term()]}),
+ single_line=>boolean(),
+ template=>template(),
+ time_designator=>byte(),
+ time_offset=>integer()|[byte()]}.
-type template() :: [atom()|tuple()|string()].
%%%-----------------------------------------------------------------
%%% API
--spec format(Log,Config) -> String when
- Log :: logger:log(),
- Config :: #{single_line=>boolean(),
- legacy_header=>boolean(),
- report_cb=>fun((logger:report()) -> {io:format(),[term()]}),
- chars_limit=>pos_integer()| unlimited,
- max_size=>pos_integer() | unlimited,
- depth=>pos_integer() | unlimited,
- template=>template(),
- utc=>boolean()},
- String :: string().
+-spec format(LogEvent,Config) -> unicode:chardata() when
+ LogEvent :: logger:log_event(),
+ Config :: config().
format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
when is_map(Config0) ->
Config = add_default_config(Config0),
@@ -84,8 +86,6 @@ format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
do_format(Level,Msg,Data,[level|Format],Config) ->
[to_string(level,Level,Config)|do_format(Level,Msg,Data,Format,Config)];
-do_format(Level,Msg,Data,[msg|Format],Config) ->
- [Msg|do_format(Level,Msg,Data,Format,Config)];
do_format(Level,Msg,Data,[Key|Format],Config) when is_atom(Key); is_tuple(Key) ->
Value = value(Key,Data),
[to_string(Key,Value,Config)|do_format(Level,Msg,Data,Format,Config)];
@@ -129,9 +129,7 @@ to_string(X) ->
io_lib:format("~tp",[X]).
format_msg({string,Chardata},Meta,Config) ->
- try unicode:characters_to_list(Chardata)
- catch _:_ -> format_msg({"INVALID STRING: ~tp",[Chardata]},Meta,Config)
- end;
+ format_msg({"~ts",[Chardata]},Meta,Config);
format_msg({report,_}=Msg,Meta,#{report_cb:=Fun}=Config) when is_function(Fun,1) ->
format_msg(Msg,Meta#{report_cb=>Fun},maps:remove(report_cb,Config));
format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,1) ->
@@ -196,29 +194,24 @@ truncate(String,Size) ->
String
end.
-format_time(Timestamp,Config) when is_integer(Timestamp) ->
- {Date,Time,Micro} = timestamp_to_datetimemicro(Timestamp,Config),
- format_time(Date,Time,Micro);
-format_time(Other,_Config) ->
- %% E.g. a string
- to_string(Other).
-
-format_time({Y,M,D},{H,Min,S},Micro) ->
- io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w",
- [Y,M,D,H,Min,S,Micro]).
+%% SysTime is the system time in microseconds
+format_time(SysTime,#{time_offset:=Offset,time_designator:=Des})
+ when is_integer(SysTime) ->
+ calendar:system_time_to_rfc3339(SysTime,[{unit,microsecond},
+ {offset,Offset},
+ {time_designator,Des}]).
-%% Assuming this is monotonic time in microseconds
-timestamp_to_datetimemicro(Timestamp,Config) when is_integer(Timestamp) ->
- SysTime = Timestamp + erlang:time_offset(microsecond),
+%% SysTime is the system time in microseconds
+timestamp_to_datetimemicro(SysTime,Config) when is_integer(SysTime) ->
Micro = SysTime rem 1000000,
Sec = SysTime div 1000000,
UniversalTime = erlang:posixtime_to_universaltime(Sec),
- {Date,Time} =
- case Config of
- #{utc:=true} -> UniversalTime;
- _ -> erlang:universaltime_to_localtime(UniversalTime)
+ {{Date,Time},UtcStr} =
+ case offset_to_utc(maps:get(time_offset,Config)) of
+ true -> {UniversalTime,"UTC "};
+ _ -> {erlang:universaltime_to_localtime(UniversalTime),""}
end,
- {Date,Time,Micro}.
+ {Date,Time,Micro,UtcStr}.
format_mfa({M,F,A}) when is_atom(M), is_atom(F), is_integer(A) ->
atom_to_list(M)++":"++atom_to_list(F)++"/"++integer_to_list(A);
@@ -231,9 +224,11 @@ maybe_add_legacy_header(Level,
#{time:=Timestamp}=Meta,
#{legacy_header:=true}=Config) ->
#{title:=Title}=MyMeta = add_legacy_title(Level,maps:get(?MODULE,Meta,#{})),
- {{Y,Mo,D},{H,Mi,S},Micro} = timestamp_to_datetimemicro(Timestamp,Config),
- Header = io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===",
- [Title,D,month(Mo),Y,H,Mi,S,Micro,utcstr(Config)]),
+ {{Y,Mo,D},{H,Mi,S},Micro,UtcStr} =
+ timestamp_to_datetimemicro(Timestamp,Config),
+ Header =
+ io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===",
+ [Title,D,month(Mo),Y,H,Mi,S,Micro,UtcStr]),
Meta#{?MODULE=>MyMeta#{header=>Header}};
maybe_add_legacy_header(_,Meta,_) ->
Meta.
@@ -257,20 +252,20 @@ month(10) -> "Oct";
month(11) -> "Nov";
month(12) -> "Dec".
-utcstr(#{utc:=true}) -> "UTC ";
-utcstr(_) -> "".
-
-add_default_config(#{utc:=_}=Config0) ->
+%% Ensure that all valid configuration parameters exist in the final
+%% configuration map
+add_default_config(Config0) ->
Default =
#{legacy_header=>false,
- single_line=>false,
- chars_limit=>unlimited},
- MaxSize = get_max_size(maps:get(max_size,Config0,false)),
- Depth = get_depth(maps:get(depth,Config0,false)),
+ single_line=>true,
+ chars_limit=>unlimited,
+ time_designator=>$T},
+ MaxSize = get_max_size(maps:get(max_size,Config0,undefined)),
+ Depth = get_depth(maps:get(depth,Config0,undefined)),
+ Offset = get_offset(maps:get(time_offset,Config0,undefined)),
add_default_template(maps:merge(Default,Config0#{max_size=>MaxSize,
- depth=>Depth}));
-add_default_config(Config) ->
- add_default_config(Config#{utc=>logger:get_utc_config()}).
+ depth=>Depth,
+ time_offset=>Offset})).
add_default_template(#{template:=_}=Config) ->
Config;
@@ -284,12 +279,123 @@ default_template(#{single_line:=true}) ->
default_template(_) ->
?DEFAULT_FORMAT_TEMPLATE.
-get_max_size(false) ->
- logger:get_max_size();
+get_max_size(undefined) ->
+ unlimited;
get_max_size(S) ->
max(10,S).
-get_depth(false) ->
- logger:get_format_depth();
+get_depth(undefined) ->
+ error_logger:get_format_depth();
get_depth(S) ->
max(5,S).
+
+get_offset(undefined) ->
+ utc_to_offset(get_utc_config());
+get_offset(Offset) ->
+ Offset.
+
+utc_to_offset(true) ->
+ "Z";
+utc_to_offset(false) ->
+ "".
+
+get_utc_config() ->
+ %% SASL utc_log overrides stdlib config - in order to have uniform
+ %% timestamps in log messages
+ case application:get_env(sasl, utc_log) of
+ {ok, Val} when is_boolean(Val) -> Val;
+ _ ->
+ case application:get_env(stdlib, utc_log) of
+ {ok, Val} when is_boolean(Val) -> Val;
+ _ -> false
+ end
+ end.
+
+offset_to_utc(Z) when Z=:=0; Z=:="z"; Z=:="Z" ->
+ true;
+offset_to_utc([$+|Tz]) ->
+ case io_lib:fread("~d:~d", Tz) of
+ {ok, [0, 0], []} ->
+ true;
+ _ ->
+ false
+ end;
+offset_to_utc(_) ->
+ false.
+
+-spec check_config(Config) -> ok | {error,term()} when
+ Config :: config().
+check_config(Config) when is_map(Config) ->
+ do_check_config(maps:to_list(Config));
+check_config(Config) ->
+ {error,{invalid_formatter_config,?MODULE,Config}}.
+
+do_check_config([{Type,L}|Config]) when Type == chars_limit;
+ Type == depth;
+ Type == max_size ->
+ case check_limit(L) of
+ ok -> do_check_config(Config);
+ error -> {error,{invalid_formatter_config,?MODULE,{Type,L}}}
+ end;
+do_check_config([{single_line,SL}|Config]) when is_boolean(SL) ->
+ do_check_config(Config);
+do_check_config([{legacy_header,LH}|Config]) when is_boolean(LH) ->
+ do_check_config(Config);
+do_check_config([{report_cb,RCB}|Config]) when is_function(RCB,1) ->
+ do_check_config(Config);
+do_check_config([{template,T}|Config]) when is_list(T) ->
+ case lists:all(fun(X) when is_atom(X) -> true;
+ (X) when is_tuple(X), is_atom(element(1,X)) -> true;
+ (X) when is_list(X) -> io_lib:printable_unicode_list(X);
+ (_) -> false
+ end,
+ T) of
+ true ->
+ do_check_config(Config);
+ false ->
+ {error,{invalid_formatter_template,?MODULE,T}}
+ end;
+do_check_config([{time_offset,Offset}|Config]) ->
+ case check_offset(Offset) of
+ ok ->
+ do_check_config(Config);
+ error ->
+ {error,{invalid_formatter_config,?MODULE,{time_offset,Offset}}}
+ end;
+do_check_config([{time_designator,Char}|Config]) when Char>=0, Char=<255 ->
+ case io_lib:printable_latin1_list([Char]) of
+ true ->
+ do_check_config(Config);
+ false ->
+ {error,{invalid_formatter_config,?MODULE,{time_designator,Char}}}
+ end;
+do_check_config([C|_]) ->
+ {error,{invalid_formatter_config,?MODULE,C}};
+do_check_config([]) ->
+ ok.
+
+check_limit(L) when is_integer(L), L>0 ->
+ ok;
+check_limit(unlimited) ->
+ ok;
+check_limit(_) ->
+ error.
+
+check_offset(I) when is_integer(I) ->
+ ok;
+check_offset(Tz) when Tz=:=""; Tz=:="Z"; Tz=:="z" ->
+ ok;
+check_offset([Sign|Tz]) when Sign=:=$+; Sign=:=$- ->
+ check_timezone(Tz);
+check_offset(_) ->
+ error.
+
+check_timezone(Tz) ->
+ try io_lib:fread("~d:~d", Tz) of
+ {ok, [_, _], []} ->
+ ok;
+ _ ->
+ error
+ catch _:_ ->
+ error
+ end.
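
The time handling above drops the old utc flag in favour of time_offset and time_designator and renders timestamps with calendar:system_time_to_rfc3339/2. Below is a minimal sketch, not part of the patch, of how those two options behave, assuming a system-time timestamp in microseconds as the formatter now expects:

    %% Sketch only; the values mirror the formatter defaults above.
    SysTime = erlang:system_time(microsecond),
    %% UTC with the ISO 8601 'T' designator (time_offset => "Z").
    Utc = calendar:system_time_to_rfc3339(SysTime, [{unit, microsecond},
                                                    {offset, "Z"},
                                                    {time_designator, $T}]),
    %% Local time with a space designator (time_offset => "").
    Local = calendar:system_time_to_rfc3339(SysTime, [{unit, microsecond},
                                                      {offset, ""},
                                                      {time_designator, $\s}]),
    io:format("~ts~n~ts~n", [Utc, Local]).
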
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
index 7caad366ae..336398cd4a 100644
--- a/lib/kernel/src/logger_h_common.erl
+++ b/lib/kernel/src/logger_h_common.erl
@@ -39,8 +39,8 @@
%%%-----------------------------------------------------------------
%%% Convert log data of any form to binary
--spec log_to_binary(Log,Config) -> LogString when
- Log :: logger:log(),
+-spec log_to_binary(LogEvent,Config) -> LogString when
+ LogEvent :: logger:log_event(),
Config :: logger:config(),
LogString :: binary().
log_to_binary(#{msg:={report,_},meta:=#{report_cb:=_}}=Log,Config) ->
@@ -58,7 +58,7 @@ do_log_to_binary(Log,Config) ->
catch _:_ ->
?LOG_INTERNAL(debug,[{formatter_error,Formatter},
{config,FormatterConfig},
- {log,Log},
+ {log_event,Log},
{bad_return_value,String}]),
<<"FORMATTER ERROR: bad_return_value">>
end.
@@ -69,10 +69,10 @@ try_format(Log,Formatter,FormatterConfig) ->
C:R:S ->
?LOG_INTERNAL(debug,[{formatter_crashed,Formatter},
{config,FormatterConfig},
- {log,Log},
+ {log_event,Log},
{reason,
{C,R,logger:filter_stacktrace(?MODULE,S)}}]),
- case {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG} of
+ case {?DEFAULT_FORMATTER,#{}} of
{Formatter,FormatterConfig} ->
"DEFAULT FORMATTER CRASHED";
{DefaultFormatter,DefaultConfig} ->
@@ -135,7 +135,8 @@ call_cast_or_drop(Name, Bin) ->
_:{timeout,_} ->
?observe(Name,{dropped,1})
end;
- drop -> ?observe(Name,{dropped,1})
+ drop ->
+ ?observe(Name,{dropped,1})
catch
%% if the ETS table doesn't exist (maybe because of a
%% handler restart), we can only drop the request
@@ -152,12 +153,15 @@ check_load(State = #{id:=Name, mode := Mode,
flush_reqs_qlen := FlushQLen}) ->
{_,Mem} = process_info(self(), memory),
?observe(Name,{max_mem,Mem}),
- %% make sure the handler process doesn't get scheduled
- %% out between the message_queue_len check below and the
- %% action that follows (flush or write).
{_,QLen} = process_info(self(), message_queue_len),
?observe(Name,{max_qlen,QLen}),
-
+ %% When the handler process gets scheduled in, it's impossible
+ %% to predict the QLen. We could jump "up" arbitrarily from say
+ %% async to sync, async to drop, sync to flush, etc. However, when
+ %% the handler process manages the log requests (without flushing),
+ %% one after the other, we will move "down" from drop to sync and
+ %% from sync to async. This way we don't risk getting stuck in
+ %% drop or sync mode with an empty mailbox.
{Mode1,_NewDrops,_NewFlushes} =
if
QLen >= FlushQLen ->
@@ -292,7 +296,7 @@ overload_levels_ok(HandlerConfig) ->
TSQL = maps:get(toggle_sync_qlen, HandlerConfig, ?TOGGLE_SYNC_QLEN),
DNRQL = maps:get(drop_new_reqs_qlen, HandlerConfig, ?DROP_NEW_REQS_QLEN),
FRQL = maps:get(flush_reqs_qlen, HandlerConfig, ?FLUSH_REQS_QLEN),
- (TSQL < DNRQL) andalso (DNRQL < FRQL).
+ (DNRQL > 1) andalso (TSQL =< DNRQL) andalso (DNRQL =< FRQL).
error_notify(Term) ->
?internal_log(error, Term).
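
The relaxed overload_levels_ok/1 check above now accepts equal queue-length limits, as long as drop_new_reqs_qlen is greater than 1. A small sketch of the invariant, with configuration values that are assumptions rather than anything from the patch:

    %% Hypothetical handler configuration; only the relation between the
    %% three limits matters for the check.
    HConfig = #{toggle_sync_qlen   => 10,
                drop_new_reqs_qlen => 200,
                flush_reqs_qlen    => 1000},
    TSQL  = maps:get(toggle_sync_qlen,   HConfig),
    DNRQL = maps:get(drop_new_reqs_qlen, HConfig),
    FRQL  = maps:get(flush_reqs_qlen,    HConfig),
    %% New invariant: limits may be equal, but dropping must start above 1.
    true = (DNRQL > 1) andalso (TSQL =< DNRQL) andalso (DNRQL =< FRQL).
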
diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl
index 89378dbb10..ed365ce6eb 100644
--- a/lib/kernel/src/logger_h_common.hrl
+++ b/lib/kernel/src/logger_h_common.hrl
@@ -124,7 +124,7 @@
%%% slow down execution and therefore should not be included in code
%%% to be officially released.
-%% -define(TEST_HOOKS, true).
+-define(TEST_HOOKS, true).
-ifdef(TEST_HOOKS).
-define(TEST_HOOKS_TAB, logger_h_test_hooks).
diff --git a/lib/kernel/src/logger_internal.hrl b/lib/kernel/src/logger_internal.hrl
index 82df499c2b..fedd6db370 100644
--- a/lib/kernel/src/logger_internal.hrl
+++ b/lib/kernel/src/logger_internal.hrl
@@ -22,16 +22,16 @@
-define(LOGGER_KEY,'$logger_config$').
-define(HANDLER_KEY,'$handler_config$').
-define(LOGGER_META_KEY,'$logger_metadata$').
--define(STANDARD_HANDLER, logger_std_h).
+-define(STANDARD_HANDLER, default).
-define(DEFAULT_HANDLER_FILTERS,
?DEFAULT_HANDLER_FILTERS([beam,erlang,otp])).
-define(DEFAULT_HANDLER_FILTERS(Domain),
[{remote_gl,{fun logger_filters:remote_gl/2,stop}},
- {domain,{fun logger_filters:domain/2,{log,prefix_of,Domain}}},
- {no_domain,{fun logger_filters:domain/2,{log,no_domain,[]}}}]).
+ {domain,{fun logger_filters:domain/2,{log,super,Domain}}},
+ {no_domain,{fun logger_filters:domain/2,{log,undefined,[]}}}]).
-define(DEFAULT_FORMATTER,logger_formatter).
-define(DEFAULT_FORMAT_CONFIG,#{legacy_header=>true,
- template=>?DEFAULT_FORMAT_TEMPLATE_HEADER}).
+ single_line=>false}).
-define(DEFAULT_FORMAT_TEMPLATE_HEADER,
[{logger_formatter,header},"\n",msg,"\n"]).
-define(DEFAULT_FORMAT_TEMPLATE_SINGLE,
diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl
index 6ef3b8582a..47010c9fa5 100644
--- a/lib/kernel/src/logger_server.erl
+++ b/lib/kernel/src/logger_server.erl
@@ -25,9 +25,10 @@
-export([start_link/0,
add_handler/3, remove_handler/1,
add_filter/2, remove_filter/2,
- set_module_level/2, reset_module_level/1,
+ set_module_level/2, unset_module_level/1,
cache_module_level/1,
- set_config/2, set_config/3]).
+ set_config/2, set_config/3, update_config/2,
+ update_formatter_config/2]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
@@ -36,8 +37,9 @@
-include("logger_internal.hrl").
-define(SERVER, logger).
+-define(LOGGER_SERVER_TAG, '$logger_cb_process').
--record(state, {tid}).
+-record(state, {tid, async_req, async_req_queue}).
%%%===================================================================
%%% API
@@ -47,23 +49,18 @@ start_link() ->
gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
add_handler(Id,Module,Config0) ->
- case sanity_check(logger,handlers,[Id]) of
- ok ->
- try check_mod(Module) of
+ try {check_id(Id),check_mod(Module)} of
+ {ok,ok} ->
+ case sanity_check(Id,Config0) of
ok ->
- case sanity_check(Id,Config0) of
- ok ->
- Default = default_config(Id),
- Config = maps:merge(Default,Config0),
- call({add_handler,Id,Module,Config});
- Error ->
- Error
- end
- catch throw:Error ->
- {error,Error}
- end;
- Error ->
- Error
+ Default = default_config(Id),
+ Config = maps:merge(Default,Config0),
+ call({add_handler,Id,Module,Config});
+ Error ->
+ Error
+ end
+ catch throw:Error ->
+ {error,Error}
end.
remove_handler(HandlerId) ->
@@ -86,9 +83,9 @@ set_module_level(Module,Level) when is_atom(Module) ->
set_module_level(Module,_) ->
{error,{not_a_module,Module}}.
-reset_module_level(Module) when is_atom(Module) ->
- call({reset_module_level,Module});
-reset_module_level(Module) ->
+unset_module_level(Module) when is_atom(Module) ->
+ call({unset_module_level,Module});
+unset_module_level(Module) ->
{error,{not_a_module,Module}}.
cache_module_level(Module) ->
@@ -96,10 +93,7 @@ cache_module_level(Module) ->
set_config(Owner,Key,Value) ->
- case sanity_check(Owner,Key,Value) of
- ok -> call({update_config,Owner,#{Key=>Value}});
- Error -> Error
- end.
+ update_config(Owner,#{Key=>Value}).
set_config(Owner,Config0) ->
case sanity_check(Owner,Config0) of
@@ -110,6 +104,21 @@ set_config(Owner,Config0) ->
Error
end.
+update_config(Owner, Config) ->
+ case sanity_check(Owner,Config) of
+ ok ->
+ call({update_config,Owner,Config});
+ Error ->
+ Error
+ end.
+
+update_formatter_config(HandlerId, FormatterConfig)
+ when is_map(FormatterConfig) ->
+ call({update_formatter_config,HandlerId,FormatterConfig});
+update_formatter_config(_HandlerId, FormatterConfig) ->
+ {error,{invalid_formatter_config,FormatterConfig}}.
+
+
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
@@ -118,104 +127,127 @@ init([]) ->
process_flag(trap_exit, true),
Tid = logger_config:new(?LOGGER_TABLE),
LoggerConfig = maps:merge(default_config(logger),
- #{handlers=>[logger_simple]}),
+ #{handlers=>[simple]}),
logger_config:create(Tid,logger,LoggerConfig),
- SimpleConfig0 = maps:merge(default_config(logger_simple),
+ SimpleConfig0 = maps:merge(default_config(simple),
#{filter_default=>stop,
- filters=>?DEFAULT_HANDLER_FILTERS,
- logger_simple=>#{buffer=>true}}),
+ filters=>?DEFAULT_HANDLER_FILTERS}),
%% If this fails, then the node should crash
- {ok,SimpleConfig} =
- logger_simple:adding_handler(logger_simple,SimpleConfig0),
- logger_config:create(Tid,logger_simple,logger_simple,SimpleConfig),
- {ok, #state{tid=Tid}}.
+ {ok,SimpleConfig} = logger_simple_h:adding_handler(SimpleConfig0),
+ logger_config:create(Tid,simple,logger_simple_h,SimpleConfig),
+ {ok, #state{tid=Tid, async_req_queue = queue:new()}}.
-handle_call({add_handler,Id,Module,HConfig}, _From, #state{tid=Tid}=State) ->
- Reply =
- case logger_config:exist(Tid,Id) of
- true ->
- {error,{already_exist,Id}};
- false ->
- %% inform the handler
- case call_h(Module,adding_handler,[Id,HConfig],{ok,HConfig}) of
- {ok,HConfig1} ->
- logger_config:create(Tid,Id,Module,HConfig1),
- {ok,Config} = do_get_config(Tid,logger),
- Handlers = maps:get(handlers,Config,[]),
- do_set_config(Tid,logger,
- Config#{handlers=>[Id|Handlers]}),
- ok;
- {error,HReason} ->
- {error,{handler_not_added,HReason}}
- end
- end,
- {reply,Reply,State};
-handle_call({remove_handler,HandlerId}, _From, #state{tid=Tid}=State) ->
- Reply =
- case logger_config:get(Tid,HandlerId) of
- {ok,{Module,_}} ->
- {ok,Config} = do_get_config(Tid,logger),
- Handlers0 = maps:get(handlers,Config,[]),
- Handlers = lists:delete(HandlerId,Handlers0),
- %% inform the handler
- _ = call_h(Module,removing_handler,[HandlerId],ok),
- do_set_config(Tid,logger,Config#{handlers=>Handlers}),
- logger_config:delete(Tid,HandlerId),
- ok;
- _ ->
- {error,{not_found,HandlerId}}
- end,
- {reply,Reply,State};
+handle_call({add_handler,Id,Module,HConfig}, From, #state{tid=Tid}=State) ->
+ case logger_config:exist(Tid,Id) of
+ true ->
+ {reply,{error,{already_exist,Id}},State};
+ false ->
+ call_h_async(
+ fun() ->
+ %% inform the handler
+ call_h(Module,adding_handler,[HConfig],{ok,HConfig})
+ end,
+ fun({ok,HConfig1}) ->
+ %% We know that the call_h would have loaded the module
+ %% if it existed, so it is safe here to call function_exported
+ %% to find out if this is a valid handler
+ case erlang:function_exported(Module, log, 2) of
+ true ->
+ logger_config:create(Tid,Id,Module,HConfig1),
+ {ok,Config} = do_get_config(Tid,logger),
+ Handlers = maps:get(handlers,Config,[]),
+ do_set_config(Tid,logger,
+ Config#{handlers=>[Id|Handlers]});
+ false ->
+ {error,{invalid_handler,
+ {function_not_exported,
+ {Module,log,2}}}}
+ end;
+ ({error,HReason}) ->
+ {error,{handler_not_added,HReason}}
+ end,From,State)
+ end;
+handle_call({remove_handler,HandlerId}, From, #state{tid=Tid}=State) ->
+ case logger_config:get(Tid,HandlerId) of
+ {ok,{Module,HConfig}} ->
+ {ok,Config} = do_get_config(Tid,logger),
+ Handlers0 = maps:get(handlers,Config,[]),
+ Handlers = lists:delete(HandlerId,Handlers0),
+ call_h_async(
+ fun() ->
+ %% inform the handler
+ call_h(Module,removing_handler,[HConfig],ok)
+ end,
+ fun(_Res) ->
+ do_set_config(Tid,logger,Config#{handlers=>Handlers}),
+ logger_config:delete(Tid,HandlerId),
+ ok
+ end,From,State);
+ _ ->
+ {reply,{error,{not_found,HandlerId}},State}
+ end;
handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) ->
Reply = do_add_filter(Tid,Id,Filter),
{reply,Reply,State};
handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) ->
Reply = do_remove_filter(Tid,Id,FilterId),
{reply,Reply,State};
-handle_call({update_config,Id,NewConfig}, _From, #state{tid=Tid}=State) ->
- Reply =
- case logger_config:get(Tid,Id) of
- {ok,{Module,OldConfig}} ->
- Config = maps:merge(OldConfig,NewConfig),
- case call_h(Module,changing_config,[Id,OldConfig,Config],
- {ok,Config}) of
- {ok,Config1} ->
- do_set_config(Tid,Id,Config1);
- Error ->
- Error
- end;
- {ok,OldConfig} ->
- Config = maps:merge(OldConfig,NewConfig),
- do_set_config(Tid,Id,Config);
- Error ->
- Error
- end,
- {reply,Reply,State};
+handle_call({update_config,Id,NewConfig}, From, #state{tid=Tid}=State) ->
+ case logger_config:get(Tid,Id) of
+ {ok,{_Module,OldConfig}} ->
+ Config = maps:merge(OldConfig,NewConfig),
+ handle_call({set_config,Id,Config}, From, State);
+ {ok,OldConfig} ->
+ Config = maps:merge(OldConfig,NewConfig),
+ {reply,do_set_config(Tid,Id,Config),State};
+ Error ->
+ {reply,Error,State}
+ end;
handle_call({set_config,logger,Config}, _From, #state{tid=Tid}=State) ->
- Reply = do_set_config(Tid,logger,Config),
+ {ok,#{handlers:=Handlers}} = logger_config:get(Tid,logger),
+ Reply = do_set_config(Tid,logger,Config#{handlers=>Handlers}),
{reply,Reply,State};
-handle_call({set_config,HandlerId,Config}, _From, #state{tid=Tid}=State) ->
+handle_call({set_config,HandlerId,Config}, From, #state{tid=Tid}=State) ->
+ case logger_config:get(Tid,HandlerId) of
+ {ok,{Module,OldConfig}} ->
+ call_h_async(
+ fun() ->
+ call_h(Module,changing_config,[OldConfig,Config],
+ {ok,Config})
+ end,
+ fun({ok,Config1}) ->
+ do_set_config(Tid,HandlerId,Config1);
+ (Error) ->
+ Error
+ end,From,State);
+ _ ->
+ {reply,{error,{not_found,HandlerId}},State}
+ end;
+handle_call({update_formatter_config,HandlerId,NewFConfig},_From,
+ #state{tid=Tid}=State) ->
Reply =
case logger_config:get(Tid,HandlerId) of
- {ok,{Module,OldConfig}} ->
- case call_h(Module,changing_config,[HandlerId,OldConfig,Config],
- {ok,Config}) of
- {ok,Config1} ->
- do_set_config(Tid,HandlerId,Config1);
- Error ->
- Error
+ {ok,{_Mod,#{formatter:={FMod,OldFConfig}}=Config}} ->
+ try
+ FConfig = maps:merge(OldFConfig,NewFConfig),
+ check_formatter({FMod,FConfig}),
+ do_set_config(Tid,HandlerId,
+ Config#{formatter=>{FMod,FConfig}})
+ catch throw:Reason -> {error,Reason}
end;
_ ->
- {error,{not_found,HandlerId}}
+ {error,{not_found,HandlerId}}
end,
{reply,Reply,State};
handle_call({set_module_level,Module,Level}, _From, #state{tid=Tid}=State) ->
Reply = logger_config:set_module_level(Tid,Module,Level),
{reply,Reply,State};
-handle_call({reset_module_level,Module}, _From, #state{tid=Tid}=State) ->
- Reply = logger_config:reset_module_level(Tid,Module),
+handle_call({unset_module_level,Module}, _From, #state{tid=Tid}=State) ->
+ Reply = logger_config:unset_module_level(Tid,Module),
{reply,Reply,State}.
+handle_cast({async_req_reply,_Ref,_Reply} = Reply,State) ->
+ call_h_reply(Reply,State);
handle_cast({cache_module_level,Module}, #state{tid=Tid}=State) ->
logger_config:cache_module_level(Tid,Module),
{noreply, State}.
@@ -234,7 +266,23 @@ handle_info({log,Level,Report,Meta}, State) ->
{noreply, State};
handle_info({Ref,_Reply},State) when is_reference(Ref) ->
%% Assuming this is a timed-out gen_server reply - ignoring
- {noreply, State}.
+ {noreply, State};
+handle_info({'DOWN',_Ref,_Proc,_Pid,_Reason} = Down,State) ->
+ call_h_reply(Down,State);
+handle_info(Unexpected,State) when element(1,Unexpected) == 'EXIT' ->
+ %% The simple logger will send an 'EXIT' message when it is replaced
+ %% We may as well ignore all 'EXIT' messages that we get
+ ?LOG_INTERNAL(debug,
+ [{logger,got_unexpected_message},
+ {process,?SERVER},
+ {message,Unexpected}]),
+ {noreply,State};
+handle_info(Unexpected,State) ->
+ ?LOG_INTERNAL(info,
+ [{logger,got_unexpected_message},
+ {process,?SERVER},
+ {message,Unexpected}]),
+ {noreply,State}.
terminate(_Reason, _State) ->
ok.
@@ -243,8 +291,11 @@ terminate(_Reason, _State) ->
%%% Internal functions
%%%===================================================================
call(Request) ->
- case whereis(?SERVER) of
- Pid when Pid==self() ->
+ Action = element(1,Request),
+ case get(?LOGGER_SERVER_TAG) of
+ true when
+ Action == add_handler; Action == remove_handler;
+ Action == update_config; Action == set_config ->
{error,{attempting_syncronous_call_to_self,Request}};
_ ->
gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT)
@@ -295,13 +346,13 @@ do_set_config(Tid,Id,Config) ->
default_config(logger) ->
#{level=>info,
filters=>[],
- filter_default=>log,
- handlers=>[]};
-default_config(_) ->
- #{level=>info,
+ filter_default=>log};
+default_config(Id) ->
+ #{id=>Id,
+ level=>info,
filters=>[],
filter_default=>log,
- formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}.
+ formatter=>{?DEFAULT_FORMATTER,#{}}}.
sanity_check(Owner,Key,Value) ->
sanity_check_1(Owner,[{Key,Value}]).
@@ -327,9 +378,6 @@ get_type(Id) ->
check_config(Owner,[{level,Level}|Config]) ->
check_level(Level),
check_config(Owner,Config);
-check_config(logger,[{handlers,Handlers}|Config]) ->
- check_handlers(Handlers),
- check_config(logger,Config);
check_config(Owner,[{filters,Filters}|Config]) ->
check_filters(Filters),
check_config(Owner,Config);
@@ -367,14 +415,6 @@ check_level(Level) ->
throw({invalid_level,Level})
end.
-check_handlers([Id|Handlers]) ->
- check_id(Id),
- check_handlers(Handlers);
-check_handlers([]) ->
- ok;
-check_handlers(Handlers) ->
- throw({invalid_handlers,Handlers}).
-
check_filters([{Id,{Fun,_Args}}|Filters]) when is_atom(Id), is_function(Fun,2) ->
check_filters(Filters);
check_filters([Filter|_]) ->
@@ -389,40 +429,24 @@ check_filter_default(FD) when FD==stop; FD==log ->
check_filter_default(FD) ->
throw({invalid_filter_default,FD}).
-check_formatter({logger_formatter,Config}) when is_map(Config) ->
- check_logger_formatter_config(maps:to_list(Config));
-check_formatter({logger_formatter,Config}) ->
- throw({invalid_formatter_config,Config});
-check_formatter({Mod,_}) ->
- %% no knowledge of other formatters
- check_mod(Mod);
+check_formatter({Mod,Config}) ->
+ check_mod(Mod),
+ try Mod:check_config(Config) of
+ ok -> ok;
+ {error,Error} -> throw(Error)
+ catch
+ C:R:S ->
+ case {C,R,S} of
+ {error,undef,[{Mod,check_config,[Config],_}|_]} ->
+ ok;
+ _ ->
+ throw({callback_crashed,
+ {C,R,logger:filter_stacktrace(?MODULE,S)}})
+ end
+ end;
check_formatter(Formatter) ->
throw({invalid_formatter,Formatter}).
-
-check_logger_formatter_config([{template,T}|Config]) when is_list(T) ->
- case lists:all(fun(X) when is_atom(X) -> true;
- (X) when is_tuple(X), is_atom(element(1,X)) -> true;
- (X) when is_list(X) -> io_lib:printable_unicode_list(X);
- (_) -> false
- end,
- T) of
- true ->
- check_logger_formatter_config(Config);
- false ->
- throw({invalid_formatter_template,T})
- end;
-check_logger_formatter_config([{legacy_header,LH}|Config]) when is_boolean(LH) ->
- check_logger_formatter_config(Config);
-check_logger_formatter_config([{single_line,SL}|Config]) when is_boolean(SL) ->
- check_logger_formatter_config(Config);
-check_logger_formatter_config([{utc,Utc}|Config]) when is_boolean(Utc) ->
- check_logger_formatter_config(Config);
-check_logger_formatter_config([C|_]) ->
- throw({invalid_formatter_config,C});
-check_logger_formatter_config([]) ->
- ok.
-
call_h(Module, Function, Args, DefRet) ->
%% Not calling code:ensure_loaded + erlang:function_exported here,
%% since in some rare terminal cases, the code_server might not
@@ -434,7 +458,59 @@ call_h(Module, Function, Args, DefRet) ->
{error,undef,[{Module,Function,Args,_}|_]} ->
DefRet;
_ ->
- {error,{callback_crashed,
- {C,R,logger:filter_stacktrace(?MODULE,S)}}}
+ ST = logger:filter_stacktrace(?MODULE,S),
+ ?LOG_INTERNAL(error,
+ [{logger,callback_crashed},
+ {process,?SERVER},
+ {reason,{C,R,ST}}]),
+ {error,{callback_crashed,{C,R,ST}}}
end
end.
+
+%% There are all sorts of API functions that can cause deadlocks if called
+%% from the handler callbacks. So we spawn a process that does the request
+%% on behalf of the logger_server. There are still APIs that will cause
+%% problems, namely logger:add_handler.
+call_h_async(AsyncFun,PostFun,From,#state{ async_req = undefined } = State) ->
+ Parent = self(),
+ {Pid, Ref} = spawn_monitor(
+ fun() ->
+ put(?LOGGER_SERVER_TAG,true),
+ receive Ref -> Ref end,
+ gen_server:cast(Parent, {async_req_reply, Ref, AsyncFun()})
+ end),
+ Pid ! Ref,
+ {noreply,State#state{ async_req = {Ref,PostFun,From} }};
+call_h_async(AsyncFun,PostFun,From,#state{ async_req_queue = Q } = State) ->
+ {noreply,State#state{ async_req_queue = queue:in({AsyncFun,PostFun,From},Q) }}.
+
+call_h_reply({async_req_reply,Ref,Reply},
+ #state{ async_req = {Ref,PostFun,From}, async_req_queue = Q} = State) ->
+ erlang:demonitor(Ref,[flush]),
+ _ = gen_server:reply(From, PostFun(Reply)),
+ {Value,NewQ} = queue:out(Q),
+ NewState = State#state{ async_req = undefined,
+ async_req_queue = NewQ },
+ case Value of
+ {value,{AsyncFun,NPostFun,NFrom}} ->
+ call_h_async(AsyncFun,NPostFun,NFrom,NewState);
+ empty ->
+ {noreply,NewState}
+ end;
+call_h_reply({'DOWN',Ref,_Proc,Pid,Reason}, #state{ async_req = {Ref,_PostFun,_From}} = State) ->
+ %% This clause should only be triggered if someone explicitly sends an exit signal
+ %% to the spawned process. It is only here to make sure that the logger_server does
+ %% not deadlock if that happens.
+ ?LOG_INTERNAL(error,
+ [{logger,process_exited},
+ {process,Pid},
+ {reason,Reason}]),
+ call_h_reply(
+ {async_req_reply,Ref,{error,{logger_process_exited,Pid,Reason}}},
+ State);
+call_h_reply(Unexpected,State) ->
+ ?LOG_INTERNAL(info,
+ [{logger,got_unexpected_message},
+ {process,?SERVER},
+ {message,Unexpected}]),
+ {noreply,State}.
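
Since check_formatter/1 above now delegates validation to the formatter module itself, any formatter can export check_config/1 and have logger_server call it when handler configuration is set or updated. A hedged sketch of such a module; the module name and the formatting rule are assumptions, not part of OTP:

    -module(my_formatter).                       %% hypothetical module
    -export([format/2, check_config/1]).

    %% Turn a log event into chardata; deliberately simplistic.
    format(#{level := Level, msg := Msg}, _Config) ->
        io_lib:format("~p: ~tp~n", [Level, Msg]).

    %% Called via logger_server:check_formatter/1; return ok or {error, Reason}.
    check_config(Config) when is_map(Config) ->
        ok;
    check_config(Config) ->
        {error, {invalid_formatter_config, ?MODULE, Config}}.
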
diff --git a/lib/kernel/src/logger_simple.erl b/lib/kernel/src/logger_simple_h.erl
index 23ff6ccd2e..19fb3b54ba 100644
--- a/lib/kernel/src/logger_simple.erl
+++ b/lib/kernel/src/logger_simple_h.erl
@@ -17,40 +17,21 @@
%%
%% %CopyrightEnd%
%%
--module(logger_simple).
+-module(logger_simple_h).
--export([adding_handler/2, removing_handler/1, log/2]).
--export([get_buffer/0]).
+-export([adding_handler/1, removing_handler/1, log/2]).
%% This module implements a simple handler for logger. It is the
%% default used during system start.
%%%-----------------------------------------------------------------
-%%% API
-get_buffer() ->
- case whereis(?MODULE) of
- undefined ->
- {error,noproc};
- Pid ->
- Ref = erlang:monitor(process,Pid),
- Pid ! {get_buffer,self()},
- receive
- {buffer,Buffer} ->
- erlang:demonitor(Ref,[flush]),
- {ok,Buffer};
- {'DOWN',Ref,process,Pid,Reason} ->
- {error,Reason}
- end
- end.
-
-%%%-----------------------------------------------------------------
%%% Logger callback
-adding_handler(?MODULE,Config) ->
+adding_handler(#{id:=simple}=Config) ->
Me = self(),
case whereis(?MODULE) of
undefined ->
- {Pid,Ref} = spawn_opt(fun() -> init(Me,Config) end,
+ {Pid,Ref} = spawn_opt(fun() -> init(Me) end,
[link,monitor,{message_queue_data,off_heap}]),
receive
{'DOWN',Ref,process,Pid,Reason} ->
@@ -63,7 +44,7 @@ adding_handler(?MODULE,Config) ->
{error,{handler_process_name_already_exists,?MODULE}}
end.
-removing_handler(?MODULE) ->
+removing_handler(#{id:=simple}) ->
case whereis(?MODULE) of
undefined ->
ok;
@@ -89,7 +70,7 @@ log(#{msg:=_,meta:=#{time:=_}}=Log,_Config) ->
do_log(
#{level=>error,
msg=>{report,{error,simple_handler_process_dead}},
- meta=>#{time=>erlang:monotonic_time(microsecond)}}),
+ meta=>#{time=>erlang:system_time(microsecond)}}),
do_log(Log);
_ ->
?MODULE ! {log,Log}
@@ -102,54 +83,50 @@ log(_,_) ->
%%%-----------------------------------------------------------------
%%% Process
-init(Starter,Config) ->
+init(Starter) ->
register(?MODULE,self()),
Starter ! {self(),started},
- BufferSize =
- case Config of
- #{?MODULE:=#{buffer:=true}} ->
- 10;
- _ ->
- infinity
- end,
- loop(#{buffer_size=>BufferSize,dropped=>0,buffer=>[]},infinity).
+ loop(#{buffer_size=>10,dropped=>0,buffer=>[]}).
-loop(Buffer,Timeout) ->
+loop(Buffer) ->
receive
stop ->
- ok;
- {get_buffer,From} ->
- loop(Buffer#{send_to=>From},0);
+            %% We replay the buffered logger messages if there is
+            %% a default handler when the simple handler
+            %% is removed.
+ case logger:get_handler_config(default) of
+ {ok, _} ->
+ replay_buffer(Buffer);
+ _ ->
+ ok
+ end;
{log,#{msg:=_,meta:=#{time:=_}}=Log} ->
do_log(Log),
- loop(update_buffer(Buffer,Log),Timeout);
+ loop(update_buffer(Buffer,Log));
_ ->
%% Unexpected message - flush it!
- loop(Buffer,Timeout)
- after Timeout ->
- #{dropped:=D,buffer:=B,send_to:=Pid} = Buffer,
- LogList = lists:reverse(B) ++ drop_msg(D),
- Pid ! {buffer,LogList},
- loop(Buffer#{buffer_size=>infinity,
- dropped=>0,
- buffer=>[],
- send_to=>false},
- infinity)
+ loop(Buffer)
end.
-update_buffer(#{buffer_size:=infinity}=Buffer,_Log) ->
- Buffer;
update_buffer(#{buffer_size:=0,dropped:=D}=Buffer,_Log) ->
Buffer#{dropped=>D+1};
update_buffer(#{buffer_size:=S,buffer:=B}=Buffer,Log) ->
Buffer#{buffer_size=>S-1,buffer=>[Log|B]}.
+replay_buffer(#{ dropped := D, buffer := Buffer }) ->
+ lists:foreach(
+ fun F(#{msg := {Tag, Msg}} = L) when Tag =:= string; Tag =:= report ->
+ F(L#{ msg := Msg });
+ F(#{ level := Level, msg := Msg, meta := MD}) ->
+ logger:log(Level, Msg, MD)
+ end, lists:reverse(Buffer, drop_msg(D))).
+
drop_msg(0) ->
[];
drop_msg(N) ->
[#{level=>info,
msg=>{"Simple handler buffer full, dropped ~w messages",[N]},
- meta=>#{time=>erlang:monotonic_time(microsecond)}}].
+ meta=>#{time=>erlang:system_time(microsecond)}}].
%%%-----------------------------------------------------------------
%%% Internal
@@ -164,8 +141,7 @@ do_log(#{msg:=Msg,meta:=#{time:=T}}) ->
display_date(T),
display(Msg).
-display_date(Timestamp0) when is_integer(Timestamp0) ->
- Timestamp = Timestamp0 + erlang:time_offset(microsecond),
+display_date(Timestamp) when is_integer(Timestamp) ->
Micro = Timestamp rem 1000000,
Sec = Timestamp div 1000000,
{{Y,Mo,D},{H,Mi,S}} = erlang:universaltime_to_localtime(
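
Several call sites above switch the time metadata from erlang:monotonic_time/1 to erlang:system_time/1, because the formatter now expects wall-clock system time in microseconds. A quick sketch, not part of the patch, of why the two differ:

    %% system_time is wall-clock based and can be passed directly to
    %% calendar:system_time_to_rfc3339/2; monotonic_time needs the
    %% current time offset added first to mean the same thing.
    Sys  = erlang:system_time(microsecond),
    Mono = erlang:monotonic_time(microsecond),
    Sys2 = Mono + erlang:time_offset(microsecond),
    true = abs(Sys - Sys2) < 1000000.   %% equal up to the time between calls
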
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
index cbc9db372c..63c3ab2dac 100644
--- a/lib/kernel/src/logger_std_h.erl
+++ b/lib/kernel/src/logger_std_h.erl
@@ -35,8 +35,8 @@
terminate/2, code_change/3]).
%% logger callbacks
--export([log/2, adding_handler/2, removing_handler/1,
- changing_config/3, swap_buffer/2]).
+-export([log/2, adding_handler/1, removing_handler/1,
+ changing_config/2, swap_buffer/2]).
%%%===================================================================
%%% API
@@ -109,8 +109,8 @@ reset(Name) ->
%%%-----------------------------------------------------------------
%%% Handler being added
-adding_handler(Name, Config) ->
- case check_config(adding, Name, Config) of
+adding_handler(#{id:=Name}=Config) ->
+ case check_config(adding, Config) of
{ok, Config1} ->
%% create initial handler state by merging defaults with config
HConfig = maps:get(?MODULE, Config1, #{}),
@@ -137,9 +137,8 @@ adding_handler(Name, Config) ->
%%%-----------------------------------------------------------------
%%% Updating handler config
-changing_config(Name,
- OldConfig=#{id:=Id, ?MODULE:=#{type:=Type}},
- NewConfig=#{id:=Id}) ->
+changing_config(OldConfig=#{id:=Name, ?MODULE:=#{type:=Type}},
+ NewConfig=#{id:=Name}) ->
MyConfig = maps:get(?MODULE, NewConfig, #{}),
case maps:get(type, MyConfig, Type) of
Type ->
@@ -149,11 +148,11 @@ changing_config(Name,
_ ->
{error,{illegal_config_change,OldConfig,NewConfig}}
end;
-changing_config(_Name, OldConfig, NewConfig) ->
+changing_config(OldConfig, NewConfig) ->
{error,{illegal_config_change,OldConfig,NewConfig}}.
changing_config1(Name, OldConfig, NewConfig) ->
- case check_config(changing, Name, NewConfig) of
+ case check_config(changing, NewConfig) of
Result = {ok,NewConfig1} ->
try gen_server:call(Name, {change_config,OldConfig,NewConfig1},
?DEFAULT_CALL_TIMEOUT) of
@@ -166,9 +165,7 @@ changing_config1(Name, OldConfig, NewConfig) ->
Error
end.
-check_config(adding, Name, Config0) ->
- %% Merge in defaults on top level
- Config = maps:merge(#{id => Name}, Config0),
+check_config(adding, Config) ->
%% Merge in defaults on handler level
MyConfig0 = maps:get(?MODULE, Config, #{}),
MyConfig = maps:merge(#{type => standard_io},
@@ -179,7 +176,7 @@ check_config(adding, Name, Config0) ->
Error ->
Error
end;
-check_config(changing, _Name, Config) ->
+check_config(changing, Config) ->
MyConfig = maps:get(?MODULE, Config, #{}),
case check_my_config(maps:to_list(MyConfig)) of
ok -> {ok,Config};
@@ -207,7 +204,7 @@ check_my_config([]) ->
%%%-----------------------------------------------------------------
%%% Handler being removed
-removing_handler(Name) ->
+removing_handler(#{id:=Name}) ->
stop(Name).
%%%-----------------------------------------------------------------
@@ -222,15 +219,15 @@ swap_buffer(Name,Buffer) ->
%%%-----------------------------------------------------------------
%%% Log a string or report
--spec log(Log, Config) -> ok | dropped when
- Log :: logger:log(),
+-spec log(LogEvent, Config) -> ok | dropped when
+ LogEvent :: logger:log_event(),
Config :: logger:config().
-log(Log,Config=#{id:=Name}) ->
+log(LogEvent,Config=#{id:=Name}) ->
%% if the handler has crashed, we must drop this request
%% and hope the handler restarts so we can try again
true = is_pid(whereis(Name)),
- Bin = logger_h_common:log_to_binary(Log,Config),
+ Bin = logger_h_common:log_to_binary(LogEvent,Config),
logger_h_common:call_cast_or_drop(Name, Bin).
%%%===================================================================
@@ -257,10 +254,11 @@ init([Name, Config,
file_ctrl_sync => FileCtrlSyncInt,
last_qlen => 0,
last_log_ts => T0,
+ last_op => sync,
burst_win_ts => T0,
burst_msg_count => 0}),
proc_lib:init_ack({ok,self()}),
- gen_server:cast(self(), {repeated_filesync,T0}),
+ gen_server:cast(self(), repeated_filesync),
enter_loop(Config, State1);
Error ->
logger_h_common:error_notify({init_handler,Name,Error}),
@@ -310,12 +308,11 @@ handle_call(filesync, _From, State = #{type := Type,
if is_atom(Type) ->
{reply, ok, State};
true ->
- {reply, file_ctrl_filesync_sync(FileCtrlPid), State}
+ {reply, file_ctrl_filesync_sync(FileCtrlPid), State#{last_op=>sync}}
end;
handle_call({change_config,_OldConfig,NewConfig}, _From,
- State = #{filesync_repeat_interval := FSyncInt0,
- last_log_ts := LastLogTS}) ->
+ State = #{filesync_repeat_interval := FSyncInt0}) ->
HConfig = maps:get(?MODULE, NewConfig, #{}),
State1 = maps:merge(State, HConfig),
case logger_h_common:overload_levels_ok(State1) of
@@ -334,8 +331,7 @@ handle_call({change_config,_OldConfig,NewConfig}, _From,
_ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
State,
undefined)),
- gen_server:cast(self(), {repeated_filesync,
- LastLogTS})
+ gen_server:cast(self(), repeated_filesync)
end,
{reply, ok, State1};
false ->
@@ -365,24 +361,24 @@ handle_cast({log, Bin}, State) ->
%% clause gets called repeatedly by the handler. In order to
%% guarantee that a filesync *always* happens after the last log
%% request, the repeat operation must be active!
-handle_cast({repeated_filesync,LastLogTS0},
+handle_cast(repeated_filesync,
State = #{type := Type,
file_ctrl_pid := FileCtrlPid,
filesync_repeat_interval := FSyncInt,
- last_log_ts := LastLogTS1}) ->
+ last_op := LastOp}) ->
State1 =
if not is_atom(Type), is_integer(FSyncInt) ->
%% only do filesync if something has been
%% written since last time we checked
- if LastLogTS1 == LastLogTS0 ->
+ if LastOp == sync ->
ok;
true ->
file_ctrl_filesync_async(FileCtrlPid)
end,
{ok,TRef} =
timer:apply_after(FSyncInt, gen_server,cast,
- [self(),{repeated_filesync,LastLogTS1}]),
- State#{rep_sync_tref => TRef};
+ [self(),repeated_filesync]),
+ State#{rep_sync_tref => TRef, last_op => sync};
true ->
State
end,
@@ -600,6 +596,7 @@ write(Name, Mode, T1, Bin, _CallOrCast,
State1#{mode => Mode1,
last_qlen := LastQLen1,
last_log_ts => T1,
+ last_op => write,
burst_win_ts => BurstWinT,
burst_msg_count => BurstMsgCount1,
file_ctrl_sync =>
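
The repeated_filesync change above replaces timestamp comparison with a last_op flag: a sync is only issued if something was written since the previous one. A hedged sketch of that decision; maybe_repeated_filesync/1 is a name invented for the sketch, while last_op, file_ctrl_pid and file_ctrl_filesync_async/1 are the handler's own fields and helper:

    %% Skip the sync when nothing was written since the last one; after a
    %% sync the flag is reset, so an idle handler stays quiet.
    maybe_repeated_filesync(#{last_op := sync} = State) ->
        State;
    maybe_repeated_filesync(#{last_op := write,
                              file_ctrl_pid := FileCtrlPid} = State) ->
        file_ctrl_filesync_async(FileCtrlPid),
        State#{last_op => sync}.
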
diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile
index 8599a3d814..2ad1e3107c 100644
--- a/lib/kernel/test/Makefile
+++ b/lib/kernel/test/Makefile
@@ -77,8 +77,9 @@ MODULES= \
logger_filters_SUITE \
logger_formatter_SUITE \
logger_legacy_SUITE \
- logger_simple_SUITE \
+ logger_simple_h_SUITE \
logger_std_h_SUITE \
+ logger_test_lib \
os_SUITE \
pg2_SUITE \
seq_trace_SUITE \
diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl
index c00fb44c46..988f26280f 100644
--- a/lib/kernel/test/application_SUITE.erl
+++ b/lib/kernel/test/application_SUITE.erl
@@ -1603,8 +1603,7 @@ get_key(Conf) when is_list(Conf) ->
{ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} =
rpc:call(Cp1, application, get_key, [appinc, start_phases]),
{ok, Env} = rpc:call(Cp1, application, get_key, [appinc ,env]),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, []} = rpc:call(Cp1, application, get_key, [appinc, modules]),
{ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} =
rpc:call(Cp1, application, get_key, [appinc, mod]),
@@ -1625,8 +1624,7 @@ get_key(Conf) when is_list(Conf) ->
{mod, {application_starter, [ch_sup, {appinc, 41, 43}] }},
{start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} =
rpc:call(Cp1, application, get_all_key, [appinc]),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, "Test of new app file, including appnew"} =
gen_server:call({global, {ch,41}}, {get_pid_key, description}),
@@ -1643,8 +1641,7 @@ get_key(Conf) when is_list(Conf) ->
{ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} =
gen_server:call({global, {ch,41}}, {get_pid_key, start_phases}),
{ok, Env} = gen_server:call({global, {ch,41}}, {get_pid_key, env}),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, []} =
gen_server:call({global, {ch,41}}, {get_pid_key, modules}),
{ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} =
@@ -1671,8 +1668,7 @@ get_key(Conf) when is_list(Conf) ->
{mod, {application_starter, [ch_sup, {appinc, 41, 43}] }},
{start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} =
gen_server:call({global, {ch,41}}, get_pid_all_key),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
stop_node_nice(Cp1),
ok.
diff --git a/lib/kernel/test/erl_distribution_SUITE.erl b/lib/kernel/test/erl_distribution_SUITE.erl
index 0470f09f29..9c6712ad74 100644
--- a/lib/kernel/test/erl_distribution_SUITE.erl
+++ b/lib/kernel/test/erl_distribution_SUITE.erl
@@ -244,7 +244,7 @@ illegal(Name) ->
test_node(Name) ->
test_node(Name, false).
test_node(Name, Illigal) ->
- ProgName = atom_to_list(lib:progname()),
+ ProgName = ct:get_progname(),
Command = ProgName ++ " -noinput " ++ long_or_short() ++ Name ++
" -eval \"net_adm:ping('" ++ atom_to_list(node()) ++ "')\"" ++
case Illigal of
diff --git a/lib/kernel/test/error_logger_warn_SUITE.erl b/lib/kernel/test/error_logger_warn_SUITE.erl
index a8087e11f9..ef55a2d339 100644
--- a/lib/kernel/test/error_logger_warn_SUITE.erl
+++ b/lib/kernel/test/error_logger_warn_SUITE.erl
@@ -480,9 +480,12 @@ rb_utc() ->
UtcLog=case application:get_env(sasl,utc_log) of
{ok,true} ->
true;
- _AllOthers ->
+ {ok,false} ->
application:set_env(sasl,utc_log,true),
- false
+ false;
+ undefined ->
+ application:set_env(sasl,utc_log,true),
+ undefined
end,
application:start(sasl),
rb:start([{report_dir, rd()}]),
@@ -494,7 +497,12 @@ rb_utc() ->
Sum=one_rb_findstr([],"UTC"),
rb:stop(),
application:stop(sasl),
- application:set_env(sasl,utc_log,UtcLog),
+ case UtcLog of
+ undefined ->
+ application:unset_env(sasl,utc_log);
+ _ ->
+ application:set_env(sasl,utc_log,UtcLog)
+ end,
stop_node(Node),
ok.
diff --git a/lib/kernel/test/heart_SUITE.erl b/lib/kernel/test/heart_SUITE.erl
index 45032faf6d..e95635b800 100644
--- a/lib/kernel/test/heart_SUITE.erl
+++ b/lib/kernel/test/heart_SUITE.erl
@@ -168,7 +168,7 @@ reboot(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
rpc:call(Node, init, reboot, []),
receive
@@ -203,7 +203,7 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) ->
[{"ERL_CRASH_DUMP_SECONDS", "0"}]),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
Mod = exhaust_atoms,
@@ -254,7 +254,7 @@ node_start_soon_after_crash_test(Config) when is_list(Config) ->
[{"ERL_CRASH_DUMP_SECONDS", "10"}]),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
Mod = exhaust_atoms,
@@ -309,7 +309,7 @@ set_cmd(Config) when is_list(Config) ->
clear_cmd(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
rpc:call(Node, init, reboot, []),
receive
@@ -346,9 +346,16 @@ clear_cmd(Config) when is_list(Config) ->
get_cmd(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
- Cmd = "test",
- ok = rpc:call(Node, heart, set_cmd, [Cmd]),
- {ok, Cmd} = rpc:call(Node, heart, get_cmd, []),
+
+ ShortCmd = "test",
+ ok = rpc:call(Node, heart, set_cmd, [ShortCmd]),
+ {ok, ShortCmd} = rpc:call(Node, heart, get_cmd, []),
+
+ %% This would hang prior to OTP-15024 being fixed.
+ LongCmd = [$a || _ <- lists:seq(1, 160)],
+ ok = rpc:call(Node, heart, set_cmd, [LongCmd]),
+ {ok, LongCmd} = rpc:call(Node, heart, get_cmd, []),
+
stop_node(Node),
ok.
diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl
index 9a4578917d..a21020ff97 100644
--- a/lib/kernel/test/kernel_config_SUITE.erl
+++ b/lib/kernel/test/kernel_config_SUITE.erl
@@ -76,7 +76,7 @@ sync(Conf) when is_list(Conf) ->
%% Reset wall_clock
{T1,_} = erlang:statistics(wall_clock),
io:format("~p~n", [{t1, T1}]),
- Command = lists:concat([lib:progname(),
+ Command = lists:append([ct:get_progname(),
" -detached -sname cp1 ",
"-config ", Config,
" -env ERL_CRASH_DUMP erl_crash_dump.cp1"]),
diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover
index b30bcfe920..960bc0abff 100644
--- a/lib/kernel/test/logger.cover
+++ b/lib/kernel/test/logger.cover
@@ -8,7 +8,7 @@
logger_filters,
logger_formatter,
logger_server,
- logger_simple,
+ logger_simple_h,
logger_std_h,
logger_sup]}.
diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec
index cd76a754a4..1ab90b3e93 100644
--- a/lib/kernel/test/logger.spec
+++ b/lib/kernel/test/logger.spec
@@ -7,5 +7,5 @@
logger_filters_SUITE,
logger_formatter_SUITE,
logger_legacy_SUITE,
- logger_simple_SUITE,
+ logger_simple_h_SUITE,
logger_std_h_SUITE]}.
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
index 0edce3e34c..f7ec59a7b7 100644
--- a/lib/kernel/test/logger_SUITE.erl
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -40,18 +40,18 @@ suite() ->
[{timetrap,{seconds,30}}].
init_per_suite(Config) ->
- case logger:get_handler_config(logger_std_h) of
+ case logger:get_handler_config(?STANDARD_HANDLER) of
{ok,StdH} ->
- ok = logger:remove_handler(logger_std_h),
- [{logger_std_h,StdH}|Config];
+ ok = logger:remove_handler(?STANDARD_HANDLER),
+ [{default_handler,StdH}|Config];
_ ->
Config
end.
end_per_suite(Config) ->
- case ?config(logger_std_h,Config) of
+ case ?config(default_handler,Config) of
{HMod,HConfig} ->
- ok = logger:add_handler(logger_std_h,HMod,HConfig);
+ ok = logger:add_handler(?STANDARD_HANDLER,HMod,HConfig);
_ ->
ok
end.
@@ -105,12 +105,12 @@ start_stop(_Config) ->
add_remove_handler(_Config) ->
register(callback_receiver,self()),
- {ok,#{handlers:=Hs0}} = logger:get_logger_config(),
+ #{handlers:=Hs0} = logger:i(),
{error,{not_found,h1}} = logger:get_handler_config(h1),
ok = logger:add_handler(h1,?MODULE,#{}),
[add] = test_server:messages_get(),
- {ok,#{handlers:=Hs}} = logger:get_logger_config(),
- [h1|Hs0] = Hs,
+ #{handlers:=Hs} = logger:i(),
+ {value,_,Hs0} = lists:keytake(h1,1,Hs),
{ok,{?MODULE,#{level:=info,filters:=[],filter_default:=log}}} = % defaults
logger:get_handler_config(h1),
ok = logger:set_handler_config(h1,filter_default,stop),
@@ -124,7 +124,7 @@ add_remove_handler(_Config) ->
ok = check_logged(info,"hello",[],?MY_LOC(1)),
ok = logger:remove_handler(h1),
[remove] = test_server:messages_get(),
- {ok,#{handlers:=Hs0}} = logger:get_logger_config(),
+ #{handlers:=Hs0} = logger:i(),
{error,{not_found,h1}} = logger:get_handler_config(h1),
{error,{not_found,h1}} = logger:remove_handler(h1),
logger:info("hello",[]),
@@ -218,33 +218,52 @@ change_config(_Config) ->
{ok,{?MODULE,#{level:=info,filter_default:=stop}=C2}} =
logger:get_handler_config(h1),
false = maps:is_key(custom,C2),
- {error,fail} = logger:set_handler_config(h1,#{fail=>true}),
+ {error,fail} = logger:set_handler_config(h1,#{conf_call=>fun() -> {error,fail} end}),
{error,{attempting_syncronous_call_to_self,_}} =
logger:set_handler_config(
- h1,#{call=>fun() -> logger:set_module_level(?MODULE,debug) end}),
+ h1,#{conf_call=>fun() -> logger:set_handler_config(?MODULE,#{}) end}),
+ ok =
+ logger:set_handler_config(
+ h1,#{conf_call=>fun() -> logger:set_module_level(?MODULE,debug) end}),
{ok,{?MODULE,C2}} = logger:get_handler_config(h1),
- %% Change one key only
- {error,fail} = logger:set_handler_config(h1,fail,true),
+ %% Change handler config: Single key
+ {error,fail} = logger:set_handler_config(h1,conf_call,fun() -> {error,fail} end),
ok = logger:set_handler_config(h1,custom,custom),
[changing_config] = test_server:messages_get(),
{ok,{?MODULE,#{custom:=custom}=C3}} = logger:get_handler_config(h1),
C2 = maps:remove(custom,C3),
+ %% Change handler config: Map
+ ok = logger:update_handler_config(h1,#{custom=>new_custom}),
+ [changing_config] = test_server:messages_get(),
+ {ok,{_,C4}} = logger:get_handler_config(h1),
+ C4 = C3#{custom:=new_custom},
+
+ %% Change logger config: Single key
+ {ok,LConfig0} = logger:get_logger_config(),
+ ok = logger:set_logger_config(level,warning),
+ {ok,LConfig1} = logger:get_logger_config(),
+ LConfig1 = LConfig0#{level:=warning},
+
+ %% Change logger config: Map
+ ok = logger:update_logger_config(#{level=>error}),
+ {ok,LConfig2} = logger:get_logger_config(),
+ LConfig2 = LConfig1#{level:=error},
+
%% Overwrite logger config - check that defaults are added
- {ok,LConfig} = logger:get_logger_config(),
ok = logger:set_logger_config(#{filter_default=>stop}),
- {ok,#{level:=info,filters:=[],handlers:=[],filter_default:=stop}=LC1} =
- logger:get_logger_config(),
- 4 = maps:size(LC1),
-
- %% Change one key only
- ok = logger:set_logger_config(handlers,[h1]),
- {ok,#{level:=info,filters:=[],handlers:=[h1],filter_default:=stop}} =
+ {ok,#{level:=info,filters:=[],filter_default:=stop}=LC1} =
logger:get_logger_config(),
+ 3 = maps:size(LC1),
+ %% Check that internal 'handlers' field has not been changed
+ #{handlers:=HCs} = logger:i(),
+ HIds1 = [Id || {Id,_,_} <- HCs],
+ {ok,#{handlers:=HIds2}} = logger_config:get(?LOGGER_TABLE,logger),
+ HIds1 = lists:sort(HIds2),
%% Cleanup
- ok = logger:set_logger_config(LConfig),
+ ok = logger:set_logger_config(LConfig0),
[] = test_server:messages_get(),
ok.
@@ -299,7 +318,7 @@ macros(_Config) ->
macros(cleanup,_Config) ->
logger:remove_handler(h1),
- logger:reset_module_level(?MODULE),
+ logger:unset_module_level(?MODULE),
ok.
set_level(_Config) ->
@@ -331,29 +350,29 @@ set_level_module(_Config) ->
logger:info(M2=?map_rep,?MY_LOC(0)),
ok = check_logged(info,M2,?MY_LOC(1)),
- {error,{not_a_module,{bad}}} = logger:reset_module_level({bad}),
- ok = logger:reset_module_level(?MODULE),
+ {error,{not_a_module,{bad}}} = logger:unset_module_level({bad}),
+ ok = logger:unset_module_level(?MODULE),
ok.
set_level_module(cleanup,_Config) ->
logger:remove_handler(h1),
- logger:reset_module_level(?MODULE),
+ logger:unset_module_level(?MODULE),
ok.
cache_level_module(_Config) ->
- ok = logger:reset_module_level(?MODULE),
+ ok = logger:unset_module_level(?MODULE),
[] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config?
?LOG_INFO(?map_rep),
%% Caching is done asynchronously, so wait a bit for the update
timer:sleep(100),
[_] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config?
- ok = logger:reset_module_level(?MODULE),
+ ok = logger:unset_module_level(?MODULE),
[] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config?
ok.
cache_level_module(cleanup,_Config) ->
- logger:reset_module_level(?MODULE),
+ logger:unset_module_level(?MODULE),
ok.
format_report(_Config) ->
@@ -425,6 +444,7 @@ filter_failed(cleanup,_Config) ->
ok.
handler_failed(_Config) ->
+ register(callback_receiver,self()),
{error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}),
{error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}),
{error,{invalid_handler_config,bad}} = logger:add_handler(h1,?MODULE,bad),
@@ -434,26 +454,62 @@ handler_failed(_Config) ->
logger:add_handler(h1,?MODULE,#{filter_default=>true}),
{error,{invalid_formatter,[]}} =
logger:add_handler(h1,?MODULE,#{formatter=>[]}),
- ok = logger:add_handler(h1,nomodule,#{filter_default=>log}),
+ {error,{invalid_handler,_}} = logger:add_handler(h1,nomodule,#{filter_default=>log}),
logger:info(?map_rep),
check_no_log(),
- #{logger:=#{handlers:=Ids1},
- handlers:=H1} = logger:i(),
- false = lists:member(h1,Ids1),
+ #{handlers:=H1} = logger:i(),
false = lists:keymember(h1,1,H1),
{error,{not_found,h1}} = logger:remove_handler(h1),
- ok = logger:add_handler(h2,?MODULE,#{filter_default=>log,crash=>true}),
+ ok = logger:add_handler(h2,?MODULE,#{filter_default=>log,log_call=>fun() -> a = b end}),
{error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}),
+ [add] = test_server:messages_get(),
logger:info(?map_rep),
- check_no_log(),
- #{logger:=#{handlers:=Ids2},
- handlers:=H2} = logger:i(),
- false = lists:member(h2,Ids2),
+ [remove] = test_server:messages_get(),
+ #{handlers:=H2} = logger:i(),
false = lists:keymember(h2,1,H2),
{error,{not_found,h2}} = logger:remove_handler(h2),
+ CallAddHandler = fun() -> logger:add_handler(h2,?MODULE,#{}) end,
+ CrashHandler = fun() -> a = b end,
+ KillHandler = fun() -> exit(self(), die) end,
+
+ {error,{handler_not_added,{attempting_syncronous_call_to_self,_}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>CallAddHandler}),
+ {error,{handler_not_added,{callback_crashed,_}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>CrashHandler}),
+ {error,{handler_not_added,{logger_process_exited,_,die}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>KillHandler}),
+
+ check_no_log(),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(h1,#{conf_call=>CallAddHandler}),
+ {error,{callback_crashed,_}} =
+ logger:set_handler_config(h1,#{conf_call=>CrashHandler}),
+ {error,{logger_process_exited,_,die}} =
+ logger:set_handler_config(h1,#{conf_call=>KillHandler}),
+
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(h1,conf_call,CallAddHandler),
+ {error,{callback_crashed,_}} =
+ logger:set_handler_config(h1,conf_call,CrashHandler),
+ {error,{logger_process_exited,_,die}} =
+ logger:set_handler_config(h1,conf_call,KillHandler),
+
+ ok = logger:remove_handler(h1),
+ [add,remove] = test_server:messages_get(),
+
+ check_no_log(),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>CallAddHandler}),
+ ok = logger:remove_handler(h1),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>CrashHandler}),
+ ok = logger:remove_handler(h1),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>KillHandler}),
+ ok = logger:remove_handler(h1),
+ [add,add,add] = test_server:messages_get(),
+
ok.
handler_failed(cleanup,_Config) ->
@@ -466,10 +522,6 @@ config_sanity_check(_Config) ->
{error,{invalid_filter_default,bad}} =
logger:set_logger_config(filter_default,bad),
{error,{invalid_level,bad}} = logger:set_logger_config(level,bad),
- {error,{invalid_handlers,bad}} = logger:set_logger_config(handlers,bad),
- {error,{invalid_id,{bad,bad}}} =
- logger:set_logger_config(handlers,[{bad,bad}]),
- {error,{invalid_id,"bad"}} = logger:set_logger_config(handlers,["bad"]),
{error,{invalid_filters,bad}} = logger:set_logger_config(filters,bad),
{error,{invalid_filter,bad}} = logger:set_logger_config(filters,[bad]),
{error,{invalid_filter,{_,_}}} =
@@ -499,29 +551,96 @@ config_sanity_check(_Config) ->
logger:set_handler_config(h1,formatter,bad),
{error,{invalid_module,{bad}}} =
logger:set_handler_config(h1,formatter,{{bad},cfg}),
- {error,{invalid_formatter_config,bad}} =
+ {error,{invalid_formatter_config,logger_formatter,bad}} =
logger:set_handler_config(h1,formatter,{logger_formatter,bad}),
- {error,{invalid_formatter_config,{bad,bad}}} =
+ {error,{invalid_formatter_config,logger_formatter,{bad,bad}}} =
logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}),
- {error,{invalid_formatter_config,{template,bad}}} =
+ {error,{invalid_formatter_config,logger_formatter,{template,bad}}} =
logger:set_handler_config(h1,formatter,{logger_formatter,
#{template=>bad}}),
- {error,{invalid_formatter_template,[1]}} =
+ {error,{invalid_formatter_template,logger_formatter,[1]}} =
logger:set_handler_config(h1,formatter,{logger_formatter,
#{template=>[1]}}),
ok = logger:set_handler_config(h1,formatter,{logger_formatter,
#{template=>[]}}),
- {error,{invalid_formatter_config,{single_line,bad}}} =
+ {error,{invalid_formatter_config,logger_formatter,{single_line,bad}}} =
logger:set_handler_config(h1,formatter,{logger_formatter,
#{single_line=>bad}}),
ok = logger:set_handler_config(h1,formatter,{logger_formatter,
#{single_line=>true}}),
- {error,{invalid_formatter_config,{legacy_header,bad}}} =
+ {error,{invalid_formatter_config,logger_formatter,{legacy_header,bad}}} =
logger:set_handler_config(h1,formatter,{logger_formatter,
#{legacy_header=>bad}}),
ok = logger:set_handler_config(h1,formatter,{logger_formatter,
#{legacy_header=>true}}),
+ {error,{invalid_formatter_config,logger_formatter,{report_cb,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{report_cb=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{report_cb=>fun(R) ->
+ {"~p",[R]}
+ end}}),
+ {error,{invalid_formatter_config,logger_formatter,{chars_limit,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>4}}),
+ {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>4}}),
+ {error,{invalid_formatter_config,logger_formatter,{max_size,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>4}}),
+ ok = logger:set_handler_config(h1,formatter,{module,config}),
+ {error,{callback_crashed,{error,{badmatch,3},[{?MODULE,check_config,1,_}]}}} =
+ logger:set_handler_config(h1,formatter,{?MODULE,crash}),
ok = logger:set_handler_config(h1,custom,custom),
+
+ %% Old utc parameter is no longer allowed (replaced by time_offset)
+ {error,{invalid_formatter_config,logger_formatter,{utc,true}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{utc=>true}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_offset,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>0}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>""}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"Z"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"z"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"-0:0"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"+10:13"}}),
+
+ {error,{invalid_formatter_config,logger_formatter,{time_offset,"+0"}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"+0"}}),
+
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>bad}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,"s"}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>"s"}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,0}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>0}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>$\s}}),
ok.
config_sanity_check(cleanup,_Config) ->
@@ -650,14 +769,14 @@ process_metadata(_Config) ->
undefined = logger:get_process_metadata(),
{error,badarg} = ?TRY(logger:set_process_metadata(bad)),
ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
- Time = erlang:monotonic_time(microsecond),
+ Time = erlang:system_time(microsecond),
ProcMeta = #{time=>Time,line=>0,custom=>proc},
ok = logger:set_process_metadata(ProcMeta),
S1 = ?str,
?LOG_INFO(S1,#{custom=>macro}),
check_logged(info,S1,#{time=>Time,line=>0,custom=>macro}),
- Time2 = erlang:monotonic_time(microsecond),
+ Time2 = erlang:system_time(microsecond),
S2 = ?str,
?LOG_INFO(S2,#{time=>Time2,line=>1,custom=>macro}),
check_logged(info,S2,#{time=>Time2,line=>1,custom=>macro}),
@@ -666,6 +785,9 @@ process_metadata(_Config) ->
check_logged(info,S3,#{time=>Time,line=>0,custom=>func}),
ProcMeta = logger:get_process_metadata(),
+ ok = logger:update_process_metadata(#{custom=>changed,custom2=>added}),
+ Expected = ProcMeta#{custom:=changed,custom2=>added},
+ Expected = logger:get_process_metadata(),
ok = logger:unset_process_metadata(),
undefined = logger:get_process_metadata(),
@@ -717,17 +839,20 @@ check_maps(Expected,Got,What) ->
end.
%% Handler
-adding_handler(_Id,Config) ->
+adding_handler(#{add_call:=Fun}) ->
+ Fun();
+adding_handler(Config) ->
maybe_send(add),
{ok,Config}.
-removing_handler(_Id) ->
+
+removing_handler(#{rem_call:=Fun}) ->
+ Fun();
+removing_handler(_Config) ->
maybe_send(remove),
ok.
-changing_config(_Id,_Old,#{call:=Fun}) ->
+changing_config(_Old,#{conf_call:=Fun}) ->
Fun();
-changing_config(_Id,_Old,#{fail:=true}) ->
- {error,fail};
-changing_config(_Id,_Old,Config) ->
+changing_config(_Old,Config) ->
maybe_send(changing_config),
{ok,Config}.
@@ -737,8 +862,8 @@ maybe_send(Msg) ->
Pid -> Pid ! Msg
end.
-log(_Log,#{crash:=true}) ->
- a=b;
+log(_Log,#{log_call:=Fun}) ->
+ Fun();
log(Log,Config) ->
TcProc = maps:get(tc_proc,Config,self()),
TcProc ! {Log,Config},
@@ -826,3 +951,8 @@ test_macros(emergency=Level) ->
%%% Called by macro ?TRY(X)
my_try(Fun) ->
try Fun() catch C:R -> {C,R} end.
+
+check_config(crash) ->
+ erlang:error({badmatch,3});
+check_config(_) ->
+ ok.
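%% Not part of the patch above -- a minimal sketch of a logger handler module
%% using the callback shapes this suite now exercises (adding_handler/1,
%% removing_handler/1, changing_config/2 and log/2). The module name and the
%% io:put_chars/1 sink are illustrative assumptions, not OTP code.
-module(sketch_h).
-export([adding_handler/1, removing_handler/1, changing_config/2, log/2]).

%% Accept the handler configuration unchanged when the handler is added.
adding_handler(Config) ->
    {ok,Config}.

%% Nothing to clean up when the handler is removed.
removing_handler(_Config) ->
    ok.

%% Accept the new configuration as-is when it is changed.
changing_config(_OldConfig,NewConfig) ->
    {ok,NewConfig}.

%% Format the event with the configured formatter and print the result.
log(LogEvent,#{formatter:={FMod,FConfig}}) ->
    io:put_chars(FMod:format(LogEvent,FConfig)).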
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
index c7c6137380..7a1736c814 100644
--- a/lib/kernel/test/logger_disk_log_h_SUITE.erl
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -31,7 +31,8 @@
end).
suite() ->
- [{timetrap,{seconds,30}}].
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
init_per_suite(Config) ->
timer:start(), % to avoid progress report
@@ -327,36 +328,38 @@ formatter_fail(Config) ->
logger:add_handler(Name, logger_disk_log_h, HConfig),
Pid = whereis(Name),
true = is_pid(Pid),
- {ok,#{handlers:=H}} = logger:get_logger_config(),
+ #{handlers:=HC1} = logger:i(),
+ H = [Id || {Id,_,_} <- HC1],
true = lists:member(Name,H),
%% Formatter is added automatically
{ok,{_,#{formatter:={logger_formatter,_}}}} =
logger:get_handler_config(Name),
logger:info(M1=?msg,?domain),
- Got1 = try_match_file(?log_no(LogFile,1),"=INFO REPORT====.*\n"++M1,5000),
+ Got1 = try_match_file(?log_no(LogFile,1),"[0-9\\+\\-T:\\.]* info: "++M1,5000),
ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}),
logger:info(M2=?msg,?domain),
Got2 = try_match_file(?log_no(LogFile,1),
- Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2,
+ escape(Got1)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M2,
5000),
ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}),
logger:info(M3=?msg,?domain),
Got3 = try_match_file(?log_no(LogFile,1),
- Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3,
+ escape(Got2)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M3,
5000),
ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}),
logger:info(?msg,?domain),
try_match_file(?log_no(LogFile,1),
- Got3++"FORMATTER ERROR: bad_return_value",
+ escape(Got3)++"FORMATTER ERROR: bad_return_value",
5000),
%% Check that handler is still alive and was never dead
Pid = whereis(Name),
- {ok,#{handlers:=H}} = logger:get_logger_config(),
+ #{handlers:=HC2} = logger:i(),
+ H = [Id || {Id,_,_} <- HC2],
ok.
formatter_fail(cleanup,_Config) ->
@@ -369,10 +372,18 @@ config_fail(_Config) ->
#{logger_disk_log_h => #{bad => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{handler_not_added,{invalid_levels,{42,42,_}}}} =
+
+ {error,{handler_not_added,{invalid_levels,{_,1,_}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{logger_disk_log_h => #{drop_new_reqs_qlen=>1}}),
+ {error,{handler_not_added,{invalid_levels,{43,42,_}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
- #{logger_disk_log_h => #{toggle_sync_qlen=>42,
+ #{logger_disk_log_h => #{toggle_sync_qlen=>43,
drop_new_reqs_qlen=>42}}),
+ {error,{handler_not_added,{invalid_levels,{_,43,42}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{logger_disk_log_h => #{drop_new_reqs_qlen=>43,
+ flush_reqs_qlen=>42}}),
ok = logger:add_handler(?MODULE,logger_disk_log_h,
#{filter_default=>log,
@@ -497,24 +508,19 @@ disk_log_sync(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,nl}}),
- start_tracer([{?MODULE,format,2},
- {disk_log,blog,2},
+ start_tracer([{disk_log,blog,2},
{disk_log,sync,1}],
- [{formatter,"first"},
- {disk_log,blog},
+ [{disk_log,blog,<<"first\n">>},
{disk_log,sync}]),
logger:info("first", ?domain),
%% wait for automatic disk_log_sync
check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
- start_tracer([{?MODULE,format,2},
- {disk_log,blog,2},
+ start_tracer([{disk_log,blog,2},
{disk_log,sync,1}],
- [{formatter,"second"},
- {formatter,"third"},
- {disk_log,blog},
- {disk_log,blog},
+ [{disk_log,blog,<<"second\n">>},
+ {disk_log,blog,<<"third\n">>},
{disk_log,sync}]),
%% two log requests in fast succession will make the handler skip
%% an automatic disk log sync
@@ -531,13 +537,10 @@ disk_log_sync(Config) ->
no_repeat = maps:get(filesync_repeat_interval,
logger_disk_log_h:info(?MODULE)),
- start_tracer([{?MODULE,format,2},
- {disk_log,blog,2},
+ start_tracer([{disk_log,blog,2},
{disk_log,sync,1}],
- [{formatter,"fourth"},
- {disk_log,blog},
- {formatter,"fifth"},
- {disk_log,blog},
+ [{disk_log,blog,<<"fourth\n">>},
+ {disk_log,blog,<<"fifth\n">>},
{disk_log,sync}]),
logger:info("fourth", ?domain),
@@ -566,6 +569,7 @@ disk_log_sync(Config) ->
check_tracer(100),
ok.
disk_log_sync(cleanup,_Config) ->
+ dbg:stop_clear(),
logger:remove_handler(?MODULE).
disk_log_wrap(Config) ->
@@ -623,6 +627,7 @@ disk_log_wrap(Config) ->
ok.
disk_log_wrap(cleanup,_Config) ->
+ dbg:stop_clear(),
logger:remove_handler(?MODULE).
disk_log_full(Config) ->
@@ -668,6 +673,7 @@ disk_log_full(Config) ->
{trace,{error_status,{error,{full,_}}}}] = Received,
ok.
disk_log_full(cleanup, _Config) ->
+ dbg:stop_clear(),
logger:remove_handler(?MODULE).
disk_log_events(Config) ->
@@ -713,6 +719,7 @@ disk_log_events(Config) ->
end, Received),
ok.
disk_log_events(cleanup, _Config) ->
+ dbg:stop_clear(),
logger:remove_handler(?MODULE).
write_failure(Config) ->
@@ -721,7 +728,7 @@ write_failure(Config) ->
Log = lists:concat([File,".1"]),
ct:pal("Log = ~p", [Log]),
- Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File),
+ Node = start_h_on_new_node(Config, File),
false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
@@ -763,9 +770,9 @@ sync_failure(Config) ->
Dir = ?config(priv_dir, Config),
FileName = lists:concat([?MODULE,"_",?FUNCTION_NAME]),
File = filename:join(Dir, FileName),
- Log = lists:concat([File,".1"]),
- Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File),
+
+ Node = start_h_on_new_node(Config, File),
false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
@@ -805,21 +812,12 @@ sync_failure(cleanup, _Config) ->
Nodes = nodes(),
[test_server:stop_node(Node) || Node <- Nodes].
-start_h_on_new_node(_Config, Func, File) ->
- Pa = filename:dirname(code:which(?MODULE)),
- Dest =
- case os:type() of
- {win32,_} ->
- lists:concat([" {disk_log,\\\"",File,"\\\"}"]);
- _ ->
- lists:concat([" \'{disk_log,\"",File,"\"}\'"])
- end,
- Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]),
- NodeName = lists:concat([?MODULE,"_",Func]),
- ct:pal("Starting ~s with ~tp", [NodeName,Args]),
- {ok,Node} = test_server:start_node(NodeName, peer, [{args, Args}]),
- Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]),
- true = is_pid(Pid),
+start_h_on_new_node(Config, File) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,[{handler,default,logger_disk_log_h,
+ #{ disk_log_opts => #{ file => File }}}]}]),
ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
{?MODULE,nl}]),
Node.
@@ -832,10 +830,10 @@ log_on_remote_node(Node,Msg) ->
ok.
%% functions for test hook macros to be called by rpc
-set_internal_log(Mod, Func) ->
- ?set_internal_log({Mod,Func}).
-set_result(Op, Result) ->
- ?set_result(Op, Result).
+set_internal_log(_Mod, _Func) ->
+ ?set_internal_log({_Mod,_Func}).
+set_result(_Op, _Result) ->
+ ?set_result(_Op, _Result).
set_defaults() ->
?set_defaults().
@@ -852,62 +850,115 @@ internal_log(Type, Term) ->
op_switch_to_sync(Config) ->
{Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 500,
NewHConfig =
- HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 3,
- drop_new_reqs_qlen => 501,
- flush_reqs_qlen => 2000,
+ HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => NumOfReqs+1,
+ flush_reqs_qlen => 2*NumOfReqs,
enable_burst_limit => false}},
ok = logger:set_handler_config(?MODULE, NewHConfig),
- NumOfReqs = 500,
send_burst({n,NumOfReqs}, seq, {chars,79}, info),
- NumOfReqs = count_lines(Log),
- ok = file:delete(Log).
+ Lines = count_lines(Log),
+ ok = file:delete(Log),
+ NumOfReqs = Lines,
+ ok.
op_switch_to_sync(cleanup, _Config) ->
ok = stop_handler(?MODULE).
+op_switch_to_drop() ->
+ [{timetrap,{seconds,180}}].
op_switch_to_drop(Config) ->
- {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
-
- NewHConfig =
- HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2,
- drop_new_reqs_qlen => 3,
- flush_reqs_qlen => 600,
- enable_burst_limit => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
- NumOfReqs = 500,
- send_burst({n,NumOfReqs}, seq, {chars,79}, info),
- Logged = count_lines(Log),
- ct:pal("Number of messages dropped = ~w (~w)",
- [NumOfReqs-Logged,NumOfReqs]),
- true = (Logged < NumOfReqs),
- ok = file:delete(Log).
+ Test =
+ fun() ->
+ {Log,HConfig,DLHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 300,
+ Procs = 2,
+ Bursts = 10,
+ NewHConfig =
+ HConfig#{logger_disk_log_h =>
+ DLHConfig#{toggle_sync_qlen => 1,
+ drop_new_reqs_qlen => 2,
+ flush_reqs_qlen => Procs*NumOfReqs*Bursts,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ %% It sometimes happens that the handler gets
+ %% the requests at a slow enough pace so that dropping
+ %% never occurs. Therefore, let's generate a number of
+ %% bursts to increase the chance of message buildup.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) ||
+ _ <- lists:seq(1, Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ _ = file:delete(Log),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]),
+ true = (Logged < (Procs*NumOfReqs*Bursts)),
+ true = (Logged > 0),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+ %% test repeatedly, hoping that this will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
op_switch_to_drop(cleanup, _Config) ->
- ok = stop_handler(?MODULE).
+ _ = stop_handler(?MODULE).
op_switch_to_flush() ->
- [{timetrap,{seconds,60}}].
+ [{timetrap,{minutes,3}}].
op_switch_to_flush(Config) ->
- {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
-
- %% it's important that both async and sync requests have been queued
- %% when the flush happens (verify with coverage of flush_log_requests/2)
+ Test =
+ fun() ->
+ {Log,HConfig,DLHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% NOTE: it's important that both async and sync
+ %% requests have been queued when the flush happens
+ %% (verify with coverage of flush_log_requests/2)
- NewHConfig =
- HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2,
- drop_new_reqs_qlen => 99,
- flush_reqs_qlen => 100,
- enable_burst_limit => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
- NumOfReqs = 1000,
- Procs = 500,
- send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
- Logged = count_lines(Log),
- ct:pal("Number of messages flushed/dropped = ~w (~w)",
- [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]),
- true = (Logged < (NumOfReqs*Procs)),
- ok = file:delete(Log).
+ NewHConfig =
+ HConfig#{logger_disk_log_h =>
+ DLHConfig#{toggle_sync_qlen => 2,
+ %% disable drop mode
+ drop_new_reqs_qlen => 300,
+ flush_reqs_qlen => 300,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1500,
+ Procs = 10,
+ Bursts = 10,
+ %% It sometimes happens that the handler either gets
+ %% the requests at a slow enough pace so that flushing
+ %% never occurs, or it gets all messages at once,
+ %% causing all messages to get flushed (no dropping of
+ %% sync messages gets tested). Therefore, let's
+ %% generate a number of bursts to increase the chance
+ %% of message buildup in some random fashion.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) ||
+ _ <- lists:seq(1,Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ _ = file:delete(Log),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]),
+ true = (Logged < (NumOfReqs*Procs*Bursts)),
+ true = (Logged > 0),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+ %% test repeatedly, hoping that this will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
op_switch_to_flush(cleanup, _Config) ->
- ok = stop_handler(?MODULE).
+ _ = stop_handler(?MODULE).
limit_burst_disabled(Config) ->
@@ -988,10 +1039,10 @@ kill_disabled(cleanup, _Config) ->
ok = stop_handler(?MODULE).
qlen_kill_new(Config) ->
- {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
Pid0 = whereis(?MODULE),
{_,Mem0} = process_info(Pid0, memory),
- RestartAfter = 2000,
+ RestartAfter = ?HANDLER_RESTART_AFTER,
NewHConfig =
HConfig#{logger_disk_log_h =>
DLHConfig#{enable_kill_overloaded=>true,
@@ -1012,7 +1063,7 @@ qlen_kill_new(Config) ->
killed ->
ct:pal("Slow shutdown, handler process was killed!", [])
end,
- timer:sleep(RestartAfter + 1000),
+ timer:sleep(RestartAfter + 2000),
true = is_pid(whereis(?MODULE)),
ok
after
@@ -1025,10 +1076,10 @@ qlen_kill_new(cleanup, _Config) ->
ok = stop_handler(?MODULE).
mem_kill_new(Config) ->
- {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
Pid0 = whereis(?MODULE),
{_,Mem0} = process_info(Pid0, memory),
- RestartAfter = 2000,
+ RestartAfter = ?HANDLER_RESTART_AFTER,
NewHConfig =
HConfig#{logger_disk_log_h =>
DLHConfig#{enable_kill_overloaded=>true,
@@ -1049,7 +1100,7 @@ mem_kill_new(Config) ->
killed ->
ct:pal("Slow shutdown, handler process was killed!", [])
end,
- timer:sleep(RestartAfter * 2),
+ timer:sleep(RestartAfter + 2000),
true = is_pid(whereis(?MODULE)),
ok
after
@@ -1082,7 +1133,7 @@ restart_after(Config) ->
end,
{Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
- RestartAfter = 2000,
+ RestartAfter = ?HANDLER_RESTART_AFTER,
NewHConfig2 =
HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>true,
handler_overloaded_qlen=>10,
@@ -1094,7 +1145,7 @@ restart_after(Config) ->
send_burst({n,100}, {spawn,2,0}, {chars,79}, info),
receive
{'DOWN', MRef2, _, _, _Info2} ->
- timer:sleep(RestartAfter + 1000),
+ timer:sleep(RestartAfter + 2000),
Pid1 = whereis(?MODULE),
true = is_pid(Pid1),
false = (Pid1 == Pid0),
@@ -1111,7 +1162,7 @@ restart_after(cleanup, _Config) ->
%% during high load to verify that sync, dropping and flushing is
%% handled correctly.
handler_requests_under_load() ->
- [{timetrap,{seconds,60}}].
+ [{timetrap,{minutes,3}}].
handler_requests_under_load(Config) ->
{Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
NewHConfig =
@@ -1140,7 +1191,7 @@ handler_requests_under_load(Config) ->
NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult),
ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
ok = file:delete(Log).
-handler_requests_under_load(cleanup, Config) ->
+handler_requests_under_load(cleanup, _Config) ->
ok = stop_handler(?MODULE).
send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
@@ -1365,11 +1416,33 @@ count_lines1(File) ->
file:close(Dev),
Lines.
+repeat_until_ok(Fun, N) ->
+ repeat_until_ok(Fun, 0, N, undefined).
+
+repeat_until_ok(_Fun, Stop, Stop, Reason) ->
+ {fails,Reason};
+
+repeat_until_ok(Fun, C, Stop, FirstReason) ->
+ if C > 0 -> timer:sleep(5000);
+ true -> ok
+ end,
+ try Fun() of
+ Result ->
+ {ok,{C,Result}}
+ catch
+ _:Reason:Stack ->
+ ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]),
+ if FirstReason == undefined ->
+ repeat_until_ok(Fun, C+1, Stop, {Reason,Stack});
+ true ->
+ repeat_until_ok(Fun, C+1, Stop, FirstReason)
+ end
+ end.
+
start_tracer(Trace,Expected) ->
Pid = self(),
dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
dbg:p(whereis(?MODULE),[c]),
- dbg:p(Pid,[c]),
tpl(Trace),
ok.
@@ -1387,13 +1460,15 @@ tpl([{M,F,A}|Trace]) ->
tpl([]) ->
ok.
-tracer({trace,_,call,{?MODULE,format,[#{msg:={string,Msg}}|_]}}, {Pid,[{formatter,Msg}|Expected]}) ->
- maybe_tracer_done(Pid,Expected,{formatter,Msg});
-tracer({trace,_,call,{logger_disk_log_h,handle_cast,[{Op,_}|_]}}, {Pid,[{Mod,Func,Op}|Expected]}) ->
+tracer({trace,_,call,{logger_disk_log_h,handle_cast,[Op|_]}},
+ {Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
+tracer({trace,_,call,{Mod=disk_log,Func=blog,[_,Data]}}, {Pid,[{Mod,Func,Data}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func});
tracer({trace,_,call,Call}, {Pid,Expected}) ->
+ ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]),
Pid ! {tracer_got_unexpected,Call,Expected},
{Pid,Expected}.
@@ -1413,5 +1488,13 @@ check_tracer(T) ->
dbg:stop_clear(),
ct:fail({tracer_got_unexpected,Got,Expected})
after T ->
+ dbg:stop_clear(),
ct:fail({timeout,tracer})
end.
+
+escape([$+|Rest]) ->
+ [$\\,$+|escape(Rest)];
+escape([H|T]) ->
+ [H|escape(T)];
+escape([]) ->
+ [].
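%% Not part of the patch above -- a usage sketch for the repeat_until_ok/2
%% helper introduced in this suite: wrap a timing-sensitive check in a fun
%% that crashes on failure and retry it a bounded number of times. The
%% retry_example/1 wrapper and its CheckFun argument are illustrative
%% assumptions; only the call pattern is taken from the tests above.
retry_example(CheckFun) when is_function(CheckFun,0) ->
    case repeat_until_ok(CheckFun, 10) of
        {ok,{Failures,_Result}} ->
            ct:log("Succeeded after ~w failed attempt(s)", [Failures]);
        {fails,Reason} ->
            ct:fail(Reason)
    end.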
diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl
index c2d3364701..601d331fb0 100644
--- a/lib/kernel/test/logger_env_var_SUITE.erl
+++ b/lib/kernel/test/logger_env_var_SUITE.erl
@@ -1,4 +1,4 @@
-%
+%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2018. All Rights Reserved.
@@ -21,431 +21,636 @@
-compile(export_all).
--include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/logger.hrl").
-include_lib("kernel/src/logger_internal.hrl").
--define(all_vars,[{kernel,logger_dest},
- {kernel,logger_level},
- {kernel,logger_log_progress},
- {kernel,logger_sasl_compatible},
- {kernel,error_logger}]).
+-import(logger_test_lib,[setup/2,log/3,sync_and_read/3]).
suite() ->
- [{timetrap,{seconds,30}}].
+ [{timetrap,{seconds,60}},
+ {ct_hooks,[logger_test_lib]}].
init_per_suite(Config) ->
- Env = [{App,Key,application:get_env(App,Key)} || {App,Key} <- ?all_vars],
- Removed = cleanup(),
- [{env,Env},{logger,Removed}|Config].
-
-end_per_suite(Config) ->
- [application:set_env(App,Key,Val) ||
- {App,Key,Val} <- ?config(env,Config),
- Val =/= undefined],
- Hs = ?config(logger,Config),
- [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs],
- ok.
-
-init_per_group(_Group, Config) ->
- Config.
-
-end_per_group(_Group, _Config) ->
- ok.
-
-init_per_testcase(_TestCase, Config) ->
Config.
-end_per_testcase(Case, Config) ->
- try apply(?MODULE,Case,[cleanup,Config])
- catch error:undef -> ok
- end,
- cleanup(),
+end_per_suite(_Config) ->
ok.
groups() ->
- [].
-
-all() ->
+ [{error_logger,[],[error_logger_tty,
+ error_logger_tty_sasl_compatible,
+ error_logger_false,
+ error_logger_false_progress,
+ error_logger_false_sasl_compatible,
+ error_logger_silent,
+ error_logger_silent_sasl_compatible,
+ error_logger_file]},
+ {logger,[],[logger_file,
+ logger_file_sasl_compatible,
+ logger_file_log_progress,
+ logger_file_no_filter,
+ logger_file_no_filter_level,
+ logger_file_formatter,
+ logger_filters,
+ logger_filters_stop,
+ logger_module_level,
+ logger_disk_log,
+ logger_disk_log_formatter,
+ logger_undefined,
+ logger_many_handlers_default_first,
+ logger_many_handlers_default_last,
+ logger_many_handlers_default_last_broken_filter
+ ]},
+ {bad,[],[bad_error_logger,
+ bad_level,
+ bad_sasl_compatibility,
+ bad_progress]}].
+
+all() ->
[default,
default_sasl_compatible,
- dest_tty,
- dest_tty_sasl_compatible,
- dest_false,
- dest_false_progress,
- dest_false_sasl_compatible,
- dest_silent,
- dest_silent_sasl_compatible,
- dest_file_old,
- dest_file,
- dest_disk_log,
- %% disk_log_vars, % or test this in logger_disk_log_SUITE?
sasl_compatible_false,
sasl_compatible_false_no_progress,
sasl_compatible,
- bad_dest%% ,
- %% bad_level,
- %% bad_sasl_compatibility,
- %% bad_progress
+ {group,bad},
+ {group,error_logger},
+ {group,logger}
].
default(Config) ->
- {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
- undefined,
- undefined, % dest
- undefined, % level
- undefined, % sasl comp (default=false)
- undefined), % progress (default=false)
- {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
- true = is_pid(whereis(logger_std_h)),
+ {ok,#{handlers:=Hs},_Node} = setup(Config,[]),
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
info = maps:get(level,StdC),
StdFilters = maps:get(filters,StdC),
- {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} =
lists:keyfind(domain,1,StdFilters),
true = lists:keymember(stop_progress,1,StdFilters),
- false = lists:keymember(logger_simple,1,Hs),
- false = lists:keymember(sasl_h,1,Hs),
- false = is_pid(whereis(sasl_h)),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
ok.
default_sasl_compatible(Config) ->
- {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
- undefined,
- undefined, % dest
- undefined, % level
- true, % sasl comp (default=false)
- undefined), % progress (default=false)
- {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
- true = is_pid(whereis(logger_std_h)),
+ {ok,#{handlers:=Hs},_Node} = setup(Config,
+ [{logger_sasl_compatible,true}]),
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
info = maps:get(level,StdC),
StdFilters = maps:get(filters,StdC),
- {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} =
+ {domain,{_,{log,super,[beam,erlang,otp]}}} =
lists:keyfind(domain,1,StdFilters),
false = lists:keymember(stop_progress,1,StdFilters),
- false = lists:keymember(logger_simple,1,Hs),
- true = lists:keymember(sasl_h,1,Hs),
- true = is_pid(whereis(sasl_h)),
+ false = lists:keymember(simple,1,Hs),
+ true = lists:keymember(sasl,1,Hs),
ok.
-dest_tty(Config) ->
- {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- tty, % dest
- undefined, % level
- undefined, % sasl comp (default=false)
- undefined), % progress (default=false)
- {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
- true = is_pid(whereis(logger_std_h)),
+error_logger_tty(Config) ->
+ {ok,#{handlers:=Hs},_Node} = setup(Config,[{error_logger,tty}]),
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
info = maps:get(level,StdC),
StdFilters = maps:get(filters,StdC),
- {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} =
lists:keyfind(domain,1,StdFilters),
true = lists:keymember(stop_progress,1,StdFilters),
- false = lists:keymember(logger_simple,1,Hs),
- false = lists:keymember(sasl_h,1,Hs),
- false = is_pid(whereis(sasl_h)),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
ok.
-dest_tty_sasl_compatible(Config) ->
- {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- tty, % dest
- undefined, % level
- true, % sasl comp (default=false)
- undefined), % progress (default=false)
- {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
- true = is_pid(whereis(logger_std_h)),
+error_logger_tty_sasl_compatible(Config) ->
+ {ok,#{handlers:=Hs},_Node} = setup(Config,
+ [{error_logger,tty},
+ {logger_sasl_compatible,true}]),
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
info = maps:get(level,StdC),
StdFilters = maps:get(filters,StdC),
- {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} =
+ {domain,{_,{log,super,[beam,erlang,otp]}}} =
lists:keyfind(domain,1,StdFilters),
false = lists:keymember(stop_progress,1,StdFilters),
- false = lists:keymember(logger_simple,1,Hs),
- true = lists:keymember(sasl_h,1,Hs),
- true = is_pid(whereis(sasl_h)),
+ false = lists:keymember(simple,1,Hs),
+ true = lists:keymember(sasl,1,Hs),
ok.
-dest_false(Config) ->
- {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- false, % dest
- notice, % level
- undefined, % sasl comp (default=false)
- undefined), % progress (default=false)
- false = lists:keymember(logger_std_h,1,Hs),
- {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs),
- notice = maps:get(level,SimpleC),
+error_logger_false(Config) ->
+ {ok,#{handlers:=Hs,logger:=L},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice}]),
+ false = lists:keymember(?STANDARD_HANDLER,1,Hs),
+ {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs),
+ info = maps:get(level,SimpleC),
+ notice = maps:get(level,L),
SimpleFilters = maps:get(filters,SimpleC),
- {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} =
lists:keyfind(domain,1,SimpleFilters),
true = lists:keymember(stop_progress,1,SimpleFilters),
- false = lists:keymember(sasl_h,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
ok.
-dest_false_progress(Config) ->
- {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- false, % dest
- notice, % level
- undefined, % sasl comp (default=false)
- true), % progress (default=false)
- false = lists:keymember(logger_std_h,1,Hs),
- {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs),
- notice = maps:get(level,SimpleC),
+error_logger_false_progress(Config) ->
+ {ok,#{handlers:=Hs,logger:=L},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice},
+ {logger_progress_reports,log}]),
+ false = lists:keymember(?STANDARD_HANDLER,1,Hs),
+ {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs),
+ info = maps:get(level,SimpleC),
+ notice = maps:get(level,L),
SimpleFilters = maps:get(filters,SimpleC),
- {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} =
lists:keyfind(domain,1,SimpleFilters),
false = lists:keymember(stop_progress,1,SimpleFilters),
- false = lists:keymember(sasl_h,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
ok.
-dest_false_sasl_compatible(Config) ->
- {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- false, % dest
- notice, % level
- true, % sasl comp (default=false)
- undefined), % progress (default=false)
- false = lists:keymember(logger_std_h,1,Hs),
- {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs),
- notice = maps:get(level,SimpleC),
+error_logger_false_sasl_compatible(Config) ->
+ {ok,#{handlers:=Hs,logger:=L},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice},
+ {logger_sasl_compatible,true}]),
+ false = lists:keymember(?STANDARD_HANDLER,1,Hs),
+ {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs),
+ info = maps:get(level,SimpleC),
+ notice = maps:get(level,L),
SimpleFilters = maps:get(filters,SimpleC),
- {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} =
+ {domain,{_,{log,super,[beam,erlang,otp]}}} =
lists:keyfind(domain,1,SimpleFilters),
false = lists:keymember(stop_progress,1,SimpleFilters),
- true = lists:keymember(sasl_h,1,Hs),
- true = is_pid(whereis(sasl_h)),
+ true = lists:keymember(sasl,1,Hs),
ok.
-dest_silent(Config) ->
- {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- silent, % dest
- undefined, % level
- undefined, % sasl comp (default=false)
- undefined), % progress (default=false)
- false = lists:keymember(logger_std_h,1,Hs),
- false = lists:keymember(logger_simple,1,Hs),
- false = lists:keymember(sasl_h,1,Hs),
+error_logger_silent(Config) ->
+ {ok,#{handlers:=Hs},_Node} = setup(Config,
+ [{error_logger,silent}]),
+ false = lists:keymember(?STANDARD_HANDLER,1,Hs),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
ok.
-dest_silent_sasl_compatible(Config) ->
- {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- silent, % dest
- undefined, % level
- true, % sasl comp (default=false)
- undefined), % progress (default=false)
- false = lists:keymember(logger_std_h,1,Hs),
- false = lists:keymember(logger_simple,1,Hs),
- true = lists:keymember(sasl_h,1,Hs),
- true = is_pid(whereis(sasl_h)),
+error_logger_silent_sasl_compatible(Config) ->
+ {ok,#{handlers:=Hs},_Node} = setup(Config,
+ [{error_logger,silent},
+ {logger_sasl_compatible,true}]),
+ false = lists:keymember(?STANDARD_HANDLER,1,Hs),
+ false = lists:keymember(simple,1,Hs),
+ true = lists:keymember(sasl,1,Hs),
ok.
-dest_file_old(Config) ->
- {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
- error_logger,
- file, % dest
- undefined, % level
- undefined, % sasl comp (default=false)
- undefined), % progress (default=false)
- check_log(Log,
- file, % dest
- 0), % progress in std logger
+error_logger_file(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_Hs,Node} = setup(Config,
+ [{error_logger,{file,Log}}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
ok.
-
-
-dest_file(Config) ->
- {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- file, % dest
- undefined, % level
- undefined, % sasl comp (default=false)
- undefined), % progress (default=false)
- check_log(Log,
- file, % dest
- 0), % progress in std logger
+
+
+logger_file(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{logger_std_h=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ true = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
+
ok.
-
-
-dest_disk_log(Config) ->
- {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- disk_log, % dest
- undefined, % level
- undefined, % sasl comp (default=false)
- undefined), % progress (default=false)
- check_log(Log,
- disk_log, % dest
- 0), % progress in std logger
+
+logger_file_sasl_compatible(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger_sasl_compatible,true},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{logger_std_h=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[beam,erlang,otp]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ false = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(simple,1,Hs),
+ true = lists:keymember(sasl,1,Hs),
+
+ ok.
+
+logger_file_log_progress(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger_progress_reports,log},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{logger_std_h=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 6),% progress in std logger
+
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ false = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
+
+ ok.
+
+logger_file_no_filter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filter_default=>log,filters=>[],
+ logger_std_h=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 6),% progress in std logger
+
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
+ info = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
+
+ ok.
+
+logger_file_no_filter_level(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],level=>error,
+ logger_std_h=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0,% progress in std logger
+ error),% level
+
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
+ error = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
+
+ ok.
+
+logger_file_formatter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],
+ formatter=>{logger_formatter,#{}},
+ logger_std_h=>#{type=>{file,Log}}}}]}]),
+ check_single_log(Node,Log,
+ file,% dest
+ 6),% progress in std logger
+
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
+ info = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
+
+ ok.
+
+logger_filters(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,logger:=Logger},Node}
+ = setup(Config,
+ [{logger_progress_reports,log},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{logger_std_h=>#{type=>{file,Log}}}},
+ {filters,log,[{stop_progress,{fun logger_filters:progress/2,stop}}]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ false = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
+ LoggerFilters = maps:get(filters,Logger),
+ true = lists:keymember(stop_progress,1,LoggerFilters),
+
+ ok.
+
+logger_filters_stop(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,logger:=Logger},Node}
+ = setup(Config,
+ [{logger_progress_reports,log},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],
+ logger_std_h=>#{type=>{file,Log}}}},
+ {filters,stop,[{log_error,{fun logger_filters:level/2,{log,gt,info}}}]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0,% progress in std logger
+ notice),% level
+
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
+ info = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
+ LoggerFilters = maps:get(filters,Logger),
+ true = lists:keymember(log_error,1,LoggerFilters),
+
+ ok.
+
+logger_module_level(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,module_levels:=ModuleLevels},Node}
+ = setup(Config,
+ [{logger_progress_reports,log},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{logger_std_h=>#{type=>{file,Log}}}},
+ {module_level,error,[supervisor]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 3),% progress in std logger
+
+ {?STANDARD_HANDLER,logger_std_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ false = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
+ [{supervisor,error}] = ModuleLevels,
+ ok.
+
+logger_disk_log(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_disk_log_h,
+ #{disk_log_opts=>#{file=>Log}}}]}]),
+ check_default_log(Node,Log,
+ disk_log,% dest
+ 0),% progress in std logger
+
+ {?STANDARD_HANDLER,logger_disk_log_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ true = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
+
+ ok.
+
+logger_disk_log_formatter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_disk_log_h,
+ #{filters=>[],
+ formatter=>{logger_formatter,#{}},
+ disk_log_opts=>#{file=>Log}}}]}]),
+ check_single_log(Node,Log,
+ disk_log,% dest
+ 6),% progress in std logger
+
+ {?STANDARD_HANDLER,logger_disk_log_h,StdC} = lists:keyfind(?STANDARD_HANDLER,1,Hs),
+ info = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = lists:keymember(simple,1,Hs),
+ false = lists:keymember(sasl,1,Hs),
+
+ ok.
+
+logger_undefined(Config) ->
+ {ok,#{handlers:=Hs,logger:=L},_Node} =
+ setup(Config,[{logger,[{handler,?STANDARD_HANDLER,undefined}]}]),
+ false = lists:keymember(?STANDARD_HANDLER,1,Hs),
+ {simple,logger_simple_h,SimpleC} = lists:keyfind(simple,1,Hs),
+ info = maps:get(level,SimpleC),
+ info = maps:get(level,L),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,SimpleFilters),
+ true = lists:keymember(stop_progress,1,SimpleFilters),
+ false = lists:keymember(sasl,1,Hs),
+ ok.
+
+
+%% Test that we can add multiple handlers with the default first
+logger_many_handlers_default_first(Config) ->
+ LogErr = file(Config,logger_many_handlers_default_first_error),
+ LogInfo = file(Config,logger_many_handlers_default_first_info),
+
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ logger_std_h=>#{type=>{file,LogErr}}}
+ },
+ {handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ logger_std_h=>#{type=>{file,LogInfo}}}
+ }
+ ]}], LogErr, LogInfo, 6).
+
+%% Test that we can add multiple handlers with the default last
+logger_many_handlers_default_last(Config) ->
+ LogErr = file(Config,logger_many_handlers_default_last_error),
+ LogInfo = file(Config,logger_many_handlers_default_last_info),
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ logger_std_h=>#{type=>{file,LogInfo}}}
+ },
+ {handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ logger_std_h=>#{type=>{file,LogErr}}}
+ }
+ ]}], LogErr, LogInfo, 7).
+
+%% Check that we can handle an added handler having a broken filter.
+%% This used to cause a deadlock.
+logger_many_handlers_default_last_broken_filter(Config) ->
+ LogErr = file(Config,logger_many_handlers_default_first_broken_filter_error),
+ LogInfo = file(Config,logger_many_handlers_default_first_broken_filter_info),
+
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{broken,{fun logger_filters:level/2,broken_state}},
+ {level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ logger_std_h=>#{type=>{file,LogInfo}}}
+ },
+ {handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ logger_std_h=>#{type=>{file,LogErr}}}
+ }
+ ]}], LogErr, LogInfo, 7).
+
+logger_many_handlers(Config, Env, LogErr, LogInfo, NumProgress) ->
+ {ok,#{handlers:=Hs},Node} = setup(Config,Env),
+ check_single_log(Node,LogErr,
+ file,% dest
+ 0,% progress in std logger
+ error), % level
+ ok = rpc:call(Node,logger_std_h,filesync,[info]),
+ {ok, Bin} = file:read_file(LogInfo),
+ ct:log("Log content:~n~s",[Bin]),
+ match(Bin,<<"info:">>,NumProgress+1,info,info),
+ match(Bin,<<"alert:">>,0,alert,info),
+
ok.
-
sasl_compatible_false(Config) ->
- {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- file, % dest
- undefined, % level
- false, % sasl comp
- true), % progress
- check_log(Log,
- file, % dest
- 4), % progress in std logger
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_Hs,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {logger_sasl_compatible,false},
+ {logger_progress_reports,log}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 6),% progress in std logger
ok.
sasl_compatible_false_no_progress(Config) ->
- {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- file, % dest
- undefined, % level
- false, % sasl comp
- false), % progress
- check_log(Log,
- file, % dest
- 0), % progress in std logger
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_Hs,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {logger_sasl_compatible,false},
+ {logger_progress_reports,stop}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
ok.
sasl_compatible(Config) ->
- {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
- logger_dest,
- file, % dest
- undefined, % level
- true, % sasl comp
- undefined), % progress
- check_log(Log,
- file, % dest
- 0), % progress in std logger
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_Hs,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {sasl_compatible,true}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
ok.
-bad_dest(Config) ->
- {error,{bad_config,{kernel,{logger_dest,baddest}}}} =
- setup(Config,?FUNCTION_NAME,
- logger_dest,
- baddest,
- undefined,
- undefined,
- undefined).
+bad_error_logger(Config) ->
+ error = setup(Config,[{error_logger,baddest}]).
bad_level(Config) ->
- error =
- setup(Config,?FUNCTION_NAME,
- logger_dest,
- tty,
- badlevel,
- undefined,
- undefined).
+ error = setup(Config,[{logger_level,badlevel}]).
bad_sasl_compatibility(Config) ->
- error =
- setup(Config,?FUNCTION_NAME,
- logger_dest,
- tty,
- info,
- badcomp,
- undefined).
+ error = setup(Config,[{logger_sasl_compatible,badcomp}]).
bad_progress(Config) ->
- error =
- setup(Config,?FUNCTION_NAME,
- logger_dest,
- tty,
- info,
- undefined,
- badprogress).
+ error = setup(Config,[{logger_progress_reports,badprogress}]).
%%%-----------------------------------------------------------------
%%% Internal
-setup(Config,Func,DestVar,Dest,Level,SaslComp,Progress) ->
- ok = logger:add_handler(logger_simple,logger_simple,
- #{filter_default=>log,
- logger_simple=>#{buffer=>true}}),
- Dir = ?config(priv_dir,Config),
- File = lists:concat([?MODULE,"_",Func,".log"]),
- Log = filename:join(Dir,File),
- case Dest of
- undefined ->
- ok;
- F when F==file; F==disk_log ->
- application:set_env(kernel,DestVar,{Dest,Log});
- _ ->
- application:set_env(kernel,DestVar,Dest)
- end,
- case Level of
- undefined ->
- ok;
- _ ->
- application:set_env(kernel,logger_level,Level)
- end,
- case SaslComp of
- undefined ->
- ok;
- _ ->
- application:set_env(kernel,logger_sasl_compatible,SaslComp)
- end,
- case Progress of
- undefined ->
- ok;
- _ ->
- application:set_env(kernel,logger_log_progress,Progress)
- end,
- case logger:setup_standard_handler() of
- ok ->
- application:start(sasl),
- StdH = case Dest of
- NoH when NoH==false; NoH==silent -> false;
- _ -> true
- end,
- StdH = is_pid(whereis(?STANDARD_HANDLER)),
- SaslH = if SaslComp -> true;
- true -> false
- end,
- SaslH = is_pid(whereis(sasl_h)),
- {ok,{Log,maps:get(handlers,logger:i())}};
- Error ->
- Error
- end.
+file(Config,Func) ->
+ filename:join(proplists:get_value(priv_dir,Config),
+ lists:concat([Func,".log"])).
+
+check_default_log(Node,Log,Dest,NumProgress) ->
+ check_default_log(Node,Log,Dest,NumProgress,info).
+check_default_log(Node,Log,Dest,NumProgress,Level) ->
+
+ {ok,Bin1,Bin2} = check_log(Node,Log,Dest),
+
+ match(Bin1,<<"PROGRESS REPORT">>,NumProgress,info,Level),
+ match(Bin1,<<"ALERT REPORT">>,1,alert,Level),
+ match(Bin1,<<"INFO REPORT">>,0,info,Level),
+ match(Bin1,<<"DEBUG REPORT">>,0,debug,Level),
-check_log(Log,Dest,NumProgress) ->
- ok = logger:alert("dummy1"),
- ok = logger:debug("dummy1"),
+ match(Bin2,<<"INFO REPORT">>,1,info,Level),
+ match(Bin2,<<"DEBUG REPORT">>,0,debug,Level),
+ ok.
+
+check_single_log(Node,Log,Dest,NumProgress) ->
+ check_single_log(Node,Log,Dest,NumProgress,info).
+check_single_log(Node,Log,Dest,NumProgress,Level) ->
+
+ {ok,Bin1,Bin2} = check_log(Node,Log,Dest),
+
+ match(Bin1,<<"info:">>,NumProgress,info,Level),
+ match(Bin1,<<"alert:">>,1,alert,Level),
+ match(Bin1,<<"debug:">>,0,debug,Level),
+
+ match(Bin2,<<"info:">>,NumProgress+1,info,Level),
+ match(Bin2,<<"debug:">>,0,debug,Level),
+
+ ok.
+
+check_log(Node,Log,Dest) ->
+
+ ok = log(Node,alert,["dummy1"]),
+ ok = log(Node,debug,["dummy1"]),
%% Check that there are progress reports (supervisor and
%% application_controller) and an error report (the call above) in
%% the log. There should not be any info reports yet.
- {ok,Bin1} = sync_and_read(Dest,Log),
+ {ok,Bin1} = sync_and_read(Node,Dest,Log),
ct:log("Log content:~n~s",[Bin1]),
- match(Bin1,<<"PROGRESS REPORT">>,NumProgress),
- match(Bin1,<<"ALERT REPORT">>,1),
- match(Bin1,<<"INFO REPORT">>,0),
- match(Bin1,<<"DEBUG REPORT">>,0),
%% Then stop sasl and see that the info report from
%% application_controller is there
- ok = application:stop(sasl),
- {ok,Bin2} = sync_and_read(Dest,Log),
+ ok = rpc:call(Node,application,stop,[sasl]),
+ {ok,Bin2} = sync_and_read(Node,Dest,Log),
ct:log("Log content:~n~s",[Bin2]),
- match(Bin2,<<"INFO REPORT">>,1),
- match(Bin1,<<"DEBUG REPORT">>,0),
- ok.
+ {ok,Bin1,Bin2}.
-match(Bin,Pattern,0) ->
+match(Bin,Pattern,0,_,_) ->
nomatch = re:run(Bin,Pattern,[{capture,none}]);
-match(Bin,Pattern,N) ->
- {match,M} = re:run(Bin,Pattern,[{capture,all},global]),
- N = length(M).
-
-sync_and_read(disk_log,Log) ->
- logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER),
- file:read_file(Log ++ ".1");
-sync_and_read(file,Log) ->
- logger_std_h:filesync(?STANDARD_HANDLER),
- file:read_file(Log).
-
-cleanup() ->
- application:stop(sasl),
- [application:unset_env(App,Key) || {App,Key} <- ?all_vars],
- #{handlers:=Hs0} = logger:i(),
- Hs = lists:keydelete(cth_log_redirect,1,Hs0),
- [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs],
- Hs.
+match(Bin,Pattern,N,LogLevel,ConfLevel) ->
+ case logger:compare_levels(LogLevel,ConfLevel) of
+ lt -> match(Bin,Pattern,0,LogLevel,ConfLevel);
+ _ ->
+ {match,M} = re:run(Bin,Pattern,[{capture,all},global]),
+ N = length(M)
+ end.
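%% Not part of the patch above -- a sys.config sketch assembled from the
%% kernel environment variables this suite now tests (logger, logger_level,
%% logger_sasl_compatible, logger_progress_reports). The log file path is an
%% illustrative assumption; the handler id 'default' and the term shapes are
%% taken from the test setups above.
[{kernel,
  [{logger_level,info},
   {logger_sasl_compatible,false},
   {logger_progress_reports,stop},
   {logger,
    [{handler,default,logger_std_h,
      #{logger_std_h=>#{type=>{file,"log/system.log"}}}},
     {module_level,error,[supervisor]}]}]}].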
diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl
index 21f14bbc02..11cce8fd20 100644
--- a/lib/kernel/test/logger_filters_SUITE.erl
+++ b/lib/kernel/test/logger_filters_SUITE.erl
@@ -75,66 +75,79 @@ all() ->
remote_gl].
domain(_Config) ->
- L1 = logger_filters:domain(L1=?dlog([]),{log,prefix_of,[]}),
- stop = logger_filters:domain(?dlog([]),{stop,prefix_of,[]}),
- L2 = logger_filters:domain(L2=?dlog([]),{log,starts_with,[]}),
- stop = logger_filters:domain(?dlog([]),{stop,starts_with,[]}),
- L3 = logger_filters:domain(L3=?dlog([]),{log,equals,[]}),
- stop = logger_filters:domain(?dlog([]),{stop,equals,[]}),
- ignore = logger_filters:domain(?dlog([]),{log,no_domain,[]}),
- ignore = logger_filters:domain(?dlog([]),{stop,no_domain,[]}),
-
- L4 = logger_filters:domain(L4=?dlog([a]),{log,prefix_of,[a,b]}),
- stop = logger_filters:domain(?dlog([a]),{stop,prefix_of,[a,b]}),
- ignore = logger_filters:domain(?dlog([a]),{log,starts_with,[a,b]}),
- ignore = logger_filters:domain(?dlog([a]),{stop,starts_with,[a,b]}),
- ignore = logger_filters:domain(?dlog([a]),{log,equals,[a,b]}),
- ignore = logger_filters:domain(?dlog([a]),{stop,equals,[a,b]}),
- ignore = logger_filters:domain(?dlog([a]),{log,no_domain,[a,b]}),
- ignore = logger_filters:domain(?dlog([a]),{stop,no_domain,[a,b]}),
-
- ignore = logger_filters:domain(?dlog([a,b]),{log,prefix_of,[a]}),
- ignore = logger_filters:domain(?dlog([a,b]),{stop,prefix_of,[a]}),
- L5 = logger_filters:domain(L5=?dlog([a,b]),{log,starts_with,[a]}),
- stop = logger_filters:domain(?dlog([a,b]),{stop,starts_with,[a]}),
- ignore = logger_filters:domain(?dlog([a,b]),{log,equals,[a]}),
- ignore = logger_filters:domain(?dlog([a,b]),{stop,equals,[a]}),
- ignore = logger_filters:domain(?dlog([a,b]),{log,no_domain,[a]}),
- ignore = logger_filters:domain(?dlog([a,b]),{stop,no_domain,[a]}),
-
- ignore = logger_filters:domain(?ndlog,{log,prefix_of,[a]}),
- ignore = logger_filters:domain(?ndlog,{stop,prefix_of,[a]}),
- ignore = logger_filters:domain(?ndlog,{log,starts_with,[a]}),
- ignore = logger_filters:domain(?ndlog,{stop,starts_with,[a]}),
- ignore = logger_filters:domain(?ndlog,{log,equals,[a]}),
- ignore = logger_filters:domain(?ndlog,{stop,equals,[a]}),
- L6 = logger_filters:domain(L6=?ndlog,{log,no_domain,[a]}),
- stop = logger_filters:domain(?ndlog,{stop,no_domain,[a]}),
-
- L7 = logger_filters:domain(L7=?dlog([a,b,c,d]),{log,prefix_of,[a,b,c,d]}),
- stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,prefix_of,[a,b,c,d]}),
- L8 = logger_filters:domain(L8=?dlog([a,b,c,d]),{log,starts_with,[a,b,c,d]}),
- stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,starts_with,[a,b,c,d]}),
- L9 = logger_filters:domain(L9=?dlog([a,b,c,d]),{log,equals,[a,b,c,d]}),
- stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equals,[a,b,c,d]}),
- ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,no_domain,[a,b,c,d]}),
- ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,no_domain,[a,b,c,d]}),
+ L1 = logger_filters:domain(L1=?dlog([]),{log,super,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,super,[]}),
+ L2 = logger_filters:domain(L2=?dlog([]),{log,sub,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,sub,[]}),
+ L3 = logger_filters:domain(L3=?dlog([]),{log,equal,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{log,not_equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{stop,not_equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{log,undefined,[]}),
+ ignore = logger_filters:domain(?dlog([]),{stop,undefined,[]}),
+
+ L4 = logger_filters:domain(L4=?dlog([a]),{log,super,[a,b]}),
+ stop = logger_filters:domain(?dlog([a]),{stop,super,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,sub,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,sub,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,equal,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,equal,[a,b]}),
+ L5 = logger_filters:domain(L5=?dlog([a]),{log,not_equal,[a,b]}),
+ stop = logger_filters:domain(?dlog([a]),{stop,not_equal,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,undefined,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,undefined,[a,b]}),
+
+ ignore = logger_filters:domain(?dlog([a,b]),{log,super,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,super,[a]}),
+ L6 = logger_filters:domain(L6=?dlog([a,b]),{log,sub,[a]}),
+ stop = logger_filters:domain(?dlog([a,b]),{stop,sub,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,equal,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,equal,[a]}),
+ L7 = logger_filters:domain(L7=?dlog([a,b]),{log,not_equal,[a]}),
+ stop = logger_filters:domain(?dlog([a,b]),{stop,not_equal,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,undefined,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,undefined,[a]}),
+
+ ignore = logger_filters:domain(?ndlog,{log,super,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,super,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,sub,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,sub,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,equal,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,equal,[a]}),
+ L8 = logger_filters:domain(L8=?ndlog,{log,not_equal,[a]}),
+ stop = logger_filters:domain(?ndlog,{stop,not_equal,[a]}),
+ L9 = logger_filters:domain(L9=?ndlog,{log,undefined,[a]}),
+ stop = logger_filters:domain(?ndlog,{stop,undefined,[a]}),
+
+ L10 = logger_filters:domain(L10=?dlog([a,b,c,d]),{log,super,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,super,[a,b,c,d]}),
+ L11 = logger_filters:domain(L11=?dlog([a,b,c,d]),{log,sub,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,sub,[a,b,c,d]}),
+ L12 = logger_filters:domain(L12=?dlog([a,b,c,d]),{log,equal,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,undefined,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,undefined,[a,b,c,d]}),
%% A domain field in meta which is not a list is allowed by the
- %% filter, but it will never match.
- ignore = logger_filters:domain(?dlog(dummy),{log,prefix_of,[a,b,c,d]}),
- ignore = logger_filters:domain(?dlog(dummy),{stop,prefix_of,[a,b,c,d]}),
- ignore = logger_filters:domain(?dlog(dummy),{log,starts_with,[a,b,c,d]}),
- ignore = logger_filters:domain(?dlog(dummy),{stop,starts_with,[a,b,c,d]}),
- ignore = logger_filters:domain(?dlog(dummy),{log,equals,[a,b,c,d]}),
- ignore = logger_filters:domain(?dlog(dummy),{stop,equals,[a,b,c,d]}),
- ignore = logger_filters:domain(?dlog(dummy),{log,no_domain,[a,b,c,d]}),
- ignore = logger_filters:domain(?dlog(dummy),{stop,no_domain,[a,b,c,d]}),
+ %% filter, but since MatchDomain is always a list of atoms, only
+ %% the not_equal comparison can ever match.
+ ignore = logger_filters:domain(?dlog(dummy),{log,super,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,super,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,sub,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,sub,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,equal,[a,b,c,d]}),
+ L13 = logger_filters:domain(L13=?dlog(dummy),{log,not_equal,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog(dummy),{stop,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,undefined,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,undefined,[a,b,c,d]}),
{error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)),
- {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,prefix_of,[]})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,super,[]})),
{error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})),
- {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,prefix_of,bad})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,super,bad})),
ok.
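%% Not part of the patch above -- a sketch of a handler whose filters use the
%% renamed comparison atoms tested here (super, sub, equal, not_equal and
%% undefined, replacing prefix_of, starts_with, equals and no_domain). The
%% handler id, filter names and log file are illustrative assumptions; the
%% filter tuple shape follows the configurations in logger_env_var_SUITE.
add_domain_filtered_handler() ->
    Filters =
        [%% log events whose domain is a prefix of (or equal to) the OTP domain
         {otp_domain,{fun logger_filters:domain/2,
                      {log,super,[beam,erlang,otp,sasl]}}},
         %% but stop everything logged under the sasl sub-domain
         {no_sasl,{fun logger_filters:domain/2,
                   {stop,sub,[beam,erlang,otp,sasl]}}}],
    ok = logger:add_handler(my_handler,logger_std_h,
                            #{filter_default=>log,
                              filters=>Filters,
                              logger_std_h=>#{type=>{file,"log/filtered.log"}}}).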
diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl
index ac1abba629..7a93f2ca79 100644
--- a/lib/kernel/test/logger_formatter_SUITE.erl
+++ b/lib/kernel/test/logger_formatter_SUITE.erl
@@ -68,23 +68,25 @@ all() ->
level_or_msg_in_meta,
faulty_log,
faulty_config,
- faulty_msg].
+ faulty_msg,
+ update_config].
default(_Config) ->
String1 = format(info,{"~p",[term]},#{},#{}),
ct:log(String1),
- [_Date,_Time,"info:\nterm\n"] = string:lexemes(String1," "),
+ [_DateTime,"info:","term\n"] = string:lexemes(String1," "),
Time = timestamp(),
ExpectedTimestamp = default_time_format(Time),
String2 = format(info,{"~p",[term]},#{time=>Time},#{}),
ct:log(String2),
- " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+ " info: term\n" = string:prefix(String2,ExpectedTimestamp),
ok.
legacy_header(_Config) ->
Time = timestamp(),
- String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true}),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true,
+ single_line=>false}),
ct:log(String1),
"=INFO REPORT==== "++Rest = String1,
[Timestamp,"\nterm\n"] = string:lexemes(Rest," ="),
@@ -98,12 +100,14 @@ legacy_header(_Config) ->
true = lists:member(M,["Jan","Feb","Mar","Apr","May","Jun",
"Jul","Aug","Sep","Oct","Nov","Dec"]),
- String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false}),
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false,
+ single_line=>false}),
ct:log(String2),
ExpectedTimestamp = default_time_format(Time),
" info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
- String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad}),
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad,
+ single_line=>false}),
ct:log(String3),
String3 = String2,
@@ -114,7 +118,8 @@ legacy_header(_Config) ->
String4 = String1,
String5 = format(info,{"~p",[term]},#{}, % <--- no time
- #{legacy_header=>true}),
+ #{legacy_header=>true,
+ single_line=>false}),
ct:log(String5),
"=INFO REPORT==== "++_ = String5,
ok.
@@ -264,8 +269,8 @@ format_msg(_Config) ->
String8 = format(info,{string,['not',printable,list]},
#{report_cb=>fun(_)-> {"formatted",[]} end},
#{template=>Template}),
- ct:log(String8),
- "INVALID STRING: ['not',printable,list]" = String8,
+ ct:log("~ts",[String8]), % avoiding ct_log crash
+ "FORMAT ERROR: \"~ts\" - [['not',printable,list]]" = String8,
String9 = format(info,{string,"string"},#{},#{template=>Template}),
ct:log(String9),
@@ -289,38 +294,36 @@ report_cb(_Config) ->
ok.
max_size(_Config) ->
- Template = [msg],
+ Cfg = #{template=>[msg],
+ single_line=>false},
"12345678901234567890" =
- format(info,{"12345678901234567890",[]},#{},#{template=>Template}),
- application:set_env(kernel,logger_max_size,11),
- "12345678901234567890" = % min value is 50, so this is not limited
- format(info,{"12345678901234567890",[]},#{},#{template=>Template}),
- "12345678901234567890123456789012345678901234567..." = % 50
- format(info,
- {"123456789012345678901234567890123456789012345678901234567890",
- []},
- #{},
- #{template=>Template}),
- application:set_env(kernel,logger_max_size,53),
- "12345678901234567890123456789012345678901234567890..." = %53
- format(info,
- {"123456789012345678901234567890123456789012345678901234567890",
- []},
- #{},
- #{template=>Template}),
+ format(info,{"12345678901234567890",[]},#{},Cfg),
+ %% application:set_env(kernel,logger_max_size,11),
+ %% "12345678901234567890" = % min value is 50, so this is not limited
+ %% format(info,{"12345678901234567890",[]},#{},Cfg),
+ %% "12345678901234567890123456789012345678901234567..." = % 50
+ %% format(info,
+ %% {"123456789012345678901234567890123456789012345678901234567890",
+ %% []},
+ %% #{},
+ %% Cfg),
+ %% application:set_env(kernel,logger_max_size,53),
+ %% "12345678901234567890123456789012345678901234567890..." = %53
+ %% format(info,
+ %% {"123456789012345678901234567890123456789012345678901234567890",
+ %% []},
+ %% #{},
+ %% Cfg),
"123456789012..." =
- format(info,{"12345678901234567890",[]},#{},#{template=>Template,
- max_size=>15}),
+ format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>15}),
"12345678901234567890" =
- format(info,{"12345678901234567890",[]},#{},#{template=>Template,
- max_size=>unlimited}),
+ format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>unlimited}),
%% Check that one newline at the end of the line is kept (if it exists)
"12345678901...\n" =
- format(info,{"12345678901234567890\n",[]},#{},#{template=>Template,
- max_size=>15}),
+ format(info,{"12345678901234567890\n",[]},#{},Cfg#{max_size=>15}),
"12345678901...\n" =
- format(info,{"12345678901234567890",[]},#{},#{template=>[msg,"\n"],
- max_size=>15}),
+ format(info,{"12345678901234567890",[]},#{},Cfg#{template=>[msg,"\n"],
+ max_size=>15}),
ok.
max_size(cleanup,_Config) ->
application:unset_env(kernel,logger_max_size),
@@ -339,12 +342,6 @@ depth(_Config) ->
{"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
#{},
#{template=>Template}),
- application:set_env(kernel,logger_format_depth,12),
- "[1,2,3,4,5,6,7,8,9,0,1|...]" =
- format(info,
- {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
- #{},
- #{template=>Template}),
"[1,2,3,4,5,6,7,8,9,0,1,2|...]" =
format(info,
{"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
@@ -359,7 +356,7 @@ depth(_Config) ->
depth=>unlimited}),
ok.
depth(cleanup,_Config) ->
- application:unset_env(kernel,logger_format_depth),
+ application:unset_env(kernel,error_logger_format_depth),
ok.
chars_limit(_Config) ->
@@ -368,7 +365,7 @@ chars_limit(_Config) ->
lists:seq(1,100),
maps:from_list(lists:zip(lists:seq(1,100),
lists:duplicate(100,value)))]},
- Meta = #{time=>"2018-04-26 9:15:40.449879"},
+ Meta = #{time=>timestamp()},
Template = [time," - ", msg, "\n"],
FC = #{template=>Template,
depth=>unlimited,
@@ -380,7 +377,7 @@ chars_limit(_Config) ->
L1 = string:length(String1),
ct:log("String1: ~p~nLength1: ~p~n",[lists:flatten(String1),L1]),
true = L1 > CL1,
- true = L1 < CL1 + 10,
+ true = L1 < CL1 + 15,
String2 = format(info,FA,Meta,FC#{chars_limit=>CL1,depth=>10}),
L2 = string:length(String2),
@@ -392,13 +389,13 @@ chars_limit(_Config) ->
L3 = string:length(String3),
ct:log("String3: ~p~nLength3: ~p~n",[lists:flatten(String3),L3]),
true = L3 > CL3,
- true = L3 < CL3 + 10,
+ true = L3 < CL3 + 15,
String4 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10}),
L4 = string:length(String4),
ct:log("String4: ~p~nLength4: ~p~n",[lists:flatten(String4),L4]),
true = L4 > CL3,
- true = L4 < CL3 + 10,
+ true = L4 < CL3 + 15,
%% Test that max_size truncates the string which is limited by
%% depth and chars_limit
@@ -409,6 +406,14 @@ chars_limit(_Config) ->
L5 = MS5,
true = lists:prefix(lists:sublist(String5,L5-4),String4),
+ %% Test that chars_limit limits string also
+ Str = "123456789012345678901234567890123456789012345678901234567890123456789",
+ CL6 = 80,
+ String6 = format(info,{string,Str},Meta,FC#{chars_limit=>CL6}),
+ L6 = string:length(String6),
+ ct:log("String6: ~p~nLength6: ~p~n",[String6,L6]),
+ L6 = CL6,
+
ok.
format_mfa(_Config) ->
@@ -437,29 +442,58 @@ format_mfa(_Config) ->
ok.
format_time(_Config) ->
- Time1 = timestamp(),
- ExpectedTimestamp1 = default_time_format(Time1),
- String1 = format(info,{"~p",[term]},#{time=>Time1},#{}),
- ct:log(String1),
- " info:\nterm\n" = string:prefix(String1,ExpectedTimestamp1),
-
- Time2 = timestamp(),
- ExpectedTimestamp2 = default_time_format(Time2,true),
- String2 = format(info,{"~p",[term]},#{time=>Time2},#{utc=>true}),
- ct:log(String2),
- " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp2),
-
- application:set_env(kernel,logger_utc,true),
- Time3 = timestamp(),
- ExpectedTimestamp3 = default_time_format(Time3,true),
- String3 = format(info,{"~p",[term]},#{time=>Time3},#{}),
- ct:log(String3),
- " info:\nterm\n" = string:prefix(String3,ExpectedTimestamp3),
+ Time = timestamp(),
+ Meta = #{time=>Time},
+ FC = #{template=>[time]},
+ Msg = {string,""},
+ ExpectedLocal = default_time_format(Time,false),
+ ExpectedUtc = default_time_format(Time,true),
+
+ %% default - local time
+ ExpectedLocal = format(info,Msg,Meta,FC),
+
+ %% time_offset config parameter to formatter
+ ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}),
+ ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}),
+
+ %% stdlib utc_log works when time_offset parameter is not set
+ application:set_env(stdlib,utc_log,true),
+ ExpectedUtc = format(info,Msg,Meta,FC),
+
+ %% sasl utc_log overwrites stdlib utc_log
+ application:set_env(sasl,utc_log,false),
+ ExpectedLocal = format(info,Msg,Meta,FC),
+
+ %% sasl utc_log overwrites stdlib utc_log
+ application:set_env(sasl,utc_log,true),
+ application:set_env(stdlib,utc_log,false),
+ ExpectedUtc = format(info,Msg,Meta,FC),
+
+ %% time_offset config parameter to formatter
+ %% overwrites sasl and stdlib utc_log
+ application:set_env(sasl,utc_log,false),
+ ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}),
+
+ %% time_offset config parameter to formatter
+ %% overwrites sasl and stdlib utc_log
+ application:set_env(sasl,utc_log,true),
+ application:set_env(stdlib,utc_log,true),
+ ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}),
+
+ %% time_designator config parameter to formatter
+ ExpectedLocalS = default_time_format(Time,false,$\s),
+ ExpectedUtcS = default_time_format(Time,true,$\s),
+
+ ExpectedLocalS = format(info,Msg,Meta,FC#{time_offset=>"",
+ time_designator=>$\s}),
+ ExpectedUtcS = format(info,Msg,Meta,FC#{time_offset=>"Z",
+ time_designator=>$\s}),
ok.
format_time(cleanup,_Config) ->
- application:unset_env(kernel,logger_utc),
+ application:unset_env(sasl,utc_log),
+ application:unset_env(stdlib,utc_log),
ok.
level_or_msg_in_meta(_Config) ->
@@ -480,7 +514,7 @@ level_or_msg_in_meta(_Config) ->
ok.
faulty_log(_Config) ->
- %% Unexpected log (should be type logger:log()) - print error
+ %% Unexpected log (should be type logger:log_event()) - print error
{error,
function_clause,
{logger_formatter,format,[_,_],_}} =
@@ -507,6 +541,54 @@ faulty_msg(_Config) ->
#{})),
ok.
+%% Test that formatter config can be changed, and that the default
+%% template is updated accordingly
+update_config(_Config) ->
+ logger:add_handler_filter(default,silence,{fun(_,_) -> stop end,ok}),
+ ok = logger:add_handler(?MODULE,?MODULE,#{}),
+ D = lists:seq(1,1000),
+ logger:info("~p~n",[D]),
+ {Lines1,C1} = check_log(),
+ [ct:log(L) || L <- Lines1],
+ ct:log("~p",[C1]),
+ [Line1] = Lines1,
+ [_Time,"info: "++D1] = string:split(Line1," "),
+ true = length(D1)>3000,
+ true = #{}==C1,
+
+ ok = logger:update_formatter_config(?MODULE,single_line,false),
+ logger:info("~p~n",[D]),
+ {Lines2,C2} = check_log(),
+ [ct:log(L) || L <- Lines2],
+ ct:log("~p",[C2]),
+ true = length(Lines2)>50,
+ true = #{single_line=>false}==C2,
+
+ ok = logger:update_formatter_config(?MODULE,#{legacy_header=>true}),
+ logger:info("~p~n",[D]),
+ {Lines3,C3} = check_log(),
+ [ct:log(L) || L <- Lines3],
+ ct:log("~p",[C3]),
+ ["=INFO REPORT==== "++_|D3] = Lines3,
+ true = length(D3)>50,
+ true = #{legacy_header=>true,single_line=>false}==C3,
+
+ ok = logger:update_formatter_config(?MODULE,single_line,true),
+ logger:info("~p~n",[D]),
+ {Lines4,C4} = check_log(),
+ [ct:log(L) || L <- Lines4],
+ ct:log("~p",[C4]),
+ ["=INFO REPORT==== "++_,D4] = Lines4,
+ true = length(D4)>3000,
+ true = #{legacy_header=>true,single_line=>true}==C4,
+
+ ok.
+
+update_config(cleanup,_Config) ->
+ _ = logger:remove_handler(?MODULE),
+ _ = logger:remove_handler_filter(default,silence),
+ ok.
+
%%%-----------------------------------------------------------------
%%% Internal
format(Level,Msg,Meta,Config) ->
@@ -518,22 +600,16 @@ format(Log,Config) ->
default_time_format(Timestamp) ->
default_time_format(Timestamp,false).
-default_time_format(Timestamp0,Utc) when is_integer(Timestamp0) ->
- Timestamp=Timestamp0+erlang:time_offset(microsecond),
- %% calendar:system_time_to_rfc3339(Time,[{unit,microsecond}]).
- Micro = Timestamp rem 1000000,
- Sec = Timestamp div 1000000,
- UniversalTime = erlang:posixtime_to_universaltime(Sec),
- {Date,Time} =
- if Utc -> UniversalTime;
- true -> erlang:universaltime_to_localtime(UniversalTime)
- end,
- default_time_format(Date,Time,Micro).
-
-default_time_format({Y,M,D},{H,Min,S},Micro) ->
- lists:flatten(
- io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w",
- [Y,M,D,H,Min,S,Micro])).
+default_time_format(Timestamp,Utc) ->
+ default_time_format(Timestamp,Utc,$T).
+
+default_time_format(Timestamp,Utc,Sep) ->
+ Offset = if Utc -> "Z";
+ true -> ""
+ end,
+ calendar:system_time_to_rfc3339(Timestamp,[{unit,microsecond},
+ {time_designator,Sep},
+ {offset,Offset}]).
integer(Str) ->
is_integer(list_to_integer(Str)).
@@ -549,10 +625,20 @@ my_try(Fun) ->
try Fun() catch C:R:S -> {C,R,hd(S)} end.
timestamp() ->
- erlang:monotonic_time(microsecond).
+ erlang:system_time(microsecond).
%% necessary?
add_time(#{time:=_}=Meta) ->
Meta;
add_time(Meta) ->
Meta#{time=>timestamp()}.
+
+%%%-----------------------------------------------------------------
+%%% handler callback
+log(Log,#{formatter:={M,C}}) ->
+ put(log,{M:format(Log,C),C}),
+ ok.
+
+check_log() ->
+ {S,C} = erase(log),
+ {string:lexemes(S,"\n"),C}.
diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl
index b59f5f7758..cfba35e43f 100644
--- a/lib/kernel/test/logger_legacy_SUITE.erl
+++ b/lib/kernel/test/logger_legacy_SUITE.erl
@@ -68,13 +68,13 @@ init_per_group(std, Config) ->
ok = logger:set_handler_config(
error_logger,filters,
[{domain,{fun logger_filters:domain/2,
- {log,prefix_of,[beam,erlang,otp]}}}]),
+ {log,super,[beam,erlang,otp]}}}]),
Config;
init_per_group(sasl, Config) ->
ok = logger:set_handler_config(
error_logger,filters,
[{domain,{fun logger_filters:domain/2,
- {log,prefix_of,[beam,erlang,otp,sasl]}}}]),
+ {log,super,[beam,erlang,otp,sasl]}}}]),
%% cth_log_redirect checks if sasl is started before displaying
%% any sasl reports - so just to see the real sasl reports in tc
diff --git a/lib/kernel/test/logger_simple_SUITE.erl b/lib/kernel/test/logger_simple_SUITE.erl
deleted file mode 100644
index 5d8d32492d..0000000000
--- a/lib/kernel/test/logger_simple_SUITE.erl
+++ /dev/null
@@ -1,247 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2018. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%% %CopyrightEnd%
-%%
--module(logger_simple_SUITE).
-
--compile(export_all).
-
--include_lib("common_test/include/ct.hrl").
--include_lib("kernel/include/logger.hrl").
--include_lib("kernel/src/logger_internal.hrl").
-
--define(check_no_log,[] = test_server:messages_get()).
--define(check(Expected),
- receive {log,Expected} ->
- [] = test_server:messages_get()
- after 1000 ->
- ct:fail({report_not_received,
- {line,?LINE},
- {expected,Expected},
- {got,test_server:messages_get()}})
- end).
-
--define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
- ":"++integer_to_list(?LINE)).
--define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
--define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
-
-suite() ->
- [{timetrap,{seconds,30}}].
-
-init_per_suite(Config) ->
- #{handlers:=Hs0} = logger:i(),
- Hs = lists:keydelete(cth_log_redirect,1,Hs0),
- [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs],
- Env = [{App,Key,application:get_env(App,Key)} ||
- {App,Key} <- [{kernel,logger_dest},
- {kernel,logger_level}]],
- [{env,Env},{logger,Hs}|Config].
-
-end_per_suite(Config) ->
- [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)],
- Hs = ?config(logger,Config),
- [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs],
- ok.
-
-init_per_group(_Group, Config) ->
- Config.
-
-end_per_group(_Group, _Config) ->
- ok.
-
-init_per_testcase(_TestCase, Config) ->
- Config.
-
-end_per_testcase(Case, Config) ->
- try apply(?MODULE,Case,[cleanup,Config])
- catch error:undef -> ok
- end,
- ok.
-
-groups() ->
- [].
-
-all() ->
- [start_stop,
- get_buffer,
- replace_file,
- replace_disk_log
- ].
-
-start_stop(_Config) ->
- undefined = whereis(logger_simple),
- register(logger_simple,self()),
- {error,_} = logger:add_handler(logger_simple,
- logger_simple,
- #{filter_default=>log}),
- unregister(logger_simple),
- ok = logger:add_handler(logger_simple,logger_simple,#{filter_default=>log}),
- Pid = whereis(logger_simple),
- true = is_pid(Pid),
- ok = logger:remove_handler(logger_simple),
- false = is_pid(whereis(logger_simple)),
- ok.
-start_stop(cleanup,_Config) ->
- logger:remove_handler(logger_simple).
-
-get_buffer(_Config) ->
- %% Start simple without buffer
- ok = logger:add_handler(logger_simple,logger_simple,
- #{filter_default=>log}),
- logger:emergency(?str),
- logger:alert(?str,[]),
- logger:error(?map_rep),
- logger:info(?keyval_rep),
- {ok,[]} = logger_simple:get_buffer(), % no buffer
- ok = logger:remove_handler(logger_simple),
-
- %% Start with buffer
- ok = logger:add_handler(logger_simple,logger_simple,
- #{filter_default=>log,
- logger_simple=>#{buffer=>true}}),
- logger:emergency(M1=?str),
- logger:alert(M2=?str,[]),
- logger:error(M3=?map_rep),
- logger:info(M4=?keyval_rep),
- logger:info(M41=?keyval_rep++[not_key_val]),
- error_logger:error_report(some_type,M5=?map_rep),
- error_logger:warning_report("some_type",M6=?map_rep),
- logger:critical(M7=?str,[A7=?keyval_rep]),
- logger:notice(M8=["fake",string,"line:",?LINE]),
- {ok,Buffered1} = logger_simple:get_buffer(),
- [#{level:=emergency,msg:={string,M1}},
- #{level:=alert,msg:={M2,[]}},
- #{level:=error,msg:={report,M3}},
- #{level:=info,msg:={report,M4}},
- #{level:=info,msg:={report,M41}},
- #{level:=error,msg:={report,#{label:={error_logger,error_report},
- report:=M5}}},
- #{level:=warning,msg:={report,#{label:={error_logger,warning_report},
- report:=M6}}},
- #{level:=critical,msg:={M7,[A7]}},
- #{level:=notice,msg:={string,M8}}] = Buffered1,
-
- %% Keep logging - should not buffer any more
- logger:emergency(?str),
- logger:alert(?str,[]),
- logger:error(?map_rep),
- logger:info(?keyval_rep),
- {ok,[]} = logger_simple:get_buffer(),
- ok = logger:remove_handler(logger_simple),
-
- %% Fill buffer and drop
- ok = logger:add_handler(logger_simple,logger_simple,
- #{filter_default=>log,
- logger_simple=>#{buffer=>true}}),
- logger:emergency(M9=?str),
- M10=?str,
- [logger:info(M10) || _ <- lists:seq(1,8)],
- logger:error(M11=?str),
- logger:error(?str),
- logger:error(?str),
- {ok,Buffered3} = logger_simple:get_buffer(),
- 11 = length(Buffered3),
- [#{level:=emergency,msg:={string,M9}},
- #{level:=info,msg:={string,M10}},
- #{level:=info,msg:={string,M10}},
- #{level:=info,msg:={string,M10}},
- #{level:=info,msg:={string,M10}},
- #{level:=info,msg:={string,M10}},
- #{level:=info,msg:={string,M10}},
- #{level:=info,msg:={string,M10}},
- #{level:=info,msg:={string,M10}},
- #{level:=error,msg:={string,M11}},
- #{level:=info,msg:={"Simple handler buffer full, dropped ~w messages",[2]}}]
- = Buffered3,
- ok.
-get_buffer(cleanup,_Config) ->
- logger:remove_handler(logger_simple).
-
-replace_file(Config) ->
- ok = logger:add_handler(logger_simple,logger_simple,
- #{filter_default=>log,
- logger_simple=>#{buffer=>true}}),
- logger:emergency(M1=?str),
- logger:alert(M2=?str,[]),
- logger:error(?map_rep),
- logger:info(?keyval_rep),
- undefined = whereis(?STANDARD_HANDLER),
- PrivDir = ?config(priv_dir,Config),
- File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)++".log"),
-
- application:set_env(kernel,logger_dest,{file,File}),
- application:set_env(kernel,logger_level,info),
-
- ok = logger:setup_standard_handler(),
- true = is_pid(whereis(?STANDARD_HANDLER)),
- ok = logger_std_h:filesync(?STANDARD_HANDLER),
- {ok,Bin} = file:read_file(File),
- Lines = [unicode:characters_to_list(L) ||
- L <- binary:split(Bin,<<"\n">>,[global,trim])],
- ["=EMERGENCY REPORT===="++_,
- M1,
- "=ALERT REPORT===="++_,
- M2,
- "=ERROR REPORT===="++_,
- _,
- _,
- "=INFO REPORT===="++_,
- _,
- _] = Lines,
- ok.
-replace_file(cleanup,_Config) ->
- logger:remove_handler(?STANDARD_HANDLER),
- logger:remove_handler(logger_simple).
-
-replace_disk_log(Config) ->
- ok = logger:add_handler(logger_simple,logger_simple,
- #{filter_default=>log,
- logger_simple=>#{buffer=>true}}),
- logger:emergency(M1=?str),
- logger:alert(M2=?str,[]),
- logger:error(?map_rep),
- logger:info(?keyval_rep),
- undefined = whereis(?STANDARD_HANDLER),
- PrivDir = ?config(priv_dir,Config),
- File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)),
-
- application:set_env(kernel,logger_dest,{disk_log,File}),
- application:set_env(kernel,logger_level,info),
-
- ok = logger:setup_standard_handler(),
- true = is_pid(whereis(?STANDARD_HANDLER)),
- ok = logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER),
- {ok,Bin} = file:read_file(File++".1"),
- Lines = [unicode:characters_to_list(L) ||
- L <- binary:split(Bin,<<"\n">>,[global,trim])],
- ["=EMERGENCY REPORT===="++_,
- M1,
- "=ALERT REPORT===="++_,
- M2,
- "=ERROR REPORT===="++_,
- _,
- _,
- "=INFO REPORT===="++_,
- _,
- _|_] = Lines, % the tail might be an info report about opening the disk log
- ok.
-replace_disk_log(cleanup,_Config) ->
- logger:remove_handler(?STANDARD_HANDLER),
- logger:remove_handler(logger_simple).
-
diff --git a/lib/kernel/test/logger_simple_h_SUITE.erl b/lib/kernel/test/logger_simple_h_SUITE.erl
new file mode 100644
index 0000000000..271a2126de
--- /dev/null
+++ b/lib/kernel/test/logger_simple_h_SUITE.erl
@@ -0,0 +1,210 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_simple_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-import(logger_test_lib, [setup/2, log/3, sync_and_read/3]).
+
+-define(check_no_log,[] = test_server:messages_get()).
+-define(check(Expected),
+ receive {log,Expected} ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {expected,Expected},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks, [logger_test_lib]}].
+
+init_per_suite(Config) ->
+ #{handlers:=Hs0} = logger:i(),
+ Hs = lists:keydelete(cth_log_redirect,1,Hs0),
+ [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs],
+ Env = [{App,Key,application:get_env(App,Key)} ||
+ {App,Key} <- [{kernel,logger_level}]],
+ [{env,Env},{logger,Hs}|Config].
+
+end_per_suite(Config) ->
+ [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)],
+ Hs = ?config(logger,Config),
+ [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs],
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop,
+ replace_default,
+ replace_file,
+ replace_disk_log
+ ].
+
+start_stop(_Config) ->
+ undefined = whereis(logger_simple_h),
+ register(logger_simple_h,self()),
+ {error,_} = logger:add_handler(simple,
+ logger_simple_h,
+ #{filter_default=>log}),
+ unregister(logger_simple_h),
+ ok = logger:add_handler(simple,logger_simple_h,#{filter_default=>log}),
+ Pid = whereis(logger_simple_h),
+ true = is_pid(Pid),
+ ok = logger:remove_handler(simple),
+ false = is_pid(whereis(logger_simple_h)),
+ ok.
+start_stop(cleanup,_Config) ->
+ logger:remove_handler(simple).
+
+%% This testcase just tests that it does not crash; the default handler prints
+%% to stdout, which we cannot read from in a detached slave.
+replace_default(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [M1=?str]),
+ log(Node, alert, [M2=?str,[]]),
+ log(Node, error, [M3=?map_rep]),
+ log(Node, info, [M4=?keyval_rep]),
+ log(Node, info, [M41=?keyval_rep++[not_key_val]]),
+ rpc:call(Node, error_logger, error_report, [some_type,M5=?map_rep]),
+ rpc:call(Node, error_logger, warning_report, ["some_type",M6=?map_rep]),
+ log(Node, critical, [M7=?str,[A7=?keyval_rep]]),
+ log(Node, notice, [M8=["fake",string,"line:",?LINE]]),
+
+ Env = rpc:call(Node, application, get_env, [kernel, logger, []]),
+ ok = rpc:call(Node, logger, add_handlers, [Env]),
+
+ ok.
+
+replace_file(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [M1=?str]),
+ log(Node, alert, [M2=?str,[]]),
+ log(Node, error, [M3=?map_rep]),
+ log(Node, info, [M4=?keyval_rep]),
+ log(Node, info, [M41=?keyval_rep++[not_key_val]]),
+ log(Node, critical, [M7=?str,[A7=?keyval_rep]]),
+ log(Node, notice, [M8=["fake",string,"line:",?LINE]]),
+
+ File = filename:join(proplists:get_value(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+
+ ok = rpc:call(Node, logger, add_handlers,
+ [[{handler, default, logger_std_h,
+ #{ logger_std_h => #{ type => {file, File} },
+ formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]),
+
+ {ok,Bin} = sync_and_read(Node, file, File),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=INFO REPORT===="++_,
+ _,
+ _,
+ "=INFO REPORT===="++_,
+ _,
+ _,
+ _,
+ "=CRITICAL REPORT===="++_,
+ _,
+ _,
+ "=NOTICE REPORT===="++_,
+ _
+ ] = Lines,
+ ok.
+
+replace_disk_log(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [M1=?str]),
+ log(Node, alert, [M2=?str,[]]),
+ log(Node, error, [M3=?map_rep]),
+ log(Node, info, [M4=?keyval_rep]),
+ log(Node, info, [M41=?keyval_rep++[not_key_val]]),
+ log(Node, critical, [M7=?str,[A7=?keyval_rep]]),
+ log(Node, notice, [M8=["fake",string,"line:",?LINE]]),
+
+ File = filename:join(proplists:get_value(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+
+ ok = rpc:call(Node, logger, add_handlers,
+ [[{handler, default, logger_disk_log_h,
+ #{ disk_log_opts => #{ file => File },
+ formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]),
+ {ok,Bin} = sync_and_read(Node, disk_log, File),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=INFO REPORT===="++_,
+ _,
+ _,
+ "=INFO REPORT===="++_,
+ _,
+ _,
+ _,
+ "=CRITICAL REPORT===="++_,
+ _,
+ _,
+ "=NOTICE REPORT===="++_,
+ _
+ ] = Lines,
+ ok.
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index e940e0a026..5764abd063 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -50,11 +50,12 @@
end).
suite() ->
- [{timetrap,{seconds,30}}].
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
init_per_suite(Config) ->
timer:start(), % to avoid progress report
- {ok,{?STANDARD_HANDLER,#{formatter:=OrigFormatter}}} =
+ {ok,{logger_std_h,#{formatter:=OrigFormatter}}} =
logger:get_handler_config(?STANDARD_HANDLER),
[{formatter,OrigFormatter}|Config].
@@ -241,36 +242,38 @@ formatter_fail(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}),
Pid = whereis(?MODULE),
true = is_pid(Pid),
- {ok,#{handlers:=H}} = logger:get_logger_config(),
+ #{handlers:=HC1} = logger:i(),
+ H = [Id || {Id,_,_} <- HC1],
true = lists:member(?MODULE,H),
%% Formatter is added automatically
{ok,{_,#{formatter:={logger_formatter,_}}}} =
logger:get_handler_config(?MODULE),
logger:info(M1=?msg,?domain),
- Got1 = try_match_file(Log,"=INFO REPORT====.*\n"++M1,5000),
+ Got1 = try_match_file(Log,"[0-9\\+\\-T:\\.]* info: "++M1,5000),
ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}),
logger:info(M2=?msg,?domain),
Got2 = try_match_file(Log,
- Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2,
+ escape(Got1)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M2,
5000),
ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}),
logger:info(M3=?msg,?domain),
Got3 = try_match_file(Log,
- Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3,
+ escape(Got2)++"[0-9\\+\\-T:\\.]* info: FORMATTER CRASH: .*"++M3,
5000),
ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}),
logger:info(?msg,?domain),
try_match_file(Log,
- Got3++"FORMATTER ERROR: bad_return_value",
+ escape(Got3)++"FORMATTER ERROR: bad_return_value",
5000),
%% Check that handler is still alive and was never dead
Pid = whereis(?MODULE),
- {ok,#{handlers:=H}} = logger:get_logger_config(),
+ #{handlers:=HC2} = logger:i(),
+ H = [Id || {Id,_,_} <- HC2],
ok.
@@ -289,10 +292,17 @@ config_fail(_Config) ->
#{logger_std_h => #{restart_type => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{handler_not_added,{invalid_levels,{42,42,_}}}} =
+ {error,{handler_not_added,{invalid_levels,{_,1,_}}}} =
logger:add_handler(?MODULE,logger_std_h,
- #{logger_std_h => #{toggle_sync_qlen=>42,
+ #{logger_std_h => #{drop_new_reqs_qlen=>1}}),
+ {error,{handler_not_added,{invalid_levels,{43,42,_}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{toggle_sync_qlen=>43,
drop_new_reqs_qlen=>42}}),
+ {error,{handler_not_added,{invalid_levels,{_,43,42}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{drop_new_reqs_qlen=>43,
+ flush_reqs_qlen=>42}}),
ok = logger:add_handler(?MODULE,logger_std_h,
#{filter_default=>log,
@@ -315,29 +325,32 @@ config_fail(cleanup,_Config) ->
logger:remove_handler(?MODULE).
crash_std_h_to_file(Config) ->
- crash_std_h(Config,?FUNCTION_NAME,logger_dest,file).
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])),
+ crash_std_h(Config,?FUNCTION_NAME,
+ [{handler,default,logger_std_h,
+ #{ logger_std_h => #{ type => {file, Log} }}}],
+ file, Log).
crash_std_h_to_file(cleanup,_Config) ->
crash_std_h(cleanup).
crash_std_h_to_disk_log(Config) ->
- crash_std_h(Config,?FUNCTION_NAME,logger_dest,disk_log).
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])),
+ crash_std_h(Config,?FUNCTION_NAME,
+ [{handler,default,logger_disk_log_h,
+ #{ disk_log_opts => #{ file => Log }}}],
+ disk_log,Log).
crash_std_h_to_disk_log(cleanup,_Config) ->
crash_std_h(cleanup).
-crash_std_h(Config,Func,Var,Type) ->
+crash_std_h(Config,Func,Var,Type,Log) ->
Dir = ?config(priv_dir,Config),
- File = lists:concat([?MODULE,"_",Func,".log"]),
- Log = filename:join(Dir,File),
+ SysConfig = filename:join(Dir,lists:concat([?MODULE,"_",Func,".config"])),
+ ok = file:write_file(SysConfig, io_lib:format("[{kernel,[{logger,~p}]}].",[Var])),
Pa = filename:dirname(code:which(?MODULE)),
- TypeAndLog =
- case os:type() of
- {win32,_} ->
- lists:concat([" {",Type,",\\\"",Log,"\\\"}"]);
- _ ->
- lists:concat([" \'{",Type,",\"",Log,"\"}\'"])
- end,
- Args = lists:concat([" -kernel ",Var,TypeAndLog," -pa ",Pa]),
Name = lists:concat([?MODULE,"_",Func]),
+ Args = lists:concat([" -config ",filename:rootname(SysConfig)," -pa ",Pa]),
ct:pal("Starting ~p with ~tp", [Name,Args]),
%% Start a node which prints kernel logs to the destination specified by Type
{ok,Node} = test_server:start_node(Name, peer, [{args, Args}]),
@@ -499,93 +512,86 @@ filesync(Config) ->
#{logger_std_h => #{type => Type},
filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
- formatter=>{?MODULE,self()}}),
- Tester = self(),
- TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
- Pid ! {trace,Mod,Func,Details},
- Pid;
- ({trace,TPid,'receive',Received}, Pid) ->
- Pid ! {trace,TPid,Received},
- Pid
- end,
- {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
- FileCtrlPid = maps:get(file_ctrl_pid , logger_std_h:info(?MODULE)),
- {ok,_} = dbg:p(FileCtrlPid, [c]),
- {ok,_} = dbg:tpl(logger_std_h, write_to_dev, 5, []),
- {ok,_} = dbg:tpl(logger_std_h, sync_dev, 4, []),
- {ok,_} = dbg:tp(file, datasync, 1, []),
+ formatter=>{?MODULE,nl}}),
+
+ %% check repeated filesync happens
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {logger_std_h, sync_dev, 4},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"first\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync}]),
logger:info("first", ?domain),
%% wait for automatic filesync
- timer:sleep(?FILESYNC_REP_INT),
- Expected1 = [{log,"first"}, {trace,logger_std_h,write_to_dev},
- {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
-
+ check_tracer(?FILESYNC_REP_INT*2),
+
+ %% check that explicit filesync is only done once
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {logger_std_h, sync_dev, 4},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"second\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync},
+ {no_more,500}
+ ]),
logger:info("second", ?domain),
%% do explicit filesync
logger_std_h:filesync(?MODULE),
%% a second filesync should be ignored
logger_std_h:filesync(?MODULE),
- Expected2 = [{log,"second"}, {trace,logger_std_h,write_to_dev},
- {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
+ check_tracer(100),
%% check that if there's no repeated filesync active,
%% a filesync is still performed when handler goes idle
logger:set_handler_config(?MODULE, logger_std_h,
#{filesync_repeat_interval => no_repeat}),
no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+ %% The following timer is to make sure the time from last log
+ %% ("second") to next ("third") is long enough, so the a flush is
+ %% triggered by the idle timeout between "thrid" and "fourth".
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {logger_std_h, sync_dev, 4},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"third\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync},
+ {logger_std_h, write_to_dev, <<"fourth\n">>},
+ {logger_std_h, sync_dev},
+ {file,datasync}]),
logger:info("third", ?domain),
+ %% wait for automatic filesync
timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
logger:info("fourth", ?domain),
%% wait for automatic filesync
- timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
- Expected3 = [{log,"third"}, {trace,logger_std_h,write_to_dev},
- {log,"fourth"}, {trace,logger_std_h,write_to_dev},
- {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
-
- dbg:stop_clear(),
-
- %% verify that filesync has been performed as expected
- Received1 = lists:map(fun({trace,M,F,_}) -> {trace,M,F};
- (Other) -> Other
- end, test_server:messages_get()),
- ct:pal("Trace #1 =~n~p", [Received1]),
- Received1 = Expected1 ++ Expected2 ++ Expected3,
-
- try_read_file(Log, {ok,<<"first\nsecond\nthird\nfourth\n">>}, 1000),
-
- {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
- {ok,_} = dbg:p(whereis(?MODULE), [c]),
- {ok,_} = dbg:tpl(logger_std_h, handle_cast, 2, []),
+ check_tracer(?IDLE_DETECT_TIME_MSEC*2),
%% switch repeated filesync on and verify that the looping works
SyncInt = 1000,
WaitT = 4500,
+ OneSync = {logger_std_h,handle_cast,repeated_filesync},
+ %% receive 1 initial repeated_filesync, then 1 per sec
+ start_tracer([{logger_std_h,handle_cast,2}],
+ [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+
logger:set_handler_config(?MODULE, logger_std_h,
#{filesync_repeat_interval => SyncInt}),
SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
timer:sleep(WaitT),
logger:set_handler_config(?MODULE, logger_std_h,
- #{filesync_repeat_interval => no_repeat}),
- dbg:stop_clear(),
-
- Received2 = lists:map(fun({trace,_M,handle_cast,[{Op,_},_]}) -> {trace,Op};
- (Other) -> Other
- end, test_server:messages_get()),
- ct:pal("Trace #2 =~n~p", [Received2]),
- OneSync = [{trace,repeated_filesync}],
- %% receive 1 initial repeated_filesync, then 1 per sec
- Received2 =
- lists:flatten([OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+ #{filesync_repeat_interval => no_repeat}),
+ check_tracer(100),
ok.
filesync(cleanup, _Config) ->
+ dbg:stop_clear(),
logger:remove_handler(?MODULE).
write_failure(Config) ->
Dir = ?config(priv_dir, Config),
File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
Log = filename:join(Dir, File),
- Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
+ Node = start_std_h_on_new_node(Config, Log),
false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
@@ -622,7 +628,7 @@ sync_failure(Config) ->
Dir = ?config(priv_dir, Config),
File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
Log = filename:join(Dir, File),
- Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
+ Node = start_std_h_on_new_node(Config, Log),
false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
@@ -658,21 +664,12 @@ sync_failure(cleanup, _Config) ->
Nodes = nodes(),
[test_server:stop_node(Node) || Node <- Nodes].
-start_std_h_on_new_node(_Config, Func, Log) ->
- Pa = filename:dirname(code:which(?MODULE)),
- Dest =
- case os:type() of
- {win32,_} ->
- lists:concat([" {file,\\\"",Log,"\\\"}"]);
- _ ->
- lists:concat([" \'{file,\"",Log,"\"}\'"])
- end,
- Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]),
- Name = lists:concat([?MODULE,"_",Func]),
- ct:pal("Starting ~s with ~tp", [Name,Args]),
- {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]),
- Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]),
- true = is_pid(Pid),
+start_std_h_on_new_node(Config, Log) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,[{handler,default,logger_std_h,
+ #{ logger_std_h => #{ type => {file,Log}}}}]}]),
ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
{?MODULE,nl}]),
Node.
@@ -698,16 +695,17 @@ internal_log(Type, Term) ->
op_switch_to_sync_file(Config) ->
{Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 500,
NewHConfig =
- HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3,
- drop_new_reqs_qlen => 501,
- flush_reqs_qlen => 2000,
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => NumOfReqs+1,
+ flush_reqs_qlen => 2*NumOfReqs,
enable_burst_limit => false}},
ok = logger:set_handler_config(?MODULE, NewHConfig),
%% TRecvPid = start_op_trace(),
- NumOfReqs = 500,
send_burst({n,NumOfReqs}, seq, {chars,79}, info),
- NumOfReqs = count_lines(Log),
+ Lines = count_lines(Log),
+ ok = file:delete(Log),
%% true = analyse_trace(TRecvPid,
%% fun(Events) -> find_mode(async,Events) end),
%% true = analyse_trace(TRecvPid,
@@ -718,101 +716,137 @@ op_switch_to_sync_file(Config) ->
%% fun(Events) -> find_mode(drop,Events) end),
%% false = analyse_trace(TRecvPid,
%% fun(Events) -> find_mode(flush,Events) end),
- ok = file:delete(Log),
%% stop_op_trace(TRecvPid),
+ NumOfReqs = Lines,
ok.
op_switch_to_sync_file(cleanup, _Config) ->
ok = stop_handler(?MODULE).
op_switch_to_sync_tty(Config) ->
{HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NumOfReqs = 500,
NewHConfig =
HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3,
- drop_new_reqs_qlen => 501,
- flush_reqs_qlen => 2000,
+ drop_new_reqs_qlen => NumOfReqs+1,
+ flush_reqs_qlen => 2*NumOfReqs,
enable_burst_limit => false}},
ok = logger:set_handler_config(?MODULE, NewHConfig),
- NumOfReqs = 500,
send_burst({n,NumOfReqs}, seq, {chars,79}, info),
ok.
op_switch_to_sync_tty(cleanup, _Config) ->
ok = stop_handler(?MODULE).
+op_switch_to_drop_file() ->
+ [{timetrap,{seconds,180}}].
op_switch_to_drop_file(Config) ->
- {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
-
- NewHConfig =
- HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
- drop_new_reqs_qlen => 3,
- flush_reqs_qlen => 600,
+ Test =
+ fun() ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 300,
+ Procs = 2,
+ Bursts = 10,
+ NewHConfig =
+ HConfig#{logger_std_h =>
+ StdHConfig#{toggle_sync_qlen => 1,
+ drop_new_reqs_qlen => 2,
+ flush_reqs_qlen =>
+ Procs*NumOfReqs*Bursts,
enable_burst_limit => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
- %% TRecvPid = start_op_trace(),
- NumOfReqs = 500,
- send_burst({n,NumOfReqs}, seq, {chars,79}, info),
- Logged = count_lines(Log),
- ct:pal("Number of messages dropped = ~w (~w)",
- [NumOfReqs-Logged,NumOfReqs]),
- true = (Logged < NumOfReqs),
- %% true = analyse_trace(TRecvPid,
- %% fun(Events) -> find_mode(async,Events) end),
- %% true = analyse_trace(TRecvPid,
- %% fun(Events) -> find_mode(drop,Events) end),
- %% false = analyse_trace(TRecvPid,
- %% fun(Events) -> find_mode(flush,Events) end),
- %% true = analyse_trace(TRecvPid,
- %% fun(Events) -> find_switch(async,drop,Events)
- %% orelse find_switch(sync,drop,Events)
- %% end),
- ok = file:delete(Log),
- %% stop_op_trace(TRecvPid),
- ok.
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ %% It sometimes happens that the handler gets the
+ %% requests at a slow enough pace so that dropping
+ %% never occurs. Therefore, let's generate a number of
+ %% bursts to increase the chance of message buildup.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) ||
+ _ <- lists:seq(1, Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ _ = file:delete(Log),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]),
+ true = (Logged < (Procs*NumOfReqs*Bursts)),
+ true = (Logged > 0),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+ %% test repeatedly, hoping that this will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
op_switch_to_drop_file(cleanup, _Config) ->
- ok = stop_handler(?MODULE).
+ _ = stop_handler(?MODULE).
op_switch_to_drop_tty(Config) ->
{HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NumOfReqs = 300,
+ Procs = 2,
NewHConfig =
- HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
- drop_new_reqs_qlen => 3,
- flush_reqs_qlen => 600,
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 1,
+ drop_new_reqs_qlen => 2,
+ flush_reqs_qlen =>
+ Procs*NumOfReqs+1,
enable_burst_limit => false}},
ok = logger:set_handler_config(?MODULE, NewHConfig),
- NumOfReqs = 500,
- send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
ok.
op_switch_to_drop_tty(cleanup, _Config) ->
ok = stop_handler(?MODULE).
op_switch_to_flush_file() ->
- [{timetrap,{seconds,60}}].
+ [{timetrap,{minutes,3}}].
op_switch_to_flush_file(Config) ->
- {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
-
- %% it's important that both async and sync requests have been queued
- %% when the flush happens (verify with coverage of flush_log_requests/2)
-
- NewHConfig =
- HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
- drop_new_reqs_qlen => 99,
- flush_reqs_qlen => 100,
+ Test =
+ fun() ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% NOTE: it's important that both async and sync
+ %% requests have been queued when the flush happens
+ %% (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{logger_std_h =>
+ StdHConfig#{toggle_sync_qlen => 2,
+ %% disable drop mode
+ drop_new_reqs_qlen => 300,
+ flush_reqs_qlen => 300,
enable_burst_limit => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
- NumOfReqs = 10000,
- Procs = 100,
- send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
- Logged = count_lines(Log),
- ct:pal("Number of messages flushed/dropped = ~w (~w)",
- [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]),
- true = (Logged < (NumOfReqs*Procs)),
-
- %%! --- Thu Apr 12 13:46:00 2018 --- peppe was here!
- %%! TODO: Verify that handler has switched to flush mode
-
- ok = file:delete(Log),
- ok.
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1500,
+ Procs = 10,
+ Bursts = 10,
+ %% It sometimes happens that the handler either gets
+ %% the requests at a slow enough pace so that flushing
+ %% never occurs, or it gets all messages at once,
+ %% causing all messages to get flushed (no dropping of
+ %% sync messages gets tested). Therefore, let's
+ %% generate a number of bursts to increase the chance
+ %% of message buildup in some random fashion.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info) ||
+ _ <- lists:seq(1,Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ _ = file:delete(Log),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]),
+ true = (Logged < (NumOfReqs*Procs*Bursts)),
+ true = (Logged > 0),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+ %% test repeatedly, hoping that this will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
op_switch_to_flush_file(cleanup, _Config) ->
- ok = stop_handler(?MODULE).
+ _ = stop_handler(?MODULE).
op_switch_to_flush_tty(Config) ->
{HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
@@ -822,12 +856,13 @@ op_switch_to_flush_tty(Config) ->
NewHConfig =
HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
- drop_new_reqs_qlen => 99,
+ %% disable drop mode
+ drop_new_reqs_qlen => 100,
flush_reqs_qlen => 100,
enable_burst_limit => false}},
ok = logger:set_handler_config(?MODULE, NewHConfig),
- NumOfReqs = 10000,
- Procs = 10,
+ NumOfReqs = 1000,
+ Procs = 100,
send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
ok.
op_switch_to_flush_tty(cleanup, _Config) ->
@@ -911,10 +946,10 @@ kill_disabled(cleanup, _Config) ->
ok = stop_handler(?MODULE).
qlen_kill_new(Config) ->
- {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ {_Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
Pid0 = whereis(?MODULE),
{_,Mem0} = process_info(Pid0, memory),
- RestartAfter = 2000,
+ RestartAfter = ?HANDLER_RESTART_AFTER,
NewHConfig =
HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true,
handler_overloaded_qlen=>10,
@@ -934,7 +969,7 @@ qlen_kill_new(Config) ->
killed ->
ct:pal("Slow shutdown, handler process was killed!", [])
end,
- timer:sleep(RestartAfter + 1000),
+ timer:sleep(RestartAfter + 2000),
true = is_pid(whereis(?MODULE)),
ok
after
@@ -948,7 +983,7 @@ qlen_kill_new(cleanup, _Config) ->
%% choke the standard handler on remote node to verify the termination
%% works as expected
-qlen_kill_std(Config) ->
+qlen_kill_std(_Config) ->
%%! HERE
%% Dir = ?config(priv_dir, Config),
%% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
@@ -962,10 +997,10 @@ qlen_kill_std(Config) ->
{skip,"Not done yet"}.
mem_kill_new(Config) ->
- {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ {_Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
Pid0 = whereis(?MODULE),
{_,Mem0} = process_info(Pid0, memory),
- RestartAfter = 2000,
+ RestartAfter = ?HANDLER_RESTART_AFTER,
NewHConfig =
HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true,
handler_overloaded_qlen=>50000,
@@ -985,7 +1020,7 @@ mem_kill_new(Config) ->
killed ->
ct:pal("Slow shutdown, handler process was killed!", [])
end,
- timer:sleep(RestartAfter * 2),
+ timer:sleep(RestartAfter * 3),
true = is_pid(whereis(?MODULE)),
ok
after
@@ -999,7 +1034,7 @@ mem_kill_new(cleanup, _Config) ->
%% choke the standard handler on remote node to verify the termination
%% works as expected
-mem_kill_std(Config) ->
+mem_kill_std(_Config) ->
{skip,"Not done yet"}.
restart_after(Config) ->
@@ -1023,7 +1058,7 @@ restart_after(Config) ->
end,
{Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
- RestartAfter = 2000,
+ RestartAfter = ?HANDLER_RESTART_AFTER,
NewHConfig2 =
HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true,
handler_overloaded_qlen=>10,
@@ -1035,7 +1070,7 @@ restart_after(Config) ->
send_burst({n,100}, {spawn,2,0}, {chars,79}, info),
receive
{'DOWN', MRef2, _, _, _Info2} ->
- timer:sleep(RestartAfter + 1000),
+ timer:sleep(RestartAfter + 2000),
Pid1 = whereis(?MODULE),
true = is_pid(Pid1),
false = (Pid1 == Pid0),
@@ -1052,7 +1087,7 @@ restart_after(cleanup, _Config) ->
%% during high load to verify that sync, dropping and flushing is
%% handled correctly.
handler_requests_under_load() ->
- [{timetrap,{seconds,60}}].
+ [{timetrap,{minutes,3}}].
handler_requests_under_load(Config) ->
{Log,HConfig,StdHConfig} =
start_handler(?MODULE, ?FUNCTION_NAME, Config),
@@ -1081,7 +1116,7 @@ handler_requests_under_load(Config) ->
NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult),
ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
ok = file:delete(Log).
-handler_requests_under_load(cleanup, Config) ->
+handler_requests_under_load(cleanup, _Config) ->
ok = stop_handler(?MODULE).
send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
@@ -1133,8 +1168,9 @@ start_handler(Name, FuncName, Config) ->
{Log,HConfig,StdHConfig}.
stop_handler(Name) ->
- ok = logger:remove_handler(Name),
- ct:pal("Handler ~p stopped!", [Name]).
+ R = logger:remove_handler(Name),
+ ct:pal("Handler ~p stopped! Result: ~p", [Name,R]),
+ R.
count_lines(File) ->
wait_until_written(File, -1),
@@ -1306,6 +1342,30 @@ try_match_file(_,Pattern,_,Incorrect) ->
[Pattern,Incorrect]),
erlang:error({error,not_matching_pattern,Pattern,Incorrect}).
+repeat_until_ok(Fun, N) ->
+ repeat_until_ok(Fun, 0, N, undefined).
+
+repeat_until_ok(_Fun, Stop, Stop, Reason) ->
+ {fails,Reason};
+
+repeat_until_ok(Fun, C, Stop, FirstReason) ->
+ if C > 0 -> timer:sleep(5000);
+ true -> ok
+ end,
+ try Fun() of
+ Result ->
+ {ok,{C,Result}}
+ catch
+ _:Reason:Stack ->
+ ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]),
+ if FirstReason == undefined ->
+ repeat_until_ok(Fun, C+1, Stop, {Reason,Stack});
+ true ->
+ repeat_until_ok(Fun, C+1, Stop, FirstReason)
+ end
+ end.
+
+
%%%-----------------------------------------------------------------
%%%
start_op_trace() ->
@@ -1346,17 +1406,17 @@ find_mode(flush, Events) ->
find_mode(Mode, Events) ->
lists:keymember([{mode,Mode}], 3, Events).
-find_switch(From, To, Events) ->
- try lists:foldl(fun({trace_return,check_load,{To,_,_,_}},
- {trace_call,check_load,[#{mode := From}]}) ->
- throw(match);
- (Event, _) ->
- Event
- end, undefined, Events) of
- _ -> false
- catch
- throw:match -> true
- end.
+%% find_switch(_From, To, Events) ->
+%% try lists:foldl(fun({trace_return,check_load,{To,_,_,_}},
+%% {trace_call,check_load,[#{mode := From}]}) ->
+%% throw(match);
+%% (Event, _) ->
+%% Event
+%% end, undefined, Events) of
+%% _ -> false
+%% catch
+%% throw:match -> true
+%% end.
analyse_trace(TRecvPid, TestFun) ->
TRecvPid ! {test,self(),TestFun},
@@ -1394,3 +1454,74 @@ analyse(Msgs) ->
From ! {result,self(),TestFun(Msgs)},
analyse(Msgs)
end.
+
+start_tracer(Trace,Expected) ->
+ Pid = self(),
+ FileCtrlPid = maps:get(file_ctrl_pid, logger_std_h:info(?MODULE)),
+ dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
+ dbg:p(whereis(?MODULE),[c]),
+ dbg:p(FileCtrlPid,[c]),
+ tpl(Trace),
+ ok.
+
+tpl([{M,F,A}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,[]),
+ case lists:keyfind(matched,1,Match) of
+ {_,_,1} ->
+ ok;
+ _ ->
+ dbg:stop_clear(),
+ throw({skip,"Can't trace "++atom_to_list(M)++":"++
+ atom_to_list(F)++"/"++integer_to_list(A)})
+ end,
+ tpl(Trace);
+tpl([]) ->
+ ok.
+
+tracer({trace,_,call,{logger_std_h,handle_cast,[Op|_]}},
+ {Pid,[{Mod,Func,Op}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
+tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}},
+ {Pid,[{Mod,Func,Data}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
+tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func});
+tracer({trace,_,call,Call}, {Pid,Expected}) ->
+ ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]),
+ Pid ! {tracer_got_unexpected,Call,Expected},
+ {Pid,Expected}.
+
+maybe_tracer_done(Pid,[]=Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! {tracer_done,0},
+ {Pid,Expected};
+maybe_tracer_done(Pid,[{no_more,T}]=Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! {tracer_done,T},
+ {Pid,Expected};
+maybe_tracer_done(Pid,Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ {Pid,Expected}.
+
+check_tracer(T) ->
+ check_tracer(T,fun() -> ct:fail({timeout,tracer}) end).
+check_tracer(T,TimeoutFun) ->
+ receive
+ {tracer_done,Delay} ->
+ %% Possibly wait Delay ms to check that no unexpected
+ %% traces are received
+ check_tracer(Delay,fun() -> ok end);
+ {tracer_got_unexpected,Got,Expected} ->
+ dbg:stop_clear(),
+ ct:fail({tracer_got_unexpected,Got,Expected})
+ after T ->
+ dbg:stop_clear(),
+ TimeoutFun()
+ end.
+
+escape([$+|Rest]) ->
+ [$\\,$+|escape(Rest)];
+escape([H|T]) ->
+ [H|escape(T)];
+escape([]) ->
+ [].
diff --git a/lib/kernel/test/logger_test_lib.erl b/lib/kernel/test/logger_test_lib.erl
new file mode 100644
index 0000000000..4ac05e6480
--- /dev/null
+++ b/lib/kernel/test/logger_test_lib.erl
@@ -0,0 +1,82 @@
+%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_test_lib).
+
+-include_lib("kernel/src/logger_internal.hrl").
+
+-export([setup/2, log/3, sync_and_read/3]).
+
+-export([init/2,
+ pre_init_per_suite/3, pre_init_per_testcase/4,
+ post_end_per_testcase/5, post_end_per_suite/3]).
+
+setup(Config,Vars) ->
+ FuncStr = lists:concat([proplists:get_value(suite, Config), "_",
+ proplists:get_value(tc, Config)]),
+ ConfigFileName = filename:join(proplists:get_value(priv_dir, Config), FuncStr),
+ file:write_file(ConfigFileName ++ ".config", io_lib:format("[{kernel, ~p}].",[Vars])),
+ case test_server:start_node(proplists:get_value(tc, Config), slave,
+ [{args, ["-pa ",filename:dirname(code:which(?MODULE)),
+ " -boot start_sasl -kernel start_timer true "
+ "-config ",ConfigFileName]}]) of
+ {ok, Node} ->
+ L = rpc:call(Node, logger, i, []),
+ ct:log("~p",[L]),
+ {ok, L, Node};
+ {error, Reason} ->
+ ct:log("Failed to start node: ~p",[Reason]),
+ error
+ end.
+
+log(Node, F, A) ->
+ log(Node, logger, F, A).
+log(Node, M, F, A) ->
+ MD = #{ gl => rpc:call(Node, erlang, whereis, [logger]) },
+ rpc:call(Node, M, F, A ++ [MD]).
+
+sync_and_read(Node,disk_log,Log) ->
+ rpc:call(Node,logger_disk_log_h,disk_log_sync,[?STANDARD_HANDLER]),
+ file:read_file(Log ++ ".1");
+sync_and_read(Node, file,Log) ->
+ ok = rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]),
+ file:read_file(Log).
+
+
+init(_, _) ->
+ {ok, []}.
+
+pre_init_per_suite(_Suite, Config, State) ->
+ {[{nodes, nodes()} | Config], State}.
+
+pre_init_per_testcase(Suite, TC, Config, State) ->
+ cleanup(Config),
+ {[{suite, Suite}, {tc, TC} | Config], State}.
+
+post_end_per_testcase(_, _TC, Config, Res, State) ->
+ cleanup(Config),
+ {Res, State}.
+
+post_end_per_suite(_, Config, State) ->
+ cleanup(Config),
+ {Config, State}.
+
+cleanup(Config) ->
+ [test_server:stop_node(N) || N <- nodes(),
+ not lists:member(N, proplists:get_value(nodes, Config))].
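A minimal usage sketch (not part of the patch) of the new logger_test_lib helper above; the kernel logger handler configuration and the test case name are assumptions made for illustration only:

%% Hypothetical test case using logger_test_lib (assumed handler config).
my_case(Config) ->
    {ok, _LoggerInfo, Node} =
        logger_test_lib:setup(
          Config,
          [{logger, [{handler, default, logger_std_h,
                      #{logger_std_h => #{type => standard_io}}}]}]),
    %% log/3 calls logger:error/3 on the slave node with suitable metadata.
    logger_test_lib:log(Node, error, ["example failure: ~p", [some_reason]]),
    ok.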
diff --git a/lib/kernel/test/os_SUITE.erl b/lib/kernel/test/os_SUITE.erl
index 591fbb2125..abbc301360 100644
--- a/lib/kernel/test/os_SUITE.erl
+++ b/lib/kernel/test/os_SUITE.erl
@@ -227,8 +227,8 @@ find_executable(Config) when is_list(Config) ->
DataDir = proplists:get_value(data_dir, Config),
%% Smoke test.
- case lib:progname() of
- erl ->
+ case ct:get_progname() of
+ "erl" ->
ErlPath = os:find_executable("erl"),
true = is_list(ErlPath),
true = filelib:is_regular(ErlPath);
@@ -388,7 +388,7 @@ comp(Expected, Got) ->
ct:fail(failed)
end.
-%% Like lib:nonl/1, but strips \r as well as \n.
+%% Strips \n and \r\n from the end of the string.
strip_nl([$\r, $\n]) -> [];
strip_nl([$\n]) -> [];
diff --git a/lib/observer/src/observer_lib.erl b/lib/observer/src/observer_lib.erl
index 0678b64134..718ef91942 100644
--- a/lib/observer/src/observer_lib.erl
+++ b/lib/observer/src/observer_lib.erl
@@ -682,7 +682,7 @@ parse_string(Str) ->
{error, {_SLine, SMod, SError}, _} ->
throw(io_lib:format("~ts", [SMod:format_error(SError)]))
end,
- case lib:extended_parse_term(Tokens) of
+ case erl_eval:extended_parse_term(Tokens) of
{error, {_PLine, PMod, PError}} ->
throw(io_lib:format("~ts", [PMod:format_error(PError)]));
Res -> Res
diff --git a/lib/parsetools/src/yecc.erl b/lib/parsetools/src/yecc.erl
index b4e1cfe5e3..ce1b9468fd 100644
--- a/lib/parsetools/src/yecc.erl
+++ b/lib/parsetools/src/yecc.erl
@@ -455,10 +455,14 @@ os_process_size() ->
case os:type() of
{unix, sunos} ->
Size = os:cmd("ps -o vsz -p " ++ os:getpid() ++ " | tail -1"),
- list_to_integer(lib:nonl(Size));
+ list_to_integer(nonl(Size));
_ ->
0
- end.
+ end.
+
+nonl([$\n]) -> [];
+nonl([]) -> [];
+nonl([H|T]) -> [H|nonl(T)].
timeit(Name, Fun, St0) ->
Time = runtime,
diff --git a/lib/public_key/doc/src/public_key.xml b/lib/public_key/doc/src/public_key.xml
index 7284da0499..5d57109140 100644
--- a/lib/public_key/doc/src/public_key.xml
+++ b/lib/public_key/doc/src/public_key.xml
@@ -986,10 +986,38 @@ fun(#'DistributionPoint'{}, #'CertificateList'{},
<p>The <c>ip</c> Reference ID takes an <seealso marker="inet:inet#type-ip_address">inet:ip_address()</seealso>
or an ip address in string format (E.g "10.0.1.1" or "1234::5678:9012") as second element.
</p>
+ <p>See <seealso marker="#pkix_verify_hostname_match_fun-1">pkix_verify_hostname_match_fun/1</seealso> for a
+ function that returns a fun suitable for this option.
+ </p>
</desc>
</func>
<func>
+ <name>pkix_verify_hostname_match_fun(Alg) -> fun(RefId | FQDN::string(), PresentedID) -> boolean() | default</name>
+ <fsummary>Returns a fun that is intended as an argument to the match_fun option in pkix_verify_hostname/3.
+ </fsummary>
+ <type>
+ <v>Alg = https</v>
+ <d>The algorithm for which the fun should implement the special matching rules</d>
+ <v>RefId</v>
+ <d>See <seealso marker="#pkix_verify_hostname-3">pkix_verify_hostname/3</seealso>.</d>
+ <v>FQDN</v>
+ <d>See <seealso marker="#pkix_verify_hostname-3">pkix_verify_hostname/3</seealso>.</d>
+ <v>PresentedID</v>
+ <d>See <seealso marker="#pkix_verify_hostname-3">pkix_verify_hostname/3</seealso>.</d>
+ </type>
+ <desc>
+ <p>The return value of calling this function is intended to be used in the <c>match_fun</c> option in
+ <seealso marker="#pkix_verify_hostname-3">pkix_verify_hostname/3</seealso>.
+ </p>
+ <p>The returned fun augments the default hostname matching with the rules specific to
+ the protocol given in the argument.
+ </p>
+ </desc>
+ </func>
+
+
+ <func>
<name>sign(Msg, DigestType, Key) -> binary()</name>
<name>sign(Msg, DigestType, Key, Options) -> binary()</name>
<fsummary>Creates a digital signature.</fsummary>
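A usage sketch (not part of the patch) of the match_fun option documented above; Cert is assumed to be a decoded #'OTPCertificate'{} record or a DER-encoded certificate binary:

%% With the https rules, a wildcard dNSName such as *.example.org is accepted,
%% which the default matching rejects.
MatchFun = public_key:pkix_verify_hostname_match_fun(https),
true = public_key:pkix_verify_hostname(Cert,
                                       [{dns_id, "other.example.org"}],
                                       [{match_fun, MatchFun}]).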
diff --git a/lib/public_key/src/pubkey_pem.erl b/lib/public_key/src/pubkey_pem.erl
index 06a4455b3f..bacc9ec600 100644
--- a/lib/public_key/src/pubkey_pem.erl
+++ b/lib/public_key/src/pubkey_pem.erl
@@ -209,6 +209,8 @@ pem_start('DSAPrivateKey') ->
<<"-----BEGIN DSA PRIVATE KEY-----">>;
pem_start('DHParameter') ->
<<"-----BEGIN DH PARAMETERS-----">>;
+pem_start('PrivateKeyInfo') ->
+ <<"-----BEGIN PRIVATE KEY-----">>;
pem_start('EncryptedPrivateKeyInfo') ->
<<"-----BEGIN ENCRYPTED PRIVATE KEY-----">>;
pem_start('CertificationRequest') ->
diff --git a/lib/public_key/src/public_key.erl b/lib/public_key/src/public_key.erl
index 931901640a..f2b57fd330 100644
--- a/lib/public_key/src/public_key.erl
+++ b/lib/public_key/src/public_key.erl
@@ -49,6 +49,7 @@
pkix_normalize_name/1,
pkix_path_validation/3,
pkix_verify_hostname/2, pkix_verify_hostname/3,
+ pkix_verify_hostname_match_fun/1,
ssh_decode/2, ssh_encode/2,
ssh_hostkey_fingerprint/1, ssh_hostkey_fingerprint/2,
ssh_curvename2oid/1, oid2ssh_curvename/1,
@@ -237,7 +238,7 @@ der_decode(Asn1Type, Der) when (Asn1Type == 'PrivateKeyInfo') or
andalso is_binary(Der) ->
try
{ok, Decoded} = 'PKCS-FRAME':decode(Asn1Type, Der),
- Decoded
+ der_priv_key_decode(Decoded)
catch
error:{badmatch, {error, _}} = Error ->
erlang:error(Error)
@@ -252,12 +253,45 @@ der_decode(Asn1Type, Der) when is_atom(Asn1Type), is_binary(Der) ->
erlang:error(Error)
end.
+der_priv_key_decode({'PrivateKeyInfo', v1,
+ {'PrivateKeyInfo_privateKeyAlgorithm', ?'id-ecPublicKey', {asn1_OPENTYPE, Parameters}}, PrivKey, _}) ->
+ EcPrivKey = der_decode('ECPrivateKey', PrivKey),
+ EcPrivKey#'ECPrivateKey'{parameters = der_decode('EcpkParameters', Parameters)};
+der_priv_key_decode({'PrivateKeyInfo', v1,
+ {'PrivateKeyInfo_privateKeyAlgorithm', ?'rsaEncryption', _}, PrivKey, _}) ->
+ der_decode('RSAPrivateKey', PrivKey);
+der_priv_key_decode({'PrivateKeyInfo', v1,
+ {'PrivateKeyInfo_privateKeyAlgorithm', ?'id-dsa', {asn1_OPENTYPE, Parameters}}, PrivKey, _}) ->
+ {params, #'Dss-Parms'{p=P, q=Q, g=G}} = der_decode('DSAParams', Parameters),
+ X = der_decode('Prime-p', PrivKey),
+ #'DSAPrivateKey'{p=P, q=Q, g=G, x=X};
+der_priv_key_decode(PKCS8Key) ->
+ PKCS8Key.
+
%%--------------------------------------------------------------------
-spec der_encode(asn1_type(), term()) -> Der::binary().
%%
%% Description: Encodes a public key entity with asn1 DER encoding.
%%--------------------------------------------------------------------
-der_encode(Asn1Type, Entity) when (Asn1Type == 'PrivateKeyInfo') or
+
+der_encode('PrivateKeyInfo', #'DSAPrivateKey'{p=P, q=Q, g=G, x=X}) ->
+ der_encode('PrivateKeyInfo',
+ {'PrivateKeyInfo', v1,
+ {'PrivateKeyInfo_privateKeyAlgorithm', ?'id-dsa',
+ {asn1_OPENTYPE, der_encode('Dss-Parms', #'Dss-Parms'{p=P, q=Q, g=G})}},
+ der_encode('Prime-p', X), asn1_NOVALUE});
+der_encode('PrivateKeyInfo', #'RSAPrivateKey'{} = PrivKey) ->
+ der_encode('PrivateKeyInfo',
+ {'PrivateKeyInfo', v1,
+ {'PrivateKeyInfo_privateKeyAlgorithm', ?'rsaEncryption', {asn1_OPENTYPE, ?DER_NULL}},
+ der_encode('RSAPrivateKey', PrivKey), asn1_NOVALUE});
+der_encode('PrivateKeyInfo', #'ECPrivateKey'{parameters = Parameters} = PrivKey) ->
+ der_encode('PrivateKeyInfo',
+ {'PrivateKeyInfo', v1,
+ {'PrivateKeyInfo_privateKeyAlgorithm', ?'id-ecPublicKey',
+ {asn1_OPENTYPE, der_encode('EcpkParameters', Parameters)}},
+ der_encode('ECPrivateKey', PrivKey#'ECPrivateKey'{parameters = asn1_NOVALUE}), asn1_NOVALUE});
+der_encode(Asn1Type, Entity) when (Asn1Type == 'PrivateKeyInfo') or
(Asn1Type == 'EncryptedPrivateKeyInfo') ->
try
{ok, Encoded} = 'PKCS-FRAME':encode(Asn1Type, Entity),
@@ -850,12 +884,23 @@ pkix_crls_validate(OtpCert, DPAndCRLs0, Options) ->
Options, pubkey_crl:init_revokation_state()).
%--------------------------------------------------------------------
--spec pkix_verify_hostname(Cert :: #'OTPCertificate'{} | binary(),
- ReferenceIDs :: [{uri_id | dns_id | ip | srv_id | oid(), string()}]) -> boolean().
+-spec pkix_verify_hostname(#'OTPCertificate'{} | binary(),
+ referenceIDs()
+ ) -> boolean().
+
+-spec pkix_verify_hostname(#'OTPCertificate'{} | binary(),
+ referenceIDs(),
+ proplists:proplist()) -> boolean().
--spec pkix_verify_hostname(Cert :: #'OTPCertificate'{} | binary(),
- ReferenceIDs :: [{uri_id | dns_id | ip | srv_id | oid(), string()}],
- Options :: proplists:proplist()) -> boolean().
+-type referenceIDs() :: [referenceID()] .
+-type referenceID() :: {uri_id | dns_id | ip | srv_id | oid(), string()} .
+
+-spec pkix_verify_hostname_match_fun(high_level_alg()) -> match_fun() .
+
+-type high_level_alg() :: https .
+-type match_fun() :: fun((ReferenceID::referenceID() | string(),
+ PresentedID::{atom()|oid(),string()}) -> match_fun_result() ) .
+-type match_fun_result() :: boolean() | default .
%% Description: Validates a hostname to RFC 6125
%%--------------------------------------------------------------------
@@ -920,6 +965,11 @@ pkix_verify_hostname(Cert = #'OTPCertificate'{tbsCertificate = TbsCert}, Referen
end
end.
+pkix_verify_hostname_match_fun(https) ->
+ fun({dns_id,FQDN=[_|_]}, {dNSName,Name=[_|_]}) -> verify_hostname_match_wildcard(FQDN, Name);
+ (_, _) -> default
+ end.
+
%%--------------------------------------------------------------------
-spec ssh_decode(binary(), public_key | ssh_file()) -> [{public_key(), Attributes::list()}]
; (binary(), ssh2_pubkey) -> public_key()
@@ -1483,9 +1533,7 @@ verify_hostname_match_default(Ref, Pres) ->
verify_hostname_match_default0(FQDN=[_|_], {cn,FQDN}) ->
not lists:member($*, FQDN);
verify_hostname_match_default0(FQDN=[_|_], {cn,Name=[_|_]}) ->
- [F1|Fs] = string:tokens(FQDN, "."),
- [N1|Ns] = string:tokens(Name, "."),
- match_wild(F1,N1) andalso Fs==Ns;
+ verify_hostname_match_wildcard(FQDN, Name);
verify_hostname_match_default0({dns_id,R}, {dNSName,P}) ->
R==P;
verify_hostname_match_default0({uri_id,R}, {uniformResourceIdentifier,P}) ->
@@ -1520,6 +1568,13 @@ verify_hostname_match_default0({srv_id,R}, {?srvName_OID,P}) ->
verify_hostname_match_default0(_, _) ->
false.
+
+verify_hostname_match_wildcard(FQDN, Name) ->
+ [F1|Fs] = string:tokens(FQDN, "."),
+ [N1|Ns] = string:tokens(Name, "."),
+ match_wild(F1,N1) andalso Fs==Ns.
+
+
ok({ok,X}) -> X.
l16_to_tup(L) -> list_to_tuple(l16_to_tup(L, [])).
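A short sketch (not part of the patch) of the PKCS#8 handling added above, assuming an unencrypted BEGIN PRIVATE KEY file containing an RSA key and that public_key.hrl is included for the record definitions; the file name is a placeholder:

{ok, Pem} = file:read_file("rsa_key_pkcs8.pem"),
[{'PrivateKeyInfo', _, not_encrypted} = Entry] = public_key:pem_decode(Pem),
%% Decoding now returns the concrete key record instead of the raw PKCS#8 structure.
#'RSAPrivateKey'{} = Key = public_key:pem_entry_decode(Entry),
%% The concrete record can be re-encoded as a PKCS#8 'PrivateKeyInfo' entry.
Entry1 = public_key:pem_entry_encode('PrivateKeyInfo', Key),
_Pem1 = public_key:pem_encode([Entry1]).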
diff --git a/lib/public_key/test/pbe_SUITE.erl b/lib/public_key/test/pbe_SUITE.erl
index 44caf479e5..8a5db4efec 100644
--- a/lib/public_key/test/pbe_SUITE.erl
+++ b/lib/public_key/test/pbe_SUITE.erl
@@ -226,11 +226,6 @@ pbes2(Config) when is_list(Config) ->
ok
end.
-check_key_info(#'PrivateKeyInfo'{privateKeyAlgorithm =
- #'PrivateKeyInfo_privateKeyAlgorithm'{algorithm = ?rsaEncryption},
- privateKey = Key}) ->
- #'RSAPrivateKey'{} = public_key:der_decode('RSAPrivateKey', iolist_to_binary(Key)).
-
decode_encode_key_file(File, Password, Cipher, Config) ->
Datadir = proplists:get_value(data_dir, Config),
{ok, PemKey} = file:read_file(filename:join(Datadir, File)),
@@ -238,11 +233,10 @@ decode_encode_key_file(File, Password, Cipher, Config) ->
PemEntry = public_key:pem_decode(PemKey),
ct:print("Pem entry: ~p" , [PemEntry]),
[{Asn1Type, _, {Cipher,_} = CipherInfo} = PubEntry] = PemEntry,
- KeyInfo = public_key:pem_entry_decode(PubEntry, Password),
+ #'RSAPrivateKey'{} = KeyInfo = public_key:pem_entry_decode(PubEntry, Password),
PemKey1 = public_key:pem_encode([public_key:pem_entry_encode(Asn1Type, KeyInfo, {CipherInfo, Password})]),
Pem = strip_ending_newlines(PemKey),
- Pem = strip_ending_newlines(PemKey1),
- check_key_info(KeyInfo).
+ Pem = strip_ending_newlines(PemKey1).
strip_ending_newlines(Bin) ->
string:strip(binary_to_list(Bin), right, 10).
diff --git a/lib/public_key/test/public_key_SUITE.erl b/lib/public_key/test/public_key_SUITE.erl
index 449d1fc040..fcc9bdc080 100644
--- a/lib/public_key/test/public_key_SUITE.erl
+++ b/lib/public_key/test/public_key_SUITE.erl
@@ -64,6 +64,7 @@ all() ->
groups() ->
[{pem_decode_encode, [], [dsa_pem, rsa_pem, ec_pem, encrypted_pem,
dh_pem, cert_pem, pkcs7_pem, pkcs10_pem, ec_pem2,
+ rsa_priv_pkcs8, dsa_priv_pkcs8, ec_priv_pkcs8,
ec_pem_encode_generated,
gen_ec_param_prime_field, gen_ec_param_char_2_field
]},
@@ -181,6 +182,19 @@ dsa_pem(Config) when is_list(Config) ->
DSAPubPemNoEndNewLines = strip_superfluous_newlines(DSAPubPem),
DSAPubPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([PubEntry0])).
+dsa_priv_pkcs8() ->
+ [{doc, "DSA PKCS8 private key decode/encode"}].
+dsa_priv_pkcs8(Config) when is_list(Config) ->
+ Datadir = proplists:get_value(data_dir, Config),
+ {ok, DsaPem} = file:read_file(filename:join(Datadir, "dsa_key_pkcs8.pem")),
+ [{'PrivateKeyInfo', DerDSAKey, not_encrypted} = Entry0 ] = public_key:pem_decode(DsaPem),
+ DSAKey = public_key:der_decode('PrivateKeyInfo', DerDSAKey),
+ DSAKey = public_key:pem_entry_decode(Entry0),
+ true = check_entry_type(DSAKey, 'DSAPrivateKey'),
+ PrivEntry0 = public_key:pem_entry_encode('PrivateKeyInfo', DSAKey),
+ DSAPemNoEndNewLines = strip_superfluous_newlines(DsaPem),
+ DSAPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([PrivEntry0])).
+
%%--------------------------------------------------------------------
rsa_pem() ->
@@ -216,6 +230,19 @@ rsa_pem(Config) when is_list(Config) ->
RSARawPemNoEndNewLines = strip_superfluous_newlines(RSARawPem),
RSARawPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([PubEntry1])).
+rsa_priv_pkcs8() ->
+ [{doc, "RSA PKCS8 private key decode/encode"}].
+rsa_priv_pkcs8(Config) when is_list(Config) ->
+ Datadir = proplists:get_value(data_dir, Config),
+ {ok, RsaPem} = file:read_file(filename:join(Datadir, "rsa_key_pkcs8.pem")),
+ [{'PrivateKeyInfo', DerRSAKey, not_encrypted} = Entry0 ] = public_key:pem_decode(RsaPem),
+ RSAKey = public_key:der_decode('PrivateKeyInfo', DerRSAKey),
+ RSAKey = public_key:pem_entry_decode(Entry0),
+ true = check_entry_type(RSAKey, 'RSAPrivateKey'),
+ PrivEntry0 = public_key:pem_entry_encode('PrivateKeyInfo', RSAKey),
+ RSAPemNoEndNewLines = strip_superfluous_newlines(RsaPem),
+ RSAPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([PrivEntry0])).
+
%%--------------------------------------------------------------------
ec_pem() ->
@@ -262,6 +289,18 @@ ec_pem2(Config) when is_list(Config) ->
ECPemNoEndNewLines = strip_superfluous_newlines(ECPrivPem),
ECPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([Entry1, Entry2])).
+ec_priv_pkcs8() ->
+ [{doc, "EC PKCS8 private key decode/encode"}].
+ec_priv_pkcs8(Config) when is_list(Config) ->
+ Datadir = proplists:get_value(data_dir, Config),
+ {ok, ECPrivPem} = file:read_file(filename:join(Datadir, "ec_key_pkcs8.pem")),
+ [{'PrivateKeyInfo', _, not_encrypted} = PKCS8Key] = public_key:pem_decode(ECPrivPem),
+ ECPrivKey = public_key:pem_entry_decode(PKCS8Key),
+ true = check_entry_type(ECPrivKey, 'ECPrivateKey'),
+ true = check_entry_type(ECPrivKey#'ECPrivateKey'.parameters, 'EcpkParameters'),
+ PrivEntry0 = public_key:pem_entry_encode('PrivateKeyInfo', ECPrivKey),
+ ECPemNoEndNewLines = strip_superfluous_newlines(ECPrivPem),
+ ECPemNoEndNewLines = strip_superfluous_newlines(public_key:pem_encode([PrivEntry0])).
init_ec_pem_encode_generated(Config) ->
case catch true = lists:member('secp384r1', crypto:ec_curves()) of
@@ -271,7 +310,7 @@ init_ec_pem_encode_generated(Config) ->
ec_pem_encode_generated() ->
[{doc, "PEM-encode generated EC key"}].
-ec_pem_encode_generated(Config) ->
+ec_pem_encode_generated(_Config) ->
Key1 = public_key:generate_key({namedCurve, 'secp384r1'}),
public_key:pem_entry_encode('ECPrivateKey', Key1),
@@ -926,7 +965,7 @@ pkix_verify_hostname_cn(Config) ->
%% openssl req -x509 -nodes -newkey rsa:1024 -keyout /dev/null -extensions SAN -config public_key_SUITE_data/verify_hostname.conf 2>/dev/null > public_key_SUITE_data/pkix_verify_hostname_subjAltName.pem
%%
%% Subject: C=SE, CN=example.com
-%% Subject Alternative Name: DNS:kb.example.org, URI:http://www.example.org, URI:https://wws.example.org
+%% Subject Alternative Name: DNS:kb.example.org, DNS:*.example.org, URI:http://www.example.org, URI:https://wws.example.org
pkix_verify_hostname_subjAltName(Config) ->
DataDir = proplists:get_value(data_dir, Config),
@@ -945,7 +984,16 @@ pkix_verify_hostname_subjAltName(Config) ->
{dns_id,"wws.example.org"}]),
%% Check that a dns_id matches a DNS subjAltName:
- true = public_key:pkix_verify_hostname(Cert, [{dns_id,"kb.example.org"}]).
+ true = public_key:pkix_verify_hostname(Cert, [{dns_id,"kb.example.org"}]),
+
+ %% Check that a dns_id does not match a DNS subjAltName containing a wildcard
+ false = public_key:pkix_verify_hostname(Cert, [{dns_id,"other.example.org"}]),
+
+ %% Check that a dns_id does match a DNS subjAltName containing a wildcard when the https match_fun is used
+ true = public_key:pkix_verify_hostname(Cert, [{dns_id,"other.example.org"}],
+ [{match_fun, public_key:pkix_verify_hostname_match_fun(https)}
+ ]
+ ).
%%--------------------------------------------------------------------
%% Uses the pem-file for pkix_verify_hostname_cn
@@ -1312,7 +1360,7 @@ do_gen_ec_param(File) ->
ct:fail({key_gen_fail, File})
end.
-init_per_testcase_gen_ec_param(TC, Curve, Config) ->
+init_per_testcase_gen_ec_param(_TC, Curve, Config) ->
case crypto:ec_curves() of
[] ->
{skip, missing_ec_support};
@@ -1328,7 +1376,7 @@ init_per_testcase_gen_ec_param(TC, Curve, Config) ->
end.
-crypto_supported_curve(Curve, Curves) ->
+crypto_supported_curve(Curve, _Curves) ->
try crypto:generate_key(ecdh, Curve) of
{error,_} -> false; % Just in case crypto is changed in the future...
_-> true
diff --git a/lib/public_key/test/public_key_SUITE_data/dsa_key_pkcs8.pem b/lib/public_key/test/public_key_SUITE_data/dsa_key_pkcs8.pem
new file mode 100644
index 0000000000..86e38e2c76
--- /dev/null
+++ b/lib/public_key/test/public_key_SUITE_data/dsa_key_pkcs8.pem
@@ -0,0 +1,9 @@
+-----BEGIN PRIVATE KEY-----
+MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBALez5tklY5CdFeTMos899pA6i4u4
+uCtszgBzrdBk6cl5FVqzdzWMGTQiynnTpGsrOESinzP06Ip+pG15We2OORwgvCxD
+/W95aCiN0/+MdiXqlsmboBARMzsa+SmBENN3gF/+tuuEAFzOXU1q2cmEywRLyfbM
+2KIBVE/TChWYw2eRAhUA1R64VvcQ90XA8SOKVDmMA0dBzukCgYEAlLMYP0pbgBlg
+HQVO3/avAHlWNrIq52Lxk7SdPJWgMvPjTK9Z6sv88kxsCcydtjvO439j1yqcwk50
+GQc+86ktBWWz93/HkIdnFyqafef4mmWvm2Uq6ClQKS+A0Asfaj8Mys+HUMiI+qsf
+djRbyIpwb7MX1nsVdsKzALnZNMW27A0EFgIUWYCfDrv5tqwPWKJu00ez0R192SY=
+-----END PRIVATE KEY-----
diff --git a/lib/public_key/test/public_key_SUITE_data/ec_key_pkcs8.pem b/lib/public_key/test/public_key_SUITE_data/ec_key_pkcs8.pem
new file mode 100644
index 0000000000..8280a3671a
--- /dev/null
+++ b/lib/public_key/test/public_key_SUITE_data/ec_key_pkcs8.pem
@@ -0,0 +1,5 @@
+-----BEGIN PRIVATE KEY-----
+MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQgB349XXSmba5BbJT5UuCK
+OoyoPHsygy6n+WzP1J+8eYShRANCAATTJdDtiqV9Hs7q+Y/yak1z3uJpukFQGYmr
+lJ2iztxfv7bz10eJ5yM/GNqG8kK0w7SIzjedsIkfjRK7bX6mP7h4
+-----END PRIVATE KEY-----
diff --git a/lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_subjAltName.pem b/lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_subjAltName.pem
index 83e1ad37b3..7ab9ed7b96 100644
--- a/lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_subjAltName.pem
+++ b/lib/public_key/test/public_key_SUITE_data/pkix_verify_hostname_subjAltName.pem
@@ -1,14 +1,14 @@
-----BEGIN CERTIFICATE-----
-MIICEjCCAXugAwIBAgIJANwliLph5EiAMA0GCSqGSIb3DQEBCwUAMCMxCzAJBgNV
-BAYTAlNFMRQwEgYDVQQDEwtleGFtcGxlLmNvbTAeFw0xNjEyMjAxNTEyMjRaFw0x
-NzAxMTkxNTEyMjRaMCMxCzAJBgNVBAYTAlNFMRQwEgYDVQQDEwtleGFtcGxlLmNv
-bTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAydstIN157w8QxkVaOl3wm81j
-fgZ8gqO3BXkECPF6bw5ewLlmePL6Qs4RypsaRe7cKJ9rHFlwhpdcYkxWSWEt2N7Z
-Ry3N4SjuU04ohWbYgy3ijTt7bJg7jOV1Dh56BnI4hwhQj0oNFizNZOeRRfEzdMnS
-+uk03t/Qre2NS7KbwnUCAwEAAaNOMEwwSgYDVR0RBEMwQYIOa2IuZXhhbXBsZS5v
-cmeGFmh0dHA6Ly93d3cuZXhhbXBsZS5vcmeGF2h0dHBzOi8vd3dzLmV4YW1wbGUu
-b3JnMA0GCSqGSIb3DQEBCwUAA4GBAKqFqW5gCso422bXriCBJoygokOTTOw1Rzpq
-K8Mm0B8W9rrW9OTkoLEcjekllZcUCZFin2HovHC5HlHZz+mQvBI1M6sN2HVQbSzS
-EgL66U9gwJVnn9/U1hXhJ0LO28aGbyE29DxnewNR741dWN3oFxCdlNaO6eMWaEsO
-gduJ5sDl
+MIICITCCAYqgAwIBAgIJAP31suf/Fi4oMA0GCSqGSIb3DQEBCwUAMCMxCzAJBgNV
+BAYTAlNFMRQwEgYDVQQDEwtleGFtcGxlLmNvbTAeFw0xODA1MTcxMDIzNDBaFw0x
+ODA2MTYxMDIzNDBaMCMxCzAJBgNVBAYTAlNFMRQwEgYDVQQDEwtleGFtcGxlLmNv
+bTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAsUVMXSM4Q6vYp7H4Svsfv4QQ
+dmUD3IdTbtumlyAqLZuc6Z0HU9IOE0wpF97+5AE3moHluwN/MtSX/fb9oxCjh3L6
+iDla770uUoIgiWkA9lyzuYXt7zGsqc0EmGMJRAHp4jOxI26U/C8wdXoyZsGD8GPr
+hYAI2Me4CkdDqCoRuUUCAwEAAaNdMFswWQYDVR0RBFIwUIIOa2IuZXhhbXBsZS5v
+cmeCDSouZXhhbXBsZS5vcmeGFmh0dHA6Ly93d3cuZXhhbXBsZS5vcmeGF2h0dHBz
+Oi8vd3dzLmV4YW1wbGUub3JnMA0GCSqGSIb3DQEBCwUAA4GBAKs8vWMqpXiuFhcq
+6W1dMrVB4tuDjt1Ctr3g2USXBLgm8NxsZzslFyDnrvtZY0hbjcAkGKMMhy8lFD5t
++GjBbyp7MKII6vJaVvc+wbrsbNdvioB1puGwbgVhgD3Kb79do9h6JrNncjMvBN7j
+VK6BUB8TUofFmztMjoPlxFOs/7qK
-----END CERTIFICATE-----
diff --git a/lib/public_key/test/public_key_SUITE_data/rsa_key_pkcs8.pem b/lib/public_key/test/public_key_SUITE_data/rsa_key_pkcs8.pem
new file mode 100644
index 0000000000..9ef5b3353f
--- /dev/null
+++ b/lib/public_key/test/public_key_SUITE_data/rsa_key_pkcs8.pem
@@ -0,0 +1,10 @@
+-----BEGIN PRIVATE KEY-----
+MIIBVQIBADANBgkqhkiG9w0BAQEFAASCAT8wggE7AgEAAkEA1GLJmDS5yLvg1zqa
+epnwCgOXzxpPvHokDQx+AcgfO14SPtCD6UTlDEwYBp+6tUTm+qgeQN/CTi7POwIA
+m7P3UwIDAQABAkALFiEJ1e7AwLXq5j88GR8Dls5s3CW/Y+zP1ZAaTbT7p0QUMxG+
+0ko7h8NoxcQJHZU27sZXCjog/IBqn577Xv4RAiEA8/aQ09kz0jxi4aNvlix4B+bW
+gX0sYtcCDkBzx8Y6iMkCIQDe3WCxV9PuiDjpuC8cAy3UMC5PBygZG4iK3arpgzxp
+OwIhAKxKJg+mpgVEJiTpsiVhNEeIS1bZWp5W75m3BM1B/haZAiBQOhEcxikcrR0P
+xaXvx5Uv1UhWWpUstKSqmLF17jBJEQIhAMx4HMLqwaGeYwOcxfzxz6Al8fnPmfAR
+hqFR28fVJrWX
+-----END PRIVATE KEY-----
diff --git a/lib/public_key/test/public_key_SUITE_data/verify_hostname.conf b/lib/public_key/test/public_key_SUITE_data/verify_hostname.conf
index a28864dc78..6b4e4f284e 100644
--- a/lib/public_key/test/public_key_SUITE_data/verify_hostname.conf
+++ b/lib/public_key/test/public_key_SUITE_data/verify_hostname.conf
@@ -10,7 +10,8 @@ CN=example.com
subjectAltName = @alt_names
[alt_names]
-DNS = kb.example.org
+DNS.1 = kb.example.org
+DNS.2 = *.example.org
URI.1 = http://www.example.org
URI.2 = https://wws.example.org
diff --git a/lib/runtime_tools/doc/src/LTTng.xml b/lib/runtime_tools/doc/src/LTTng.xml
index 93937b3fdc..4114542c74 100644
--- a/lib/runtime_tools/doc/src/LTTng.xml
+++ b/lib/runtime_tools/doc/src/LTTng.xml
@@ -111,7 +111,7 @@ $ make </code>
<p><em>process_register</em></p>
<list type="bulleted">
<item><c>pid : string</c> :: Process ID. Ex. <c>"&lt;0.131.0&gt;"</c></item>
- <item><c>name : string</c> :: Registered name. Ex. <c>"error_logger"</c></item>
+ <item><c>name : string</c> :: Registered name. Ex. <c>"logger"</c></item>
<item><c>type : string</c> :: <c>"register" | "unregister"</c></item>
</list>
<p>Example:</p>
diff --git a/lib/sasl/doc/src/error_logging.xml b/lib/sasl/doc/src/error_logging.xml
index 4b2c960bbb..8731b73599 100644
--- a/lib/sasl/doc/src/error_logging.xml
+++ b/lib/sasl/doc/src/error_logging.xml
@@ -32,22 +32,51 @@
<rev>B</rev>
<file>error_logging.xml</file>
</header>
+ <note>
+ <p>The SASL error logging concept described in this section is
+ deprecated since Erlang/OTP 21.0, when the
+ new <seealso marker="kernel:logger_chapter">logging
+ API</seealso> was introduced.</p>
+ <p>The new default behaviour is that the SASL application no
+ longer affects which log events are logged.
+ <seealso marker="#supervisor_report">Supervisor
+ reports</seealso> and <seealso marker="#crash_report">crash
+ reports</seealso> are logged via the default logger handler
+ which is set up by
+ Kernel. <seealso marker="#progress_report">Progress
+ reports</seealso> are by default not logged, but can be enabled
+ by setting the Kernel configuration
+ parameter <seealso marker="kernel:kernel_app#logger_progress_reports">
+ <c>logger_progress_reports</c></seealso> to <c>log</c>.</p>
+ <p>The old SASL error logging behaviour can be re-enabled by setting the
+ Kernel configuration
+ parameter <seealso marker="kernel:kernel_app#logger_sasl_compatible">
+ <c>logger_sasl_compatible</c></seealso> to <c>true</c>.</p>
+ <p>The mechanism
+ for <seealso marker="#multi_file_logging">multi-file error report
+ logging</seealso> as described in this section is also kept for
+ backwards compatibility. However, the new logging API also
+ introduces <seealso marker="kernel:logger_disk_log_h">
+ <c>logger_disk_log_h(3)</c></seealso>, which is a logger
+ handler that can print to multiple files
+ using <seealso marker="kernel:disk_log"><c>disk_log(3)</c></seealso>.</p>
+ </note>
+
+ <section>
+ <title>SASL reports</title>
<p>The SASL application introduces three types of reports:</p>
<list type="bulleted">
<item>Supervisor report</item>
<item>Progress report</item>
<item>Crash report</item>
</list>
- <p>When the SASL application is started, it adds a handler that
- formats and writes these reports, as specified in the configuration
- parameters for SASL, that is, the environment variables
- in the SASL application specification, which is found in the
- <c>.app</c> file of SASL. For details, see the
- <seealso marker="sasl_app"><c>sasl(6)</c></seealso> application in the
- Reference Manual and the <seealso marker="kernel:app"><c>app(4)</c></seealso>
- file in the Kernel Reference Manual.</p>
+ <p>When the SASL application is started, it adds a logger handler
+ that formats and writes these reports, as specified in
+ the <seealso marker="sasl_app#deprecated_error_logger_config">configuration
+ parameters for SASL</seealso>.</p>
<section>
+ <marker id="supervisor_report"/>
<title>Supervisor Report</title>
<p>A supervisor report is issued when a supervised child terminates
unexpectedly. A supervisor report contains the following
@@ -68,6 +97,7 @@
</section>
<section>
+ <marker id="progress_report"/>
<title>Progress Report</title>
<p>A progress report is issued when a supervisor starts or
restarts a child. A progress report contains the following items:</p>
@@ -82,6 +112,7 @@
</section>
<section>
+ <marker id="crash_report"/>
<title>Crash Report</title>
<p>Processes started with functions
<seealso marker="stdlib:proc_lib#spawn/1"><c>proc_lib:spawn</c></seealso> or
@@ -105,6 +136,7 @@
crash. The information gathered is the same as the information
for Crasher, described in the previous item.</p></item>
</taglist>
+ </section>
<section>
<title>Example</title>
@@ -163,6 +195,7 @@
</section>
<section>
+ <marker id="multi_file_logging"/>
<title>Multi-File Error Report Logging</title>
<p>Multi-file error report logging is used to store error messages
received by <c>error_logger</c>. The error messages
@@ -171,7 +204,8 @@
of files exist at the same time. The logging is very fast, as
each error message is written as a binary term.</p>
<p>For more details, see the
- <seealso marker="sasl_app"><c>sasl(6)</c></seealso>
+ <seealso marker="sasl_app#deprecated_error_logger_config">
+ <c>sasl(6)</c></seealso>
application in the Reference Manual.</p>
</section>
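For reference, a sys.config sketch (not part of the patch) using the two Kernel parameters referenced in the note above; the parameter names are those given in the documentation links of this patch:

%% sys.config: re-enable old-style SASL reports and progress reports.
[{kernel,
  [{logger_sasl_compatible, true},   %% adds a separate handler for the SASL reports
   {logger_progress_reports, log}    %% log supervisor/application progress reports
  ]}].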
diff --git a/lib/sasl/doc/src/sasl_app.xml b/lib/sasl/doc/src/sasl_app.xml
index 48b0b8eafb..be275879ee 100644
--- a/lib/sasl/doc/src/sasl_app.xml
+++ b/lib/sasl/doc/src/sasl_app.xml
@@ -86,30 +86,26 @@
<c>RELDIR</c> is used. By default, this is
<c>$OTP_ROOT/releases</c>.</p>
</item>
- <tag><c><![CDATA[utc_log = true | false ]]></c></tag>
- <item>
- <p>If set to <c>true</c>, all dates in textual log outputs are
- displayed in Universal Coordinated Time with the string
- <c>UTC</c> appended.</p>
- </item>
</taglist>
</section>
<section>
+ <marker id="deprecated_error_logger_config"/>
<title>Deprecated Error Logger Event Handlers and Configuration</title>
- <p>In OTP-21, a new API for logging was added to Erlang/OTP. The
+ <p>In Erlang/OTP 21.0, a new API for logging was added. The
old <c>error_logger</c> event manager, and event handlers
- running on this manager, will still work, but they are not used
+ running on this manager, still work, but they are not used
by default.</p>
<p>The error logger event handlers <c>sasl_report_tty_h</c>
- and <c>sasl_report_file_h</c>, were earliger used for printing
+ and <c>sasl_report_file_h</c> were earlier used for printing
the so called SASL reports, i.e. <em>supervisor
reports</em>, <em>crash reports</em>, and <em>progress
- reports</em>. These reports are now also printed by the standard
+ reports</em>. These reports are now also printed by the default
logger handler started by the Kernel application. Progress
- reports are by default stopped by a filter, but can easily be
+ reports are by default stopped by a filter, but can be
added by setting the Kernel configuration
- parameter <seealso marker="kernel:kernel_app#logger_log_progress"><c>logger_log_progress=true</c></seealso>.</p>
+ parameter <seealso marker="kernel:kernel_app#logger_progress_reports">
+ <c>logger_progress_reports</c></seealso> to <c>log</c>.</p>
<p>If the old error logger event handlers are still desired, they
must be added by
calling <c>error_logger:add_report_handler/1,2</c>.</p>
@@ -119,32 +115,28 @@
<p>Formats and writes <em>supervisor reports</em>, <em>crash
reports</em>, and <em>progress reports</em> to <c>stdio</c>.
This error logger event handler uses
- <seealso marker="kernel:kernel_app#logger_format_depth"><c>logger_format_depth</c></seealso>
+ <seealso marker="kernel:kernel_app#deprecated-configuration-parameters"><c>error_logger_format_depth</c></seealso>
in the Kernel application to limit how much detail is printed
- in crash and supervisor reports. If <c>logger_format_depth</c>
- is not set, it uses the old <c>error_logger_format_depth</c>
- instead.</p>
+ in crash and supervisor reports.</p>
</item>
<tag><c>sasl_report_file_h</c></tag>
<item>
<p>Formats and writes <em>supervisor reports</em>, <em>crash
report</em>, and <em>progress report</em> to a single file.
This error logger event handler uses
- <seealso marker="kernel:kernel_app#logger_format_depth"><c>logger_format_depth</c></seealso>
+ <seealso marker="kernel:kernel_app#deprecated-configuration-parameters"><c>error_logger_format_depth</c></seealso>
in the Kernel application to limit the details printed in
- crash and supervisor reports. If <c>logger_format_depth</c> is
- not set, it uses the old <c>error_logger_format_depth</c>
- instead.</p>
+ crash and supervisor reports.</p>
</item>
</taglist>
<p>A similar behaviour, but still using the new logger API, can be
obtained by setting the Kernel application environment
variable <seealso marker="kernel:kernel_app#logger_sasl_compatible"><c>logger_sasl_compatible=true</c></seealso>. This will add a
second instance of the standard logger handler
- named <c>sasl_h</c>, which will only print the SASL reports. No
+ named <c>sasl</c>, which will only print the SASL reports. No
SASL reports will then be printed by the Kernel logger
handler.</p>
- <p>The <c>sasl_h</c> handler will be configured according to the
+ <p>The <c>sasl</c> handler will be configured according to the
values of the following SASL application environment
variables.</p>
<taglist>
@@ -179,6 +171,12 @@
<c>sasl_error_logger</c> to error reports or progress reports,
or both. Default is <c>all</c>.</p>
</item>
+ <tag><marker id="utc_log"/><c><![CDATA[utc_log = true | false ]]></c></tag>
+ <item>
+ <p>If set to <c>true</c>, all dates in textual log outputs are
+ displayed in Coordinated Universal Time with the string
+ <c>UTC</c> appended.</p>
+ </item>
</taglist>
<p>The error logger event handler <c>log_mf_h</c> can also still
@@ -222,6 +220,7 @@
<title>See Also</title>
<p><seealso marker="alarm_handler"><c>alarm_handler(3)</c></seealso>,
<seealso marker="kernel:error_logger"><c>error_logger(3)</c></seealso>,
+ <seealso marker="kernel:logger"><c>logger(3)</c></seealso>,
<seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>,
<seealso marker="rb"><c>rb(3)</c></seealso>,
<seealso marker="release_handler"><c>release_handler(3)</c></seealso>,
diff --git a/lib/sasl/src/sasl.erl b/lib/sasl/src/sasl.erl
index 657eb6688a..92b80694d7 100644
--- a/lib/sasl/src/sasl.erl
+++ b/lib/sasl/src/sasl.erl
@@ -129,28 +129,24 @@ get_mf_maxf() ->
add_sasl_logger(undefined, _Level) -> ok;
add_sasl_logger(std, undefined) -> ok;
add_sasl_logger(Dest, Level) ->
- FC0 = #{legacy_header=>true,
- template=>[{logger_formatter,header},"\n",msg,"\n"]},
- FC = case application:get_env(sasl,utc_log) of
- {ok,Bool} when is_boolean(Bool) ->
- FC0#{utc=>Bool};
- _ ->
- FC0
- end,
- ok = logger:add_handler(sasl_h,logger_std_h,
+ FC = #{legacy_header=>true,
+ single_line=>false},
+ ok = logger:add_handler(sasl,logger_std_h,
#{level=>Level,
filter_default=>stop,
filters=>
- [{sasl_domain,
+ [{remote_gl,
+ {fun logger_filters:remote_gl/2,stop}},
+ {sasl_domain,
{fun logger_filters:domain/2,
- {log,equals,[beam,erlang,otp,sasl]}}}],
+ {log,equal,[beam,erlang,otp,sasl]}}}],
logger_std_h=>#{type=>Dest},
formatter=>{logger_formatter,FC}}).
delete_sasl_logger(undefined) -> ok;
delete_sasl_logger(std) -> ok;
delete_sasl_logger(_Type) ->
- _ = logger:remove_handler(sasl_h),
+ _ = logger:remove_handler(sasl),
ok.
add_error_logger_mf(undefined) -> ok;
diff --git a/lib/sasl/src/systools_make.erl b/lib/sasl/src/systools_make.erl
index f4b1b54fd1..6916107623 100644
--- a/lib/sasl/src/systools_make.erl
+++ b/lib/sasl/src/systools_make.erl
@@ -1551,7 +1551,7 @@ mandatory_modules() ->
logger_server,
logger_backend,
logger_config,
- logger_simple,
+ logger_simple_h,
lists,
proc_lib,
supervisor
diff --git a/lib/sasl/test/sasl_report_SUITE.erl b/lib/sasl/test/sasl_report_SUITE.erl
index 96975aaf69..72ee2f0a10 100644
--- a/lib/sasl/test/sasl_report_SUITE.erl
+++ b/lib/sasl/test/sasl_report_SUITE.erl
@@ -54,7 +54,7 @@ gen_server_crash_unicode(Config) ->
gen_server_crash(Config, Encoding) ->
StopFilter = {fun(_,_) -> stop end, ok},
- logger:add_handler_filter(logger_std_h,stop_all,StopFilter),
+ logger:add_handler_filter(default,stop_all,StopFilter),
logger:add_handler_filter(cth_log_redirect,stop_all,StopFilter),
try
do_gen_server_crash(Config, Encoding)
@@ -62,7 +62,7 @@ gen_server_crash(Config, Encoding) ->
ok = application:unset_env(kernel, logger_sasl_compatible),
ok = application:unset_env(sasl, sasl_error_logger),
ok = application:unset_env(kernel, error_logger_format_depth),
- logger:remove_handler_filter(logger_std_h,stop_all),
+ logger:remove_handler_filter(default,stop_all),
logger:remove_handler_filter(cth_log_redirect,stop_all)
end,
ok.
@@ -83,9 +83,11 @@ do_gen_server_crash(Config, Encoding) ->
error_logger:logfile({open,KernelLog}),
application:start(sasl),
logger:i(print),
+ ct:log("error_logger handlers: ~p",[error_logger:which_report_handlers()]),
crash_me(),
+
error_logger:logfile(close),
application:stop(sasl),
diff --git a/lib/ssh/doc/src/notes.xml b/lib/ssh/doc/src/notes.xml
index d78309167a..d0ed674eee 100644
--- a/lib/ssh/doc/src/notes.xml
+++ b/lib/ssh/doc/src/notes.xml
@@ -30,6 +30,28 @@
<file>notes.xml</file>
</header>
+<section><title>Ssh 4.6.9</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Host key hash erroneously calculated for clients
+ following draft-00 of RFC 4419, for example PuTTY</p>
+ <p>
+ Own Id: OTP-15064</p>
+ </item>
+ <item>
+ <p>
+ Renegotiation could fail in some states</p>
+ <p>
+ Own Id: OTP-15066</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Ssh 4.6.8</title>
<section><title>Fixed Bugs and Malfunctions</title>
<list>
@@ -487,6 +509,34 @@
</section>
+<section><title>Ssh 4.4.2.4</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix rare spurious shutdowns of ssh servers when receiving
+ <c>{'EXIT',_,normal}</c> messages.</p>
+ <p>
+ Own Id: OTP-15018</p>
+ </item>
+ <item>
+ <p>
+ Host key hash erroneously calculated for clients
+ following draft-00 of RFC 4419, for example PuTTY</p>
+ <p>
+ Own Id: OTP-15064</p>
+ </item>
+ <item>
+ <p>
+ Renegotiation could fail in some states</p>
+ <p>
+ Own Id: OTP-15066</p>
+ </item>
+ </list>
+ </section>
+
+</section>
<section><title>Ssh 4.4.2.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index 0223831cb1..407956cc6f 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -762,9 +762,23 @@
<datatype>
<name name="rekey_limit_common_option"/>
+ <name name="limit_bytes"/>
+ <name name="limit_time"/>
<desc>
- <p>Sets a limit, in bytes, when rekeying is to be initiated.
- Defaults to once per each GB and once per hour.</p>
+ <p>Sets the limit for when rekeying is to be initiated. Both the maximum time and the maximum amount
+ of transferred data can be configured:
+ </p>
+ <list>
+ <item><c>{Minutes, Bytes}</c> initiates rekeying when either of the limits is reached.</item>
+ <item><c>Bytes</c> initiates rekeying when <c>Bytes</c> bytes have been transferred,
+ or at the latest after one hour.</item>
+ </list>
+ <p>When a rekeying is done, both the timer and the byte counter are restarted.
+ Defaults to one hour and one GByte.</p>
+ <p>If <c>Minutes</c> is set to <c>infinity</c>, no rekeying will ever be initiated because the maximum time has passed.
+ Setting <c>Bytes</c> to <c>infinity</c> inhibits rekeying based on the amount of transferred data.
+ If the option value is set to <c>{infinity, infinity}</c>, no rekeying will be initiated. Note that rekeying initiated
+ by the peer will still be performed.</p>
</desc>
</datatype>
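A client-option sketch (not part of the patch) for the extended rekey_limit option described above; host name, port and credentials are placeholders:

%% Rekey after 30 minutes or 500 MB of transferred data, whichever comes first.
{ok, Conn} = ssh:connect("host.example.com", 22,
                         [{user, "alice"},
                          {password, "secret"},
                          {rekey_limit, {30, 500*1024*1024}}]).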
diff --git a/lib/ssh/doc/src/ssh_app.xml b/lib/ssh/doc/src/ssh_app.xml
index 6d180a5272..2ebd176e12 100644
--- a/lib/ssh/doc/src/ssh_app.xml
+++ b/lib/ssh/doc/src/ssh_app.xml
@@ -130,39 +130,47 @@
For the list on a particular installation, use the command
<seealso marker="ssh:ssh#default_algorithms/0">ssh:default_algorithms/0</seealso>.
The user may override the default algorithm configuration both on the server side and the client side.
- See the option <c>preferred_algorithms</c> in the <seealso marker="ssh:ssh#daemon/1">ssh:daemon/1,2,3</seealso> and
+ See the options
+ <seealso marker="ssh:ssh#type-preferred_algorithms_common_option">preferred_algorithms</seealso>
+ and
+ <seealso marker="ssh:ssh#type-modify_algorithms_common_option">modify_algorithms</seealso>
+ in the <seealso marker="ssh:ssh#daemon/1">ssh:daemon/1,2,3</seealso> and
<seealso marker="ssh:ssh#connect/3">ssh:connect/3,4</seealso> functions.
</p>
- <p>Supported algorithms are:</p>
+ <p>Supported algorithms are (in the default order):</p>
<marker id="supported_algos"></marker>
<taglist>
<tag>Key exchange algorithms</tag>
<item>
<list type="bulleted">
- <item>ecdh-sha2-nistp256</item>
<item>ecdh-sha2-nistp384</item>
<item>ecdh-sha2-nistp521</item>
- <item>diffie-hellman-group-exchange-sha1</item>
+ <item>ecdh-sha2-nistp256</item>
<item>diffie-hellman-group-exchange-sha256</item>
- <item>diffie-hellman-group14-sha1</item>
- <item>diffie-hellman-group14-sha256</item>
<item>diffie-hellman-group16-sha512</item>
<item>diffie-hellman-group18-sha512</item>
- <item>(diffie-hellman-group1-sha1, retired: can be enabled with the <c>preferred_algorithms</c> option)</item>
+ <item>diffie-hellman-group14-sha256</item>
+ <item>diffie-hellman-group14-sha1</item>
+ <item>diffie-hellman-group-exchange-sha1</item>
+ <item>(diffie-hellman-group1-sha1, retired: It can be enabled with the
+ <seealso marker="ssh:ssh#type-preferred_algorithms_common_option">preferred_algorithms</seealso>
+ or
+ <seealso marker="ssh:ssh#type-modify_algorithms_common_option">modify_algorithms</seealso>
+ options)</item>
</list>
</item>
<tag>Public key algorithms</tag>
<item>
<list type="bulleted">
- <item>ecdsa-sha2-nistp256</item>
<item>ecdsa-sha2-nistp384</item>
<item>ecdsa-sha2-nistp521</item>
+ <item>ecdsa-sha2-nistp256</item>
<item>ssh-rsa</item>
- <item>ssh-dss</item>
<item>rsa-sha2-256</item>
<item>rsa-sha2-512</item>
+ <item>ssh-dss</item>
</list>
</item>
@@ -178,11 +186,11 @@
<tag>Encryption algorithms (ciphers)</tag>
<item>
<list type="bulleted">
- <item>[email protected]</item>
<item>[email protected]</item>
- <item>aes128-ctr</item>
- <item>aes192-ctr</item>
<item>aes256-ctr</item>
+ <item>aes192-ctr</item>
+ <item>[email protected]</item>
+ <item>aes128-ctr</item>
<item>aes128-cbc</item>
<item>3des-cbc</item>
<item>(AEAD_AES_128_GCM, not enabled per default)</item>
@@ -241,7 +249,11 @@
<item><url href="https://tools.ietf.org/html/rfc4253">RFC 4253</url>, The Secure Shell (SSH) Transport Layer Protocol.
<p>Except</p>
<list type="bulleted">
- <item>8.1. diffie-hellman-group1-sha1. Disabled by default, can be enabled with the <c>preferred_algorithms</c> option.</item>
+ <item>8.1. diffie-hellman-group1-sha1. Disabled by default, can be enabled with the
+ <seealso marker="ssh:ssh#type-preferred_algorithms_common_option">preferred_algorithms</seealso>
+ or
+ <seealso marker="ssh:ssh#type-modify_algorithms_common_option">modify_algorithms</seealso>
+ options.</item>
</list>
<p/>
</item>
@@ -280,7 +292,10 @@
<p><marker id="rfc5647_note"/>There is an ambiguity in the synchronized selection of cipher and mac algorithm.
This is resolved by OpenSSH in the ciphers [email protected] and [email protected] which are implemented.
If the explicit ciphers and macs AEAD_AES_128_GCM or AEAD_AES_256_GCM are needed,
- they could be enabled with the option preferred_algorithms.
+ they could be enabled with the options
+ <seealso marker="ssh:ssh#type-preferred_algorithms_common_option">preferred_algorithms</seealso>
+ or
+ <seealso marker="ssh:ssh#type-modify_algorithms_common_option">modify_algorithms</seealso>.
</p>
<warning>
<p>
@@ -322,10 +337,18 @@
<p>Deviations:</p>
<list type="bulleted">
<item>The <c>diffie-hellman-group1-sha1</c> is not enabled by default, but is still supported and can be enabled
- with the option <c>preferred-algorithms</c></item>
+ with the options
+ <seealso marker="ssh:ssh#type-preferred_algorithms_common_option">preferred_algorithms</seealso>
+ or
+ <seealso marker="ssh:ssh#type-modify_algorithms_common_option">modify_algorithms</seealso>.
+ </item>
<item>The questionable sha1-based algorithms <c>diffie-hellman-group-exchange-sha1</c> and
<c>diffie-hellman-group14-sha1</c> are still enabled by default for compatibility with ancient clients and servers.
- They can be disabled with the option <c>preferred-algorithms</c></item>
+ They can be disabled with the options
+ <seealso marker="ssh:ssh#type-preferred_algorithms_common_option">preferred_algorithms</seealso>
+ or
+ <seealso marker="ssh:ssh#type-modify_algorithms_common_option">modify_algorithms</seealso>.
+ They will be disabled by default when the draft is turned into an RFC.</item>
</list>
<p/>
</item>
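An option sketch (not part of the patch) showing how the retired kex algorithm mentioned above can be re-enabled with modify_algorithms; the port number and system_dir are placeholders:

%% Append the retired kex algorithm to the daemon's default algorithm set.
{ok, _DaemonRef} =
    ssh:daemon(2222,
               [{system_dir, "/tmp/ssh_daemon"},
                {modify_algorithms,
                 [{append, [{kex, ['diffie-hellman-group1-sha1']}]}]}]).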
diff --git a/lib/ssh/src/ssh.hrl b/lib/ssh/src/ssh.hrl
index a3d9a1b1cb..2efd239aae 100644
--- a/lib/ssh/src/ssh.hrl
+++ b/lib/ssh/src/ssh.hrl
@@ -29,7 +29,6 @@
-define(SSH_DEFAULT_PORT, 22).
-define(SSH_MAX_PACKET_SIZE, (256*1024)).
--define(REKEY_TIMOUT, 3600000).
-define(REKEY_DATA_TIMOUT, 60000).
-define(DEFAULT_PROFILE, default).
@@ -192,7 +191,12 @@
-type user_dir_common_option() :: {user_dir, false | string()}.
-type profile_common_option() :: {profile, atom() }.
-type max_idle_time_common_option() :: {idle_time, timeout()}.
--type rekey_limit_common_option() :: {rekey_limit, non_neg_integer() }.
+-type rekey_limit_common_option() :: {rekey_limit, Bytes::limit_bytes() |
+ {Minutes::limit_time(), Bytes::limit_bytes()}
+ }.
+
+-type limit_bytes() :: non_neg_integer() | infinity . % non_neg_integer due to compatibility
+-type limit_time() :: pos_integer() | infinity .
-type key_cb_common_option() :: {key_cb, Module::atom() | {Module::atom(),Opts::[term()]} } .
-type disconnectfun_common_option() ::
diff --git a/lib/ssh/src/ssh_client_channel.erl b/lib/ssh/src/ssh_client_channel.erl
index f20007baaf..8b5e196412 100644
--- a/lib/ssh/src/ssh_client_channel.erl
+++ b/lib/ssh/src/ssh_client_channel.erl
@@ -180,6 +180,8 @@ init([Options]) ->
{stop, Why} ->
{stop, Why}
catch
+ _:undef ->
+ {stop, {bad_channel_callback_module,Cb}};
_:Reason ->
{stop, Reason}
end.
@@ -305,8 +307,8 @@ terminate(Reason, #state{cm = ConnectionManager,
close_sent = false} = State) ->
catch ssh_connection:close(ConnectionManager, ChannelId),
terminate(Reason, State#state{close_sent = true});
-terminate(_, #state{channel_cb = Cb, channel_state = ChannelState}) ->
- catch Cb:terminate(Cb, ChannelState),
+terminate(Reason, #state{channel_cb = Cb, channel_state = ChannelState}) ->
+ catch Cb:terminate(Reason, ChannelState),
ok.
%%--------------------------------------------------------------------
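With the terminate/2 fix above, a channel callback now receives the real stop reason instead of the callback module name. A fragment sketch (not part of the patch) of a hypothetical ssh_client_channel callback module:

%% Fragment of a hypothetical callback module; other callbacks omitted.
terminate(Reason, _State) ->
    %% Reason is the actual termination reason, e.g. normal or {shutdown, Term}.
    io:format("channel terminated: ~p~n", [Reason]),
    ok.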
diff --git a/lib/ssh/src/ssh_connection.erl b/lib/ssh/src/ssh_connection.erl
index ed03b4e2ed..dad7636e3f 100644
--- a/lib/ssh/src/ssh_connection.erl
+++ b/lib/ssh/src/ssh_connection.erl
@@ -498,25 +498,24 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
data = Data},
#connection{channel_cache = Cache} = Connection, server) ->
<<?DEC_BIN(SsName,_SsLen)>> = Data,
-
- #channel{remote_id = RemoteId} = Channel0 =
+ #channel{remote_id=RemoteId} = Channel =
ssh_client_channel:cache_lookup(Cache, ChannelId),
-
- ReplyMsg = {subsystem, ChannelId, WantReply, binary_to_list(SsName)},
-
- try
- {ok, Pid} = start_subsystem(SsName, Connection, Channel0, ReplyMsg),
- erlang:monitor(process, Pid),
- Channel = Channel0#channel{user = Pid},
- ssh_client_channel:cache_update(Cache, Channel),
- Reply = {connection_reply,
- channel_success_msg(RemoteId)},
- {[Reply], Connection}
- catch
- _:_ ->
- ErrorReply = {connection_reply, channel_failure_msg(RemoteId)},
- {[ErrorReply], Connection}
- end;
+ Reply =
+ try
+ start_subsystem(SsName, Connection, Channel,
+ {subsystem, ChannelId, WantReply, binary_to_list(SsName)})
+ of
+ {ok, Pid} ->
+ erlang:monitor(process, Pid),
+ ssh_client_channel:cache_update(Cache, Channel#channel{user=Pid}),
+ channel_success_msg(RemoteId);
+ {error,_Error} ->
+ channel_failure_msg(RemoteId)
+ catch
+ _:_ ->
+ channel_failure_msg(RemoteId)
+ end,
+ {[{connection_reply,Reply}], Connection};
handle_msg(#ssh_msg_channel_request{request_type = "subsystem"},
Connection, client) ->
@@ -822,7 +821,12 @@ start_channel(Cb, Id, Args, SubSysSup, Exec, Opts) ->
ChannelSup = ssh_subsystem_sup:channel_supervisor(SubSysSup),
case max_num_channels_not_exceeded(ChannelSup, Opts) of
true ->
- ssh_server_channel_sup:start_child(ChannelSup, Cb, Id, Args, Exec);
+ case ssh_server_channel_sup:start_child(ChannelSup, Cb, Id, Args, Exec) of
+ {error,{Error,_Info}} ->
+ throw(Error);
+ Others ->
+ Others
+ end;
false ->
throw(max_num_channels_exceeded)
end.
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 57641cf74c..3e224fe13f 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -71,7 +71,7 @@
-export([init_connection_handler/3, % proc_lib:spawn needs this
init_ssh_record/3, % Export of this internal function
% intended for low-level protocol test suites
- renegotiate/1, renegotiate_data/1, alg/1 % Export intended for test cases
+ renegotiate/1, alg/1 % Export intended for test cases
]).
-export([dbg_trace/3]).
@@ -325,14 +325,7 @@ close(ConnectionHandler, ChannelId) ->
) -> ok.
%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
renegotiate(ConnectionHandler) ->
- cast(ConnectionHandler, renegotiate).
-
-%%--------------------------------------------------------------------
--spec renegotiate_data(connection_ref()
- ) -> ok.
-%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
-renegotiate_data(ConnectionHandler) ->
- cast(ConnectionHandler, data_size).
+ cast(ConnectionHandler, force_renegotiate).
%%--------------------------------------------------------------------
alg(ConnectionHandler) ->
@@ -349,11 +342,6 @@ alg(ConnectionHandler) ->
connection_state :: #connection{},
latest_channel_id = 0 :: non_neg_integer()
| undefined,
- idle_timer_ref :: undefined
- | infinity
- | reference(),
- idle_timer_value = infinity :: infinity
- | pos_integer(),
transport_protocol :: atom()
| undefined, % ex: tcp
transport_cb :: atom()
@@ -429,20 +417,16 @@ init([Role,Socket,Opts]) ->
},
D = case Role of
client ->
- %% Start the renegotiation timers
- timer:apply_after(?REKEY_TIMOUT, gen_statem, cast, [self(), renegotiate]),
- timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]),
- cache_init_idle_timer(D0);
+ D0;
server ->
Sups = ?GET_INTERNAL_OPT(supervisors, Opts),
- cache_init_idle_timer(
- D0#data{connection_state =
- C#connection{cli_spec = ?GET_OPT(ssh_cli, Opts, {ssh_cli,[?GET_OPT(shell, Opts)]}),
- exec = ?GET_OPT(exec, Opts),
- system_supervisor = proplists:get_value(system_sup, Sups),
- sub_system_supervisor = proplists:get_value(subsystem_sup, Sups),
- connection_supervisor = proplists:get_value(connection_sup, Sups)
- }})
+ D0#data{connection_state =
+ C#connection{cli_spec = ?GET_OPT(ssh_cli, Opts, {ssh_cli,[?GET_OPT(shell, Opts)]}),
+ exec = ?GET_OPT(exec, Opts),
+ system_supervisor = proplists:get_value(system_sup, Sups),
+ sub_system_supervisor = proplists:get_value(subsystem_sup, Sups),
+ connection_supervisor = proplists:get_value(connection_sup, Sups)
+ }}
end,
{ok, {hello,Role}, D};
@@ -544,7 +528,7 @@ role({_,Role}) -> Role;
role({_,Role,_}) -> Role.
-spec renegotiation(state_name()) -> boolean().
-renegotiation({_,_,ReNeg}) -> ReNeg == renegotiation;
+renegotiation({_,_,ReNeg}) -> ReNeg == renegotiate;
renegotiation(_) -> false.
@@ -558,10 +542,15 @@ renegotiation(_) -> false.
#data{}
) -> gen_statem:event_handler_result(state_name()) .
+-define(CONNECTION_MSG(Msg),
+ [{next_event, internal, prepare_next_packet},
+ {next_event,internal,{conn_msg,Msg}}]).
+
%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
callback_mode() ->
- handle_event_function.
+ [handle_event_function,
+ state_enter].
handle_event(_, _Event, {init_error,Error}=StateName, D) ->
@@ -1016,95 +1005,92 @@ handle_event(_, #ssh_msg_debug{} = Msg, _, D) ->
debug_fun(Msg, D),
keep_state_and_data;
-handle_event(internal, Msg=#ssh_msg_global_request{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_request_success{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_request_failure{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open_confirmation{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open_failure{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_window_adjust{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_data{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_extended_data{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_eof{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_close{}, {connected,server} = StateName, D) ->
- handle_connection_msg(Msg, StateName, cache_request_idle_timer_check(D));
-
-handle_event(internal, Msg=#ssh_msg_channel_close{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
+handle_event(internal, {conn_msg,Msg}, StateName, #data{starter = User,
+ connection_state = Connection0,
+ event_queue = Qev0} = D0) ->
+ Role = role(StateName),
+ Rengotation = renegotiation(StateName),
+ try ssh_connection:handle_msg(Msg, Connection0, Role) of
+ {disconnect, Reason0, RepliesConn} ->
+ {Repls, D} = send_replies(RepliesConn, D0),
+ case {Reason0,Role} of
+ {{_, Reason}, client} when ((StateName =/= {connected,client})
+ and (not Rengotation)) ->
+ User ! {self(), not_connected, Reason};
+ _ ->
+ ok
+ end,
+ {stop_and_reply, {shutdown,normal}, Repls, D};
-handle_event(internal, Msg=#ssh_msg_channel_request{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
+ {Replies, Connection} when is_list(Replies) ->
+ {Repls, D} =
+ case StateName of
+ {connected,_} ->
+ send_replies(Replies, D0#data{connection_state=Connection});
+ _ ->
+ {ConnReplies, NonConnReplies} = lists:splitwith(fun not_connected_filter/1, Replies),
+ send_replies(NonConnReplies, D0#data{event_queue = Qev0 ++ ConnReplies})
+ end,
+ case {Msg, StateName} of
+ {#ssh_msg_channel_close{}, {connected,_}} ->
+ {keep_state, D, [cond_set_idle_timer(D)|Repls]};
+ {#ssh_msg_channel_success{}, _} ->
+ update_inet_buffers(D#data.socket),
+ {keep_state, D, Repls};
+ _ ->
+ {keep_state, D, Repls}
+ end
-handle_event(internal, Msg=#ssh_msg_channel_success{}, StateName, D) ->
- update_inet_buffers(D#data.socket),
- handle_connection_msg(Msg, StateName, D);
+ catch
+ Class:Error ->
+ {Repls, D1} = send_replies(ssh_connection:handle_stop(Connection0), D0),
+ {Shutdown, D} = ?send_disconnect(?SSH_DISCONNECT_BY_APPLICATION,
+ io_lib:format("Internal error: ~p:~p",[Class,Error]),
+ StateName, D1),
+ {stop_and_reply, Shutdown, Repls, D}
+ end;
-handle_event(internal, Msg=#ssh_msg_channel_failure{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
+handle_event(enter, _OldState, {connected,_}=State, D) ->
+ %% Entering the state where re-negotiation is possible
+ init_renegotiate_timers(State, D);
+
+handle_event(enter, _OldState, {ext_info,_,renegotiate}=State, D) ->
+ %% Could be hanging in exit_info state if nothing else arrives
+ init_renegotiate_timers(State, D);
+
+handle_event(enter, {connected,_}, State, D) ->
+ %% Exiting the state where re-negotiation is possible
+ pause_renegotiate_timers(State, D);
+
+handle_event(cast, force_renegotiate, StateName, D) ->
+ handle_event({timeout,renegotiate}, undefined, StateName, D);
+
+handle_event({timeout,renegotiate}, _, StateName, D0) ->
+ case StateName of
+ {connected,Role} ->
+ start_rekeying(Role, D0);
+ {ext_info,Role,renegotiate} ->
+ start_rekeying(Role, D0);
+ _ ->
+ %% Wrong state for starting a renegotiation, must be in re-negotiation
+ keep_state_and_data
+ end;
-handle_event(cast, renegotiate, {connected,Role}, D) ->
- {KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D#data.ssh_params),
- send_bytes(SshPacket, D),
- timer:apply_after(?REKEY_TIMOUT, gen_statem, cast, [self(), renegotiate]),
- {next_state, {kexinit,Role,renegotiate}, D#data{ssh_params = Ssh,
- key_exchange_init_msg = KeyInitMsg}};
+handle_event({timeout,check_data_size}, _, StateName, D0) ->
+ %% Rekey due to sent data limit reached? (Can't be in {ext_info,...} if data is sent)
+ case StateName of
+ {connected,Role} ->
+ check_data_rekeying(Role, D0);
+ _ ->
+ %% Wrong state for starting a renegotiation, must be in re-negotiation
+ keep_state_and_data
+ end;
handle_event({call,From}, get_alg, _, D) ->
#ssh{algorithms=Algs} = D#data.ssh_params,
{keep_state_and_data, [{reply,From,Algs}]};
-handle_event(cast, renegotiate, _, _) ->
- %% Already in key-exchange so safe to ignore
- timer:apply_after(?REKEY_TIMOUT, gen_statem, cast, [self(), renegotiate]), % FIXME: not here in original
- keep_state_and_data;
-
-
-%% Rekey due to sent data limit reached?
-handle_event(cast, data_size, {connected,Role}, D) ->
- {ok, [{send_oct,Sent0}]} = inet:getstat(D#data.socket, [send_oct]),
- Sent = Sent0 - D#data.last_size_rekey,
- MaxSent = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
- timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]),
- case Sent >= MaxSent of
- true ->
- {KeyInitMsg, SshPacket, Ssh} =
- ssh_transport:key_exchange_init_msg(D#data.ssh_params),
- send_bytes(SshPacket, D),
- {next_state, {kexinit,Role,renegotiate}, D#data{ssh_params = Ssh,
- key_exchange_init_msg = KeyInitMsg,
- last_size_rekey = Sent0}};
- _ ->
- keep_state_and_data
- end;
-
-handle_event(cast, data_size, _, _) ->
- %% Already in key-exchange so safe to ignore
- timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]), % FIXME: not here in original
- keep_state_and_data;
-
-
-
handle_event(cast, _, StateName, _) when not ?CONNECTED(StateName) ->
{keep_state_and_data, [postpone]};
@@ -1218,7 +1204,7 @@ handle_event({call,From}, {request, ChannelPid, ChannelId, Type, Data, Timeout},
D ->
%% Note reply to channel will happen later when reply is recived from peer on the socket
start_channel_request_timer(ChannelId, From, Timeout),
- {keep_state, cache_request_idle_timer_check(D)}
+ {keep_state, D, cond_set_idle_timer(D)}
end;
handle_event({call,From}, {request, ChannelId, Type, Data, Timeout}, StateName, D0)
@@ -1229,7 +1215,7 @@ handle_event({call,From}, {request, ChannelId, Type, Data, Timeout}, StateName,
D ->
%% Note reply to channel will happen later when reply is recived from peer on the socket
start_channel_request_timer(ChannelId, From, Timeout),
- {keep_state, cache_request_idle_timer_check(D)}
+ {keep_state, D, cond_set_idle_timer(D)}
end;
handle_event({call,From}, {data, ChannelId, Type, Data, Timeout}, StateName, D0)
@@ -1270,7 +1256,7 @@ handle_event({call,From},
}),
D = add_request(true, ChannelId, From, D2),
start_channel_request_timer(ChannelId, From, Timeout),
- {keep_state, cache_cancel_idle_timer(D)};
+ {keep_state, D, cond_set_idle_timer(D)};
handle_event({call,From}, {send_window, ChannelId}, StateName, D)
when ?CONNECTED(StateName) ->
@@ -1300,7 +1286,7 @@ handle_event({call,From}, {close, ChannelId}, StateName, D0)
#channel{remote_id = Id} = Channel ->
D1 = send_msg(ssh_connection:channel_close_msg(Id), D0),
ssh_client_channel:cache_update(cache(D1), Channel#channel{sent_close = true}),
- {keep_state, cache_request_idle_timer_check(D1), [{reply,From,ok}]};
+ {keep_state, D1, [cond_set_idle_timer(D1), {reply,From,ok}]};
undefined ->
{keep_state_and_data, [{reply,From,ok}]}
end;
@@ -1316,6 +1302,7 @@ handle_event(info, {Proto, Sock, Info}, {hello,_}, #data{socket = Sock,
{keep_state_and_data, [{next_event, internal, {info_line,Info}}]}
end;
+
handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock,
transport_protocol = Proto}) ->
try ssh_transport:handle_packet_part(
@@ -1333,20 +1320,36 @@ handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock,
try
ssh_message:decode(set_kex_overload_prefix(DecryptedBytes,D1))
of
- Msg = #ssh_msg_kexinit{} ->
+ #ssh_msg_kexinit{} = Msg ->
{keep_state, D1, [{next_event, internal, prepare_next_packet},
{next_event, internal, {Msg,DecryptedBytes}}
]};
+
+ #ssh_msg_global_request{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_request_success{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_request_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_open{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_open_confirmation{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_open_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_window_adjust{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_data{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_extended_data{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_eof{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_close{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_request{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_success{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+
Msg ->
{keep_state, D1, [{next_event, internal, prepare_next_packet},
- {next_event, internal, Msg}
+ {next_event, internal, Msg}
]}
catch
- C:E ->
+ C:E:ST ->
{Shutdown, D} =
?send_disconnect(?SSH_DISCONNECT_PROTOCOL_ERROR,
io_lib:format("Bad packet: Decrypted, but can't decode~n~p:~p~n~p",
- [C,E,erlang:get_stacktrace()]),
+ [C,E,ST]),
StateName, D1),
{stop, Shutdown, D}
end;
@@ -1375,10 +1378,10 @@ handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock,
StateName, D0),
{stop, Shutdown, D}
catch
- C:E ->
+ C:E:ST ->
{Shutdown, D} =
?send_disconnect(?SSH_DISCONNECT_PROTOCOL_ERROR,
- io_lib:format("Bad packet: Couldn't decrypt~n~p:~p~n~p",[C,E,erlang:get_stacktrace()]),
+ io_lib:format("Bad packet: Couldn't decrypt~n~p:~p~n~p",[C,E,ST]),
StateName, D0),
{stop, Shutdown, D}
end;
@@ -1418,8 +1421,20 @@ handle_event(info, {timeout, {_, From} = Request}, _,
end;
%%% Handle that an ssh channel's user process goes down
-handle_event(info, {'DOWN', _Ref, process, ChannelPid, _Reason}, _, D0) ->
- {keep_state, handle_channel_down(ChannelPid, D0)};
+handle_event(info, {'DOWN', _Ref, process, ChannelPid, _Reason}, _, D) ->
+ Cache = cache(D),
+ ssh_client_channel:cache_foldl(
+ fun(#channel{user=U,
+ local_id=Id}, Acc) when U == ChannelPid ->
+ ssh_client_channel:cache_delete(Cache, Id),
+ Acc;
+ (_,Acc) ->
+ Acc
+ end, [], Cache),
+ {keep_state, D, cond_set_idle_timer(D)};
+
+handle_event({timeout,idle_time}, _Data, _StateName, _D) ->
+ {stop, {shutdown, "Timeout"}};
%%% So that terminate will be run when the supervisor is shut down
handle_event(info, {'EXIT', _Sup, Reason}, StateName, _) ->
@@ -1439,7 +1454,7 @@ handle_event(info, {'EXIT', _Sup, Reason}, StateName, _) ->
end;
handle_event(info, check_cache, _, D) ->
- {keep_state, cache_check_set_idle_timer(D)};
+ {keep_state, D, cond_set_idle_timer(D)};
handle_event(info, UnexpectedMessage, StateName, D = #data{ssh_params = Ssh}) ->
case unexpected_fun(UnexpectedMessage, D) of
@@ -1486,6 +1501,11 @@ handle_event(internal, {send_disconnect,Code,DetailedText,Module,Line}, StateNam
send_disconnect(Code, DetailedText, Module, Line, StateName, D0),
{stop, Shutdown, D};
+
+handle_event(enter, _OldState, State, D) ->
+ %% Just skip
+ {next_state, State, D};
+
handle_event(_Type, _Msg, {ext_info,Role,_ReNegFlag}, D) ->
%% If something else arrives, go to the next state and handle the event there
{next_state, {connected,Role}, D, [postpone]};
@@ -1743,46 +1763,6 @@ call(FsmPid, Event, Timeout) ->
end.
-handle_connection_msg(Msg, StateName, D0 = #data{starter = User,
- connection_state = Connection0,
- event_queue = Qev0}) ->
- Renegotiation = renegotiation(StateName),
- Role = role(StateName),
- try ssh_connection:handle_msg(Msg, Connection0, Role) of
- {disconnect, Reason0, RepliesConn} ->
- {Repls, D} = send_replies(RepliesConn, D0),
- case {Reason0,Role} of
- {{_, Reason}, client} when ((StateName =/= {connected,client}) and (not Renegotiation)) ->
- User ! {self(), not_connected, Reason};
- _ ->
- ok
- end,
- {stop_and_reply, {shutdown,normal}, Repls, D};
-
- {[], Connection} ->
- {keep_state, D0#data{connection_state = Connection}};
-
- {Replies, Connection} when is_list(Replies) ->
- {Repls, D} =
- case StateName of
- {connected,_} ->
- send_replies(Replies, D0#data{connection_state=Connection});
- _ ->
- {ConnReplies, NonConnReplies} = lists:splitwith(fun not_connected_filter/1, Replies),
- send_replies(NonConnReplies, D0#data{event_queue = Qev0 ++ ConnReplies})
- end,
- {keep_state, D, Repls}
-
- catch
- Class:Error ->
- {Repls, D1} = send_replies(ssh_connection:handle_stop(Connection0), D0),
- {Shutdown, D} = ?send_disconnect(?SSH_DISCONNECT_BY_APPLICATION,
- io_lib:format("Internal error: ~p:~p",[Class,Error]),
- StateName, D1),
- {stop_and_reply, Shutdown, Repls, D}
- end.
-
-
set_kex_overload_prefix(Msg = <<?BYTE(Op),_/binary>>, #data{ssh_params=SshParams})
when Op == 30;
Op == 31
@@ -1888,19 +1868,6 @@ handle_request(ChannelId, Type, Data, WantReply, From, D) ->
end.
%%%----------------------------------------------------------------
-handle_channel_down(ChannelPid, D) ->
- Cache = cache(D),
- ssh_client_channel:cache_foldl(
- fun(#channel{user=U,
- local_id=Id}, Acc) when U == ChannelPid ->
- ssh_client_channel:cache_delete(Cache, Id),
- Acc;
- (_,Acc) ->
- Acc
- end, [], Cache),
- cache_check_set_idle_timer(D).
-
-
update_sys(Cache, Channel, Type, ChannelPid) ->
ssh_client_channel:cache_update(Cache,
Channel#channel{sys = Type, user = ChannelPid}).
@@ -1919,6 +1886,42 @@ new_channel_id(#data{connection_state = #connection{channel_id_seed = Id} =
{Id, State#data{connection_state =
Connection#connection{channel_id_seed = Id + 1}}}.
+
+%%%----------------------------------------------------------------
+start_rekeying(Role, D0) ->
+ {KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D0#data.ssh_params),
+ send_bytes(SshPacket, D0),
+ D = D0#data{ssh_params = Ssh,
+ key_exchange_init_msg = KeyInitMsg},
+ {next_state, {kexinit,Role,renegotiate}, D}.
+
+
+init_renegotiate_timers(State, D) ->
+ {RekeyTimeout,_MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
+ {next_state, State, D, [{{timeout,renegotiate}, RekeyTimeout, none},
+ {{timeout,check_data_size}, ?REKEY_DATA_TIMOUT, none} ]}.
+
+
+pause_renegotiate_timers(State, D) ->
+ {next_state, State, D, [{{timeout,renegotiate}, infinity, none},
+ {{timeout,check_data_size}, infinity, none} ]}.
+
+check_data_rekeying(Role, D) ->
+ {ok, [{send_oct,SocketSentTotal}]} = inet:getstat(D#data.socket, [send_oct]),
+ SentSinceRekey = SocketSentTotal - D#data.last_size_rekey,
+ {_RekeyTimeout,MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
+ case check_data_rekeying_dbg(SentSinceRekey, MaxSent) of
+ true ->
+ start_rekeying(Role, D#data{last_size_rekey = SocketSentTotal});
+ _ ->
+ %% Not enough data sent for a re-negotiation. Restart timer.
+ {keep_state, D, {{timeout,check_data_size}, ?REKEY_DATA_TIMOUT, none}}
+ end.
+
+check_data_rekeying_dbg(SentSinceRekey, MaxSent) ->
+ %% This function is for the ssh_dbg to trace on. See dbg_trace/3 at the end.
+ SentSinceRekey >= MaxSent.
+
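The timer handling above uses gen_statem generic (named) timeouts instead of the earlier timer:apply_after/4 calls: returning an action {{timeout,Name}, Time, Content} (re)starts the timeout called Name, a Time of infinity cancels it, and when it fires it is delivered as an event of type {timeout,Name}. A minimal stand-alone sketch of the pattern (state names and values are illustrative only):

    %% (Re)arm a named timeout when entering a state:
    handle_event(enter, _Old, connected, Data) ->
        {keep_state, Data, [{{timeout,renegotiate}, 3600000, none}]};
    %% Handle the timeout when it fires, and arm it again:
    handle_event({timeout,renegotiate}, none, connected, Data) ->
        {keep_state, Data, [{{timeout,renegotiate}, 3600000, none}]}.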
%%%----------------------------------------------------------------
%%% This server/client has decided to disconnect via the state machine:
%%% The unused arguments are for debugging.
@@ -2131,60 +2134,12 @@ retry_fun(User, Reason, #data{ssh_params = #ssh{opts = Opts,
%%% Cache idle timer that closes the connection if there are no
%%% channels open for a while.
-cache_init_idle_timer(D) ->
- case ?GET_OPT(idle_time, (D#data.ssh_params)#ssh.opts) of
- infinity ->
- D#data{idle_timer_value = infinity,
- idle_timer_ref = infinity % A flag used later...
- };
- IdleTime ->
- %% We dont want to set the timeout on first connect
- D#data{idle_timer_value = IdleTime}
- end.
-
-
-cache_check_set_idle_timer(D = #data{idle_timer_ref = undefined,
- idle_timer_value = IdleTime}) ->
- %% No timer set - shall we set one?
+cond_set_idle_timer(D) ->
case ssh_client_channel:cache_info(num_entries, cache(D)) of
- 0 when IdleTime == infinity ->
- %% No. Meaningless to set a timer that fires in an infinite time...
- D;
- 0 ->
- %% Yes, we'll set one since the cache is empty and it should not
- %% be that for a specified time
- D#data{idle_timer_ref =
- erlang:send_after(IdleTime, self(), {'EXIT',[],"Timeout"})};
- _ ->
- %% No - there are entries in the cache
- D
- end;
-cache_check_set_idle_timer(D) ->
- %% There is already a timer set or the timeout time is infinite
- D.
-
-
-cache_cancel_idle_timer(D) ->
- case D#data.idle_timer_ref of
- infinity ->
- %% The timer is not activated
- D;
- undefined ->
- %% The timer is already cancelled
- D;
- TimerRef ->
- %% The timer is active
- erlang:cancel_timer(TimerRef),
- D#data{idle_timer_ref = undefined}
+ 0 -> {{timeout,idle_time}, ?GET_OPT(idle_time, (D#data.ssh_params)#ssh.opts), none};
+ _ -> {{timeout,idle_time}, infinity, none}
end.
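cond_set_idle_timer/1 returns a {timeout,idle_time} action: when the channel cache is empty the timeout is armed with the configured idle_time value, otherwise it is cancelled with infinity. When it fires, the {timeout,idle_time} clause above stops the connection with {shutdown, "Timeout"}. A usage sketch of the corresponding user option, value in milliseconds (host name illustrative):

    {ok, Conn} = ssh:connect("host.example", 22, [{idle_time, 60000}]).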
-
-cache_request_idle_timer_check(D = #data{idle_timer_value = infinity}) ->
- D;
-cache_request_idle_timer_check(D = #data{idle_timer_value = IdleTime}) ->
- erlang:send_after(IdleTime, self(), check_cache),
- D.
-
%%%----------------------------------------------------------------
start_channel_request_timer(_,_, infinity) ->
ok;
@@ -2245,7 +2200,7 @@ update_inet_buffers(Socket) ->
%%%# Tracing
%%%#
-dbg_trace(points, _, _) -> [terminate, disconnect, connections, connection_events];
+dbg_trace(points, _, _) -> [terminate, disconnect, connections, connection_events, renegotiation];
dbg_trace(flags, connections, A) -> [c] ++ dbg_trace(flags, terminate, A);
dbg_trace(on, connections, A) -> dbg:tp(?MODULE, init_connection_handler, 3, x),
@@ -2288,6 +2243,33 @@ dbg_trace(format, connection_events, {return_from, {?MODULE,handle_event,4}, Ret
io_lib:format("~p~n", [event_handler_result(Ret)])
];
+dbg_trace(flags, renegotiation, _) -> [c];
+dbg_trace(on, renegotiation, _) -> dbg:tpl(?MODULE, init_renegotiate_timers, 2, x),
+ dbg:tpl(?MODULE, pause_renegotiate_timers, 2, x),
+ dbg:tpl(?MODULE, check_data_rekeying_dbg, 2, x),
+ dbg:tpl(?MODULE, start_rekeying, 2, x);
+dbg_trace(off, renegotiation, _) -> dbg:ctpl(?MODULE, init_renegotiate_timers, 2),
+ dbg:ctpl(?MODULE, pause_renegotiate_timers, 2),
+ dbg:ctpl(?MODULE, check_data_rekeying_dbg, 2),
+ dbg:ctpl(?MODULE, start_rekeying, 2);
+dbg_trace(format, renegotiation, {call, {?MODULE,init_renegotiate_timers,[_State,D]}}) ->
+ ["Renegotiation init\n",
+ io_lib:format("rekey_limit: ~p ({ms,bytes})~ncheck_data_size: ~p (ms)~n",
+ [?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
+ ?REKEY_DATA_TIMOUT])
+ ];
+dbg_trace(format, renegotiation, {call, {?MODULE,pause_renegotiate_timers,[_State,_D]}}) ->
+ ["Renegotiation pause\n"];
+dbg_trace(format, renegotiation, {call, {?MODULE,start_rekeying,[_Role,_D]}}) ->
+ ["Renegotiation start rekeying\n"];
+dbg_trace(format, renegotiation, {call, {?MODULE,check_data_rekeying_dbg,[SentSinceRekey, MaxSent]}}) ->
+ ["Renegotiation check data sent\n",
+ io_lib:format("TotalSentSinceRekey: ~p~nMaxBeforeRekey: ~p~nStartRekey: ~p~n",
+ [SentSinceRekey, MaxSent, SentSinceRekey >= MaxSent])
+ ];
+
+
+
dbg_trace(flags, terminate, _) -> [c];
dbg_trace(on, terminate, _) -> dbg:tp(?MODULE, terminate, 3, x);
dbg_trace(off, terminate, _) -> dbg:ctpg(?MODULE, terminate, 3);
diff --git a/lib/ssh/src/ssh_options.erl b/lib/ssh/src/ssh_options.erl
index 4dd9082250..fe95d2ac54 100644
--- a/lib/ssh/src/ssh_options.erl
+++ b/lib/ssh/src/ssh_options.erl
@@ -599,9 +599,24 @@ default(common) ->
class => user_options
},
- {rekey_limit, def} => % FIXME: Why not common?
- #{default => 1024000000,
- chk => fun check_non_neg_integer/1,
+ {rekey_limit, def} =>
+ #{default => {3600000, 1024000000}, % {1 hour, 1 GB}
+ chk => fun({infinity, infinity}) ->
+ true;
+ ({Mins, infinity}) when is_integer(Mins), Mins>0 ->
+ {true, {Mins*60*1000, infinity}};
+ ({infinity, Bytes}) when is_integer(Bytes), Bytes>=0 ->
+ true;
+ ({Mins, Bytes}) when is_integer(Mins), Mins>0,
+ is_integer(Bytes), Bytes>=0 ->
+ {true, {Mins*60*1000, Bytes}};
+ (infinity) ->
+ {true, {3600000, infinity}};
+ (Bytes) when is_integer(Bytes), Bytes>=0 ->
+ {true, {3600000, Bytes}};
+ (_) ->
+ false
+ end,
class => user_options
},
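With the check function above, rekey_limit now also accepts a {Minutes, Bytes} tuple (minutes are converted to milliseconds internally), a bare byte count that keeps the one-hour default time limit, or infinity for either element. A usage sketch (host and values illustrative):

    %% Rekey after at most 30 minutes or 500 kB of sent data, whichever comes first:
    {ok, Conn} = ssh:connect("host.example", 22, [{rekey_limit, {30, 500000}}]).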
diff --git a/lib/ssh/src/ssh_sftp.erl b/lib/ssh/src/ssh_sftp.erl
index 5984713ec9..9c391abc43 100644
--- a/lib/ssh/src/ssh_sftp.erl
+++ b/lib/ssh/src/ssh_sftp.erl
@@ -171,21 +171,16 @@ start_channel(Host, Port, UserOptions) ->
stop_channel(Pid) ->
case is_process_alive(Pid) of
true ->
- OldValue = process_flag(trap_exit, true),
- link(Pid),
- exit(Pid, ssh_sftp_stop_channel),
- receive
- {'EXIT', Pid, normal} ->
- ok
- after 5000 ->
- exit(Pid, kill),
- receive
- {'EXIT', Pid, killed} ->
- ok
- end
- end,
- process_flag(trap_exit, OldValue),
- ok;
+ MonRef = erlang:monitor(process, Pid),
+ unlink(Pid),
+ exit(Pid, ssh_sftp_stop_channel),
+ receive {'DOWN',MonRef,_,_,_} -> ok
+ after
+ 1000 ->
+ exit(Pid, kill),
+ erlang:demonitor(MonRef, [flush]),
+ ok
+ end;
false ->
ok
end.
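The new stop_channel/1 replaces the link/trap_exit handling with a monitor: it monitors the channel process, sends it an exit signal, and waits for the 'DOWN' message, killing the process if it has not terminated within a second. The same pattern in isolation, as a rough sketch (function name and timeout are illustrative):

    stop_process(Pid, Timeout) ->
        MRef = erlang:monitor(process, Pid),
        exit(Pid, shutdown),
        receive
            {'DOWN', MRef, process, Pid, _Reason} -> ok
        after Timeout ->
            exit(Pid, kill),
            erlang:demonitor(MRef, [flush]),
            ok
        end.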
diff --git a/lib/ssh/src/ssh_transport.erl b/lib/ssh/src/ssh_transport.erl
index f5bba9f824..631c4d0213 100644
--- a/lib/ssh/src/ssh_transport.erl
+++ b/lib/ssh/src/ssh_transport.erl
@@ -1808,9 +1808,10 @@ kex_alg_dependent({E, F, K}) ->
%% diffie-hellman and ec diffie-hellman (with E = Q_c, F = Q_s)
<<?Empint(E), ?Empint(F), ?Empint(K)>>;
-kex_alg_dependent({-1, _, -1, _, _, E, F, K}) ->
+kex_alg_dependent({-1, NBits, -1, Prime, Gen, E, F, K}) ->
%% ssh_msg_kex_dh_gex_request_old
- <<?Empint(E), ?Empint(F), ?Empint(K)>>;
+ <<?Euint32(NBits),
+ ?Empint(Prime), ?Empint(Gen), ?Empint(E), ?Empint(F), ?Empint(K)>>;
kex_alg_dependent({Min, NBits, Max, Prime, Gen, E, F, K}) ->
%% diffie-hellman group exchange
@@ -1849,9 +1850,6 @@ public_algo({#'ECPoint'{},{namedCurve,OID}}) ->
Curve = public_key:oid2ssh_curvename(OID),
list_to_atom("ecdsa-sha2-" ++ binary_to_list(Curve)).
-
-
-
sha('ssh-rsa') -> sha;
sha('rsa-sha2-256') -> sha256;
sha('rsa-sha2-384') -> sha384;
diff --git a/lib/ssh/test/Makefile b/lib/ssh/test/Makefile
index 0a99d31a63..9832a9b210 100644
--- a/lib/ssh/test/Makefile
+++ b/lib/ssh/test/Makefile
@@ -36,6 +36,7 @@ MODULES= \
ssh_options_SUITE \
ssh_basic_SUITE \
ssh_bench_SUITE \
+ ssh_chan_behaviours_SUITE \
ssh_compat_SUITE \
ssh_connection_SUITE \
ssh_dbg_SUITE \
@@ -53,6 +54,8 @@ MODULES= \
ssh_key_cb_options \
ssh_key_cb_engine_keys \
ssh_trpt_test_lib \
+ ssh_chan_behaviours_client \
+ ssh_chan_behaviours_server \
ssh_echo_server \
ssh_bench_dev_null \
ssh_peername_sockname_server \
diff --git a/lib/ssh/test/ssh_algorithms_SUITE.erl b/lib/ssh/test/ssh_algorithms_SUITE.erl
index 0ce4bd8699..3a5478f847 100644
--- a/lib/ssh/test/ssh_algorithms_SUITE.erl
+++ b/lib/ssh/test/ssh_algorithms_SUITE.erl
@@ -100,7 +100,7 @@ init_per_suite(Config) ->
ct:log("all() ->~n ~p.~n~ngroups()->~n ~p.~n",[all(),groups()]),
ssh:start(),
[{std_simple_sftp_size,25000} % Sftp transferred data size
- | setup_pubkey(Config)]
+ | Config]
end
).
@@ -459,17 +459,6 @@ pubkey_opts(Config) ->
{system_dir, SystemDir}].
-setup_pubkey(Config) ->
- DataDir = proplists:get_value(data_dir, Config),
- UserDir = proplists:get_value(priv_dir, Config),
- Keys =
- [ssh_test_lib:setup_dsa(DataDir, UserDir),
- ssh_test_lib:setup_rsa(DataDir, UserDir),
- ssh_test_lib:setup_ecdsa("256", DataDir, UserDir)
- ],
- ssh_test_lib:write_auth_keys(Keys, UserDir), % 'authorized_keys' shall contain ALL pub keys
- Config.
-
setup_pubkey(Alg, Config) ->
DataDir = proplists:get_value(data_dir, Config),
UserDir = proplists:get_value(priv_dir, Config),
diff --git a/lib/ssh/test/ssh_basic_SUITE.erl b/lib/ssh/test/ssh_basic_SUITE.erl
index 1fa94bef11..807e23ff01 100644
--- a/lib/ssh/test/ssh_basic_SUITE.erl
+++ b/lib/ssh/test/ssh_basic_SUITE.erl
@@ -32,7 +32,7 @@
-define(NEWLINE, <<"\r\n">>).
--define(REKEY_DATA_TMO, 65000).
+-define(REKEY_DATA_TMO, 1 * 60000). % Should be a multiple of 60000
%%--------------------------------------------------------------------
%% Common Test interface functions -----------------------------------
@@ -45,7 +45,6 @@ suite() ->
all() ->
[{group, all_tests}].
-
groups() ->
[{all_tests, [parallel], [{group, ssh_renegotiate_SUITE},
{group, ssh_basic_SUITE}
@@ -76,8 +75,17 @@ groups() ->
shell_exit_status
]},
- {ssh_renegotiate_SUITE, [parallel], [rekey,
- rekey_limit,
+ {ssh_renegotiate_SUITE, [parallel], [rekey0,
+ rekey1,
+ rekey2,
+ rekey3,
+ rekey4,
+ rekey_limit_client,
+ rekey_limit_daemon,
+ rekey_time_limit_client,
+ rekey_time_limit_daemon,
+ norekey_limit_client,
+ norekey_limit_daemon,
renegotiate1,
renegotiate2]},
@@ -1325,69 +1333,231 @@ shell_exit_status(Config) when is_list(Config) ->
ssh:stop_daemon(Pid).
+%%----------------------------------------------------------------------------
%%% Rekeying tests
-rekey() -> [{timetrap,{seconds,90}}].
+rekey0() -> [{timetrap,{seconds,90}}].
+rekey1() -> [{timetrap,{seconds,90}}].
+rekey2() -> [{timetrap,{seconds,90}}].
+rekey3() -> [{timetrap,{seconds,90}}].
+rekey4() -> [{timetrap,{seconds,90}}].
-rekey(Config) ->
- {Pid, Host, Port} =
- ssh_test_lib:std_daemon(Config,
- [{rekey_limit, 0}]),
- ConnectionRef =
- ssh_test_lib:std_connect(Config, Host, Port,
- [{rekey_limit, 0}]),
+rekey0(Config) -> rekey_chk(Config, 0, 0).
+rekey1(Config) -> rekey_chk(Config, infinity, 0).
+rekey2(Config) -> rekey_chk(Config, {infinity,infinity}, 0).
+rekey3(Config) -> rekey_chk(Config, 0, infinity).
+rekey4(Config) -> rekey_chk(Config, 0, {infinity,infinity}).
+
+rekey_chk(Config, RLdaemon, RLclient) ->
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config, [{rekey_limit, RLdaemon}]),
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, RLclient}]),
Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
- receive
- after ?REKEY_DATA_TMO ->
- %%By this time rekeying would have been done
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
- false = (Kex2 == Kex1),
- ssh:close(ConnectionRef),
- ssh:stop_daemon(Pid)
- end.
-%%--------------------------------------------------------------------
+ %% Make both sides send something:
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
-%%% Test rekeying by data volume
+ %% Check rekeying
+ timer:sleep(?REKEY_DATA_TMO),
+ ?wait_match(false, Kex1==ssh_test_lib:get_kex_init(ConnectionRef), [], 2000, 10),
-rekey_limit() -> [{timetrap,{seconds,400}}].
+ ssh:close(ConnectionRef),
+ ssh:stop_daemon(Pid).
-rekey_limit(Config) ->
+%%--------------------------------------------------------------------
+%%% Test rekeying by data volume
+
+rekey_limit_client() -> [{timetrap,{seconds,400}}].
+rekey_limit_client(Config) ->
+ Limit = 6000,
UserDir = proplists:get_value(priv_dir, Config),
DataFile = filename:join(UserDir, "rekey.data"),
-
+ Data = lists:duplicate(Limit+10,1),
Algs = proplists:get_value(preferred_algorithms, Config),
{Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{max_random_length_padding,0},
{preferred_algorithms,Algs}]),
- ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, 6000},
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, Limit},
{max_random_length_padding,0}]),
{ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+ %% Check that it doesn't rekey without data transfer
Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+ %% Check that data transfer triggers rekeying
+ ok = ssh_sftp:write_file(SftpPid, DataFile, Data),
timer:sleep(?REKEY_DATA_TMO),
- Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10),
- Data = lists:duplicate(159000,1),
+ %% Check that data transfer continues to trigger rekeying
ok = ssh_sftp:write_file(SftpPid, DataFile, Data),
+ timer:sleep(?REKEY_DATA_TMO),
+ ?wait_match(false, Kex2==(Kex3=ssh_test_lib:get_kex_init(ConnectionRef)), Kex3, 2000, 10),
+ %% Check that it doesn't rekey without data transfer
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
- false = (Kex2 == Kex1),
+ %% Check that it doesn't rekey on a small data transfer
+ ok = ssh_sftp:write_file(SftpPid, DataFile, "hi\n"),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+ %% Check that it doesn't rekey without data transfer
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
- ok = ssh_sftp:write_file(SftpPid, DataFile, "hi\n"),
+ ssh_sftp:stop_channel(SftpPid),
+ ssh:close(ConnectionRef),
+ ssh:stop_daemon(Pid).
+
+
+
+rekey_limit_daemon() -> [{timetrap,{seconds,400}}].
+rekey_limit_daemon(Config) ->
+ Limit = 6000,
+ UserDir = proplists:get_value(priv_dir, Config),
+ DataFile1 = filename:join(UserDir, "rekey1.data"),
+ DataFile2 = filename:join(UserDir, "rekey2.data"),
+ file:write_file(DataFile1, lists:duplicate(Limit+10,1)),
+ file:write_file(DataFile2, "hi\n"),
+ Algs = proplists:get_value(preferred_algorithms, Config),
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{rekey_limit, Limit},
+ {max_random_length_padding,0},
+ {preferred_algorithms,Algs}]),
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{max_random_length_padding,0}]),
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+
+ %% Check that it doesn't rekey without data transfer
+ Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
- false = (Kex2 == Kex1),
+ %% Check that data transfer triggers rekeying
+ {ok,_} = ssh_sftp:read_file(SftpPid, DataFile1),
+ timer:sleep(?REKEY_DATA_TMO),
+ ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10),
+ %% Check that data transfer continues to trigger rekeying
+ {ok,_} = ssh_sftp:read_file(SftpPid, DataFile1),
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ ?wait_match(false, Kex2==(Kex3=ssh_test_lib:get_kex_init(ConnectionRef)), Kex3, 2000, 10),
+
+ %% Check that it doesn't rekey without data transfer
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ %% Check that it doesn't rekey on a small data transfer
+ {ok,_} = ssh_sftp:read_file(SftpPid, DataFile2),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ %% Check that it doesn't rekey without data transfer
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ ssh_sftp:stop_channel(SftpPid),
+ ssh:close(ConnectionRef),
+ ssh:stop_daemon(Pid).
+
+
+%%--------------------------------------------------------------------
+%% Check that data transfer in the other direction does not trigger re-keying
+norekey_limit_client() -> [{timetrap,{seconds,400}}].
+norekey_limit_client(Config) ->
+ Limit = 6000,
+ UserDir = proplists:get_value(priv_dir, Config),
+ DataFile = filename:join(UserDir, "rekey3.data"),
+ file:write_file(DataFile, lists:duplicate(Limit+10,1)),
+
+ Algs = proplists:get_value(preferred_algorithms, Config),
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{max_random_length_padding,0},
+ {preferred_algorithms,Algs}]),
+
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, Limit},
+ {max_random_length_padding,0}]),
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+
+ Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ {ok,_} = ssh_sftp:read_file(SftpPid, DataFile),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ ssh_sftp:stop_channel(SftpPid),
+ ssh:close(ConnectionRef),
+ ssh:stop_daemon(Pid).
+
+%% Check that data transfer in the other direction does not trigger re-keying
+norekey_limit_daemon() -> [{timetrap,{seconds,400}}].
+norekey_limit_daemon(Config) ->
+ Limit = 6000,
+ UserDir = proplists:get_value(priv_dir, Config),
+ DataFile = filename:join(UserDir, "rekey4.data"),
+
+ Algs = proplists:get_value(preferred_algorithms, Config),
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{rekey_limit, Limit},
+ {max_random_length_padding,0},
+ {preferred_algorithms,Algs}]),
+
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{max_random_length_padding,0}]),
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+
+ Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ ok = ssh_sftp:write_file(SftpPid, DataFile, lists:duplicate(Limit+10,1)),
+ timer:sleep(?REKEY_DATA_TMO),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ ssh_sftp:stop_channel(SftpPid),
+ ssh:close(ConnectionRef),
+ ssh:stop_daemon(Pid).
+
+%%--------------------------------------------------------------------
+%%% Test rekeying by time
+
+rekey_time_limit_client() -> [{timetrap,{seconds,400}}].
+rekey_time_limit_client(Config) ->
+ Minutes = ?REKEY_DATA_TMO div 60000,
+ GB = 1024*1000*1000,
+ Algs = proplists:get_value(preferred_algorithms, Config),
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{max_random_length_padding,0},
+ {preferred_algorithms,Algs}]),
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, {Minutes, GB}},
+ {max_random_length_padding,0}]),
+ rekey_time_limit(Pid, ConnectionRef).
+
+rekey_time_limit_daemon() -> [{timetrap,{seconds,400}}].
+rekey_time_limit_daemon(Config) ->
+ Minutes = ?REKEY_DATA_TMO div 60000,
+ GB = 1024*1000*1000,
+ Algs = proplists:get_value(preferred_algorithms, Config),
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{rekey_limit, {Minutes, GB}},
+ {max_random_length_padding,0},
+ {preferred_algorithms,Algs}]),
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{max_random_length_padding,0}]),
+ rekey_time_limit(Pid, ConnectionRef).
+
+
+rekey_time_limit(Pid, ConnectionRef) ->
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+ Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+
+ timer:sleep(5000),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
+
+ %% Check that it rekeys when the max time + 30s has passed
+ timer:sleep(?REKEY_DATA_TMO + 30*1000),
+ ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10),
+
+ %% Check that it rekeys again by time even when nothing is transferred
+ timer:sleep(?REKEY_DATA_TMO + 30*1000),
+ ?wait_match(false, Kex2==ssh_test_lib:get_kex_init(ConnectionRef), [], 2000, 10),
ssh_sftp:stop_channel(SftpPid),
ssh:close(ConnectionRef),
@@ -1395,7 +1565,7 @@ rekey_limit(Config) ->
%%--------------------------------------------------------------------
-%%% Test rekeying with simulataneous send request
+%%% Test rekeying with simultaneous send request
renegotiate1(Config) ->
UserDir = proplists:get_value(priv_dir, Config),
diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE.erl b/lib/ssh/test/ssh_chan_behaviours_SUITE.erl
new file mode 100644
index 0000000000..16ed152bcd
--- /dev/null
+++ b/lib/ssh/test/ssh_chan_behaviours_SUITE.erl
@@ -0,0 +1,152 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+
+-module(ssh_chan_behaviours_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("ssh/src/ssh.hrl").
+-include("ssh_test_lib.hrl").
+
+%% Note: This directive should only be used in test suites.
+-compile(export_all).
+
+%%--------------------------------------------------------------------
+%% Common Test interface functions -----------------------------------
+%%--------------------------------------------------------------------
+
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap,{seconds,60}}].
+
+all() ->
+ [
+ noexist_subsystem,
+ undefined_subsystem,
+ defined_subsystem,
+ subsystem_client
+ ].
+
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ ?CHECK_CRYPTO(
+ begin
+ ssh:start(),
+ Config
+ end).
+
+end_per_suite(_Config) ->
+ {Time,R} = timer:tc(ssh, stop, []),
+ ct:log("Stop ssh: ~p ms",[(100*(Time div 1000)) / 100]),
+ R.
+
+init_per_testcase(_TC, Config) ->
+ SubSystems = [
+ {"bad_cb", {ssh_chan_behaviours_undefined, []}}, % A non-existing file
+ {"ch1", {ssh_chan_behaviours_server, [self(),true]}}
+ ],
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config, [{subsystems,SubSystems}]),
+ C = ssh_test_lib:std_connect(Config, Host, Port, []),
+ [{connref,C}, {daemon_pid,Pid}| Config].
+
+end_per_testcase(_TC, Config) ->
+ {Time,_} = timer:tc(ssh, stop_daemon, [proplists:get_value(daemon_pid,Config)]),
+ ct:log("Stop daemon: ~p ms",[(100*(Time div 1000)) / 100]),
+ case flush() of
+ [] -> ok;
+ Msgs -> ct:pal("Unhandled messages:~n~p", [Msgs])
+ end.
+
+
+-define(EXPECT(What, Bind),
+ Bind =
+ (fun() ->
+ receive What ->
+ ct:log("~p:~p ~p got ~p",[?MODULE,?LINE,self(),What]),
+ Bind
+ after 5000 ->
+ ct:log("~p:~p ~p Flushed:~n~p",[?MODULE,?LINE,self(),flush()]),
+ ct:fail("Timeout!",[])
+ end
+ end)()
+ ).
+
+%%--------------------------------------------------------------------
+%% Test Cases --------------------------------------------------------
+%%--------------------------------------------------------------------
+%% Try to start a subsystem whose name is not known by the server
+noexist_subsystem(Config) ->
+ C = proplists:get_value(connref, Config),
+ {ok, Ch} = ssh_connection:session_channel(C, infinity),
+ failure = ssh_connection:subsystem(C, Ch, "noexist", infinity),
+ ok = ssh_connection:close(C, Ch),
+ ?EXPECT({ssh_cm,C,{closed,Ch}},[]),
+ ok.
+
+%% Try to start a subsystem with a known name, but without an existing callback module
+undefined_subsystem(Config) ->
+ C = proplists:get_value(connref, Config),
+ {ok, Ch} = ssh_connection:session_channel(C, infinity),
+ failure = ssh_connection:subsystem(C, Ch, "bad_cb", infinity),
+ ok = ssh_connection:close(C, Ch),
+ ?EXPECT({ssh_cm,C,{closed,Ch}},[]), % self() is used in place of a proper channel handler
+ ok.
+
+%% Try to start and stop a subsystem with a known name and a defined callback module
+defined_subsystem(Config) ->
+ C = proplists:get_value(connref, Config),
+ {ok, Ch1} = ssh_connection:session_channel(C, infinity),
+
+ success = ssh_connection:subsystem(C, Ch1, "ch1", infinity),
+ IDsrv = ?EXPECT({{_Csrv,_Ch1srv}, {ssh_channel_up,_Ch1srv,_Csrv}}, {_Csrv,_Ch1srv}),
+
+ ok = ssh_connection:close(C, Ch1),
+ ?EXPECT({IDsrv, {terminate,normal}}, []),
+ ?EXPECT({ssh_cm, C, {closed,Ch1}}, []), % self() is used in place of a proper channel handler
+ ok.
+
+%% Try to start and stop a subsystem from an ssh_client_channel behaviour
+subsystem_client(Config) ->
+ C = proplists:get_value(connref, Config),
+
+ {ok,ChRef} = ssh_chan_behaviours_client:start_link(C),
+ IDclt = ?EXPECT({{C,Ch1clt}, {ssh_channel_up,Ch1clt,C}}, {C,Ch1clt}),
+ IDsrv = ?EXPECT({{_Csrv,Ch1srv}, {ssh_channel_up,Ch1srv,_Csrv}}, {_Csrv,Ch1srv}),
+
+ ok = ssh_chan_behaviours_client:stop(ChRef),
+ ?EXPECT({IDclt, {terminate,normal}}, []), % From the proper channel handler
+ ?EXPECT({IDsrv, {terminate,normal}}, []),
+ ok.
+
+%%%================================================================
+%%%
+%%%
+
+flush() -> lists:reverse(flush([])).
+
+flush(Acc) ->
+ receive
+ M ->
+ flush([M|Acc])
+ after 0 ->
+ Acc
+ end.
+
diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key
new file mode 100644
index 0000000000..51ab6fbd88
--- /dev/null
+++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key
@@ -0,0 +1,13 @@
+-----BEGIN DSA PRIVATE KEY-----
+MIIBuwIBAAKBgQCClaHzE2ul0gKSUxah5W0W8UiJLy4hXngKEqpaUq9SSdVdY2LK
+wVfKH1gt5iuaf1FfzOhsIC9G/GLnjYttXZc92cv/Gfe3gR+s0ni2++MX+T++mE/Q
+diltXv/Hp27PybS67SmiFW7I+RWnT2OKlMPtw2oUuKeztCe5UWjaj/y5FQIVAPLA
+l9RpiU30Z87NRAHY3NTRaqtrAoGANMRxw8UfdtNVR0CrQj3AgPaXOGE4d+G4Gp4X
+skvnCHycSVAjtYxebUkzUzt5Q6f/IabuLUdge3gXrc8BetvrcKbp+XZgM0/Vj2CF
+Ymmy3in6kzGZq7Fw1sZaku6AOU8vLa5woBT2vAcHLLT1bLAzj7viL048T6MfjrOP
+ef8nHvACgYBhDWFQJ1mf99sg92LalVq1dHLmVXb3PTJDfCO/Gz5NFmj9EZbAtdah
+/XcF3DeRF+eEoz48wQF/ExVxSMIhLdL+o+ElpVhlM7Yii+T7dPhkQfEul6zZXu+U
+ykSTXYUbtsfTNRFQGBW2/GfnEc0mnIxfn9v10NEWMzlq5z9wT9P0CgIVAN4wtL5W
+Lv62jKcdskxNyz2NQoBx
+-----END DSA PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key.pub b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key.pub
new file mode 100644
index 0000000000..4dbb1305b0
--- /dev/null
+++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_dsa_key.pub
@@ -0,0 +1,11 @@
+---- BEGIN SSH2 PUBLIC KEY ----
+AAAAB3NzaC1kc3MAAACBAIKVofMTa6XSApJTFqHlbRbxSIkvLiFeeAoSqlpSr1JJ1V1j
+YsrBV8ofWC3mK5p/UV/M6GwgL0b8YueNi21dlz3Zy/8Z97eBH6zSeLb74xf5P76YT9B2
+KW1e/8enbs/JtLrtKaIVbsj5FadPY4qUw+3DahS4p7O0J7lRaNqP/LkVAAAAFQDywJfU
+aYlN9GfOzUQB2NzU0WqrawAAAIA0xHHDxR9201VHQKtCPcCA9pc4YTh34bganheyS+cI
+fJxJUCO1jF5tSTNTO3lDp/8hpu4tR2B7eBetzwF62+twpun5dmAzT9WPYIViabLeKfqT
+MZmrsXDWxlqS7oA5Ty8trnCgFPa8BwcstPVssDOPu+IvTjxPox+Os495/yce8AAAAIBh
+DWFQJ1mf99sg92LalVq1dHLmVXb3PTJDfCO/Gz5NFmj9EZbAtdah/XcF3DeRF+eEoz48
+wQF/ExVxSMIhLdL+o+ElpVhlM7Yii+T7dPhkQfEul6zZXu+UykSTXYUbtsfTNRFQGBW2
+/GfnEc0mnIxfn9v10NEWMzlq5z9wT9P0Cg==
+---- END SSH2 PUBLIC KEY ----
diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key
new file mode 100644
index 0000000000..fb1a862ded
--- /dev/null
+++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key
@@ -0,0 +1,6 @@
+-----BEGIN EC PRIVATE KEY-----
+MIGkAgEBBDArxbDfh3p1okrD9wQw6jJ4d4DdlBPD5GqXE8bIeRJiK41Sh40LgvPw
+mkqEDSXK++CgBwYFK4EEACKhZANiAAScl43Ih2lWTDKrSox5ve5uiTXil4smsup3
+CfS1XPjKxgBAmlfBim8izbdrT0BFdQzz2joduNMtpt61wO4rGs6jm0UP7Kim9PC7
+Hneb/99fIYopdMH5NMnk60zGO1uZ2vc=
+-----END EC PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key.pub b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key.pub
new file mode 100644
index 0000000000..428d5fb7d7
--- /dev/null
+++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_ecdsa_key.pub
@@ -0,0 +1 @@
+ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBJyXjciHaVZMMqtKjHm97m6JNeKXiyay6ncJ9LVc+MrGAECaV8GKbyLNt2tPQEV1DPPaOh240y2m3rXA7isazqObRQ/sqKb08Lsed5v/318hiil0wfk0yeTrTMY7W5na9w== uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key
new file mode 100644
index 0000000000..79968bdd7d
--- /dev/null
+++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key
@@ -0,0 +1,16 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQDCZX+4FBDwZIh9y/Uxee1VJnEXlowpz2yDKwj8semM4q843337
+zbNfxHmladB1lpz2NqyxI175xMIJuDxogyZdsOxGnFAzAnthR4dqL/RWRWzjaxSB
+6IAO9SPYVVlrpZ+1hsjLW79fwXK/yc8VdhRuWTeQiRgYY2ek8+OKbOqz4QIDAQAB
+AoGANmvJzJO5hkLuvyDZHKfAnGTtpifcR1wtSa9DjdKUyn8vhKF0mIimnbnYQEmW
+NUUb3gXCZLi9PvkpRSVRrASDOZwcjoU/Kvww163vBUVb2cOZfFhyn6o2Sk88Tt++
+udH3hdjpf9i7jTtUkUe+QYPsia+wgvvrmn4QrahLAH86+kECQQDx5gFeXTME3cnW
+WMpFz3PPumduzjqgqMMWEccX4FtQkMX/gyGa5UC7OHFyh0N/gSWvPbRHa8A6YgIt
+n8DO+fh5AkEAzbqX4DOn8NY6xJIi42q7l/2jIA0RkB6P7YugW5NblhqBZ0XDnpA5
+sMt+rz+K07u9XZtxgh1xi7mNfwY6lEAMqQJBAJBEauCKmRj35Z6OyeQku59SPsnY
++SJEREVvSNw2lH9SOKQQ4wPsYlTGbvKtNVZgAcen91L5MmYfeckYE/fdIZECQQCt
+64zxsTnM1I8iFxj/gP/OYlJBikrKt8udWmjaghzvLMEw+T2DExJyb9ZNeT53+UMB
+m6O+B/4xzU/djvp+0hbhAkAemIt+rA5kTmYlFndhpvzkSSM8a2EXsO4XIPgGWCTT
+tQKS/tTly0ADMjN/TVy11+9d6zcqadNVuHXHGtR4W0GR
+-----END RSA PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key.pub b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key.pub
new file mode 100644
index 0000000000..75d2025c71
--- /dev/null
+++ b/lib/ssh/test/ssh_chan_behaviours_SUITE_data/ssh_host_rsa_key.pub
@@ -0,0 +1,5 @@
+---- BEGIN SSH2 PUBLIC KEY ----
+AAAAB3NzaC1yc2EAAAADAQABAAAAgQDCZX+4FBDwZIh9y/Uxee1VJnEXlowpz2yDKwj8
+semM4q843337zbNfxHmladB1lpz2NqyxI175xMIJuDxogyZdsOxGnFAzAnthR4dqL/RW
+RWzjaxSB6IAO9SPYVVlrpZ+1hsjLW79fwXK/yc8VdhRuWTeQiRgYY2ek8+OKbOqz4Q==
+---- END SSH2 PUBLIC KEY ----
diff --git a/lib/ssh/test/ssh_chan_behaviours_client.erl b/lib/ssh/test/ssh_chan_behaviours_client.erl
new file mode 100644
index 0000000000..07ac21ba97
--- /dev/null
+++ b/lib/ssh/test/ssh_chan_behaviours_client.erl
@@ -0,0 +1,143 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2005-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+
+%%% Description: Example ssh client
+-module(ssh_chan_behaviours_client).
+-behaviour(ssh_client_channel).
+-record(state, {
+ parent,
+ cm,
+ ch,
+ dbg
+ }).
+-export([start_link/1, start/1,
+ stop/1, send_eof/1,
+ init/1, handle_msg/2, handle_ssh_msg/2, terminate/2,
+ code_change/3, handle_call/3, handle_cast/2
+ ]).
+
+-define(DBG(State,Fmt,Args),
+ case State#state.dbg of
+ true -> ct:log("~p:~p ~p C=~p Ch=~p "++Fmt,
+ [?MODULE,?LINE,self(),State#state.cm,State#state.ch|Args]);
+ false -> ok
+ end).
+
+
+start_link(C) ->
+ {ok, Ch} = ssh_connection:session_channel(C, infinity),
+ ssh_client_channel:start_link(C, Ch, ssh_chan_behaviours_client, [C, Ch, self(), true]).
+
+start(C) ->
+ {ok, Ch} = ssh_connection:session_channel(C, infinity),
+ ssh_client_channel:start(C, Ch, ssh_chan_behaviours_client, [C, Ch, self(), true]).
+
+send_eof(ChRef) ->
+ ssh_client_channel:call(ChRef, send_eof).
+
+stop(ChRef) ->
+ ssh_client_channel:call(ChRef, stop).
+
+
+init([C, Ch, Parent, Dbg|_Exec]) ->
+ case ssh_connection:subsystem(C, Ch, "ch1", infinity) of
+ success ->
+ State = #state{cm = C,
+ ch = Ch,
+ parent=Parent,
+ dbg=Dbg},
+ ?DBG(State, "callback spawned, parent = ~p", [Parent]),
+ {ok, State};
+
+ Other ->
+ {stop, Other}
+ end.
+
+handle_msg({ssh_channel_up, ChannelId, ConnectionManager}=M, State0) ->
+ State = State0#state{cm = ConnectionManager,
+ ch = ChannelId},
+ tell_parent(M, State),
+ ?DBG(State, "ssh_channel_up",[]),
+ {ok, State}.
+
+handle_ssh_msg({ssh_cm, C, {data, Ch, 0, Data}}=M, #state{ch=Ch,cm=C} = State) ->
+ tell_parent(M, State),
+ ?DBG(State, "ssh_cm data size(Data)=~p",[size(Data)]),
+ {ok, State};
+
+handle_ssh_msg({ssh_cm, C, {data, Ch, Type, Data}}=M, #state{ch=Ch,cm=C} = State) ->
+ tell_parent(M, State),
+ ?DBG(State, "ssh_cm data Type=~p : ~p",[Type,Data]),
+ {ok, State};
+
+handle_ssh_msg({ssh_cm, C, {eof, Ch}}=M, #state{ch=Ch,cm=C} = State) ->
+ tell_parent(M, State),
+ ?DBG(State, "eof",[]),
+ {ok, State};
+
+handle_ssh_msg({ssh_cm, C, {signal, _Ch, _SigNameStr}=Sig} = M, #state{ch=Ch,cm=C} = State) ->
+ %% Ignore signals according to RFC 4254 section 6.9.
+ tell_parent(M, State),
+ ?DBG(State, "~p",[Sig]),
+ {ok, State};
+
+handle_ssh_msg({ssh_cm, C, {exit_signal, Ch, _, _Error, _}=Sig}=M, #state{ch=Ch,cm=C} = State) ->
+ tell_parent(M, State),
+ ?DBG(State, "~p",[Sig]),
+ {stop, Ch, State};
+
+handle_ssh_msg({ssh_cm, C, {exit_status, Ch, _Status}=Sig}=M, #state{ch=Ch,cm=C} = State) ->
+ tell_parent(M, State),
+ ?DBG(State, "~p",[Sig]),
+ {stop, Ch, State}.
+
+
+handle_call(send_eof, _From,#state{ch=Ch,cm=C} = State) ->
+ {reply, ssh_connection:send_eof(C,Ch), State};
+
+handle_call(stop, _From, State) ->
+ {stop, normal, ok, State};
+
+handle_call(Msg, _From, State) ->
+ ?DBG(State, "Unknown call ~p", [Msg]),
+ {reply, {unknown_call,Msg}, State}.
+
+
+terminate(Reason, State) ->
+ tell_parent({terminate,Reason}, State),
+ ?DBG(State, "terminate Reason = ~p",[Reason]).
+
+
+handle_cast(Msg, State) ->
+ ?DBG(State, "Unknown cast ~p", [Msg]),
+ {noreply, State}.
+
+code_change(_OldVsn, State, _Extra) -> {ok, State}.
+
+%%%================================================================
+%%%
+%%%
+
+tell_parent(Msg, #state{parent = Parent,
+ cm = C,
+ ch = Ch}) -> Parent ! {{C,Ch}, Msg}.
+
diff --git a/lib/ssh/test/ssh_chan_behaviours_server.erl b/lib/ssh/test/ssh_chan_behaviours_server.erl
new file mode 100644
index 0000000000..a5ec19e0cf
--- /dev/null
+++ b/lib/ssh/test/ssh_chan_behaviours_server.erl
@@ -0,0 +1,96 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2005-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+
+%%% Description: Example ssh server
+-module(ssh_chan_behaviours_server).
+-behaviour(ssh_server_channel).
+-record(state, {
+ parent,
+ cm,
+ ch,
+ dbg
+ }).
+-export([init/1, handle_msg/2, handle_ssh_msg/2, terminate/2]).
+
+-define(DBG(State,Fmt,Args),
+ case State#state.dbg of
+ true -> ct:log("~p:~p ~p C=~p Ch=~p "++Fmt,
+ [?MODULE,?LINE,self(),State#state.cm,State#state.ch|Args]);
+ false -> ok
+ end).
+
+
+init([Pid,Dbg|_Exec]) ->
+ {ok, #state{parent=Pid,
+ dbg=Dbg}}.
+
+handle_msg({ssh_channel_up, ChannelId, ConnectionManager}=M, State0) ->
+ State = State0#state{cm = ConnectionManager,
+ ch = ChannelId},
+ tell_parent(M, State),
+ ?DBG(State, "ssh_channel_up",[]),
+ {ok, State}.
+
+handle_ssh_msg({ssh_cm, C, {data, Ch, 0, Data}}=M, #state{ch=Ch,cm=C} = State) ->
+ tell_parent(M, State),
+ ?DBG(State, "ssh_cm data size(Data)=~p",[size(Data)]),
+ {ok, State};
+
+handle_ssh_msg({ssh_cm, C, {data, Ch, Type, Data}}=M, #state{ch=Ch,cm=C} = State) ->
+ tell_parent(M, State),
+ ?DBG(State, "ssh_cm data Type=~p : ~p",[Type,Data]),
+ {ok, State};
+
+handle_ssh_msg({ssh_cm, C, {eof, Ch}}=M, #state{ch=Ch,cm=C} = State) ->
+ tell_parent(M, State),
+ ?DBG(State, "eof",[]),
+ {ok, State};
+
+handle_ssh_msg({ssh_cm, C, {signal, _Ch, _SigNameStr}=Sig} = M, #state{ch=Ch,cm=C} = State) ->
+ %% Ignore signals according to RFC 4254 section 6.9.
+ tell_parent(M, State),
+ ?DBG(State, "~p",[Sig]),
+ {ok, State};
+
+handle_ssh_msg({ssh_cm, C, {exit_signal, Ch, _, _Error, _}=Sig}=M, #state{ch=Ch,cm=C} = State) ->
+ tell_parent(M, State),
+ ?DBG(State, "~p",[Sig]),
+ {stop, Ch, State};
+
+handle_ssh_msg({ssh_cm, C, {exit_status, Ch, _Status}=Sig}=M, #state{ch=Ch,cm=C} = State) ->
+ tell_parent(M, State),
+ ?DBG(State, "~p",[Sig]),
+ {stop, Ch, State}.
+
+terminate(Reason, State) ->
+ tell_parent({terminate,Reason}, State),
+ ?DBG(State, "terminate Reason = ~p",[Reason]),
+ ok.
+
+%%%================================================================
+%%%
+%%%
+
+tell_parent(Msg, #state{parent = Parent,
+ cm = C,
+ ch = Ch}) -> Parent ! {{C,Ch}, Msg}.
+
diff --git a/lib/ssh/test/ssh_test_lib.erl b/lib/ssh/test/ssh_test_lib.erl
index 57ae2dbac2..65970535f4 100644
--- a/lib/ssh/test/ssh_test_lib.erl
+++ b/lib/ssh/test/ssh_test_lib.erl
@@ -926,7 +926,7 @@ get_kex_init(Conn, Ref, TRef) ->
end;
false ->
- ct:log("Not in 'connected' state: ~p",[State]),
+ ct:log("~p:~p Not in 'connected' state: ~p",[?MODULE,?LINE,State]),
receive
{reneg_timeout,Ref} ->
ct:log("S = ~p", [S]),
diff --git a/lib/ssh/vsn.mk b/lib/ssh/vsn.mk
index f327d2ec11..f10e7aa96a 100644
--- a/lib/ssh/vsn.mk
+++ b/lib/ssh/vsn.mk
@@ -1,4 +1,5 @@
#-*-makefile-*- ; force emacs to enter makefile-mode
-SSH_VSN = 4.6.8
+SSH_VSN = 4.6.9
+
APP_VSN = "ssh-$(SSH_VSN)"
diff --git a/lib/ssl/doc/src/notes.xml b/lib/ssl/doc/src/notes.xml
index 4ad7da9486..34fe352d08 100644
--- a/lib/ssl/doc/src/notes.xml
+++ b/lib/ssl/doc/src/notes.xml
@@ -27,6 +27,22 @@
</header>
<p>This document describes the changes made to the SSL application.</p>
+<section><title>SSL 8.2.6</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Proper handling of clients that choose to send an empty
+ answer to a certificate request</p>
+ <p>
+ Own Id: OTP-15050</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>SSL 8.2.5</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/ssl/src/Makefile b/lib/ssl/src/Makefile
index 11b3e65912..c389aa8cfe 100644
--- a/lib/ssl/src/Makefile
+++ b/lib/ssl/src/Makefile
@@ -54,8 +54,8 @@ MODULES= \
ssl_connection_sup \
ssl_listen_tracker_sup\
dtls_connection_sup \
- dtls_udp_listener\
- dtls_udp_sup \
+ dtls_packet_demux \
+ dtls_listener_sup \
ssl_dist_sup\
ssl_dist_admin_sup\
ssl_dist_connection_sup\
diff --git a/lib/ssl/src/dtls_connection.erl b/lib/ssl/src/dtls_connection.erl
index 0fe568759d..4e3f65d9c6 100644
--- a/lib/ssl/src/dtls_connection.erl
+++ b/lib/ssl/src/dtls_connection.erl
@@ -137,9 +137,8 @@ next_record(#state{protocol_buffers =
Buffers#protocol_buffers{dtls_cipher_texts = Rest},
connection_states = ConnectionStates});
next_record(#state{role = server,
- socket = {Listener, {Client, _}},
- transport_cb = gen_udp} = State) ->
- dtls_udp_listener:active_once(Listener, Client, self()),
+ socket = {Listener, {Client, _}}} = State) ->
+ dtls_packet_demux:active_once(Listener, Client, self()),
{no_record, State};
next_record(#state{role = client,
socket = {_Server, Socket} = DTLSSocket,
@@ -448,7 +447,7 @@ init({call, From}, {start, Timeout},
},
{Record, State} = next_record(State3),
next_event(hello, Record, State, Actions);
-init({call, _} = Type, Event, #state{role = server, transport_cb = gen_udp} = State) ->
+init({call, _} = Type, Event, #state{role = server, data_tag = udp} = State) ->
Result = gen_handshake(?FUNCTION_NAME, Type, Event,
State#state{flight_state = {retransmit, ?INITIAL_RETRANSMIT_TIMEOUT},
protocol_specific = #{current_cookie_secret => dtls_v1:cookie_secret(),
@@ -922,7 +921,7 @@ handle_alerts([Alert | Alerts], {next_state, StateName, State}) ->
handle_alerts([Alert | Alerts], {next_state, StateName, State, _Actions}) ->
handle_alerts(Alerts, ssl_connection:handle_alert(Alert, StateName, State)).
-handle_own_alert(Alert, Version, StateName, #state{transport_cb = gen_udp,
+handle_own_alert(Alert, Version, StateName, #state{data_tag = udp,
role = Role,
ssl_options = Options} = State0) ->
case ignore_alert(Alert, State0) of
@@ -1013,10 +1012,10 @@ next_flight(Flight) ->
change_cipher_spec => undefined,
handshakes_after_change_cipher_spec => []}.
-handle_flight_timer(#state{transport_cb = gen_udp,
+handle_flight_timer(#state{data_tag = udp,
flight_state = {retransmit, Timeout}} = State) ->
start_retransmision_timer(Timeout, State);
-handle_flight_timer(#state{transport_cb = gen_udp,
+handle_flight_timer(#state{data_tag = udp,
flight_state = connection} = State) ->
{State, []};
handle_flight_timer(State) ->
diff --git a/lib/ssl/src/dtls_udp_sup.erl b/lib/ssl/src/dtls_listener_sup.erl
index 197882e92f..6939f1ef3b 100644
--- a/lib/ssl/src/dtls_udp_sup.erl
+++ b/lib/ssl/src/dtls_listener_sup.erl
@@ -23,7 +23,7 @@
%% Purpose: Supervisor for processes dispatching UDP datagrams to the
%% correct DTLS handler
%%----------------------------------------------------------------------
--module(dtls_udp_sup).
+-module(dtls_listener_sup).
-behaviour(supervisor).
@@ -52,10 +52,10 @@ init(_O) ->
MaxT = 3600,
Name = undefined, % As simple_one_for_one is used.
- StartFunc = {dtls_udp_listener, start_link, []},
+ StartFunc = {dtls_packet_demux, start_link, []},
Restart = temporary, % E.g. should not be restarted
Shutdown = 4000,
- Modules = [dtls_udp_listener],
+ Modules = [dtls_packet_demux],
Type = worker,
ChildSpec = {Name, StartFunc, Restart, Shutdown, Type, Modules},
diff --git a/lib/ssl/src/dtls_udp_listener.erl b/lib/ssl/src/dtls_packet_demux.erl
index 0608c6bd2b..1672626165 100644
--- a/lib/ssl/src/dtls_udp_listener.erl
+++ b/lib/ssl/src/dtls_packet_demux.erl
@@ -19,15 +19,15 @@
%%
--module(dtls_udp_listener).
+-module(dtls_packet_demux).
-behaviour(gen_server).
-include("ssl_internal.hrl").
%% API
--export([start_link/4, active_once/3, accept/2, sockname/1, close/1,
- get_all_opts/1, get_sock_opts/2, set_sock_opts/2]).
+-export([start_link/5, active_once/3, accept/2, sockname/1, close/1,
+ get_all_opts/1, get_sock_opts/2, set_sock_opts/2]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
@@ -36,6 +36,7 @@
-record(state,
{port,
listener,
+ transport,
dtls_options,
emulated_options,
dtls_msq_queues = kv_new(),
@@ -50,35 +51,36 @@
%%% API
%%%===================================================================
-start_link(Port, EmOpts, InetOptions, DTLSOptions) ->
- gen_server:start_link(?MODULE, [Port, EmOpts, InetOptions, DTLSOptions], []).
+start_link(Port, TransportInfo, EmOpts, InetOptions, DTLSOptions) ->
+ gen_server:start_link(?MODULE, [Port, TransportInfo, EmOpts, InetOptions, DTLSOptions], []).
-active_once(UDPConnection, Client, Pid) ->
- gen_server:cast(UDPConnection, {active_once, Client, Pid}).
+active_once(PacketSocket, Client, Pid) ->
+ gen_server:cast(PacketSocket, {active_once, Client, Pid}).
-accept(UDPConnection, Accepter) ->
- call(UDPConnection, {accept, Accepter}).
+accept(PacketSocket, Accepter) ->
+ call(PacketSocket, {accept, Accepter}).
-sockname(UDPConnection) ->
- call(UDPConnection, sockname).
-close(UDPConnection) ->
- call(UDPConnection, close).
-get_sock_opts(UDPConnection, SplitSockOpts) ->
- call(UDPConnection, {get_sock_opts, SplitSockOpts}).
-get_all_opts(UDPConnection) ->
- call(UDPConnection, get_all_opts).
-set_sock_opts(UDPConnection, Opts) ->
- call(UDPConnection, {set_sock_opts, Opts}).
+sockname(PacketSocket) ->
+ call(PacketSocket, sockname).
+close(PacketSocket) ->
+ call(PacketSocket, close).
+get_sock_opts(PacketSocket, SplitSockOpts) ->
+ call(PacketSocket, {get_sock_opts, SplitSockOpts}).
+get_all_opts(PacketSocket) ->
+ call(PacketSocket, get_all_opts).
+set_sock_opts(PacketSocket, Opts) ->
+ call(PacketSocket, {set_sock_opts, Opts}).
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
-init([Port, EmOpts, InetOptions, DTLSOptions]) ->
+init([Port, {TransportModule, _,_,_} = TransportInfo, EmOpts, InetOptions, DTLSOptions]) ->
try
- {ok, Socket} = gen_udp:open(Port, InetOptions),
+ {ok, Socket} = TransportModule:open(Port, InetOptions),
{ok, #state{port = Port,
first = true,
+ transport = TransportInfo,
dtls_options = DTLSOptions,
emulated_options = EmOpts,
listener = Socket,
@@ -134,20 +136,20 @@ handle_cast({active_once, Client, Pid}, State0) ->
State = handle_active_once(Client, Pid, State0),
{noreply, State}.
-handle_info({udp, Socket, IP, InPortNo, _} = Msg, #state{listener = Socket} = State0) ->
+handle_info({Transport, Socket, IP, InPortNo, _} = Msg, #state{listener = Socket, transport = {_,Transport,_,_}} = State0) ->
State = handle_datagram({IP, InPortNo}, Msg, State0),
next_datagram(Socket),
{noreply, State};
%% UDP socket does not have a connection and should not receive an econnreset
-%% This does however happens on on some windows versions. Just ignoring it
+%% This does, however, happen on some Windows versions. Just ignoring it
%% appears to make things work as expected!
-handle_info({udp_error, Socket, econnreset = Error}, #state{listener = Socket} = State) ->
+handle_info({Error, Socket, econnreset = Error}, #state{listener = Socket, transport = {_,_,_, udp_error}} = State) ->
Report = io_lib:format("Ignore SSL UDP Listener: Socket error: ~p ~n", [Error]),
error_logger:info_report(Report),
{noreply, State};
-handle_info({udp_error, Socket, Error}, #state{listener = Socket} = State) ->
- Report = io_lib:format("SSL UDP Listener shutdown: Socket error: ~p ~n", [Error]),
+handle_info({Error, Socket, Error}, #state{listener = Socket, transport = {_,_,_, Error}} = State) ->
+ Report = io_lib:format("SSL packet multiplexer shutdown: Socket error: ~p ~n", [Error]),
error_logger:info_report(Report),
{noreply, State#state{close=true}};
@@ -231,7 +233,7 @@ setup_new_connection(User, From, Client, Msg, #state{dtls_processes = Processes,
listener = Socket,
emulated_options = EmOpts} = State) ->
ConnArgs = [server, "localhost", Port, {self(), {Client, Socket}},
- {DTLSOpts, EmOpts, udp_listener}, User, dtls_socket:default_cb_info()],
+ {DTLSOpts, EmOpts, dtls_listener}, User, dtls_socket:default_cb_info()],
case dtls_connection_sup:start_child(ConnArgs) of
{ok, Pid} ->
erlang:monitor(process, Pid),
diff --git a/lib/ssl/src/dtls_socket.erl b/lib/ssl/src/dtls_socket.erl
index 0e4ab089dc..8dd62bc352 100644
--- a/lib/ssl/src/dtls_socket.erl
+++ b/lib/ssl/src/dtls_socket.erl
@@ -22,31 +22,31 @@
-include("ssl_internal.hrl").
-include("ssl_api.hrl").
--export([send/3, listen/3, accept/3, connect/4, socket/4, setopts/3, getopts/3, getstat/3,
+-export([send/3, listen/2, accept/3, connect/4, socket/4, setopts/3, getopts/3, getstat/3,
peername/2, sockname/2, port/2, close/2]).
-export([emulated_options/0, emulated_options/1, internal_inet_values/0, default_inet_values/0, default_cb_info/0]).
send(Transport, {{IP,Port},Socket}, Data) ->
Transport:send(Socket, IP, Port, Data).
-listen(gen_udp = Transport, Port, #config{transport_info = {Transport, _, _, _},
- ssl = SslOpts,
- emulated = EmOpts,
- inet_user = Options} = Config) ->
+listen(Port, #config{transport_info = TransportInfo,
+ ssl = SslOpts,
+ emulated = EmOpts,
+ inet_user = Options} = Config) ->
- case dtls_udp_sup:start_child([Port, emulated_socket_options(EmOpts, #socket_options{}),
+ case dtls_listener_sup:start_child([Port, TransportInfo, emulated_socket_options(EmOpts, #socket_options{}),
Options ++ internal_inet_values(), SslOpts]) of
{ok, Pid} ->
- {ok, #sslsocket{pid = {udp, Config#config{udp_handler = {Pid, Port}}}}};
+ {ok, #sslsocket{pid = {dtls, Config#config{dtls_handler = {Pid, Port}}}}};
Err = {error, _} ->
Err
end.
-accept(udp, #config{transport_info = {Transport = gen_udp,_,_,_},
+accept(dtls, #config{transport_info = {Transport,_,_,_},
connection_cb = ConnectionCb,
- udp_handler = {Listner, _}}, _Timeout) ->
- case dtls_udp_listener:accept(Listner, self()) of
+ dtls_handler = {Listner, _}}, _Timeout) ->
+ case dtls_packet_demux:accept(Listner, self()) of
{ok, Pid, Socket} ->
{ok, socket(Pid, Transport, {Listner, Socket}, ConnectionCb)};
{error, Reason} ->
@@ -69,7 +69,9 @@ connect(Address, Port, #config{transport_info = {Transport, _, _, _} = CbInfo,
end.
close(gen_udp, {_Client, _Socket}) ->
- ok.
+ ok;
+close(Transport, {_Client, Socket}) ->
+ Transport:close(Socket).
socket(Pid, gen_udp = Transport, {{_, _}, Socket}, ConnectionCb) ->
#sslsocket{pid = Pid,
@@ -79,18 +81,18 @@ socket(Pid, Transport, Socket, ConnectionCb) ->
#sslsocket{pid = Pid,
%% "The name "fd" is keept for backwards compatibility
fd = {Transport, Socket, ConnectionCb}}.
-setopts(_, #sslsocket{pid = {udp, #config{udp_handler = {ListenPid, _}}}}, Options) ->
+setopts(_, #sslsocket{pid = {dtls, #config{dtls_handler = {ListenPid, _}}}}, Options) ->
SplitOpts = tls_socket:split_options(Options),
- dtls_udp_listener:set_sock_opts(ListenPid, SplitOpts);
+ dtls_packet_demux:set_sock_opts(ListenPid, SplitOpts);
%%% Following clauses will not be called for emulated options, they are handled in the connection process
setopts(gen_udp, Socket, Options) ->
inet:setopts(Socket, Options);
setopts(Transport, Socket, Options) ->
Transport:setopts(Socket, Options).
-getopts(_, #sslsocket{pid = {udp, #config{udp_handler = {ListenPid, _}}}}, Options) ->
+getopts(_, #sslsocket{pid = {dtls, #config{dtls_handler = {ListenPid, _}}}}, Options) ->
SplitOpts = tls_socket:split_options(Options),
- dtls_udp_listener:get_sock_opts(ListenPid, SplitOpts);
+ dtls_packet_demux:get_sock_opts(ListenPid, SplitOpts);
getopts(gen_udp, #sslsocket{pid = {Socket, #config{emulated = EmOpts}}}, Options) ->
{SockOptNames, EmulatedOptNames} = tls_socket:split_options(Options),
EmulatedOpts = get_emulated_opts(EmOpts, EmulatedOptNames),
@@ -112,7 +114,7 @@ getstat(gen_udp, {_,Socket}, Options) ->
inet:getstat(Socket, Options);
getstat(Transport, Socket, Options) ->
Transport:getstat(Socket, Options).
-peername(udp, _) ->
+peername(_, undefined) ->
{error, enotconn};
peername(gen_udp, {_, {Client, _Socket}}) ->
{ok, Client};
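dtls_socket:listen/2 above no longer takes the transport module as an argument; it hands the whole transport_info tuple from #config{} to dtls_listener_sup and tags the resulting listen socket with dtls/dtls_handler. The tuple is assumed to follow the four-element callback-info convention; the gen_udp flavour expected from dtls_socket:default_cb_info/0 would look like this in a shell (shown for illustration, not taken from this diff):

    1> CbInfo = {gen_udp, udp, udp_closed, udp_error}.
    {gen_udp,udp,udp_closed,udp_error}
    2> {CbModule, DataTag, ClosedTag, ErrTag} = CbInfo.
    {gen_udp,udp,udp_closed,udp_error}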
diff --git a/lib/ssl/src/inet_tls_dist.erl b/lib/ssl/src/inet_tls_dist.erl
index 3e9828a2fe..a6ceff25cb 100644
--- a/lib/ssl/src/inet_tls_dist.erl
+++ b/lib/ssl/src/inet_tls_dist.erl
@@ -518,51 +518,16 @@ gen_setup(Driver, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
{Name, Address} = split_node(Driver, Node, LongOrShortNames),
- case Driver:getaddr(Address) of
+ ErlEpmd = net_kernel:epmd_module(),
+ {ARMod, ARFun} = get_address_resolver(ErlEpmd, Driver),
+ Timer = trace(dist_util:start_timer(SetupTime)),
+ case ARMod:ARFun(Name,Address,Driver:family()) of
+ {ok, Ip, TcpPort, Version} ->
+ do_setup_connect(Driver, Kernel, Node, Address, Ip, TcpPort, Version, Type, MyNode, Timer);
{ok, Ip} ->
- Timer = trace(dist_util:start_timer(SetupTime)),
- ErlEpmd = net_kernel:epmd_module(),
case ErlEpmd:port_please(Name, Ip) of
{port, TcpPort, Version} ->
- Opts =
- trace(
- connect_options(
- %%
- %% Use verify_server/3 to verify that
- %% the server's certificate is for Node
- %%
- setup_verify_server(
- get_ssl_options(client), Node))),
- dist_util:reset_timer(Timer),
- case ssl:connect(
- Address, TcpPort,
- [binary, {active, false}, {packet, 4},
- Driver:family(), nodelay()] ++ Opts,
- net_kernel:connecttime()) of
- {ok, #sslsocket{pid = DistCtrl} = SslSocket} ->
- _ = monitor_pid(DistCtrl),
- ok = ssl:controlling_process(SslSocket, self()),
- HSData0 = hs_data_common(SslSocket),
- HSData =
- HSData0#hs_data{
- kernel_pid = Kernel,
- other_node = Node,
- this_node = MyNode,
- socket = DistCtrl,
- timer = Timer,
- this_flags = 0,
- other_version = Version,
- request_type = Type},
- link(DistCtrl),
- dist_util:handshake_we_started(trace(HSData));
- Other ->
- %% Other Node may have closed since
- %% port_please !
- ?shutdown2(
- Node,
- trace(
- {ssl_connect_failed, Ip, TcpPort, Other}))
- end;
+ do_setup_connect(Driver, Kernel, Node, Address, Ip, TcpPort, Version, Type, MyNode, Timer);
Other ->
?shutdown2(
Node,
@@ -575,6 +540,47 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
trace({getaddr_failed, Driver, Address, Other}))
end.
+do_setup_connect(Driver, Kernel, Node, Address, Ip, TcpPort, Version, Type, MyNode, Timer) ->
+ Opts =
+ trace(
+ connect_options(
+ %%
+ %% Use verify_server/3 to verify that
+ %% the server's certificate is for Node
+ %%
+ setup_verify_server(
+ get_ssl_options(client), Node))),
+ dist_util:reset_timer(Timer),
+ case ssl:connect(
+ Address, TcpPort,
+ [binary, {active, false}, {packet, 4},
+ Driver:family(), nodelay()] ++ Opts,
+ net_kernel:connecttime()) of
+ {ok, #sslsocket{pid = DistCtrl} = SslSocket} ->
+ _ = monitor_pid(DistCtrl),
+ ok = ssl:controlling_process(SslSocket, self()),
+ HSData0 = hs_data_common(SslSocket),
+ HSData =
+ HSData0#hs_data{
+ kernel_pid = Kernel,
+ other_node = Node,
+ this_node = MyNode,
+ socket = DistCtrl,
+ timer = Timer,
+ this_flags = 0,
+ other_version = Version,
+ request_type = Type},
+ link(DistCtrl),
+ dist_util:handshake_we_started(trace(HSData));
+ Other ->
+ %% Other Node may have closed since
+ %% port_please !
+ ?shutdown2(
+ Node,
+ trace(
+ {ssl_connect_failed, Ip, TcpPort, Other}))
+ end.
+
close(Socket) ->
gen_close(inet, Socket).
@@ -644,6 +650,16 @@ verify_server(PeerCert, valid_peer, {CertNodesFun,Node} = S) ->
%% ------------------------------------------------------------
+%% Determine if the EPMD module supports address resolution. The default
+%% is to fall back to the distribution driver's getaddr/2 (e.g. inet_tcp:getaddr/2).
+%% ------------------------------------------------------------
+get_address_resolver(EpmdModule, Driver) ->
+ case erlang:function_exported(EpmdModule, address_please, 3) of
+ true -> {EpmdModule, address_please};
+ _ -> {Driver, getaddr}
+ end.
+
+%% ------------------------------------------------------------
%% Do only accept new connection attempts from nodes at our
%% own LAN, if the check_ip environment parameter is true.
%% ------------------------------------------------------------
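get_address_resolver/2 above only selects EpmdModule:address_please/3 when that function is exported; otherwise setup keeps using Driver:getaddr/2 plus port_please/2. A hypothetical alternative discovery module (module name, host, and port values are examples, not part of this change) could provide the combined lookup like this:

    %% Hypothetical module: address_please/3 may return the distribution port
    %% and protocol version directly, letting do_setup/7 skip port_please/2.
    -module(my_epmd).
    -export([address_please/3]).

    address_please(_Name, "node1.example.com", inet) ->
        {ok, {192,168,1,10}, 4370, 5};
    address_please(_Name, Host, AddressFamily) ->
        %% Plain address resolution; the caller then asks EPMD for the port.
        inet:getaddr(Host, AddressFamily).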
diff --git a/lib/ssl/src/ssl.app.src b/lib/ssl/src/ssl.app.src
index 2aecb6836e..da281829cb 100644
--- a/lib/ssl/src/ssl.app.src
+++ b/lib/ssl/src/ssl.app.src
@@ -17,8 +17,8 @@
dtls_socket,
dtls_v1,
dtls_connection_sup,
- dtls_udp_listener,
- dtls_udp_sup,
+ dtls_packet_demux,
+ dtls_listener_sup,
%% API
ssl, %% Main API
tls, %% TLS specific
diff --git a/lib/ssl/src/ssl.appup.src b/lib/ssl/src/ssl.appup.src
index 4ad2a2f1fd..bfdd0c205b 100644
--- a/lib/ssl/src/ssl.appup.src
+++ b/lib/ssl/src/ssl.appup.src
@@ -1,7 +1,6 @@
%% -*- erlang -*-
{"%VSN%",
[
- {<<"8.2.4">>, [{load_module, ssl_cipher, soft_purge, soft_purge, []}]},
{<<"8\\..*">>, [{restart_application, ssl}]},
{<<"7\\..*">>, [{restart_application, ssl}]},
{<<"6\\..*">>, [{restart_application, ssl}]},
@@ -10,7 +9,6 @@
{<<"3\\..*">>, [{restart_application, ssl}]}
],
[
- {<<"8.2.4">>, [{load_module, ssl_cipher, soft_purge, soft_purge, []}]},
{<<"8\\..*">>, [{restart_application, ssl}]},
{<<"7\\..*">>, [{restart_application, ssl}]},
{<<"6\\..*">>, [{restart_application, ssl}]},
diff --git a/lib/ssl/src/ssl.erl b/lib/ssl/src/ssl.erl
index 5b6d92ebf4..a7b4ec2bf7 100644
--- a/lib/ssl/src/ssl.erl
+++ b/lib/ssl/src/ssl.erl
@@ -23,9 +23,17 @@
%%% Purpose : Main API module for SSL see also tls.erl and dtls.erl
-module(ssl).
--include("ssl_internal.hrl").
+
-include_lib("public_key/include/public_key.hrl").
+-include("ssl_internal.hrl").
+-include("ssl_api.hrl").
+-include("ssl_internal.hrl").
+-include("ssl_record.hrl").
+-include("ssl_cipher.hrl").
+-include("ssl_handshake.hrl").
+-include("ssl_srp.hrl").
+
%% Application handling
-export([start/0, start/1, stop/0, clear_pem_cache/0]).
@@ -39,8 +47,8 @@
close/1, close/2, shutdown/2, recv/2, recv/3, send/2,
getopts/2, setopts/2, getstat/1, getstat/2
]).
-%% SSL/TLS protocol handling
+%% SSL/TLS protocol handling
-export([cipher_suites/0, cipher_suites/1, cipher_suites/2, filter_cipher_suites/2,
prepend_cipher_suites/2, append_cipher_suites/2,
eccs/0, eccs/1, versions/0,
@@ -49,14 +57,9 @@
%% Misc
-export([handle_options/2, tls_version/1, new_ssl_options/3]).
--include("ssl_api.hrl").
--include("ssl_internal.hrl").
--include("ssl_record.hrl").
--include("ssl_cipher.hrl").
--include("ssl_handshake.hrl").
--include("ssl_srp.hrl").
-
--include_lib("public_key/include/public_key.hrl").
+-deprecated({ssl_accept, 1, eventually}).
+-deprecated({ssl_accept, 2, eventually}).
+-deprecated({ssl_accept, 3, eventually}).
%%--------------------------------------------------------------------
-spec start() -> ok | {error, reason()}.
@@ -231,7 +234,7 @@ handshake(#sslsocket{fd = {_, _, _, Tracker}} = Socket, SslOpts, Timeout) when
handshake(#sslsocket{pid = Pid, fd = {_, _, _}} = Socket, SslOpts, Timeout) when
(is_integer(Timeout) andalso Timeout >= 0) or (Timeout == infinity)->
try
- {ok, EmOpts, _} = dtls_udp_listener:get_all_opts(Pid),
+ {ok, EmOpts, _} = dtls_packet_demux:get_all_opts(Pid),
ssl_connection:handshake(Socket, {SslOpts,
tls_socket:emulated_socket_options(EmOpts, #socket_options{})}, Timeout)
catch
@@ -280,8 +283,8 @@ handshake_cancel(Socket) ->
%%--------------------------------------------------------------------
close(#sslsocket{pid = Pid}) when is_pid(Pid) ->
ssl_connection:close(Pid, {close, ?DEFAULT_TIMEOUT});
-close(#sslsocket{pid = {udp, #config{udp_handler = {Pid, _}}}}) ->
- dtls_udp_listener:close(Pid);
+close(#sslsocket{pid = {dtls, #config{dtls_handler = {Pid, _}}}}) ->
+ dtls_packet_demux:close(Pid);
close(#sslsocket{pid = {ListenSocket, #config{transport_info={Transport,_, _, _}}}}) ->
Transport:close(ListenSocket).
@@ -308,10 +311,10 @@ close(#sslsocket{pid = {ListenSocket, #config{transport_info={Transport,_, _, _}
%%--------------------------------------------------------------------
send(#sslsocket{pid = Pid}, Data) when is_pid(Pid) ->
ssl_connection:send(Pid, Data);
-send(#sslsocket{pid = {_, #config{transport_info={gen_udp, _, _, _}}}}, _) ->
+send(#sslsocket{pid = {_, #config{transport_info={_, udp, _, _}}}}, _) ->
{error,enotconn}; %% Emulate connection behaviour
-send(#sslsocket{pid = {udp,_}}, _) ->
- {error,enotconn};
+send(#sslsocket{pid = {dtls,_}}, _) ->
+ {error,enotconn}; %% Emulate connection behaviour
send(#sslsocket{pid = {ListenSocket, #config{transport_info={Transport, _, _, _}}}}, Data) ->
Transport:send(ListenSocket, Data). %% {error,enotconn}
@@ -326,7 +329,7 @@ recv(Socket, Length) ->
recv(#sslsocket{pid = Pid}, Length, Timeout) when is_pid(Pid),
(is_integer(Timeout) andalso Timeout >= 0) or (Timeout == infinity)->
ssl_connection:recv(Pid, Length, Timeout);
-recv(#sslsocket{pid = {udp,_}}, _, _) ->
+recv(#sslsocket{pid = {dtls,_}}, _, _) ->
{error,enotconn};
recv(#sslsocket{pid = {Listen,
#config{transport_info = {Transport, _, _, _}}}}, _,_) when is_port(Listen)->
@@ -340,7 +343,7 @@ recv(#sslsocket{pid = {Listen,
%%--------------------------------------------------------------------
controlling_process(#sslsocket{pid = Pid}, NewOwner) when is_pid(Pid), is_pid(NewOwner) ->
ssl_connection:new_user(Pid, NewOwner);
-controlling_process(#sslsocket{pid = {udp, _}},
+controlling_process(#sslsocket{pid = {dtls, _}},
NewOwner) when is_pid(NewOwner) ->
ok; %% Meaningless but let it be allowed to conform with TLS
controlling_process(#sslsocket{pid = {Listen,
@@ -365,7 +368,7 @@ connection_information(#sslsocket{pid = Pid}) when is_pid(Pid) ->
end;
connection_information(#sslsocket{pid = {Listen, _}}) when is_port(Listen) ->
{error, enotconn};
-connection_information(#sslsocket{pid = {udp,_}}) ->
+connection_information(#sslsocket{pid = {dtls,_}}) ->
{error,enotconn}.
%%--------------------------------------------------------------------
@@ -391,13 +394,11 @@ peername(#sslsocket{pid = Pid, fd = {Transport, Socket, _}}) when is_pid(Pid)->
dtls_socket:peername(Transport, Socket);
peername(#sslsocket{pid = Pid, fd = {Transport, Socket, _, _}}) when is_pid(Pid)->
tls_socket:peername(Transport, Socket);
-peername(#sslsocket{pid = {udp = Transport, #config{udp_handler = {_Pid, _}}}}) ->
- dtls_socket:peername(Transport, undefined);
-peername(#sslsocket{pid = Pid, fd = {gen_udp= Transport, Socket, _, _}}) when is_pid(Pid) ->
- dtls_socket:peername(Transport, Socket);
+peername(#sslsocket{pid = {dtls, #config{dtls_handler = {_Pid, _}}}}) ->
+ dtls_socket:peername(dtls, undefined);
peername(#sslsocket{pid = {ListenSocket, #config{transport_info = {Transport,_,_,_}}}}) ->
tls_socket:peername(Transport, ListenSocket); %% Will return {error, enotconn}
-peername(#sslsocket{pid = {udp,_}}) ->
+peername(#sslsocket{pid = {dtls,_}}) ->
{error,enotconn}.
%%--------------------------------------------------------------------
@@ -412,7 +413,7 @@ peercert(#sslsocket{pid = Pid}) when is_pid(Pid) ->
Result ->
Result
end;
-peercert(#sslsocket{pid = {udp, _}}) ->
+peercert(#sslsocket{pid = {dtls, _}}) ->
{error, enotconn};
peercert(#sslsocket{pid = {Listen, _}}) when is_port(Listen) ->
{error, enotconn}.
@@ -562,7 +563,7 @@ eccs_filter_supported(Curves) ->
%%--------------------------------------------------------------------
getopts(#sslsocket{pid = Pid}, OptionTags) when is_pid(Pid), is_list(OptionTags) ->
ssl_connection:get_opts(Pid, OptionTags);
-getopts(#sslsocket{pid = {udp, #config{transport_info = {Transport,_,_,_}}}} = ListenSocket, OptionTags) when is_list(OptionTags) ->
+getopts(#sslsocket{pid = {dtls, #config{transport_info = {Transport,_,_,_}}}} = ListenSocket, OptionTags) when is_list(OptionTags) ->
try dtls_socket:getopts(Transport, ListenSocket, OptionTags) of
{ok, _} = Result ->
Result;
@@ -600,7 +601,7 @@ setopts(#sslsocket{pid = Pid}, Options0) when is_pid(Pid), is_list(Options0) ->
_:_ ->
{error, {options, {not_a_proplist, Options0}}}
end;
-setopts(#sslsocket{pid = {udp, #config{transport_info = {Transport,_,_,_}}}} = ListenSocket, Options) when is_list(Options) ->
+setopts(#sslsocket{pid = {dtls, #config{transport_info = {Transport,_,_,_}}}} = ListenSocket, Options) when is_list(Options) ->
try dtls_socket:setopts(Transport, ListenSocket, Options) of
ok ->
ok;
@@ -657,7 +658,7 @@ getstat(#sslsocket{pid = Pid, fd = {Transport, Socket, _, _}}, Options) when is_
shutdown(#sslsocket{pid = {Listen, #config{transport_info = {Transport,_, _, _}}}},
How) when is_port(Listen) ->
Transport:shutdown(Listen, How);
-shutdown(#sslsocket{pid = {udp,_}},_) ->
+shutdown(#sslsocket{pid = {dtls,_}},_) ->
{error, enotconn};
shutdown(#sslsocket{pid = Pid}, How) ->
ssl_connection:shutdown(Pid, How).
@@ -669,8 +670,8 @@ shutdown(#sslsocket{pid = Pid}, How) ->
%%--------------------------------------------------------------------
sockname(#sslsocket{pid = {Listen, #config{transport_info = {Transport, _, _, _}}}}) when is_port(Listen) ->
tls_socket:sockname(Transport, Listen);
-sockname(#sslsocket{pid = {udp, #config{udp_handler = {Pid, _}}}}) ->
- dtls_udp_listener:sockname(Pid);
+sockname(#sslsocket{pid = {dtls, #config{dtls_handler = {Pid, _}}}}) ->
+ dtls_packet_demux:sockname(Pid);
sockname(#sslsocket{pid = Pid, fd = {Transport, Socket, _}}) when is_pid(Pid) ->
dtls_socket:sockname(Transport, Socket);
sockname(#sslsocket{pid = Pid, fd = {Transport, Socket, _, _}}) when is_pid(Pid) ->
@@ -704,7 +705,7 @@ versions() ->
%%--------------------------------------------------------------------
renegotiate(#sslsocket{pid = Pid}) when is_pid(Pid) ->
ssl_connection:renegotiation(Pid);
-renegotiate(#sslsocket{pid = {udp,_}}) ->
+renegotiate(#sslsocket{pid = {dtls,_}}) ->
{error, enotconn};
renegotiate(#sslsocket{pid = {Listen,_}}) when is_port(Listen) ->
{error, enotconn}.
@@ -719,7 +720,7 @@ renegotiate(#sslsocket{pid = {Listen,_}}) when is_port(Listen) ->
prf(#sslsocket{pid = Pid},
Secret, Label, Seed, WantedLength) when is_pid(Pid) ->
ssl_connection:prf(Pid, Secret, Label, Seed, WantedLength);
-prf(#sslsocket{pid = {udp,_}}, _,_,_,_) ->
+prf(#sslsocket{pid = {dtls,_}}, _,_,_,_) ->
{error, enotconn};
prf(#sslsocket{pid = {Listen,_}}, _,_,_,_) when is_port(Listen) ->
{error, enotconn}.
@@ -792,8 +793,8 @@ supported_suites(anonymous, Version) ->
do_listen(Port, #config{transport_info = {Transport, _, _, _}} = Config, tls_connection) ->
tls_socket:listen(Transport, Port, Config);
-do_listen(Port, #config{transport_info = {Transport, _, _, _}} = Config, dtls_connection) ->
- dtls_socket:listen(Transport, Port, Config).
+do_listen(Port, Config, dtls_connection) ->
+ dtls_socket:listen(Port, Config).
%% Handle extra ssl options given to ssl_accept
-spec handle_options([any()], #ssl_options{}) -> #ssl_options{}
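With the -deprecated attributes added above, calls to ssl:ssl_accept/1,2,3 start to produce compiler warnings while the functions keep working. A caller that cannot migrate to the handshake functions yet can silence the warning per function; a sketch with an illustrative module name:

    %% Hypothetical caller; the nowarn option only silences the warning,
    %% ssl_accept/2 still works until it is eventually removed.
    -module(old_api_user).
    -export([accept/2]).
    -compile({nowarn_deprecated_function, [{ssl, ssl_accept, 2}]}).

    accept(Socket, Timeout) ->
        ssl:ssl_accept(Socket, Timeout).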
diff --git a/lib/ssl/src/ssl_cipher.erl b/lib/ssl/src/ssl_cipher.erl
index 28b26fd358..3f8b9a8a9b 100644
--- a/lib/ssl/src/ssl_cipher.erl
+++ b/lib/ssl/src/ssl_cipher.erl
@@ -37,10 +37,10 @@
erl_suite_definition/1,
cipher_init/3, decipher/6, cipher/5, decipher_aead/6, cipher_aead/6,
suite/1, suites/1, all_suites/1, crypto_support_filters/0,
- ec_keyed_suites/0, chacha_suites/1, anonymous_suites/1, psk_suites/1, psk_suites_anon/1,
+ chacha_suites/1, anonymous_suites/1, psk_suites/1, psk_suites_anon/1,
srp_suites/0, srp_suites_anon/0,
rc4_suites/1, des_suites/1, rsa_suites/1, openssl_suite/1, openssl_suite_name/1,
- filter/2, filter_suites/1, filter_suites/2,
+ filter/3, filter_suites/1, filter_suites/2,
hash_algorithm/1, sign_algorithm/1, is_acceptable_hash/2, is_fallback/1,
random_bytes/1, calc_mac_hash/4,
is_stream_ciphersuite/1]).
@@ -2212,39 +2212,25 @@ openssl_suite_name(Cipher) ->
suite_definition(Cipher).
%%--------------------------------------------------------------------
--spec filter(undefined | binary(), [cipher_suite()]) -> [cipher_suite()].
+-spec filter(undefined | binary(), [cipher_suite()], ssl_record:ssl_version()) -> [cipher_suite()].
%%
%% Description: Select the cipher suites that can be used together with the
%% supplied certificate. (Server side functionality)
%%-------------------------------------------------------------------
-filter(undefined, Ciphers) ->
+filter(undefined, Ciphers, _) ->
Ciphers;
-filter(DerCert, Ciphers) ->
+filter(DerCert, Ciphers0, Version) ->
OtpCert = public_key:pkix_decode_cert(DerCert, otp),
SigAlg = OtpCert#'OTPCertificate'.signatureAlgorithm,
PubKeyInfo = OtpCert#'OTPCertificate'.tbsCertificate#'OTPTBSCertificate'.subjectPublicKeyInfo,
PubKeyAlg = PubKeyInfo#'OTPSubjectPublicKeyInfo'.algorithm,
- Ciphers1 =
- case ssl_certificate:public_key_type(PubKeyAlg#'PublicKeyAlgorithm'.algorithm) of
- rsa ->
- filter_keyuse(OtpCert, ((Ciphers -- dsa_signed_suites()) -- ec_keyed_suites()) -- ecdh_suites(),
- rsa_suites(), dhe_rsa_suites() ++ ecdhe_rsa_suites());
- dsa ->
- (Ciphers -- rsa_keyed_suites()) -- ec_keyed_suites();
- ec ->
- filter_keyuse(OtpCert, (Ciphers -- rsa_keyed_suites()) -- dsa_signed_suites(),
- [], ecdhe_ecdsa_suites())
- end,
-
- case public_key:pkix_sign_types(SigAlg#'SignatureAlgorithm'.algorithm) of
- {_, rsa} ->
- Ciphers1 -- ecdsa_signed_suites();
- {_, dsa} ->
- Ciphers1;
- {_, ecdsa} ->
- Ciphers1 -- rsa_signed_suites()
- end.
+ Ciphers = filter_suites_pubkey(
+ ssl_certificate:public_key_type(PubKeyAlg#'PublicKeyAlgorithm'.algorithm),
+ Ciphers0, Version, OtpCert),
+ {_, Sign} = public_key:pkix_sign_types(SigAlg#'SignatureAlgorithm'.algorithm),
+ filter_suites_signature(Sign, Ciphers, Version).
+
%%--------------------------------------------------------------------
-spec filter_suites([erl_cipher_suite()] | [cipher_suite()], map()) ->
[erl_cipher_suite()] | [cipher_suite()].
@@ -2676,143 +2662,208 @@ next_iv(Bin, IV) ->
<<_:FirstPart/binary, NextIV:IVSz/binary>> = Bin,
NextIV.
-rsa_signed_suites() ->
- dhe_rsa_suites() ++ rsa_suites() ++
- psk_rsa_suites() ++ srp_rsa_suites() ++
- ecdh_rsa_suites() ++ ecdhe_rsa_suites().
-
-rsa_keyed_suites() ->
- dhe_rsa_suites() ++ rsa_suites() ++
- psk_rsa_suites() ++ srp_rsa_suites() ++
- ecdhe_rsa_suites().
-
-dhe_rsa_suites() ->
- [?TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
- ?TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
- ?TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
- ?TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
- ?TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
- ?TLS_DHE_RSA_WITH_DES_CBC_SHA,
- ?TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
- ?TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
- ?TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256
- ].
-
-psk_rsa_suites() ->
- [?TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
- ?TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
- ?TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
- ?TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
- ?TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
- ?TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
- ?TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
- ?TLS_RSA_PSK_WITH_RC4_128_SHA].
-
-srp_rsa_suites() ->
- [?TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
- ?TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
- ?TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA].
-
-rsa_suites() ->
- [?TLS_RSA_WITH_AES_256_CBC_SHA256,
- ?TLS_RSA_WITH_AES_256_CBC_SHA,
- ?TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- ?TLS_RSA_WITH_AES_128_CBC_SHA256,
- ?TLS_RSA_WITH_AES_128_CBC_SHA,
- ?TLS_RSA_WITH_RC4_128_SHA,
- ?TLS_RSA_WITH_RC4_128_MD5,
- ?TLS_RSA_WITH_DES_CBC_SHA,
- ?TLS_RSA_WITH_AES_128_GCM_SHA256,
- ?TLS_RSA_WITH_AES_256_GCM_SHA384].
-
-ecdh_rsa_suites() ->
- [?TLS_ECDH_RSA_WITH_NULL_SHA,
- ?TLS_ECDH_RSA_WITH_RC4_128_SHA,
- ?TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
- ?TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
- ?TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
- ?TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
- ?TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
- ?TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
- ?TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384].
-
-ecdhe_rsa_suites() ->
- [?TLS_ECDHE_RSA_WITH_NULL_SHA,
- ?TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- ?TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- ?TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- ?TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- ?TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- ?TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
- ?TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- ?TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- ?TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256].
-
-dsa_signed_suites() ->
- dhe_dss_suites() ++ srp_dss_suites().
-
-dhe_dss_suites() ->
- [?TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
- ?TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
- ?TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
- ?TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
- ?TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
- ?TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
- ?TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
- ?TLS_DHE_DSS_WITH_AES_256_GCM_SHA384].
-
-srp_dss_suites() ->
- [?TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
- ?TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
- ?TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA].
+filter_suites_pubkey(rsa, CiphersSuites0, _Version, OtpCert) ->
+ KeyUses = key_uses(OtpCert),
+ NotECDSAKeyed = (CiphersSuites0 -- ec_keyed_suites(CiphersSuites0))
+ -- dss_keyed_suites(CiphersSuites0),
+ CiphersSuites = filter_keyuse_suites(keyEncipherment, KeyUses,
+ NotECDSAKeyed,
+ rsa_suites_encipher(CiphersSuites0)),
+ filter_keyuse_suites(digitalSignature, KeyUses, CiphersSuites,
+ rsa_ecdhe_dhe_suites(CiphersSuites));
+filter_suites_pubkey(dsa, Ciphers, _, OtpCert) ->
+ KeyUses = key_uses(OtpCert),
+ NotECRSAKeyed = (Ciphers -- rsa_keyed_suites(Ciphers)) -- ec_keyed_suites(Ciphers),
+ filter_keyuse_suites(digitalSignature, KeyUses, NotECRSAKeyed,
+ dss_dhe_suites(Ciphers));
+filter_suites_pubkey(ec, Ciphers, _, OtpCert) ->
+ Uses = key_uses(OtpCert),
+ NotRSADSAKeyed = (Ciphers -- rsa_keyed_suites(Ciphers)) -- dss_keyed_suites(Ciphers),
+ CiphersSuites = filter_keyuse_suites(digitalSignature, Uses, NotRSADSAKeyed,
+ ec_ecdhe_suites(Ciphers)),
+ filter_keyuse_suites(keyAgreement, Uses, CiphersSuites, ec_ecdh_suites(Ciphers)).
+
+filter_suites_signature(rsa, Ciphers, Version) ->
+ (Ciphers -- ecdsa_signed_suites(Ciphers, Version)) -- dsa_signed_suites(Ciphers, Version);
+filter_suites_signature(dsa, Ciphers, Version) ->
+ (Ciphers -- ecdsa_signed_suites(Ciphers, Version)) -- rsa_signed_suites(Ciphers, Version);
+filter_suites_signature(ecdsa, Ciphers, Version) ->
+ (Ciphers -- rsa_signed_suites(Ciphers, Version)) -- dsa_signed_suites(Ciphers, Version).
+
+
+%% From RFC 5246 - Section 7.4.2. Server Certificate
+%% If the client provided a "signature_algorithms" extension, then all
+%% certificates provided by the server MUST be signed by a
+%% hash/signature algorithm pair that appears in that extension. Note
+%% that this implies that a certificate containing a key for one
+%% signature algorithm MAY be signed using a different signature
+%% algorithm (for instance, an RSA key signed with a DSA key). This is
+%% a departure from TLS 1.1, which required that the algorithms be the
+%% same.
+%% Note that this also implies that the DH_DSS, DH_RSA,
+%% ECDH_ECDSA, and ECDH_RSA key exchange algorithms do not restrict the
+%% algorithm used to sign the certificate. Fixed DH certificates MAY be
+%% signed with any hash/signature algorithm pair appearing in the
+%% extension. The names DH_DSS, DH_RSA, ECDH_ECDSA, and ECDH_RSA are
+%% historical.
+%% Note: DH_DSS and DH_RSA are not supported
+rsa_signed({3,N}) when N >= 3 ->
+ fun(rsa) -> true;
+ (dhe_rsa) -> true;
+ (ecdhe_rsa) -> true;
+ (rsa_psk) -> true;
+ (srp_rsa) -> true;
+ (_) -> false
+ end;
+rsa_signed(_) ->
+ fun(rsa) -> true;
+ (dhe_rsa) -> true;
+ (ecdhe_rsa) -> true;
+ (ecdh_rsa) -> true;
+ (rsa_psk) -> true;
+ (srp_rsa) -> true;
+ (_) -> false
+ end.
+%% Cert should be signed by RSA
+rsa_signed_suites(Ciphers, Version) ->
+ filter_suites(Ciphers, #{key_exchange_filters => [rsa_signed(Version)],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}).
+ecdsa_signed({3,N}) when N >= 3 ->
+ fun(ecdhe_ecdsa) -> true;
+ (_) -> false
+ end;
+ecdsa_signed(_) ->
+ fun(ecdhe_ecdsa) -> true;
+ (ecdh_ecdsa) -> true;
+ (_) -> false
+ end.
+
+%% Cert should be signed by ECDSA
+ecdsa_signed_suites(Ciphers, Version) ->
+ filter_suites(Ciphers, #{key_exchange_filters => [ecdsa_signed(Version)],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}).
+
+rsa_keyed(dhe_rsa) ->
+ true;
+rsa_keyed(rsa) ->
+ true;
+rsa_keyed(rsa_psk) ->
+ true;
+rsa_keyed(srp_rsa) ->
+ true;
+rsa_keyed(_) ->
+ false.
-ec_keyed_suites() ->
- ecdh_ecdsa_suites() ++ ecdhe_ecdsa_suites()
- ++ ecdh_rsa_suites().
+%% Cert's key is an RSA key
+rsa_keyed_suites(Ciphers) ->
+ filter_suites(Ciphers, #{key_exchange_filters => [fun(Kex) -> rsa_keyed(Kex) end],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}).
+
+%% An RSA cert's key can be used for encipherment
+rsa_suites_encipher(Ciphers) ->
+ filter_suites(Ciphers, #{key_exchange_filters => [fun(rsa) -> true;
+ (rsa_psk) -> true;
+ (_) -> false
+ end],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}).
+
+dss_keyed(dhe_dss) ->
+ true;
+dss_keyed(spr_dss) ->
+ true;
+dss_keyed(_) ->
+ false.
+
+%% Cert should have a DSS key (DSA)
+dss_keyed_suites(Ciphers) ->
+ filter_suites(Ciphers, #{key_exchange_filters => [fun(Kex) -> dss_keyed(Kex) end],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}).
+
+%% Cert should be signed by DSS (DSA)
+dsa_signed_suites(Ciphers, Version) ->
+ filter_suites(Ciphers, #{key_exchange_filters => [dsa_signed(Version)],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}).
+dsa_signed(_) ->
+ fun(dhe_dss) -> true;
+ (_) -> false
+ end.
-ecdsa_signed_suites() ->
- ecdh_ecdsa_suites() ++ ecdhe_ecdsa_suites().
+dss_dhe_suites(Ciphers) ->
+ filter_suites(Ciphers, #{key_exchange_filters => [fun(dhe_dss) -> true;
+ (_) -> false
+ end],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}).
-ecdh_suites() ->
- ecdh_rsa_suites() ++ ecdh_ecdsa_suites().
+ec_keyed(ecdh_ecdsa) ->
+ true;
+ec_keyed(ecdh_rsa) ->
+ true;
+ec_keyed(_) ->
+ false.
-ecdh_ecdsa_suites() ->
- [?TLS_ECDH_ECDSA_WITH_NULL_SHA,
- ?TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
- ?TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
- ?TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
- ?TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
- ?TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
- ?TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
- ?TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
- ?TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384].
-
-ecdhe_ecdsa_suites() ->
- [?TLS_ECDHE_ECDSA_WITH_NULL_SHA,
- ?TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- ?TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
- ?TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- ?TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- ?TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
- ?TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
- ?TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- ?TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- ?TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256].
-
-filter_keyuse(OtpCert, Ciphers, Suites, SignSuites) ->
+%% Cert's key is an ECC key
+ec_keyed_suites(Ciphers) ->
+ filter_suites(Ciphers, #{key_exchange_filters => [fun(Kex) -> ec_keyed(Kex) end],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}).
+
+%% EC cert's key usage: keyAgreement
+ec_ecdh_suites(Ciphers)->
+ filter_suites(Ciphers, #{key_exchange_filters => [fun(ecdh_ecdsa) -> true;
+ (_) -> false
+ end],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}).
+
+%% EC cert's key usage: digitalSignature
+ec_ecdhe_suites(Ciphers) ->
+ filter_suites(Ciphers, #{key_exchange_filters => [fun(ecdhe_ecdsa) -> true;
+ (ecdhe_rsa) -> true;
+ (_) -> false
+ end],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}).
+%% RSA cert's key usage: digitalSignature
+rsa_ecdhe_dhe_suites(Ciphers) ->
+ filter_suites(Ciphers, #{key_exchange_filters => [fun(dhe_rsa) -> true;
+ (ecdhe_rsa) -> true;
+ (_) -> false
+ end],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}).
+
+key_uses(OtpCert) ->
TBSCert = OtpCert#'OTPCertificate'.tbsCertificate,
TBSExtensions = TBSCert#'OTPTBSCertificate'.extensions,
Extensions = ssl_certificate:extensions_list(TBSExtensions),
case ssl_certificate:select_extension(?'id-ce-keyUsage', Extensions) of
undefined ->
- Ciphers;
- #'Extension'{extnValue = KeyUse} ->
- Result = filter_keyuse_suites(keyEncipherment,
- KeyUse, Ciphers, Suites),
- filter_keyuse_suites(digitalSignature,
- KeyUse, Result, SignSuites)
+ [];
+ #'Extension'{extnValue = KeyUses} ->
+ KeyUses
end.
+%% If no key-usage extension is defined, all key usages are allowed
+filter_keyuse_suites(_, [], CiphersSuites, _) ->
+ CiphersSuites;
filter_keyuse_suites(Use, KeyUse, CipherSuits, Suites) ->
case ssl_certificate:is_valid_key_usage(KeyUse, Use) of
true ->
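The hard-coded suite lists removed above are replaced throughout this file by a filter-map convention: each field of the decoded suite definition gets a list of predicate funs, and a suite is kept only if every fun returns true. Assuming Suites is bound to a list of cipher suites, keeping only the ECDSA-signed key exchanges looks like this (mirrors the helpers above):

    EcdsaSigned =
        ssl_cipher:filter_suites(Suites,
                                 #{key_exchange_filters => [fun(ecdhe_ecdsa) -> true;
                                                               (ecdh_ecdsa) -> true;
                                                               (_) -> false
                                                            end],
                                   cipher_filters => [],
                                   mac_filters => [],
                                   prf_filters => []}).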
diff --git a/lib/ssl/src/ssl_config.erl b/lib/ssl/src/ssl_config.erl
index 022fb7eac0..452a98e683 100644
--- a/lib/ssl/src/ssl_config.erl
+++ b/lib/ssl/src/ssl_config.erl
@@ -132,7 +132,13 @@ private_key(#'PrivateKeyInfo'{privateKeyAlgorithm =
#'PrivateKeyInfo_privateKeyAlgorithm'{algorithm = ?'id-dsa'},
privateKey = Key}) ->
public_key:der_decode('DSAPrivateKey', iolist_to_binary(Key));
-
+private_key(#'PrivateKeyInfo'{privateKeyAlgorithm =
+ #'PrivateKeyInfo_privateKeyAlgorithm'{algorithm = ?'id-ecPublicKey',
+ parameters = {asn1_OPENTYPE, Parameters}},
+ privateKey = Key}) ->
+ ECKey = public_key:der_decode('ECPrivateKey', iolist_to_binary(Key)),
+ ECParameters = public_key:der_decode('EcpkParameters', Parameters),
+ ECKey#'ECPrivateKey'{parameters = ECParameters};
private_key(Key) ->
Key.
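The new private_key/1 clause above unwraps an EC key delivered in PKCS#8 form: the inner 'ECPrivateKey' is decoded from the privateKey field and the named-curve parameters from the algorithm parameters. A hedged sketch of the same unwrapping applied to a PEM file, assuming a single unencrypted "BEGIN PRIVATE KEY" entry (file name and module name are placeholders):

    -module(ec_pkcs8_sketch).
    -include_lib("public_key/include/public_key.hrl").
    -export([decode/1]).

    %% Decode a PKCS#8-wrapped EC key into #'ECPrivateKey'{} with its
    %% curve parameters filled in, mirroring ssl_config:private_key/1.
    decode(PemFile) ->
        {ok, Pem} = file:read_file(PemFile),
        [{'PrivateKeyInfo', Der, not_encrypted}] = public_key:pem_decode(Pem),
        #'PrivateKeyInfo'{privateKeyAlgorithm =
                              #'PrivateKeyInfo_privateKeyAlgorithm'{
                                 algorithm = ?'id-ecPublicKey',
                                 parameters = {asn1_OPENTYPE, Params}},
                          privateKey = Key} =
            public_key:der_decode('PrivateKeyInfo', Der),
        ECKey = public_key:der_decode('ECPrivateKey', iolist_to_binary(Key)),
        ECParams = public_key:der_decode('EcpkParameters', Params),
        ECKey#'ECPrivateKey'{parameters = ECParams}.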
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index 3f8c1f97f9..ec034af44c 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -1472,7 +1472,7 @@ connection_info(#state{sni_hostname = SNIHostname,
RecordCB = record_cb(Connection),
CipherSuiteDef = #{key_exchange := KexAlg} = ssl_cipher:suite_definition(CipherSuite),
IsNamedCurveSuite = lists:member(KexAlg,
- [ecdh_ecdsa, ecdhe_ecdsa, ecdh_anon]),
+ [ecdh_ecdsa, ecdhe_ecdsa, ecdh_rsa, ecdh_anon]),
CurveInfo = case ECCCurve of
{namedCurve, Curve} when IsNamedCurveSuite ->
[{ecc, {named_curve, pubkey_cert_records:namedCurves(Curve)}}];
@@ -1572,11 +1572,14 @@ handle_peer_cert(Role, PeerCert, PublicKeyInfo,
handle_peer_cert_key(client, _,
{?'id-ecPublicKey', #'ECPoint'{point = _ECPoint} = PublicKey,
PublicKeyParams},
- KeyAlg, State) when KeyAlg == ecdh_rsa;
- KeyAlg == ecdh_ecdsa ->
+ KeyAlg, #state{session = Session} = State) when KeyAlg == ecdh_rsa;
+ KeyAlg == ecdh_ecdsa ->
ECDHKey = public_key:generate_key(PublicKeyParams),
+ {namedCurve, Oid} = PublicKeyParams,
+ Curve = pubkey_cert_records:namedCurves(Oid), %% Need API function
PremasterSecret = ssl_handshake:premaster_secret(PublicKey, ECDHKey),
- master_secret(PremasterSecret, State#state{diffie_hellman_keys = ECDHKey});
+ master_secret(PremasterSecret, State#state{diffie_hellman_keys = ECDHKey,
+ session = Session#session{ecc = {named_curve, Curve}}});
%% We do currently not support cipher suites that use fixed DH.
%% If we want to implement that the following clause can be used
%% to extract DH parameters form cert.
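handle_peer_cert_key/5 above now also records the certificate's named curve in the session for the static ECDH key exchanges. The curve can be read out of any DER certificate in the same way; a small sketch mirroring the code above (module name is illustrative):

    -module(cert_curve_sketch).
    -include_lib("public_key/include/public_key.hrl").
    -export([named_curve/1]).

    %% Extract the named curve of the subject public key from a DER cert.
    named_curve(DerCert) ->
        OtpCert = public_key:pkix_decode_cert(DerCert, otp),
        TBS = OtpCert#'OTPCertificate'.tbsCertificate,
        #'OTPSubjectPublicKeyInfo'{algorithm = AlgInfo} =
            TBS#'OTPTBSCertificate'.subjectPublicKeyInfo,
        {namedCurve, Oid} = AlgInfo#'PublicKeyAlgorithm'.parameters,
        {named_curve, pubkey_cert_records:namedCurves(Oid)}.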
diff --git a/lib/ssl/src/ssl_connection_sup.erl b/lib/ssl/src/ssl_connection_sup.erl
index 1a1f43e683..1aa7c5844f 100644
--- a/lib/ssl/src/ssl_connection_sup.erl
+++ b/lib/ssl/src/ssl_connection_sup.erl
@@ -51,12 +51,12 @@ init([]) ->
ListenOptionsTracker = listen_options_tracker_child_spec(),
DTLSConnetionManager = dtls_connection_manager_child_spec(),
- DTLSUdpListeners = dtls_udp_listeners_spec(),
+ DTLSListeners = dtls_listeners_spec(),
{ok, {{one_for_one, 10, 3600}, [TLSConnetionManager,
ListenOptionsTracker,
DTLSConnetionManager,
- DTLSUdpListeners
+ DTLSListeners
]}}.
@@ -91,9 +91,9 @@ listen_options_tracker_child_spec() ->
Type = supervisor,
{Name, StartFunc, Restart, Shutdown, Type, Modules}.
-dtls_udp_listeners_spec() ->
- Name = dtls_udp_listener,
- StartFunc = {dtls_udp_sup, start_link, []},
+dtls_listeners_spec() ->
+ Name = dtls_listener,
+ StartFunc = {dtls_listener_sup, start_link, []},
Restart = permanent,
Shutdown = 4000,
Modules = [],
diff --git a/lib/ssl/src/ssl_handshake.erl b/lib/ssl/src/ssl_handshake.erl
index 8ddd4623c1..ebbb633b22 100644
--- a/lib/ssl/src/ssl_handshake.erl
+++ b/lib/ssl/src/ssl_handshake.erl
@@ -772,11 +772,12 @@ available_suites(UserSuites, Version) ->
lists:filtermap(fun(Suite) -> lists:member(Suite, VersionSuites) end, UserSuites).
available_suites(ServerCert, UserSuites, Version, undefined, Curve) ->
- ssl_cipher:filter(ServerCert, available_suites(UserSuites, Version))
- -- unavailable_ecc_suites(Curve);
+ Suites = ssl_cipher:filter(ServerCert, available_suites(UserSuites, Version), Version),
+ filter_unavailable_ecc_suites(Curve, Suites);
available_suites(ServerCert, UserSuites, Version, HashSigns, Curve) ->
Suites = available_suites(ServerCert, UserSuites, Version, undefined, Curve),
- filter_hashsigns(Suites, [ssl_cipher:suite_definition(Suite) || Suite <- Suites], HashSigns, []).
+ filter_hashsigns(Suites, [ssl_cipher:suite_definition(Suite) || Suite <- Suites], HashSigns,
+ Version, []).
available_signature_algs(undefined, _) ->
undefined;
@@ -814,7 +815,7 @@ prf({3,0}, _, _, _, _, _) ->
prf({3,_N}, PRFAlgo, Secret, Label, Seed, WantedLength) ->
{ok, tls_v1:prf(PRFAlgo, Secret, Label, Seed, WantedLength)}.
-select_session(SuggestedSessionId, CipherSuites, HashSigns, Compressions, Port, #session{ecc = ECCCurve} =
+select_session(SuggestedSessionId, CipherSuites, HashSigns, Compressions, Port, #session{ecc = ECCCurve0} =
Session, Version,
#ssl_options{ciphers = UserSuites, honor_cipher_order = HonorCipherOrder} = SslOpts,
Cache, CacheCb, Cert) ->
@@ -823,10 +824,12 @@ select_session(SuggestedSessionId, CipherSuites, HashSigns, Compressions, Port,
Cache, CacheCb),
case Resumed of
undefined ->
- Suites = available_suites(Cert, UserSuites, Version, HashSigns, ECCCurve),
- CipherSuite = select_cipher_suite(CipherSuites, Suites, HonorCipherOrder),
+ Suites = available_suites(Cert, UserSuites, Version, HashSigns, ECCCurve0),
+ CipherSuite0 = select_cipher_suite(CipherSuites, Suites, HonorCipherOrder),
+ {ECCCurve, CipherSuite} = cert_curve(Cert, ECCCurve0, CipherSuite0),
Compression = select_compression(Compressions),
{new, Session#session{session_id = SessionId,
+ ecc = ECCCurve,
cipher_suite = CipherSuite,
compression_method = Compression}};
_ ->
@@ -1066,11 +1069,11 @@ select_hashsign(#hash_sign_algos{hash_sign_algos = HashSigns}, Cert, KeyExAlgo,
TBSCert#'OTPTBSCertificate'.subjectPublicKeyInfo,
Sign = sign_algo(SignAlgo),
- SubSing = sign_algo(SubjAlgo),
-
- case lists:filter(fun({_, S} = Algos) when S == Sign ->
+ SubSign = sign_algo(SubjAlgo),
+
+ case lists:filter(fun({_, S} = Algos) when S == SubSign ->
is_acceptable_hash_sign(Algos, Sign,
- SubSing, KeyExAlgo, SupportedHashSigns);
+ SubSign, KeyExAlgo, SupportedHashSigns);
(_) ->
false
end, HashSigns) of
@@ -2075,25 +2078,26 @@ handle_psk_identity(_PSKIdentity, LookupFun)
handle_psk_identity(PSKIdentity, {Fun, UserState}) ->
Fun(psk, PSKIdentity, UserState).
-filter_hashsigns([], [], _, Acc) ->
- lists:reverse(Acc);
-filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], HashSigns,
- Acc) when KeyExchange == dhe_ecdsa;
- KeyExchange == ecdhe_ecdsa ->
- do_filter_hashsigns(ecdsa, Suite, Suites, Algos, HashSigns, Acc);
-filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], HashSigns,
+filter_hashsigns([], [], _, _, Acc) ->
+ lists:reverse(Acc);
+filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], HashSigns, Version,
+ Acc) when KeyExchange == dhe_ecdsa;
+ KeyExchange == ecdhe_ecdsa ->
+ do_filter_hashsigns(ecdsa, Suite, Suites, Algos, HashSigns, Version, Acc);
+filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], HashSigns, Version,
Acc) when KeyExchange == rsa;
KeyExchange == dhe_rsa;
KeyExchange == ecdhe_rsa;
KeyExchange == srp_rsa;
KeyExchange == rsa_psk ->
- do_filter_hashsigns(rsa, Suite, Suites, Algos, HashSigns, Acc);
-filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], HashSigns, Acc) when
+ do_filter_hashsigns(rsa, Suite, Suites, Algos, HashSigns, Version, Acc);
+filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], HashSigns, Version, Acc) when
KeyExchange == dhe_dss;
KeyExchange == srp_dss ->
- do_filter_hashsigns(dsa, Suite, Suites, Algos, HashSigns, Acc);
-filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], HashSigns, Acc) when
+ do_filter_hashsigns(dsa, Suite, Suites, Algos, HashSigns, Version, Acc);
+filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], HashSigns, Version,
+ Acc) when
KeyExchange == dh_dss;
KeyExchange == dh_rsa;
KeyExchange == dh_ecdsa;
@@ -2102,8 +2106,9 @@ filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], Has
%% Fixed DH certificates MAY be signed with any hash/signature
%% algorithm pair appearing in the hash_sign extension. The names
%% DH_DSS, DH_RSA, ECDH_ECDSA, and ECDH_RSA are historical.
- filter_hashsigns(Suites, Algos, HashSigns, [Suite| Acc]);
-filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], HashSigns, Acc) when
+ filter_hashsigns(Suites, Algos, HashSigns, Version, [Suite| Acc]);
+filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], HashSigns, Version,
+ Acc) when
KeyExchange == dh_anon;
KeyExchange == ecdh_anon;
KeyExchange == srp_anon;
@@ -2111,20 +2116,28 @@ filter_hashsigns([Suite | Suites], [#{key_exchange := KeyExchange} | Algos], Has
KeyExchange == dhe_psk;
KeyExchange == ecdhe_psk ->
%% In this case hashsigns is not used as the kexchange is anonaymous
- filter_hashsigns(Suites, Algos, HashSigns, [Suite| Acc]).
+ filter_hashsigns(Suites, Algos, HashSigns, Version, [Suite| Acc]).
-do_filter_hashsigns(SignAlgo, Suite, Suites, Algos, HashSigns, Acc) ->
+do_filter_hashsigns(SignAlgo, Suite, Suites, Algos, HashSigns, Version, Acc) ->
case lists:keymember(SignAlgo, 2, HashSigns) of
true ->
- filter_hashsigns(Suites, Algos, HashSigns, [Suite| Acc]);
+ filter_hashsigns(Suites, Algos, HashSigns, Version, [Suite| Acc]);
false ->
- filter_hashsigns(Suites, Algos, HashSigns, Acc)
+ filter_hashsigns(Suites, Algos, HashSigns, Version, Acc)
end.
-unavailable_ecc_suites(no_curve) ->
- ssl_cipher:ec_keyed_suites();
-unavailable_ecc_suites(_) ->
- [].
+filter_unavailable_ecc_suites(no_curve, Suites) ->
+ ECCSuites = ssl_cipher:filter_suites(Suites, #{key_exchange_filters => [fun(ecdh_ecdsa) -> true;
+ (ecdhe_ecdsa) -> true;
+ (ecdh_rsa) -> true;
+ (_) -> false
+ end],
+ cipher_filters => [],
+ mac_filters => [],
+ prf_filters => []}),
+ Suites -- ECCSuites;
+filter_unavailable_ecc_suites(_, Suites) ->
+ Suites.
%%-------------Extension handling --------------------------------
handle_renegotiation_extension(Role, RecordCB, Version, Info, Random, NegotiatedCipherSuite,
@@ -2220,10 +2233,12 @@ sign_algo(Alg) ->
is_acceptable_hash_sign(Algos, _, _, KeyExAlgo, SupportedHashSigns) when
KeyExAlgo == dh_dss;
KeyExAlgo == dh_rsa;
- KeyExAlgo == dh_ecdsa ->
- %% dh_* could be called only dh in TLS-1.2
+ KeyExAlgo == ecdh_rsa;
+ KeyExAlgo == ecdh_ecdsa
+ ->
+ %% *dh_* could be called only *dh in TLS-1.2
is_acceptable_hash_sign(Algos, SupportedHashSigns);
-is_acceptable_hash_sign(Algos, rsa, ecdsa, ecdh_rsa, SupportedHashSigns) ->
+is_acceptable_hash_sign(Algos, rsa, ecdsa, ecdhe_rsa, SupportedHashSigns) ->
is_acceptable_hash_sign(Algos, SupportedHashSigns);
is_acceptable_hash_sign({_, rsa} = Algos, rsa, _, dhe_rsa, SupportedHashSigns) ->
is_acceptable_hash_sign(Algos, SupportedHashSigns);
@@ -2254,7 +2269,7 @@ is_acceptable_hash_sign(_, _, _, KeyExAlgo, _) when
KeyExAlgo == ecdhe_anon
->
true;
-is_acceptable_hash_sign(_,_, _,_,_) ->
+is_acceptable_hash_sign(_,_,_,_,_) ->
false.
is_acceptable_hash_sign(Algos, SupportedHashSigns) ->
lists:member(Algos, SupportedHashSigns).
@@ -2436,3 +2451,27 @@ handle_renegotiation_info(_RecordCB, ConnectionStates, SecureRenegotation) ->
{false, false} ->
{ok, ConnectionStates}
end.
+
+cert_curve(_, _, no_suite) ->
+ {no_curve, no_suite};
+cert_curve(Cert, ECCCurve0, CipherSuite) ->
+ case ssl_cipher:suite_definition(CipherSuite) of
+ #{key_exchange := Kex} when Kex == ecdh_ecdsa;
+ Kex == ecdh_rsa ->
+ OtpCert = public_key:pkix_decode_cert(Cert, otp),
+ TBSCert = OtpCert#'OTPCertificate'.tbsCertificate,
+ #'OTPSubjectPublicKeyInfo'{algorithm = AlgInfo}
+ = TBSCert#'OTPTBSCertificate'.subjectPublicKeyInfo,
+ {namedCurve, Oid} = AlgInfo#'PublicKeyAlgorithm'.parameters,
+ try pubkey_cert_records:namedCurves(Oid) of
+ Curve ->
+ {{named_curve, Curve}, CipherSuite}
+ catch
+ _:_ ->
+ {no_curve, no_suite}
+ end;
+ _ ->
+ {ECCCurve0, CipherSuite}
+ end.
+
+
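The SubSign correction in select_hashsign above filters the client's hash/sign pairs against the algorithm of the key inside the server certificate (SubSign) rather than the algorithm the certificate happens to be signed with. Reduced to a plain lists:filter/2 call with illustrative values:

    HashSigns = [{sha512, rsa}, {sha256, ecdsa}, {sha, dsa}],
    SubSign = ecdsa,
    [{sha256, ecdsa}] = lists:filter(fun({_, S}) -> S =:= SubSign end, HashSigns).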
diff --git a/lib/ssl/src/ssl_internal.hrl b/lib/ssl/src/ssl_internal.hrl
index 5df00de0e5..977d012fa7 100644
--- a/lib/ssl/src/ssl_internal.hrl
+++ b/lib/ssl/src/ssl_internal.hrl
@@ -160,7 +160,7 @@
-record(config, {ssl, %% SSL parameters
inet_user, %% User set inet options
emulated, %% Emulated option list or "inherit_tracker" pid
- udp_handler,
+ dtls_handler,
inet_ssl, %% inet options for internal ssl socket
transport_info, %% Callback info
connection_cb
diff --git a/lib/ssl/test/ssl_ECC.erl b/lib/ssl/test/ssl_ECC.erl
index 489a72e50e..36d949f74b 100644
--- a/lib/ssl/test/ssl_ECC.erl
+++ b/lib/ssl/test/ssl_ECC.erl
@@ -34,53 +34,65 @@
%% ECDH_RSA
client_ecdh_rsa_server_ecdh_rsa(Config) when is_list(Config) ->
+ Ext = x509_test:extensions([{key_usage, [keyAgreement]}]),
Suites = all_rsa_suites(Config),
Default = ssl_test_lib:default_cert_chain_conf(),
- {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain, Default},
+ {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
+ [[], [], [{extensions, Ext}]]},
{client_chain, Default}],
ecdh_rsa, ecdh_rsa, Config),
ssl_test_lib:basic_test(ssl_test_lib:ssl_options(COpts, Config),
ssl_test_lib:ssl_options(SOpts, Config),
[{check_keyex, ecdh_rsa}, {ciphers, Suites} | proplists:delete(check_keyex, Config)]).
client_ecdhe_rsa_server_ecdh_rsa(Config) when is_list(Config) ->
+ Ext = x509_test:extensions([{key_usage, [keyAgreement]}]),
Suites = all_rsa_suites(Config),
Default = ssl_test_lib:default_cert_chain_conf(),
- {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain, Default},
+ {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
+ [[], [], [{extensions, Ext}]]},
{client_chain, Default}],
ecdhe_rsa, ecdh_rsa, Config),
ssl_test_lib:basic_test(ssl_test_lib:ssl_options(COpts, Config),
ssl_test_lib:ssl_options(SOpts, Config),
[{check_keyex, ecdh_rsa}, {ciphers, Suites} | proplists:delete(check_keyex, Config)]).
client_ecdhe_ecdsa_server_ecdh_rsa(Config) when is_list(Config) ->
+ Ext = x509_test:extensions([{key_usage, [keyAgreement]}]),
Suites = all_rsa_suites(Config),
Default = ssl_test_lib:default_cert_chain_conf(),
- {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain, Default},
+ {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
+ [[], [], [{extensions, Ext}]]},
{client_chain, Default}],
ecdhe_ecdsa, ecdh_rsa, Config),
ssl_test_lib:basic_test(ssl_test_lib:ssl_options(COpts, Config),
- ssl_test_lib:ssl_options(SOpts, Config),
- [{check_keyex, ecdh_rsa}, {ciphers, Suites} | proplists:delete(check_keyex, Config)]).
+ ssl_test_lib:ssl_options(SOpts, Config),
+ [{check_keyex, ecdh_rsa}, {ciphers, Suites} | proplists:delete(check_keyex, Config)]).
%% ECDHE_RSA
client_ecdh_rsa_server_ecdhe_rsa(Config) when is_list(Config) ->
+ Ext = x509_test:extensions([{key_usage, [digitalSignature]}]),
Default = ssl_test_lib:default_cert_chain_conf(),
- {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain, Default},
+ {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
+ [[], [], [{extensions, Ext}]]},
{client_chain, Default}],
ecdh_rsa, ecdhe_rsa, Config),
ssl_test_lib:basic_test(ssl_test_lib:ssl_options(COpts, Config),
ssl_test_lib:ssl_options(SOpts, Config),
[{check_keyex, ecdhe_rsa} | proplists:delete(check_keyex, Config)]).
client_ecdhe_rsa_server_ecdhe_rsa(Config) when is_list(Config) ->
+ Ext = x509_test:extensions([{key_usage, [digitalSignature]}]),
Default = ssl_test_lib:default_cert_chain_conf(),
- {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain, Default},
+ {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
+ [[], [], [{extensions, Ext}]]},
{client_chain, Default}],
ecdhe_rsa, ecdhe_rsa, Config),
ssl_test_lib:basic_test(ssl_test_lib:ssl_options(COpts, Config),
- ssl_test_lib:ssl_options(SOpts, Config),
+ ssl_test_lib:ssl_options(SOpts, Config),
[{check_keyex, ecdhe_rsa} | proplists:delete(check_keyex, Config)]).
client_ecdhe_ecdsa_server_ecdhe_rsa(Config) when is_list(Config) ->
+ Ext = x509_test:extensions([{key_usage, [digitalSignature]}]),
Default = ssl_test_lib:default_cert_chain_conf(),
- {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain, Default},
+ {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
+ [[], [], [{extensions, Ext}]]},
{client_chain, Default}],
ecdh_ecdsa, ecdhe_rsa, Config),
ssl_test_lib:basic_test(ssl_test_lib:ssl_options(COpts, Config),
@@ -89,7 +101,7 @@ client_ecdhe_ecdsa_server_ecdhe_rsa(Config) when is_list(Config) ->
%% ECDH_ECDSA
client_ecdh_ecdsa_server_ecdh_ecdsa(Config) when is_list(Config) ->
- Ext = x509_test:extensions([{key_usage, [keyEncipherment]}]),
+ Ext = x509_test:extensions([{key_usage, [keyAgreement]}]),
{COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
[[], [], [{extensions, Ext}]]},
{client_chain,
@@ -99,7 +111,7 @@ client_ecdh_ecdsa_server_ecdh_ecdsa(Config) when is_list(Config) ->
ssl_test_lib:ssl_options(SOpts, Config),
[{check_keyex, ecdh_ecdsa} | proplists:delete(check_keyex, Config)]).
client_ecdhe_rsa_server_ecdh_ecdsa(Config) when is_list(Config) ->
- Ext = x509_test:extensions([{key_usage, [keyEncipherment]}]),
+ Ext = x509_test:extensions([{key_usage, [keyAgreement]}]),
{COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
[[], [], [{extensions, Ext}]]},
{client_chain,
@@ -110,7 +122,7 @@ client_ecdhe_rsa_server_ecdh_ecdsa(Config) when is_list(Config) ->
[{check_keyex, ecdh_ecdsa} | proplists:delete(check_keyex, Config)]).
client_ecdhe_ecdsa_server_ecdh_ecdsa(Config) when is_list(Config) ->
- Ext = x509_test:extensions([{key_usage, [keyEncipherment]}]),
+ Ext = x509_test:extensions([{key_usage, [keyAgreement]}]),
{COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
[[], [], [{extensions, Ext}]]},
{client_chain,
@@ -122,24 +134,30 @@ client_ecdhe_ecdsa_server_ecdh_ecdsa(Config) when is_list(Config) ->
%% ECDHE_ECDSA
client_ecdh_rsa_server_ecdhe_ecdsa(Config) when is_list(Config) ->
- Default = ssl_test_lib:default_cert_chain_conf(),
- {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain, Default},
+ Ext = x509_test:extensions([{key_usage, [digitalSignature]}]),
+ Default = ssl_test_lib:default_cert_chain_conf(),
+ {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
+ [[], [], [{extensions, Ext}]]},
{client_chain, Default}],
ecdh_rsa, ecdhe_ecdsa, Config),
ssl_test_lib:basic_test(ssl_test_lib:ssl_options(COpts, Config),
ssl_test_lib:ssl_options(SOpts, Config),
[{check_keyex, ecdhe_ecdsa} | proplists:delete(check_keyex, Config)]).
client_ecdh_ecdsa_server_ecdhe_ecdsa(Config) when is_list(Config) ->
+ Ext = x509_test:extensions([{key_usage, [digitalSignature]}]),
Default = ssl_test_lib:default_cert_chain_conf(),
- {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain, Default},
+ {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
+ [[], [], [{extensions, Ext}]]},
{client_chain, Default}],
ecdh_ecdsa, ecdhe_ecdsa, Config),
ssl_test_lib:basic_test(ssl_test_lib:ssl_options(COpts, Config),
ssl_test_lib:ssl_options(SOpts, Config),
[{check_keyex, ecdhe_ecdsa} | proplists:delete(check_keyex, Config)]).
client_ecdhe_ecdsa_server_ecdhe_ecdsa(Config) when is_list(Config) ->
+ Ext = x509_test:extensions([{key_usage, [digitalSignature]}]),
Default = ssl_test_lib:default_cert_chain_conf(),
- {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain, Default},
+ {COpts, SOpts} = ssl_test_lib:make_ec_cert_chains([{server_chain,
+ [[], [], [{extensions, Ext}]]},
{client_chain, Default}],
ecdhe_ecdsa, ecdhe_ecdsa, Config),
ssl_test_lib:basic_test(ssl_test_lib:ssl_options(COpts, Config),
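The pattern repeated through the test changes above: servers negotiating static ECDH now get certificates whose key usage is keyAgreement, while ephemeral (ECDHE) servers get digitalSignature, matching the key-usage filtering added in ssl_cipher. The two extension values, as used in the cases above:

    StaticEcdhExt = x509_test:extensions([{key_usage, [keyAgreement]}]),
    EphemeralExt  = x509_test:extensions([{key_usage, [digitalSignature]}]).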
diff --git a/lib/ssl/test/ssl_ECC_openssl_SUITE.erl b/lib/ssl/test/ssl_ECC_openssl_SUITE.erl
index ba609aa0dc..5a08b152a6 100644
--- a/lib/ssl/test/ssl_ECC_openssl_SUITE.erl
+++ b/lib/ssl/test/ssl_ECC_openssl_SUITE.erl
@@ -33,39 +33,57 @@
%%--------------------------------------------------------------------
all() ->
- [
- {group, 'tlsv1.2'},
- {group, 'tlsv1.1'},
- {group, 'tlsv1'},
- {group, 'dtlsv1.2'},
- {group, 'dtlsv1'}
- ].
+ case test_cases() of
+ [_|_] ->
+ all_groups();
+ [] ->
+ [skip]
+ end.
+
+all_groups() ->
+ case ssl_test_lib:openssl_sane_dtls() of
+ true ->
+ [{group, 'tlsv1.2'},
+ {group, 'tlsv1.1'},
+ {group, 'tlsv1'},
+ {group, 'dtlsv1.2'},
+ {group, 'dtlsv1'}];
+ false ->
+ [{group, 'tlsv1.2'},
+ {group, 'tlsv1.1'},
+ {group, 'tlsv1'}]
+ end.
groups() ->
- [
- {'tlsv1.2', [], test_cases()},
- {'tlsv1.1', [], test_cases()},
- {'tlsv1', [], test_cases()},
- {'dtlsv1.2', [], test_cases()},
- {'dtlsv1', [], test_cases()}
- ].
+ case ssl_test_lib:openssl_sane_dtls() of
+ true ->
+ [{'tlsv1.2', [], test_cases()},
+ {'tlsv1.1', [], test_cases()},
+ {'tlsv1', [], test_cases()},
+ {'dtlsv1.2', [], test_cases()},
+ {'dtlsv1', [], test_cases()}];
+ false ->
+ [{'tlsv1.2', [], test_cases()},
+ {'tlsv1.1', [], test_cases()},
+ {'tlsv1', [], test_cases()}]
+ end.
test_cases()->
- %% cert_combinations().
- server_ecdh_rsa().
+ cert_combinations().
+
cert_combinations() ->
- lists:append(lists:filtermap(fun({Name, Suites}) ->
- case ssl_test_lib:openssl_filter(Name) of
- [] ->
- false;
- [_|_] ->
- {true, Suites}
- end
- end, [{"ECDH-RSA", server_ecdh_rsa()},
- {"ECDHE-RSA", server_ecdhe_rsa()},
- {"ECDH-ECDSA", server_ecdh_ecdsa()},
- {"ECDHE-ECDSA", server_ecdhe_ecdsa()}
- ])).
+ lists:append(lists:map(fun({Name, Suites}) ->
+ case ssl_test_lib:openssl_filter(Name) of
+ [] ->
+ [];
+ [_|_] ->
+ Suites
+ end
+ end, [{"ECDH-ECDSA", server_ecdh_ecdsa()},
+ {"ECDH-RSA", server_ecdh_rsa()},
+ {"ECDHE-RSA", server_ecdhe_rsa()},
+ {"ECDHE-ECDSA", server_ecdhe_ecdsa()}
+ ])).
server_ecdh_rsa() ->
[client_ecdh_rsa_server_ecdh_rsa,
client_ecdhe_rsa_server_ecdh_rsa,
@@ -91,11 +109,11 @@ init_per_suite(Config0) ->
end_per_suite(Config0),
try crypto:start() of
ok ->
- case ssl_test_lib:sufficient_crypto_support(cipher_ec) of
+ case ssl_test_lib:sufficient_crypto_support(cipher_ec) of
true ->
Config0;
false ->
- {skip, "Crypto does not support ECC"}
+ {skip, "Openssl does not support ECC"}
end
catch _:_ ->
{skip, "Crypto did not start"}
@@ -131,14 +149,15 @@ end_per_group(GroupName, Config0) ->
end.
%%--------------------------------------------------------------------
-
+init_per_testcase(skip, Config) ->
+ Config;
init_per_testcase(TestCase, Config) ->
ssl_test_lib:ct_log_supported_protocol_versions(Config),
Version = proplists:get_value(tls_version, Config),
ct:log("Ciphers: ~p~n ", [ssl:cipher_suites(default, Version)]),
end_per_testcase(TestCase, Config),
ssl:start(),
- ct:timetrap({seconds, 15}),
+ ct:timetrap({seconds, 30}),
Config.
end_per_testcase(_TestCase, Config) ->
@@ -149,6 +168,9 @@ end_per_testcase(_TestCase, Config) ->
%% Test Cases --------------------------------------------------------
%%--------------------------------------------------------------------
+skip(Config) when is_list(Config) ->
+ {skip, openssl_does_not_support_ECC}.
+
%% Test diffrent certificate chain types, note that it is the servers
%% chain that affect what cipher suit that will be choosen
diff --git a/lib/ssl/test/ssl_basic_SUITE.erl b/lib/ssl/test/ssl_basic_SUITE.erl
index fe4f02f100..162c63850f 100644
--- a/lib/ssl/test/ssl_basic_SUITE.erl
+++ b/lib/ssl/test/ssl_basic_SUITE.erl
@@ -273,7 +273,8 @@ init_per_suite(Config0) ->
proplists:get_value(priv_dir, Config0)),
Config1 = ssl_test_lib:make_dsa_cert(Config0),
Config2 = ssl_test_lib:make_ecdsa_cert(Config1),
- Config = ssl_test_lib:make_ecdh_rsa_cert(Config2),
+ Config3 = ssl_test_lib:make_rsa_cert(Config2),
+ Config = ssl_test_lib:make_ecdh_rsa_cert(Config3),
ssl_test_lib:cert_options(Config)
catch _:_ ->
{skip, "Crypto did not start"}
@@ -685,11 +686,16 @@ hello_client_cancel(Config) when is_list(Config) ->
{host, Hostname},
{from, self()},
{options, ssl_test_lib:ssl_options([{handshake, hello}], Config)},
- {continue_options, cancel}]),
-
- ssl_test_lib:check_result(Server, {error, {tls_alert, "user canceled"}}).
-%%--------------------------------------------------------------------
+ {continue_options, cancel}]),
+ receive
+ {Server, {error, {tls_alert, "user canceled"}}} ->
+ ok;
+ {Server, {error, closed}} ->
+ ct:pal("Did not receive the ALERT"),
+ ok
+ end.
+%%--------------------------------------------------------------------
hello_server_cancel() ->
[{doc, "Test API function ssl:handshake_cancel/1 on the server side"}].
hello_server_cancel(Config) when is_list(Config) ->
@@ -2538,7 +2544,7 @@ anonymous_cipher_suites()->
[{doc,"Test the anonymous ciphersuites"}].
anonymous_cipher_suites(Config) when is_list(Config) ->
NVersion = ssl_test_lib:protocol_version(Config, tuple),
- Ciphers = ssl_test_lib:anonymous_suites(NVersion),
+ Ciphers = ssl_test_lib:ecdh_dh_anonymous_suites(NVersion),
run_suites(Ciphers, Config, anonymous).
%%-------------------------------------------------------------------
psk_cipher_suites() ->
@@ -2634,7 +2640,7 @@ default_reject_anonymous(Config) when is_list(Config) ->
Version = ssl_test_lib:protocol_version(Config),
TLSVersion = ssl_test_lib:tls_version(Version),
- [CipherSuite | _] = ssl_test_lib:anonymous_suites(TLSVersion),
+ [CipherSuite | _] = ssl_test_lib:ecdh_dh_anonymous_suites(TLSVersion),
Server = ssl_test_lib:start_server_error([{node, ServerNode}, {port, 0},
{from, self()},
@@ -3180,10 +3186,10 @@ der_input(Config) when is_list(Config) ->
Size = ets:info(CADb, size),
- SeverVerifyOpts = ssl_test_lib:ssl_options(server_opts, Config),
+ SeverVerifyOpts = ssl_test_lib:ssl_options(server_rsa_opts, Config),
{ServerCert, ServerKey, ServerCaCerts, DHParams} = der_input_opts([{dhfile, DHParamFile} |
SeverVerifyOpts]),
- ClientVerifyOpts = ssl_test_lib:ssl_options(client_opts, Config),
+ ClientVerifyOpts = ssl_test_lib:ssl_options(client_rsa_opts, Config),
{ClientCert, ClientKey, ClientCaCerts, DHParams} = der_input_opts([{dhfile, DHParamFile} |
ClientVerifyOpts]),
ServerOpts = [{verify, verify_peer}, {fail_if_no_peer_cert, true},
@@ -5045,8 +5051,14 @@ tls_downgrade_result(Socket) ->
tls_close(Socket) ->
ok = ssl_test_lib:send_recv_result(Socket),
- ok = ssl:close(Socket, 5000).
-
+ case ssl:close(Socket, 5000) of
+ ok ->
+ ok;
+ {error, closed} ->
+ ok;
+ Other ->
+ ct:fail(Other)
+ end.
%% First two clauses handles 1/n-1 splitting countermeasure Rizzo/Duong-Beast
treashold(N, {3,0}) ->
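tls_close/1 above now accepts {error, closed} as well as ok, since the peer may already have torn the connection down before the 5-second close timeout expires. The same tolerance expressed as a stand-alone helper (hypothetical name, a sketch only):

    %% Treat an already-closed connection as a successful close.
    close_or_already_closed(SslSocket, Timeout) ->
        case ssl:close(SslSocket, Timeout) of
            ok -> ok;
            {error, closed} -> ok;
            Other -> {error, Other}
        end.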
diff --git a/lib/ssl/test/ssl_test_lib.erl b/lib/ssl/test/ssl_test_lib.erl
index 3a7e844cf8..1e88ca15de 100644
--- a/lib/ssl/test/ssl_test_lib.erl
+++ b/lib/ssl/test/ssl_test_lib.erl
@@ -1264,8 +1264,16 @@ string_regex_filter(Str, Search) when is_list(Str) ->
string_regex_filter(_Str, _Search) ->
false.
-anonymous_suites(Version) ->
- ssl:filter_cipher_suites([ssl_cipher:suite_definition(S) || S <- ssl_cipher:anonymous_suites(Version)],[]).
+ecdh_dh_anonymous_suites(Version) ->
+ ssl:filter_cipher_suites([ssl_cipher:suite_definition(S) || S <- ssl_cipher:anonymous_suites(Version)],
+ [{key_exchange,
+ fun(dh_anon) ->
+ true;
+ (ecdh_anon) ->
+ true;
+ (_) ->
+ false
+ end}]).
psk_suites(Version) ->
ssl:filter_cipher_suites([ssl_cipher:suite_definition(S) || S <- ssl_cipher:psk_suites(Version)], []).
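
A minimal usage sketch of the same filtering API outside the test code (the suite list and
version below are illustrative assumptions): ssl:filter_cipher_suites/2 takes a list of cipher
suites and a list of filter funs keyed on key_exchange, cipher, mac or prf.

    %% Sketch: keep only ECDHE key exchanges from the default TLS 1.2 suites.
    EcdheSuites =
        ssl:filter_cipher_suites(
          ssl:cipher_suites(default, 'tlsv1.2'),
          [{key_exchange, fun(ecdhe_rsa)   -> true;
                             (ecdhe_ecdsa) -> true;
                             (_)           -> false
                          end}]).
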
@@ -1434,16 +1442,33 @@ sufficient_crypto_support(_) ->
check_key_exchange_send_active(Socket, false) ->
send_recv_result_active(Socket);
check_key_exchange_send_active(Socket, KeyEx) ->
- {ok, [{cipher_suite, Suite}]} = ssl:connection_information(Socket, [cipher_suite]),
- true = check_key_exchange(Suite, KeyEx),
+ {ok, Info} =
+ ssl:connection_information(Socket, [cipher_suite, protocol]),
+ Suite = proplists:get_value(cipher_suite, Info),
+ Version = proplists:get_value(protocol, Info),
+ true = check_key_exchange(Suite, KeyEx, Version),
send_recv_result_active(Socket).
-check_key_exchange({KeyEx,_, _}, KeyEx) ->
+check_key_exchange({KeyEx,_, _}, KeyEx, _) ->
+ true;
+check_key_exchange({KeyEx,_,_,_}, KeyEx, _) ->
+ true;
+check_key_exchange(KeyEx1, KeyEx2, Version) ->
+ case Version of
+ 'tlsv1.2' ->
+ v_1_2_check(element(1, KeyEx1), KeyEx2);
+ 'dtlsv1.2' ->
+ v_1_2_check(element(1, KeyEx1), KeyEx2);
+ _ ->
+ ct:pal("Negotiated ~p Expected ~p", [KeyEx1, KeyEx2]),
+ false
+ end.
+
+v_1_2_check(ecdh_ecdsa, ecdh_rsa) ->
true;
-check_key_exchange({KeyEx,_,_,_}, KeyEx) ->
+v_1_2_check(ecdh_rsa, ecdh_ecdsa) ->
true;
-check_key_exchange(KeyEx1, KeyEx2) ->
- ct:pal("Negotiated ~p Expected ~p", [KeyEx1, KeyEx2]),
+v_1_2_check(_, _) ->
false.
send_recv_result_active(Socket) ->
@@ -1567,12 +1592,62 @@ openssl_dsa_support() ->
true
end.
+%% Actual support is tested elsewhere; this is to exclude some LibreSSL and OpenSSL versions
+openssl_sane_dtls() ->
+ case os:cmd("openssl version") of
+ "OpenSSL 0." ++ _ ->
+ false;
+ "OpenSSL 1.0.1s-freebsd" ++ _ ->
+ false;
+ "OpenSSL 1.0.2k-freebsd" ++ _ ->
+ false;
+ "OpenSSL 1.0.2d" ++ _ ->
+ false;
+ "OpenSSL 1.0.2n" ++ _ ->
+ false;
+ "OpenSSL 1.0.2m" ++ _ ->
+ false;
+ "OpenSSL 1.0.0" ++ _ ->
+ false;
+ "OpenSSL" ++ _ ->
+ true;
+ "LibreSSL 2.7" ++ _ ->
+ true;
+ _ ->
+ false
+ end.
+openssl_sane_client_cert() ->
+ case os:cmd("openssl version") of
+ "LibreSSL 2.5.2" ++ _ ->
+ true;
+ "LibreSSL 2.4" ++ _ ->
+ false;
+ "LibreSSL 2.3" ++ _ ->
+ false;
+ "LibreSSL 2.1" ++ _ ->
+ false;
+ "LibreSSL 2.0" ++ _ ->
+ false;
+ "LibreSSL 2.0" ++ _ ->
+ false;
+ "OpenSSL 1.0.1s-freebsd" ->
+ false;
+ "OpenSSL 1.0.0" ++ _ ->
+ false;
+ _ ->
+ true
+ end.
+
check_sane_openssl_version(Version) ->
case supports_ssl_tls_version(Version) of
true ->
case {Version, os:cmd("openssl version")} of
{'sslv3', "OpenSSL 1.0.2" ++ _} ->
false;
+ {'dtlsv1', _} ->
+ not is_fips(openssl);
+ {'dtlsv1.2', _} ->
+ not is_fips(openssl);
{_, "OpenSSL 1.0.2" ++ _} ->
true;
{_, "OpenSSL 1.0.1" ++ _} ->
@@ -1581,7 +1656,7 @@ check_sane_openssl_version(Version) ->
false;
{'tlsv1.1', "OpenSSL 1.0.0" ++ _} ->
false;
- {'dtlsv1.2', "OpenSSL 1.0.0" ++ _} ->
+ {'dtlsv1.2', "OpenSSL 1.0.2" ++ _} ->
false;
{'dtlsv1', "OpenSSL 1.0.0" ++ _} ->
false;
@@ -1703,9 +1778,12 @@ supports_ssl_tls_version(sslv2 = Version) ->
VersionFlag = version_flag(Version),
Exe = "openssl",
Args = ["s_client", VersionFlag],
+ [{trap_exit, Trap}] = process_info(self(), [trap_exit]),
+ process_flag(trap_exit, true),
Port = ssl_test_lib:portable_open_port(Exe, Args),
Bool = do_supports_ssl_tls_version(Port, ""),
consume_port_exit(Port),
+ process_flag(trap_exit, Trap),
Bool
end;
diff --git a/lib/ssl/test/ssl_to_openssl_SUITE.erl b/lib/ssl/test/ssl_to_openssl_SUITE.erl
index 9c60a6315e..4f02d8d15d 100644
--- a/lib/ssl/test/ssl_to_openssl_SUITE.erl
+++ b/lib/ssl/test/ssl_to_openssl_SUITE.erl
@@ -37,26 +37,43 @@
%%--------------------------------------------------------------------
all() ->
- [
- {group, basic},
- {group, 'tlsv1.2'},
- {group, 'tlsv1.1'},
- {group, 'tlsv1'},
- {group, 'sslv3'},
- {group, 'dtlsv1.2'},
- {group, 'dtlsv1'}
- ].
+ case ssl_test_lib:openssl_sane_dtls() of
+ true ->
+ [{group, basic},
+ {group, 'tlsv1.2'},
+ {group, 'tlsv1.1'},
+ {group, 'tlsv1'},
+ {group, 'sslv3'},
+ {group, 'dtlsv1.2'},
+ {group, 'dtlsv1'}];
+ false ->
+ [{group, basic},
+ {group, 'tlsv1.2'},
+ {group, 'tlsv1.1'},
+ {group, 'tlsv1'},
+ {group, 'sslv3'}]
+ end.
groups() ->
- [{basic, [], basic_tests()},
- {'tlsv1.2', [], all_versions_tests() ++ alpn_tests() ++ npn_tests() ++ sni_server_tests()},
- {'tlsv1.1', [], all_versions_tests() ++ alpn_tests() ++ npn_tests() ++ sni_server_tests()},
- {'tlsv1', [], all_versions_tests()++ alpn_tests() ++ npn_tests() ++ sni_server_tests()},
- {'sslv3', [], all_versions_tests()},
- {'dtlsv1.2', [], dtls_all_versions_tests()},
- {'dtlsv1', [], dtls_all_versions_tests()}
- ].
-
+ case ssl_test_lib:openssl_sane_dtls() of
+ true ->
+ [{basic, [], basic_tests()},
+ {'tlsv1.2', [], all_versions_tests() ++ alpn_tests() ++ npn_tests() ++ sni_server_tests()},
+ {'tlsv1.1', [], all_versions_tests() ++ alpn_tests() ++ npn_tests() ++ sni_server_tests()},
+ {'tlsv1', [], all_versions_tests()++ alpn_tests() ++ npn_tests() ++ sni_server_tests()},
+ {'sslv3', [], all_versions_tests()},
+ {'dtlsv1.2', [], dtls_all_versions_tests()},
+ {'dtlsv1', [], dtls_all_versions_tests()}
+ ];
+ false ->
+ [{basic, [], basic_tests()},
+ {'tlsv1.2', [], all_versions_tests() ++ alpn_tests() ++ npn_tests() ++ sni_server_tests()},
+ {'tlsv1.1', [], all_versions_tests() ++ alpn_tests() ++ npn_tests() ++ sni_server_tests()},
+ {'tlsv1', [], all_versions_tests()++ alpn_tests() ++ npn_tests() ++ sni_server_tests()},
+ {'sslv3', [], all_versions_tests()}
+ ]
+ end.
+
basic_tests() ->
[basic_erlang_client_openssl_server,
basic_erlang_server_openssl_client,
@@ -85,9 +102,20 @@ all_versions_tests() ->
expired_session,
ssl2_erlang_server_openssl_client
].
+
dtls_all_versions_tests() ->
- [
- erlang_client_openssl_server,
+ case ssl_test_lib:openssl_sane_client_cert() of
+ true ->
+ [erlang_server_openssl_client_client_cert,
+ erlang_client_openssl_server_no_server_ca_cert,
+ erlang_client_openssl_server_client_cert
+ | dtls_all_versions_tests_2()];
+ false ->
+ dtls_all_versions_tests_2()
+ end.
+
+dtls_all_versions_tests_2() ->
+ [erlang_client_openssl_server,
erlang_server_openssl_client,
erlang_client_openssl_server_dsa_cert,
erlang_server_openssl_client_dsa_cert,
@@ -98,12 +126,8 @@ dtls_all_versions_tests() ->
erlang_client_openssl_server_renegotiate,
erlang_client_openssl_server_nowrap_seqnum,
erlang_server_openssl_client_nowrap_seqnum,
- erlang_client_openssl_server_no_server_ca_cert,
- erlang_client_openssl_server_client_cert,
- erlang_server_openssl_client_client_cert,
ciphers_rsa_signed_certs,
ciphers_dsa_signed_certs
- %%erlang_client_bad_openssl_server,
%%expired_session
].
@@ -167,7 +191,15 @@ end_per_suite(_Config) ->
application:stop(crypto).
init_per_group(basic, Config0) ->
- ssl_test_lib:clean_tls_version(Config0);
+ case ssl_test_lib:supports_ssl_tls_version('tlsv1.2')
+ orelse ssl_test_lib:supports_ssl_tls_version('tlsv1.1')
+ orelse ssl_test_lib:supports_ssl_tls_version('tlsv1')
+ of
+ true ->
+ ssl_test_lib:clean_tls_version(Config0);
+ false ->
+ {skip, "only sslv3 supported by OpenSSL"}
+ end;
init_per_group(GroupName, Config) ->
case ssl_test_lib:is_tls_version(GroupName) of
@@ -380,8 +412,16 @@ basic_erlang_server_openssl_client(Config) when is_list(Config) ->
Port = ssl_test_lib:inet_port(Server),
Exe = "openssl",
- Args = ["s_client", "-connect", hostname_format(Hostname) ++
- ":" ++ integer_to_list(Port) ++ no_v2_flag() | workaround_openssl_s_clinent()],
+ Args = case no_low_flag("-no_ssl2") of
+ [] ->
+ ["s_client", "-connect", hostname_format(Hostname) ++
+ ":" ++ integer_to_list(Port), no_low_flag("-no_ssl3")
+ | workaround_openssl_s_clinent()];
+ Flag ->
+ ["s_client", "-connect", hostname_format(Hostname) ++
+ ":" ++ integer_to_list(Port), no_low_flag("-no_ssl3"), Flag
+ | workaround_openssl_s_clinent()]
+ end,
OpenSslPort = ssl_test_lib:portable_open_port(Exe, Args),
true = port_command(OpenSslPort, Data),
@@ -556,7 +596,7 @@ erlang_client_openssl_server_anon(Config) when is_list(Config) ->
ServerOpts = ssl_test_lib:ssl_options(server_rsa_opts, Config),
ClientOpts = ssl_test_lib:ssl_options(client_anon_opts, Config),
VersionTuple = ssl_test_lib:protocol_version(Config, tuple),
- Ciphers = ssl_test_lib:anonymous_suites(VersionTuple),
+ Ciphers = ssl_test_lib:ecdh_dh_anonymous_suites(VersionTuple),
{ClientNode, _, Hostname} = ssl_test_lib:run_where(Config),
@@ -599,7 +639,7 @@ erlang_server_openssl_client_anon(Config) when is_list(Config) ->
process_flag(trap_exit, true),
ServerOpts = ssl_test_lib:ssl_options(server_anon_opts, Config),
VersionTuple = ssl_test_lib:protocol_version(Config, tuple),
- Ciphers = ssl_test_lib:anonymous_suites(VersionTuple),
+ Ciphers = ssl_test_lib:ecdh_dh_anonymous_suites(VersionTuple),
{_, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
@@ -633,7 +673,7 @@ erlang_server_openssl_client_anon_with_cert(Config) when is_list(Config) ->
process_flag(trap_exit, true),
ServerOpts = ssl_test_lib:ssl_options(server_rsa_opts, Config),
VersionTuple = ssl_test_lib:protocol_version(Config, tuple),
- Ciphers = ssl_test_lib:anonymous_suites(VersionTuple),
+ Ciphers = ssl_test_lib:ecdh_dh_anonymous_suites(VersionTuple),
{_, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
@@ -1963,10 +2003,12 @@ hostname_format(Hostname) ->
"localhost"
end.
-no_v2_flag() ->
+no_low_flag("-no_ssl2" = Flag) ->
case ssl_test_lib:supports_ssl_tls_version(sslv2) of
true ->
- " -no_ssl2 ";
+ Flag;
false ->
""
- end.
+ end;
+no_low_flag(Flag) ->
+ Flag.
diff --git a/lib/ssl/vsn.mk b/lib/ssl/vsn.mk
index 0ff22c5eab..eb85a55717 100644
--- a/lib/ssl/vsn.mk
+++ b/lib/ssl/vsn.mk
@@ -1 +1 @@
-SSL_VSN = 8.2.5
+SSL_VSN = 8.2.6
diff --git a/lib/stdlib/doc/src/Makefile b/lib/stdlib/doc/src/Makefile
index 508a4fa2de..5c6b714f80 100644
--- a/lib/stdlib/doc/src/Makefile
+++ b/lib/stdlib/doc/src/Makefile
@@ -71,7 +71,6 @@ XML_REF3_FILES = \
gen_statem.xml \
io.xml \
io_lib.xml \
- lib.xml \
lists.xml \
log_mf_h.xml \
maps.xml \
diff --git a/lib/stdlib/doc/src/calendar.xml b/lib/stdlib/doc/src/calendar.xml
index 8f2b6b747a..6b4fa7f98a 100644
--- a/lib/stdlib/doc/src/calendar.xml
+++ b/lib/stdlib/doc/src/calendar.xml
@@ -323,7 +323,9 @@
<type name="rfc3339_string"/>
<type name="rfc3339_time_unit"/>
<desc>
- <p>Converts an RFC 3339 timestamp into system time.</p>
+ <p>Converts an RFC 3339 timestamp into system time. The data format
+ of RFC 3339 timestamps is described by
+ <url href="https://www.ietf.org/rfc/rfc3339.txt">RFC 3339</url>.</p>
<p>Valid option:</p>
<taglist>
<tag><c>{unit, Unit}</c></tag>
@@ -378,7 +380,10 @@
<type name="rfc3339_string"/>
<type name="rfc3339_time_unit"/>
<desc>
- <p>Converts a system time into RFC 3339 timestamp.</p>
+ <p>Converts a system time into an RFC 3339 timestamp. The data format
+ of RFC 3339 timestamps is described by
+ <url href="https://www.ietf.org/rfc/rfc3339.txt">RFC 3339</url>.
+ The data format of offsets is also described by RFC 3339.</p>
<p>Valid options:</p>
<taglist>
<tag><c>{offset, Offset}</c></tag>
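
As a usage sketch of the two conversions documented above (the timestamp value and options
are illustrative):

    %% Sketch: round-trip a timestamp at millisecond resolution.
    Ms  = calendar:rfc3339_to_system_time("2018-02-01T16:17:58.123+01:00",
                                          [{unit, millisecond}]),
    Str = calendar:system_time_to_rfc3339(Ms, [{unit, millisecond},
                                               {offset, "+01:00"}]).
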
diff --git a/lib/stdlib/doc/src/ets.xml b/lib/stdlib/doc/src/ets.xml
index 305376a425..1995262145 100644
--- a/lib/stdlib/doc/src/ets.xml
+++ b/lib/stdlib/doc/src/ets.xml
@@ -49,14 +49,16 @@
associated with each key. A <c>bag</c> or <c>duplicate_bag</c> table can
have many objects associated with each key.</p>
+ <marker id="max_ets_tables"></marker>
<note>
<p>
The number of tables stored at one Erlang node <em>used</em> to
be limited. This is no longer the case (except by memory usage).
The previous default limit was about 1400 tables and
could be increased by setting the environment variable
- <c>ERL_MAX_ETS_TABLES</c> before starting the Erlang runtime
- system. This hard limit has been removed, but it is currently
+ <c>ERL_MAX_ETS_TABLES</c> or the command line option
+ <seealso marker="erts:erl#+e"><c>+e</c></seealso> before starting the
+ Erlang runtime system. This hard limit has been removed, but it is currently
useful to set the <c>ERL_MAX_ETS_TABLES</c> anyway. It should be
set to an approximation of the maximum number of tables used. This is because
an internal table for named tables is sized using this value. If
diff --git a/lib/stdlib/doc/src/gen_event.xml b/lib/stdlib/doc/src/gen_event.xml
index 012737c390..6170801e87 100644
--- a/lib/stdlib/doc/src/gen_event.xml
+++ b/lib/stdlib/doc/src/gen_event.xml
@@ -207,7 +207,7 @@ gen_event:stop -----> Module:terminate/2
</item>
<item>
<p>If the event handler is deleted later, the event manager
- sends a message<c>{gen_event_EXIT,Handler,Reason}</c> to
+ sends a message <c>{gen_event_EXIT,Handler,Reason}</c> to
the calling process. <c>Reason</c> is one of the following:</p>
<list type="bulleted">
<item>
@@ -458,8 +458,7 @@ gen_event:stop -----> Module:terminate/2
with the expected reason. Any other reason than <c>normal</c>,
<c>shutdown</c>, or <c>{shutdown,Term}</c> causes an
error report to be issued using
- <seealso marker="kernel:error_logger#format/2">
- <c>error_logger:format/2</c></seealso>.
+ <seealso marker="kernel:logger"><c>logger(3)</c></seealso>.
The default <c>Reason</c> is <c>normal</c>.</p>
<p><c>Timeout</c> is an integer greater than zero that
specifies how many milliseconds to wait for the event manager to
diff --git a/lib/stdlib/doc/src/gen_server.xml b/lib/stdlib/doc/src/gen_server.xml
index da74e793e6..27edbc8de7 100644
--- a/lib/stdlib/doc/src/gen_server.xml
+++ b/lib/stdlib/doc/src/gen_server.xml
@@ -486,8 +486,7 @@ gen_server:abcast -----> Module:handle_cast/2
with the expected reason. Any other reason than <c>normal</c>,
<c>shutdown</c>, or <c>{shutdown,Term}</c> causes an
error report to be issued using
- <seealso marker="kernel:error_logger#format/2">
- <c>error_logger:format/2</c></seealso>.
+ <seealso marker="kernel:logger"><c>logger(3)</c></seealso>.
The default <c>Reason</c> is <c>normal</c>.</p>
<p><c>Timeout</c> is an integer greater than zero that
specifies how many milliseconds to wait for the server to
@@ -861,8 +860,7 @@ gen_server:abcast -----> Module:handle_cast/2
<c>shutdown</c>, or <c>{shutdown,Term}</c>, the <c>gen_server</c>
process is assumed to terminate because of an error and
an error report is issued using
- <seealso marker="kernel:error_logger#format/2">
- <c>error_logger:format/2</c></seealso>.</p>
+ <seealso marker="kernel:logger"><c>logger(3)</c></seealso>.</p>
</desc>
</func>
</funcs>
diff --git a/lib/stdlib/doc/src/gen_statem.xml b/lib/stdlib/doc/src/gen_statem.xml
index e918e83df7..a808d3af55 100644
--- a/lib/stdlib/doc/src/gen_statem.xml
+++ b/lib/stdlib/doc/src/gen_statem.xml
@@ -1778,7 +1778,7 @@ handle_event(_, _, State, Data) ->
with the expected reason. Any other reason than <c>normal</c>,
<c>shutdown</c>, or <c>{shutdown,Term}</c> causes an
error report to be issued through
- <seealso marker="kernel:error_logger#format/2"><c>error_logger:format/2</c></seealso>.
+ <seealso marker="kernel:logger"><c>logger(3)</c></seealso>.
The default <c><anno>Reason</anno></c> is <c>normal</c>.
</p>
<p>
@@ -2286,7 +2286,7 @@ init(Args) -> erlang:error(not_implemented, [Args]).</pre>
<c>shutdown</c>, or <c>{shutdown,Term}</c>,
the <c>gen_statem</c> is assumed to terminate because of an error
and an error report is issued using
- <seealso marker="kernel:error_logger#format/2"><c>error_logger:format/2</c></seealso>.
+ <seealso marker="kernel:logger"><c>logger(3)</c></seealso>.
</p>
</desc>
</func>
diff --git a/lib/stdlib/doc/src/io_lib.xml b/lib/stdlib/doc/src/io_lib.xml
index 4a2b425e8e..a3df2897ac 100644
--- a/lib/stdlib/doc/src/io_lib.xml
+++ b/lib/stdlib/doc/src/io_lib.xml
@@ -163,16 +163,20 @@
<p>Returns a character list that represents <c><anno>Data</anno></c>
formatted in accordance with <c><anno>Format</anno></c> in
the same way as
- <seealso marker="#fwrite/2"><c>fwrite/2</c></seealso> and
- <seealso marker="#format/2"><c>format/2</c></seealso>,
- but takes an extra argument, a list of options.</p>
- <p>Available options:</p>
- <taglist>
- <tag><c><anno>CharsLimit</anno></c></tag>
- <item>
- <p>A soft limit on the number of characters returned.</p>
- </item>
- </taglist>
+ <seealso marker="#fwrite/2"><c>fwrite/2</c></seealso> and
+ <seealso marker="#format/2"><c>format/2</c></seealso>,
+ but takes an extra argument, a list of options.</p>
+ <p>Valid option:</p>
+ <taglist>
+ <tag><c>{chars_limit, <anno>CharsLimit</anno>}</c></tag>
+ <item>
+ <p>A soft limit on the number of characters returned.
+ When the number of characters is reached, remaining
+ structures are replaced by "<c>...</c>".
+ <c><anno>CharsLimit</anno></c> defaults to -1, which
+ means no limit on the number of characters returned.</p>
+ </item>
+ </taglist>
</desc>
</func>
@@ -390,11 +394,11 @@
everything below this level is replaced by "<c>...</c>".
<c><anno>Depth</anno></c> defaults to -1, which means
no limitation. Option <c><anno>CharsLimit</anno></c> puts a
- soft limit on the number of characters returned. When the
- number of characters is reached, remaining structures are
- replaced by "<c>...</c>". <c><anno>CharsLimit</anno></c>
- defaults to -1, which means no limit on the number of
- characters returned.</p>
+ soft limit on the number of characters returned. When the
+ number of characters is reached, remaining structures are
+ replaced by "<c>...</c>". <c><anno>CharsLimit</anno></c>
+ defaults to -1, which means no limit on the number of
+ characters returned.</p>
<p><em>Example:</em></p>
<pre>
1> <input>lists:flatten(io_lib:write({1,[2],[3],[4,5],6,7,8,9})).</input>
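
A short sketch of the chars_limit option described above (the term being printed is
illustrative):

    %% Sketch: soft-limit the formatted output; the tail is replaced by "...".
    Chars = io_lib:format("~p", [lists:seq(1, 1000)], [{chars_limit, 60}]).
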
diff --git a/lib/stdlib/doc/src/lib.xml b/lib/stdlib/doc/src/lib.xml
deleted file mode 100644
index 58dad7c9e0..0000000000
--- a/lib/stdlib/doc/src/lib.xml
+++ /dev/null
@@ -1,103 +0,0 @@
-<?xml version="1.0" encoding="utf-8" ?>
-<!DOCTYPE erlref SYSTEM "erlref.dtd">
-
-<erlref>
- <header>
- <copyright>
- <year>1996</year><year>2016</year>
- <holder>Ericsson AB. All Rights Reserved.</holder>
- </copyright>
- <legalnotice>
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
- </legalnotice>
-
- <title>lib</title>
- <prepared></prepared>
- <docno></docno>
- <date></date>
- <rev></rev>
- </header>
- <module>lib</module>
- <modulesummary>Useful library functions.</modulesummary>
- <description>
- <warning>
- <p>This module is retained for backward compatibility. It can disappear
- without warning in a future Erlang/OTP release.</p>
- </warning>
- </description>
-
- <funcs>
- <func>
- <name name="error_message" arity="2"/>
- <fsummary>Print error message.</fsummary>
- <desc>
- <p>Prints error message <c><anno>Args</anno></c> in accordance with
- <c><anno>Format</anno></c>. Similar to
- <seealso marker="io#format/1"><c>io:format/2</c></seealso>.</p>
- </desc>
- </func>
-
- <func>
- <name name="flush_receive" arity="0"/>
- <fsummary>Flush messages.</fsummary>
- <desc>
- <p>Flushes the message buffer of the current process.</p>
- </desc>
- </func>
-
- <func>
- <name name="nonl" arity="1"/>
- <fsummary>Remove last newline.</fsummary>
- <desc>
- <p>Removes the last newline character, if any, in
- <c><anno>String1</anno></c>.</p>
- </desc>
- </func>
-
- <func>
- <name name="progname" arity="0"/>
- <fsummary>Return name of Erlang start script.</fsummary>
- <desc>
- <p>Returns the name of the script that started the current
- Erlang session.</p>
- </desc>
- </func>
-
- <func>
- <name name="send" arity="2"/>
- <fsummary>Send a message.</fsummary>
- <desc>
- <p>Makes it possible to send a message using the <c>apply/3</c> BIF.</p>
- </desc>
- </func>
-
- <func>
- <name name="sendw" arity="2"/>
- <fsummary>Send a message and wait for an answer.</fsummary>
- <desc>
- <p>As <seealso marker="#send/2"><c>send/2</c></seealso>,
- but waits for an answer. It is implemented as follows:</p>
- <code type="none">
-sendw(To, Msg) ->
- To ! {self(),Msg},
- receive
- Reply -> Reply
- end.</code>
- <p>The returned message is not necessarily a reply to the sent
- message.</p>
- </desc>
- </func>
- </funcs>
-</erlref>
-
diff --git a/lib/stdlib/doc/src/proc_lib.xml b/lib/stdlib/doc/src/proc_lib.xml
index cb152d1935..51380ae51c 100644
--- a/lib/stdlib/doc/src/proc_lib.xml
+++ b/lib/stdlib/doc/src/proc_lib.xml
@@ -59,18 +59,17 @@
<p>When a process that is started using <c>proc_lib</c> terminates
abnormally (that is, with another exit reason than <c>normal</c>,
<c>shutdown</c>, or <c>{shutdown,Term}</c>), a <em>crash report</em>
- is generated, which is written to terminal by the default SASL
- event handler. That is, the crash report is normally only visible
- if the SASL application is started; see
- <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso> and section
+ is generated, which is written to the terminal by the default logger
+ handler set up by Kernel. For more information about how crash reports
+ were logged prior to Erlang/OTP 21.0, see
<seealso marker="sasl:error_logging">SASL Error Logging</seealso>
in the SASL User's Guide.</p>
<p>Unlike in "plain Erlang", <c>proc_lib</c> processes will not generate
<em>error reports</em>, which are written to the terminal by the
- emulator and do not require SASL to be started. All exceptions are
+ emulator. All exceptions are
converted to <em>exits</em> which are ignored by the default
- <c>error_logger</c> handler.</p>
+ <c>logger</c> handler.</p>
<p>The crash report contains the previously stored information, such
as ancestors and initial function, the termination reason, and
@@ -115,12 +114,22 @@
<name name="format" arity="2"/>
<fsummary>Format a crash report.</fsummary>
<desc>
- <p>This function can be used by a user-defined event handler to
+ <note>
+ <p>This function is deprecated in the sense that
+ the <c>error_logger</c> is no longer the preferred
+ interface for logging in Erlang/OTP. A
+ new <seealso marker="kernel:logger_chapter">logging
+ API</seealso> was added in Erlang/OTP 21.0, but
+ legacy <c>error_logger</c> handlers can still be used. New
+ Logger handlers do not need to use this function, since
+ the formatting callback (<c>report_cb</c>) is included as
+ metadata in the log event.</p>
+ </note>
+ <p>This function can be used by a user-defined legacy
+ <c>error_logger</c> event handler to
format a crash report. The crash report is sent using
- <seealso marker="kernel:error_logger#error_report/2">
- <c>error_logger:error_report(crash_report,
- <anno>CrashReport</anno>)</c></seealso>.
- That is, the event to be handled is of the format
+ <seealso marker="kernel:logger">
+ <c>logger(3)</c></seealso>, and the event to be handled is of the format
<c>{error_report, GL, {Pid, crash_report,
<anno>CrashReport</anno>}}</c>,
where <c>GL</c> is the group leader pid of process
@@ -132,7 +141,19 @@
<name name="format" arity="3"/>
<fsummary>Format a crash report.</fsummary>
<desc>
- <p>This function can be used by a user-defined event handler to
+ <note>
+ <p>This function is deprecated in the sense that
+ the <c>error_logger</c> is no longer the preferred
+ interface for logging in Erlang/OTP. A
+ new <seealso marker="kernel:logger_chapter">logging
+ API</seealso> was added in Erlang/OTP 21.0, but
+ legacy <c>error_logger</c> handlers can still be used. New
+ Logger handlers do not need to use this function, since
+ the formatting callback (<c>report_cb</c>) is included as
+ metadata in the log event.</p>
+ </note>
+ <p>This function can be used by a user-defined legacy
+ <c>error_logger</c> event handler to
format a crash report. When <anno>Depth</anno> is specified as a
positive integer, it is used in the format string to
limit the output as follows: <c>io_lib:format("~P",
@@ -395,6 +416,8 @@ init(Parent) ->
<title>See Also</title>
<p><seealso marker="kernel:error_logger">
<c>error_logger(3)</c></seealso></p>
+ <p><seealso marker="kernel:logger">
+ <c>logger(3)</c></seealso></p>
</section>
</erlref>
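
A sketch of what the notes above refer to: a Logger handler can format a crash report
through the report_cb callback carried in the event metadata instead of calling
proc_lib:format/2 (the handler module below is a hypothetical illustration):

    %% Sketch: use the report_cb metadata callback to format a report event.
    -module(report_cb_demo).
    -export([log/2]).

    log(#{msg := {report, Report}, meta := #{report_cb := RCB}}, _Config)
      when is_function(RCB, 1) ->
        {Format, Args} = RCB(Report),
        io:put_chars(user, [io_lib:format(Format, Args), $\n]);
    log(_LogEvent, _Config) ->
        ok.
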
diff --git a/lib/stdlib/doc/src/ref_man.xml b/lib/stdlib/doc/src/ref_man.xml
index 68bfddbc71..c6f30d272d 100644
--- a/lib/stdlib/doc/src/ref_man.xml
+++ b/lib/stdlib/doc/src/ref_man.xml
@@ -66,7 +66,6 @@
<xi:include href="gen_statem.xml"/>
<xi:include href="io.xml"/>
<xi:include href="io_lib.xml"/>
- <xi:include href="lib.xml"/>
<xi:include href="lists.xml"/>
<xi:include href="log_mf_h.xml"/>
<xi:include href="maps.xml"/>
diff --git a/lib/stdlib/doc/src/specs.xml b/lib/stdlib/doc/src/specs.xml
index d559adf9b6..fd2d625685 100644
--- a/lib/stdlib/doc/src/specs.xml
+++ b/lib/stdlib/doc/src/specs.xml
@@ -33,7 +33,6 @@
<xi:include href="../specs/specs_gen_statem.xml"/>
<xi:include href="../specs/specs_io.xml"/>
<xi:include href="../specs/specs_io_lib.xml"/>
- <xi:include href="../specs/specs_lib.xml"/>
<xi:include href="../specs/specs_lists.xml"/>
<xi:include href="../specs/specs_log_mf_h.xml"/>
<xi:include href="../specs/specs_maps.xml"/>
diff --git a/lib/stdlib/doc/src/string.xml b/lib/stdlib/doc/src/string.xml
index c7772d63a3..4a3d37dcb6 100644
--- a/lib/stdlib/doc/src/string.xml
+++ b/lib/stdlib/doc/src/string.xml
@@ -641,7 +641,7 @@ ÖÄÅ</pre>
<note><p>
The functions are kept for backward compatibility, but are
not recommended.
- They will be deprecated in Erlang/OTP 21.
+ They will be deprecated in a future release.
</p>
<p>Any undocumented functions in <c>string</c> are not to be used.</p>
</note>
diff --git a/lib/stdlib/src/Makefile b/lib/stdlib/src/Makefile
index dc3735055a..dfe6bf3e68 100644
--- a/lib/stdlib/src/Makefile
+++ b/lib/stdlib/src/Makefile
@@ -62,6 +62,7 @@ MODULES= \
erl_anno \
erl_bits \
erl_compile \
+ erl_error \
erl_eval \
erl_expand_records \
erl_internal \
@@ -91,7 +92,6 @@ MODULES= \
io_lib_format \
io_lib_fread \
io_lib_pretty \
- lib \
lists \
log_mf_h \
maps \
@@ -176,6 +176,7 @@ docs:
primary_bootstrap_compiler: \
$(BOOTSTRAP_COMPILER)/ebin/epp.beam \
$(BOOTSTRAP_COMPILER)/ebin/erl_anno.beam \
+ $(BOOTSTRAP_COMPILER)/ebin/erl_error.beam \
$(BOOTSTRAP_COMPILER)/ebin/erl_scan.beam \
$(BOOTSTRAP_COMPILER)/ebin/erl_parse.beam \
$(BOOTSTRAP_COMPILER)/ebin/erl_lint.beam \
diff --git a/lib/stdlib/src/array.erl b/lib/stdlib/src/array.erl
index a237eaa489..939b1fb488 100644
--- a/lib/stdlib/src/array.erl
+++ b/lib/stdlib/src/array.erl
@@ -290,7 +290,7 @@ new(Size, Fixed, Default) ->
end,
#array{size = Size, max = M, default = Default, elements = E}.
--spec find_max(integer(), integer()) -> integer().
+-spec find_max(integer(), non_neg_integer()) -> non_neg_integer().
find_max(I, M) when I >= M ->
find_max(I, ?extend(M));
diff --git a/lib/stdlib/src/epp.erl b/lib/stdlib/src/epp.erl
index 77cc88eb08..cc34d4bdd3 100644
--- a/lib/stdlib/src/epp.erl
+++ b/lib/stdlib/src/epp.erl
@@ -38,7 +38,7 @@
-type epp_handle() :: pid().
-type source_encoding() :: latin1 | utf8.
--type ifdef() :: 'ifdef' | 'ifndef' | 'else'.
+-type ifdef() :: 'ifdef' | 'ifndef' | 'if' | 'else'.
-type name() :: atom().
-type argspec() :: 'none' %No arguments
@@ -221,6 +221,8 @@ format_error({illegal_function,Macro}) ->
io_lib:format("?~s can only be used within a function", [Macro]);
format_error({illegal_function_usage,Macro}) ->
io_lib:format("?~s must not begin a form", [Macro]);
+format_error(elif_after_else) ->
+ "'elif' following 'else'";
format_error({'NYI',What}) ->
io_lib:format("not yet implemented '~s'", [What]);
format_error({error,Term}) ->
@@ -571,6 +573,7 @@ init_server(Pid, Name, Options, St0) ->
predef_macros(File) ->
Machine = list_to_atom(erlang:system_info(machine)),
Anno = line1(),
+ OtpVersion = list_to_integer(erlang:system_info(otp_release)),
Defs = [{'FILE', {none,[{string,Anno,File}]}},
{'FUNCTION_NAME', undefined},
{'FUNCTION_ARITY', undefined},
@@ -580,7 +583,8 @@ predef_macros(File) ->
{'BASE_MODULE', undefined},
{'BASE_MODULE_STRING', undefined},
{'MACHINE', {none,[{atom,Anno,Machine}]}},
- {Machine, {none,[{atom,Anno,true}]}}
+ {Machine, {none,[{atom,Anno,true}]}},
+ {'OTP_RELEASE', {none,[{integer,Anno,OtpVersion}]}}
],
maps:from_list(Defs).
@@ -1085,21 +1089,118 @@ scan_else(_Toks, Else, From, St) ->
epp_reply(From, {error,{loc(Else),epp,{bad,'else'}}}),
wait_req_scan(St).
-%% scan_if(Tokens, EndifToken, From, EppState)
+%% scan_if(Tokens, IfToken, From, EppState)
%% Handle the conditional parsing of a file.
-%% Report a badly formed if test and then treat as false macro.
+scan_if([{'(',_}|_]=Toks, If, From, St) ->
+ try eval_if(Toks, St) of
+ true ->
+ scan_toks(From, St#epp{istk=['if'|St#epp.istk]});
+ _ ->
+ skip_toks(From, St, ['if'])
+ catch
+ throw:Error0 ->
+ Error = case Error0 of
+ {_,erl_parse,_} ->
+ {error,Error0};
+ _ ->
+ {error,{loc(If),epp,Error0}}
+ end,
+ epp_reply(From, Error),
+ wait_req_skip(St, ['if'])
+ end;
scan_if(_Toks, If, From, St) ->
- epp_reply(From, {error,{loc(If),epp,{'NYI','if'}}}),
+ epp_reply(From, {error,{loc(If),epp,{bad,'if'}}}),
wait_req_skip(St, ['if']).
+eval_if(Toks0, St) ->
+ Toks = expand_macros(Toks0, St),
+ Es1 = case erl_parse:parse_exprs(Toks) of
+ {ok,Es0} -> Es0;
+ {error,E} -> throw(E)
+ end,
+ Es = rewrite_expr(Es1, St),
+ assert_guard_expr(Es),
+ Bs = erl_eval:new_bindings(),
+ LocalFun = fun(_Name, _Args) ->
+ error(badarg)
+ end,
+ try erl_eval:exprs(Es, Bs, {value,LocalFun}) of
+ {value,Res,_} ->
+ Res
+ catch
+ _:_ ->
+ false
+ end.
+
+assert_guard_expr([E0]) ->
+ E = rewrite_expr(E0, none),
+ case erl_lint:is_guard_expr(E) of
+ false ->
+ throw({bad,'if'});
+ true ->
+ ok
+ end;
+assert_guard_expr(_) ->
+ throw({bad,'if'}).
+
+%% Dual-purpose rewriting function. When the second argument is
+%% an #epp{} record, calls to defined(Symbol) will be evaluated.
+%% When the second argument is 'none', legal calls to our built-in
+%% functions are eliminated in order to turn the expression into
+%% a legal guard expression.
+
+rewrite_expr({call,_,{atom,_,defined},[N0]}, #epp{macs=Macs}) ->
+ %% Evaluate defined(Symbol).
+ N = case N0 of
+ {var,_,N1} -> N1;
+ {atom,_,N1} -> N1;
+ _ -> throw({bad,'if'})
+ end,
+ {atom,0,maps:is_key(N, Macs)};
+rewrite_expr({call,_,{atom,_,Name},As0}, none) ->
+ As = rewrite_expr(As0, none),
+ Arity = length(As),
+ case erl_internal:bif(Name, Arity) andalso
+ not erl_internal:guard_bif(Name, Arity) of
+ false ->
+ %% A guard BIF, an -if built-in, or an unknown function.
+ %% Eliminate the call so that erl_lint will not complain.
+ %% The call might fail later at evaluation time.
+ to_conses(As);
+ true ->
+ %% An auto-imported BIF (not guard BIF). Not allowed.
+ throw({bad,'if'})
+ end;
+rewrite_expr([H|T], St) ->
+ [rewrite_expr(H, St)|rewrite_expr(T, St)];
+rewrite_expr(Tuple, St) when is_tuple(Tuple) ->
+ list_to_tuple(rewrite_expr(tuple_to_list(Tuple), St));
+rewrite_expr(Other, _) ->
+ Other.
+
+to_conses([H|T]) ->
+ {cons,0,H,to_conses(T)};
+to_conses([]) ->
+ {nil,0}.
+
%% scan_elif(Tokens, EndifToken, From, EppState)
%% Handle the conditional parsing of a file.
%% Report a badly formed if test and then treat as false macro.
scan_elif(_Toks, Elif, From, St) ->
- epp_reply(From, {error,{loc(Elif),epp,{'NYI','elif'}}}),
- wait_req_scan(St).
+ case St#epp.istk of
+ ['else'|Cis] ->
+ epp_reply(From, {error,{loc(Elif),
+ epp,{illegal,"unbalanced",'elif'}}}),
+ wait_req_skip(St#epp{istk=Cis}, ['else']);
+ [_I|Cis] ->
+ skip_toks(From, St#epp{istk=Cis}, ['elif']);
+ [] ->
+ epp_reply(From, {error,{loc(Elif),epp,
+ {illegal,"unbalanced",elif}}}),
+ wait_req_scan(St)
+ end.
%% scan_endif(Tokens, EndifToken, From, EppState)
%% If we are in an if body then exit it, else report an error.
@@ -1158,6 +1259,8 @@ skip_toks(From, St, [I|Sis]) ->
skip_toks(From, St#epp{location=Cl}, ['if',I|Sis]);
{ok,[{'-',_Lh},{atom,_Le,'else'}=Else|_Toks],Cl}->
skip_else(Else, From, St#epp{location=Cl}, [I|Sis]);
+ {ok,[{'-',_Lh},{atom,_Le,'elif'}=Elif|Toks],Cl}->
+ skip_elif(Toks, Elif, From, St#epp{location=Cl}, [I|Sis]);
{ok,[{'-',_Lh},{atom,_Le,endif}|_Toks],Cl} ->
skip_toks(From, St#epp{location=Cl}, Sis);
{ok,_Toks,Cl} ->
@@ -1188,11 +1291,21 @@ skip_toks(From, St, []) ->
skip_else(Else, From, St, ['else'|Sis]) ->
epp_reply(From, {error,{loc(Else),epp,{illegal,"repeated",'else'}}}),
wait_req_skip(St, ['else'|Sis]);
+skip_else(_Else, From, St, ['elif'|Sis]) ->
+ skip_toks(From, St, ['else'|Sis]);
skip_else(_Else, From, St, [_I]) ->
scan_toks(From, St#epp{istk=['else'|St#epp.istk]});
skip_else(_Else, From, St, Sis) ->
skip_toks(From, St, Sis).
+skip_elif(_Toks, Elif, From, St, ['else'|_]=Sis) ->
+ epp_reply(From, {error,{loc(Elif),epp,elif_after_else}}),
+ wait_req_skip(St, Sis);
+skip_elif(Toks, Elif, From, St, [_I]) ->
+ scan_if(Toks, Elif, From, St);
+skip_elif(_Toks, _Elif, From, St, Sis) ->
+ skip_toks(From, St, Sis).
+
%% macro_pars(Tokens, ArgStack)
%% macro_expansion(Tokens, Anno)
%% Extract the macro parameters and the expansion from a macro definition.
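
The new directives introduced above can be used as in this sketch (the module name is
hypothetical); note that ?OTP_RELEASE is only defined from OTP 21 on, so an outer -ifdef
guards older releases:

    %% Sketch: conditional compilation with -if/-elif and ?OTP_RELEASE.
    -module(if_demo).
    -export([release_class/0]).

    -ifdef(OTP_RELEASE).
    -if(?OTP_RELEASE >= 22).
    release_class() -> at_least_22.
    -elif(?OTP_RELEASE >= 21).
    release_class() -> release_21.
    -endif.
    -else.
    release_class() -> pre_21.
    -endif.
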
diff --git a/lib/stdlib/src/lib.erl b/lib/stdlib/src/erl_error.erl
index 51e0c3f77e..fdcb9e824c 100644
--- a/lib/stdlib/src/lib.erl
+++ b/lib/stdlib/src/erl_error.erl
@@ -17,337 +17,12 @@
%%
%% %CopyrightEnd%
%%
--module(lib).
-
--export([flush_receive/0, error_message/2, progname/0, nonl/1, send/2,
- sendw/2, eval_str/1]).
-
--export([extended_parse_exprs/1, extended_parse_term/1,
- subst_values_for_vars/2]).
+-module(erl_error).
-export([format_exception/6, format_exception/7,
format_stacktrace/4, format_stacktrace/5,
format_call/4, format_call/5, format_fun/1, format_fun/2]).
--spec flush_receive() -> 'ok'.
-
-flush_receive() ->
- receive
- _Any ->
- flush_receive()
- after
- 0 ->
- ok
- end.
-
-%%
-%% Functions for doing standard system format i/o.
-%%
--spec error_message(Format, Args) -> 'ok' when
- Format :: io:format(),
- Args :: [term()].
-
-error_message(Format, Args) ->
- io:format(<<"** ~ts **\n">>, [io_lib:format(Format, Args)]).
-
-%% Return the name of the script that starts (this) erlang
-%%
--spec progname() -> atom().
-
-progname() ->
- case init:get_argument(progname) of
- {ok, [[Prog]]} ->
- list_to_atom(Prog);
- _Other ->
- no_prog_name
- end.
-
--spec nonl(String1) -> String2 when
- String1 :: string(),
- String2 :: string().
-
-nonl([10]) -> [];
-nonl([]) -> [];
-nonl([H|T]) -> [H|nonl(T)].
-
--spec send(To, Msg) -> Msg when
- To :: pid() | atom() | {atom(), node()},
- Msg :: term().
-
-send(To, Msg) -> To ! Msg.
-
--spec sendw(To, Msg) -> term() when
- To :: pid() | atom() | {atom(), node()},
- Msg :: term().
-
-sendw(To, Msg) ->
- To ! {self(), Msg},
- receive
- Reply -> Reply
- end.
-
-%% eval_str(InStr) -> {ok, OutStr} | {error, ErrStr'}
-%% InStr must represent a body
-%% Note: If InStr is a binary it has to be a Latin-1 string.
-%% If you have a UTF-8 encoded binary you have to call
-%% unicode:characters_to_list/1 before the call to eval_str().
-
--define(result(F,D), lists:flatten(io_lib:format(F, D))).
-
--spec eval_str(string() | unicode:latin1_binary()) ->
- {'ok', string()} | {'error', string()}.
-
-eval_str(Str) when is_list(Str) ->
- case erl_scan:tokens([], Str, 0) of
- {more, _} ->
- {error, "Incomplete form (missing .<cr>)??"};
- {done, {ok, Toks, _}, Rest} ->
- case all_white(Rest) of
- true ->
- case erl_parse:parse_exprs(Toks) of
- {ok, Exprs} ->
- case catch erl_eval:exprs(Exprs, erl_eval:new_bindings()) of
- {value, Val, _} ->
- {ok, Val};
- Other ->
- {error, ?result("*** eval: ~p", [Other])}
- end;
- {error, {_Line, Mod, Args}} ->
- Msg = ?result("*** ~ts",[Mod:format_error(Args)]),
- {error, Msg}
- end;
- false ->
- {error, ?result("Non-white space found after "
- "end-of-form :~ts", [Rest])}
- end
- end;
-eval_str(Bin) when is_binary(Bin) ->
- eval_str(binary_to_list(Bin)).
-
-all_white([$\s|T]) -> all_white(T);
-all_white([$\n|T]) -> all_white(T);
-all_white([$\t|T]) -> all_white(T);
-all_white([]) -> true;
-all_white(_) -> false.
-
-%% `Tokens' is assumed to have been scanned with the 'text' option.
-%% The annotations of the returned expressions are locations.
-%%
-%% Can handle pids, ports, references, and external funs ("items").
-%% Known items are represented by variables in the erl_parse tree, and
-%% the items themselves are stored in the returned bindings.
-
--spec extended_parse_exprs(Tokens) ->
- {'ok', ExprList, Bindings} | {'error', ErrorInfo} when
- Tokens :: [erl_scan:token()],
- ExprList :: [erl_parse:abstract_expr()],
- Bindings :: erl_eval:binding_struct(),
- ErrorInfo :: erl_parse:error_info().
-
-extended_parse_exprs(Tokens) ->
- Ts = tokens_fixup(Tokens),
- case erl_parse:parse_exprs(Ts) of
- {ok, Exprs0} ->
- {Exprs, Bs} = expr_fixup(Exprs0),
- {ok, reset_expr_anno(Exprs), Bs};
- _ErrorInfo ->
- erl_parse:parse_exprs(reset_token_anno(Ts))
- end.
-
-tokens_fixup([]) -> [];
-tokens_fixup([T|Ts]=Ts0) ->
- try token_fixup(Ts0) of
- {NewT, NewTs} ->
- [NewT|tokens_fixup(NewTs)]
- catch
- _:_ ->
- [T|tokens_fixup(Ts)]
- end.
-
-token_fixup(Ts) ->
- {AnnoL, NewTs, FixupTag} = unscannable(Ts),
- String = lists:append([erl_anno:text(A) || A <- AnnoL]),
- _ = (fixup_fun(FixupTag))(String),
- NewAnno = erl_anno:set_text(fixup_text(FixupTag), hd(AnnoL)),
- {{string, NewAnno, String}, NewTs}.
-
-unscannable([{'#', A1}, {var, A2, 'Fun'}, {'<', A3}, {atom, A4, _},
- {'.', A5}, {float, A6, _}, {'>', A7}|Ts]) ->
- {[A1, A2, A3, A4, A5, A6, A7], Ts, function};
-unscannable([{'#', A1}, {var, A2, 'Fun'}, {'<', A3}, {atom, A4, _},
- {'.', A5}, {atom, A6, _}, {'.', A7}, {integer, A8, _},
- {'>', A9}|Ts]) ->
- {[A1, A2, A3, A4, A5, A6, A7, A8, A9], Ts, function};
-unscannable([{'<', A1}, {float, A2, _}, {'.', A3}, {integer, A4, _},
- {'>', A5}|Ts]) ->
- {[A1, A2, A3, A4, A5], Ts, pid};
-unscannable([{'#', A1}, {var, A2, 'Port'}, {'<', A3}, {float, A4, _},
- {'>', A5}|Ts]) ->
- {[A1, A2, A3, A4, A5], Ts, port};
-unscannable([{'#', A1}, {var, A2, 'Ref'}, {'<', A3}, {float, A4, _},
- {'.', A5}, {float, A6, _}, {'>', A7}|Ts]) ->
- {[A1, A2, A3, A4, A5, A6, A7], Ts, reference}.
-
-expr_fixup(Expr0) ->
- {Expr, Bs, _} = expr_fixup(Expr0, erl_eval:new_bindings(), 1),
- {Expr, Bs}.
-
-expr_fixup({string,A,S}=T, Bs0, I) ->
- try string_fixup(A, S) of
- Value ->
- Var = new_var(I),
- Bs = erl_eval:add_binding(Var, Value, Bs0),
- {{var, A, Var}, Bs, I+1}
- catch
- _:_ ->
- {T, Bs0, I}
- end;
-expr_fixup(Tuple, Bs0, I0) when is_tuple(Tuple) ->
- {L, Bs, I} = expr_fixup(tuple_to_list(Tuple), Bs0, I0),
- {list_to_tuple(L), Bs, I};
-expr_fixup([E0|Es0], Bs0, I0) ->
- {E, Bs1, I1} = expr_fixup(E0, Bs0, I0),
- {Es, Bs, I} = expr_fixup(Es0, Bs1, I1),
- {[E|Es], Bs, I};
-expr_fixup(T, Bs, I) ->
- {T, Bs, I}.
-
-string_fixup(A, S) ->
- Text = erl_anno:text(A),
- FixupTag = fixup_tag(Text, S),
- (fixup_fun(FixupTag))(S).
-
-new_var(I) ->
- list_to_atom(lists:concat(['__ExtendedParseExprs_', I, '__'])).
-
-reset_token_anno(Tokens) ->
- [setelement(2, T, (reset_anno())(element(2, T))) || T <- Tokens].
-
-reset_expr_anno(Exprs) ->
- [erl_parse:map_anno(reset_anno(), E) || E <- Exprs].
-
-reset_anno() ->
- fun(A) -> erl_anno:new(erl_anno:location(A)) end.
-
-fixup_fun(function) -> fun function/1;
-fixup_fun(pid) -> fun erlang:list_to_pid/1;
-fixup_fun(port) -> fun erlang:list_to_port/1;
-fixup_fun(reference) -> fun erlang:list_to_ref/1.
-
-function(S) ->
- %% External function.
- {ok, [_, _, _,
- {atom, _, Module}, _,
- {atom, _, Function}, _,
- {integer, _, Arity}|_], _} = erl_scan:string(S),
- erlang:make_fun(Module, Function, Arity).
-
-fixup_text(function) -> "function";
-fixup_text(pid) -> "pid";
-fixup_text(port) -> "port";
-fixup_text(reference) -> "reference".
-
-fixup_tag("function", "#"++_) -> function;
-fixup_tag("pid", "<"++_) -> pid;
-fixup_tag("port", "#"++_) -> port;
-fixup_tag("reference", "#"++_) -> reference.
-
-%%% End of extended_parse_exprs.
-
-%% `Tokens' is assumed to have been scanned with the 'text' option.
-%%
-%% Can handle pids, ports, references, and external funs.
-
--spec extended_parse_term(Tokens) ->
- {'ok', Term} | {'error', ErrorInfo} when
- Tokens :: [erl_scan:token()],
- Term :: term(),
- ErrorInfo :: erl_parse:error_info().
-
-extended_parse_term(Tokens) ->
- case extended_parse_exprs(Tokens) of
- {ok, [Expr], Bindings} ->
- try normalise(Expr, Bindings) of
- Term ->
- {ok, Term}
- catch
- _:_ ->
- Loc = erl_anno:location(element(2, Expr)),
- {error,{Loc,?MODULE,"bad term"}}
- end;
- {ok, [_,Expr|_], _Bindings} ->
- Loc = erl_anno:location(element(2, Expr)),
- {error,{Loc,?MODULE,"bad term"}};
- {error, _} = Error ->
- Error
- end.
-
-%% From erl_parse.
-normalise({var, _, V}, Bs) ->
- {value, Value} = erl_eval:binding(V, Bs),
- Value;
-normalise({char,_,C}, _Bs) -> C;
-normalise({integer,_,I}, _Bs) -> I;
-normalise({float,_,F}, _Bs) -> F;
-normalise({atom,_,A}, _Bs) -> A;
-normalise({string,_,S}, _Bs) -> S;
-normalise({nil,_}, _Bs) -> [];
-normalise({bin,_,Fs}, Bs) ->
- {value, B, _} =
- eval_bits:expr_grp(Fs, [],
- fun(E, _) ->
- {value, normalise(E, Bs), []}
- end, [], true),
- B;
-normalise({cons,_,Head,Tail}, Bs) ->
- [normalise(Head, Bs)|normalise(Tail, Bs)];
-normalise({tuple,_,Args}, Bs) ->
- list_to_tuple(normalise_list(Args, Bs));
-normalise({map,_,Pairs}, Bs) ->
- maps:from_list(lists:map(fun
- %% only allow '=>'
- ({map_field_assoc,_,K,V}) ->
- {normalise(K, Bs),normalise(V, Bs)}
- end, Pairs));
-%% Special case for unary +/-.
-normalise({op,_,'+',{char,_,I}}, _Bs) -> I;
-normalise({op,_,'+',{integer,_,I}}, _Bs) -> I;
-normalise({op,_,'+',{float,_,F}}, _Bs) -> F;
-normalise({op,_,'-',{char,_,I}}, _Bs) -> -I; %Weird, but compatible!
-normalise({op,_,'-',{integer,_,I}}, _Bs) -> -I;
-normalise({op,_,'-',{float,_,F}}, _Bs) -> -F;
-normalise({'fun',_,{function,{atom,_,M},{atom,_,F},{integer,_,A}}}, _Bs) ->
- %% Since "#Fun<M.F.A>" is recognized, "fun M:F/A" should be too.
- fun M:F/A.
-
-normalise_list([H|T], Bs) ->
- [normalise(H, Bs)|normalise_list(T, Bs)];
-normalise_list([], _Bs) ->
- [].
-
-%% To be used on ExprList and Bindings returned from extended_parse_exprs().
-%% Substitute {value, A, Item} for {var, A, ExtendedParseVar}.
-%% {value, A, Item} is a shell/erl_eval convention, and for example
-%% the linter cannot handle it.
-
--spec subst_values_for_vars(ExprList, Bindings) -> [term()] when
- ExprList :: [erl_parse:abstract_expr()],
- Bindings :: erl_eval:binding_struct().
-
-subst_values_for_vars({var, A, V}=Var, Bs) ->
- case erl_eval:binding(V, Bs) of
- {value, Value} ->
- {value, A, Value};
- unbound ->
- Var
- end;
-subst_values_for_vars(L, Bs) when is_list(L) ->
- [subst_values_for_vars(E, Bs) || E <- L];
-subst_values_for_vars(T, Bs) when is_tuple(T) ->
- list_to_tuple(subst_values_for_vars(tuple_to_list(T), Bs));
-subst_values_for_vars(T, _Bs) ->
- T.
-
%%% Formatting of exceptions, mfa:s and funs.
%% -> iolist() (no \n at end)
diff --git a/lib/stdlib/src/erl_eval.erl b/lib/stdlib/src/erl_eval.erl
index 4ee11383da..0f6d48b9a3 100644
--- a/lib/stdlib/src/erl_eval.erl
+++ b/lib/stdlib/src/erl_eval.erl
@@ -27,7 +27,8 @@
-export([exprs/2,exprs/3,exprs/4,expr/2,expr/3,expr/4,expr/5,
expr_list/2,expr_list/3,expr_list/4]).
-export([new_bindings/0,bindings/1,binding/2,add_binding/3,del_binding/2]).
-
+-export([extended_parse_exprs/1, extended_parse_term/1,
+ subst_values_for_vars/2]).
-export([is_constant_expr/1, partial_eval/1]).
%% Is used by standalone Erlang (escript).
@@ -1286,6 +1287,224 @@ merge_bindings(Bs1, Bs2) ->
%% error -> Bs
%% end
%% end, Bs2, Bs1).
+
+%% Substitute {value, A, Item} for {var, A, Var}, preserving A.
+%% {value, A, Item} is a shell/erl_eval convention, and for example
+%% the linter cannot handle it.
+
+-spec subst_values_for_vars(ExprList, Bindings) -> [term()] when
+ ExprList :: [erl_parse:abstract_expr()],
+ Bindings :: binding_struct().
+
+subst_values_for_vars({var, A, V}=Var, Bs) ->
+ case erl_eval:binding(V, Bs) of
+ {value, Value} ->
+ {value, A, Value};
+ unbound ->
+ Var
+ end;
+subst_values_for_vars(L, Bs) when is_list(L) ->
+ [subst_values_for_vars(E, Bs) || E <- L];
+subst_values_for_vars(T, Bs) when is_tuple(T) ->
+ list_to_tuple(subst_values_for_vars(tuple_to_list(T), Bs));
+subst_values_for_vars(T, _Bs) ->
+ T.
+
+%% `Tokens' is assumed to have been scanned with the 'text' option.
+%% The annotations of the returned expressions are locations.
+%%
+%% Can handle pids, ports, references, and external funs ("items").
+%% Known items are represented by variables in the erl_parse tree, and
+%% the items themselves are stored in the returned bindings.
+
+-spec extended_parse_exprs(Tokens) ->
+ {'ok', ExprList, Bindings} | {'error', ErrorInfo} when
+ Tokens :: [erl_scan:token()],
+ ExprList :: [erl_parse:abstract_expr()],
+ Bindings :: erl_eval:binding_struct(),
+ ErrorInfo :: erl_parse:error_info().
+
+extended_parse_exprs(Tokens) ->
+ Ts = tokens_fixup(Tokens),
+ case erl_parse:parse_exprs(Ts) of
+ {ok, Exprs0} ->
+ {Exprs, Bs} = expr_fixup(Exprs0),
+ {ok, reset_expr_anno(Exprs), Bs};
+ _ErrorInfo ->
+ erl_parse:parse_exprs(reset_token_anno(Ts))
+ end.
+
+tokens_fixup([]) -> [];
+tokens_fixup([T|Ts]=Ts0) ->
+ try token_fixup(Ts0) of
+ {NewT, NewTs} ->
+ [NewT|tokens_fixup(NewTs)]
+ catch
+ _:_ ->
+ [T|tokens_fixup(Ts)]
+ end.
+
+token_fixup(Ts) ->
+ {AnnoL, NewTs, FixupTag} = unscannable(Ts),
+ String = lists:append([erl_anno:text(A) || A <- AnnoL]),
+ _ = (fixup_fun(FixupTag))(String),
+ NewAnno = erl_anno:set_text(fixup_text(FixupTag), hd(AnnoL)),
+ {{string, NewAnno, String}, NewTs}.
+
+unscannable([{'#', A1}, {var, A2, 'Fun'}, {'<', A3}, {atom, A4, _},
+ {'.', A5}, {float, A6, _}, {'>', A7}|Ts]) ->
+ {[A1, A2, A3, A4, A5, A6, A7], Ts, function};
+unscannable([{'#', A1}, {var, A2, 'Fun'}, {'<', A3}, {atom, A4, _},
+ {'.', A5}, {atom, A6, _}, {'.', A7}, {integer, A8, _},
+ {'>', A9}|Ts]) ->
+ {[A1, A2, A3, A4, A5, A6, A7, A8, A9], Ts, function};
+unscannable([{'<', A1}, {float, A2, _}, {'.', A3}, {integer, A4, _},
+ {'>', A5}|Ts]) ->
+ {[A1, A2, A3, A4, A5], Ts, pid};
+unscannable([{'#', A1}, {var, A2, 'Port'}, {'<', A3}, {float, A4, _},
+ {'>', A5}|Ts]) ->
+ {[A1, A2, A3, A4, A5], Ts, port};
+unscannable([{'#', A1}, {var, A2, 'Ref'}, {'<', A3}, {float, A4, _},
+ {'.', A5}, {float, A6, _}, {'>', A7}|Ts]) ->
+ {[A1, A2, A3, A4, A5, A6, A7], Ts, reference}.
+
+expr_fixup(Expr0) ->
+ {Expr, Bs, _} = expr_fixup(Expr0, erl_eval:new_bindings(), 1),
+ {Expr, Bs}.
+
+expr_fixup({string,A,S}=T, Bs0, I) ->
+ try string_fixup(A, S) of
+ Value ->
+ Var = new_var(I),
+ Bs = erl_eval:add_binding(Var, Value, Bs0),
+ {{var, A, Var}, Bs, I+1}
+ catch
+ _:_ ->
+ {T, Bs0, I}
+ end;
+expr_fixup(Tuple, Bs0, I0) when is_tuple(Tuple) ->
+ {L, Bs, I} = expr_fixup(tuple_to_list(Tuple), Bs0, I0),
+ {list_to_tuple(L), Bs, I};
+expr_fixup([E0|Es0], Bs0, I0) ->
+ {E, Bs1, I1} = expr_fixup(E0, Bs0, I0),
+ {Es, Bs, I} = expr_fixup(Es0, Bs1, I1),
+ {[E|Es], Bs, I};
+expr_fixup(T, Bs, I) ->
+ {T, Bs, I}.
+
+string_fixup(A, S) ->
+ Text = erl_anno:text(A),
+ FixupTag = fixup_tag(Text, S),
+ (fixup_fun(FixupTag))(S).
+
+new_var(I) ->
+ list_to_atom(lists:concat(['__ExtendedParseExprs_', I, '__'])).
+
+reset_token_anno(Tokens) ->
+ [setelement(2, T, (reset_anno())(element(2, T))) || T <- Tokens].
+
+reset_expr_anno(Exprs) ->
+ [erl_parse:map_anno(reset_anno(), E) || E <- Exprs].
+
+reset_anno() ->
+ fun(A) -> erl_anno:new(erl_anno:location(A)) end.
+
+fixup_fun(function) -> fun function/1;
+fixup_fun(pid) -> fun erlang:list_to_pid/1;
+fixup_fun(port) -> fun erlang:list_to_port/1;
+fixup_fun(reference) -> fun erlang:list_to_ref/1.
+
+function(S) ->
+ %% External function.
+ {ok, [_, _, _,
+ {atom, _, Module}, _,
+ {atom, _, Function}, _,
+ {integer, _, Arity}|_], _} = erl_scan:string(S),
+ erlang:make_fun(Module, Function, Arity).
+
+fixup_text(function) -> "function";
+fixup_text(pid) -> "pid";
+fixup_text(port) -> "port";
+fixup_text(reference) -> "reference".
+
+fixup_tag("function", "#"++_) -> function;
+fixup_tag("pid", "<"++_) -> pid;
+fixup_tag("port", "#"++_) -> port;
+fixup_tag("reference", "#"++_) -> reference.
+
+%%% End of extended_parse_exprs.
+
+%% `Tokens' is assumed to have been scanned with the 'text' option.
+%%
+%% Can handle pids, ports, references, and external funs.
+
+-spec extended_parse_term(Tokens) ->
+ {'ok', Term} | {'error', ErrorInfo} when
+ Tokens :: [erl_scan:token()],
+ Term :: term(),
+ ErrorInfo :: erl_parse:error_info().
+
+extended_parse_term(Tokens) ->
+ case extended_parse_exprs(Tokens) of
+ {ok, [Expr], Bindings} ->
+ try normalise(Expr, Bindings) of
+ Term ->
+ {ok, Term}
+ catch
+ _:_ ->
+ Loc = erl_anno:location(element(2, Expr)),
+ {error,{Loc,?MODULE,"bad term"}}
+ end;
+ {ok, [_,Expr|_], _Bindings} ->
+ Loc = erl_anno:location(element(2, Expr)),
+ {error,{Loc,?MODULE,"bad term"}};
+ {error, _} = Error ->
+ Error
+ end.
+
+%% From erl_parse.
+normalise({var, _, V}, Bs) ->
+ {value, Value} = erl_eval:binding(V, Bs),
+ Value;
+normalise({char,_,C}, _Bs) -> C;
+normalise({integer,_,I}, _Bs) -> I;
+normalise({float,_,F}, _Bs) -> F;
+normalise({atom,_,A}, _Bs) -> A;
+normalise({string,_,S}, _Bs) -> S;
+normalise({nil,_}, _Bs) -> [];
+normalise({bin,_,Fs}, Bs) ->
+ {value, B, _} =
+ eval_bits:expr_grp(Fs, [],
+ fun(E, _) ->
+ {value, normalise(E, Bs), []}
+ end, [], true),
+ B;
+normalise({cons,_,Head,Tail}, Bs) ->
+ [normalise(Head, Bs)|normalise(Tail, Bs)];
+normalise({tuple,_,Args}, Bs) ->
+ list_to_tuple(normalise_list(Args, Bs));
+normalise({map,_,Pairs}, Bs) ->
+ maps:from_list(lists:map(fun
+ %% only allow '=>'
+ ({map_field_assoc,_,K,V}) ->
+ {normalise(K, Bs),normalise(V, Bs)}
+ end, Pairs));
+%% Special case for unary +/-.
+normalise({op,_,'+',{char,_,I}}, _Bs) -> I;
+normalise({op,_,'+',{integer,_,I}}, _Bs) -> I;
+normalise({op,_,'+',{float,_,F}}, _Bs) -> F;
+normalise({op,_,'-',{char,_,I}}, _Bs) -> -I; %Weird, but compatible!
+normalise({op,_,'-',{integer,_,I}}, _Bs) -> -I;
+normalise({op,_,'-',{float,_,F}}, _Bs) -> -F;
+normalise({'fun',_,{function,{atom,_,M},{atom,_,F},{integer,_,A}}}, _Bs) ->
+ %% Since "#Fun<M.F.A>" is recognized, "fun M:F/A" should be too.
+ fun M:F/A.
+
+normalise_list([H|T], Bs) ->
+ [normalise(H, Bs)|normalise_list(T, Bs)];
+normalise_list([], _Bs) ->
+ [].
+
%%----------------------------------------------------------------------------
%%
%% Evaluate expressions:
diff --git a/lib/stdlib/src/erl_internal.erl b/lib/stdlib/src/erl_internal.erl
index 6d3d5baa23..dd509191ef 100644
--- a/lib/stdlib/src/erl_internal.erl
+++ b/lib/stdlib/src/erl_internal.erl
@@ -109,6 +109,7 @@ new_type_test(is_function, 2) -> true;
new_type_test(is_integer, 1) -> true;
new_type_test(is_list, 1) -> true;
new_type_test(is_map, 1) -> true;
+new_type_test(is_map_key, 2) -> true;
new_type_test(is_number, 1) -> true;
new_type_test(is_pid, 1) -> true;
new_type_test(is_port, 1) -> true;
@@ -315,6 +316,7 @@ bif(is_function, 2) -> true;
bif(is_integer, 1) -> true;
bif(is_list, 1) -> true;
bif(is_map, 1) -> true;
+bif(is_map_key, 2) -> true;
bif(is_number, 1) -> true;
bif(is_pid, 1) -> true;
bif(is_port, 1) -> true;
diff --git a/lib/stdlib/src/escript.erl b/lib/stdlib/src/escript.erl
index beea9927d2..89a81684f5 100644
--- a/lib/stdlib/src/escript.erl
+++ b/lib/stdlib/src/escript.erl
@@ -882,7 +882,7 @@ format_exception(Class, Reason, StackTrace) ->
io_lib:format("~." ++ integer_to_list(I) ++ P, [Term, 50])
end,
StackFun = fun(M, _F, _A) -> (M =:= erl_eval) or (M =:= ?MODULE) end,
- lib:format_exception(1, Class, Reason, StackTrace, StackFun, PF, Enc).
+ erl_error:format_exception(1, Class, Reason, StackTrace, StackFun, PF, Enc).
encoding() ->
[{encoding, Encoding}] = enc(),
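
The renamed module can also be called directly; a sketch mirroring the escript usage above
(the pretty-printing fun follows the same "~.ItP" pattern with depth 50):

    %% Sketch: format the current exception with erl_error (formerly lib).
    PF = fun(Term, I) ->
                 io_lib:format("~." ++ integer_to_list(I) ++ "tP", [Term, 50])
         end,
    StackFun = fun(_M, _F, _A) -> false end,
    try
        error(badarg)
    catch
        Class:Reason:StackTrace ->
            io:put_chars(
              erl_error:format_exception(1, Class, Reason, StackTrace,
                                         StackFun, PF, unicode))
    end.
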
diff --git a/lib/stdlib/src/ets.erl b/lib/stdlib/src/ets.erl
index 6a559f0be5..a35f79c0d9 100644
--- a/lib/stdlib/src/ets.erl
+++ b/lib/stdlib/src/ets.erl
@@ -77,7 +77,9 @@
whereis/1]).
%% internal exports
--export([internal_request_all/0]).
+-export([internal_request_all/0,
+ internal_delete_all/2,
+ internal_select_delete/2]).
-spec all() -> [Tab] when
Tab :: tab().
@@ -116,7 +118,15 @@ delete(_, _) ->
-spec delete_all_objects(Tab) -> true when
Tab :: tab().
-delete_all_objects(_) ->
+delete_all_objects(Tab) ->
+ _ = ets:internal_delete_all(Tab, undefined),
+ true.
+
+-spec internal_delete_all(Tab, undefined) -> NumDeleted when
+ Tab :: tab(),
+ NumDeleted :: non_neg_integer().
+
+internal_delete_all(_, _) ->
erlang:nif_error(undef).
-spec delete_object(Tab, Object) -> true when
@@ -378,7 +388,17 @@ select_count(_, _) ->
MatchSpec :: match_spec(),
NumDeleted :: non_neg_integer().
-select_delete(_, _) ->
+select_delete(Tab, [{'_',[],[true]}]) ->
+ ets:internal_delete_all(Tab, undefined);
+select_delete(Tab, MatchSpec) ->
+ ets:internal_select_delete(Tab, MatchSpec).
+
+-spec internal_select_delete(Tab, MatchSpec) -> NumDeleted when
+ Tab :: tab(),
+ MatchSpec :: match_spec(),
+ NumDeleted :: non_neg_integer().
+
+internal_select_delete(_, _) ->
erlang:nif_error(undef).
-spec select_replace(Tab, MatchSpec) -> NumReplaced when
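
The effect of the dispatch above, as a usage sketch (table name and objects are
illustrative): a match-all select_delete/2 is now routed to the same internal_delete_all
BIF as delete_all_objects/1.

    %% Sketch: both calls below empty the table.
    T = ets:new(demo_tab, [set, public]),
    true = ets:insert(T, [{a, 1}, {b, 2}]),
    2 = ets:select_delete(T, [{'_', [], [true]}]),  %% number of objects deleted
    true = ets:delete_all_objects(T).               %% always returns true
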
diff --git a/lib/stdlib/src/gen_event.erl b/lib/stdlib/src/gen_event.erl
index 53042251cc..3ee2031d02 100644
--- a/lib/stdlib/src/gen_event.erl
+++ b/lib/stdlib/src/gen_event.erl
@@ -122,7 +122,7 @@
-type add_handler_ret() :: ok | term() | {'EXIT',term()}.
-type del_handler_ret() :: ok | term() | {'EXIT',term()}.
--type emgr_name() :: {'local', atom()} | {'global', atom()}
+-type emgr_name() :: {'local', atom()} | {'global', term()}
| {'via', atom(), term()}.
-type debug_flag() :: 'trace' | 'log' | 'statistics' | 'debug'
| {'logfile', string()}.
@@ -130,7 +130,7 @@
| {'debug', [debug_flag()]}
| {'spawn_opt', [proc_lib:spawn_option()]}
| {'hibernate_after', timeout()}.
--type emgr_ref() :: atom() | {atom(), atom()} | {'global', atom()}
+-type emgr_ref() :: atom() | {atom(), atom()} | {'global', term()}
| {'via', atom(), term()} | pid().
-type start_ret() :: {'ok', pid()} | {'error', term()}.
@@ -146,7 +146,7 @@
%% start_link()
%% start_link(MgrName | Options)
%% start_link(MgrName, Options)
-%% MgrName ::= {local, atom()} | {global, atom()} | {via, atom(), term()}
+%% MgrName ::= {local, atom()} | {global, term()} | {via, atom(), term()}
%% Options ::= [{timeout, Timeout} | {debug, [Flag]} | {spawn_opt,SOpts}]
%% Flag ::= trace | log | {logfile, File} | statistics | debug
%% (debug == log && statistics)
diff --git a/lib/stdlib/src/gen_fsm.erl b/lib/stdlib/src/gen_fsm.erl
index 77826c3dc6..1646186761 100644
--- a/lib/stdlib/src/gen_fsm.erl
+++ b/lib/stdlib/src/gen_fsm.erl
@@ -129,25 +129,25 @@
%% logger callback
-export([format_log/1]).
--deprecated({start, 3, next_major_release}).
--deprecated({start, 4, next_major_release}).
--deprecated({start_link, 3, next_major_release}).
--deprecated({start_link, 4, next_major_release}).
--deprecated({stop, 1, next_major_release}).
--deprecated({stop, 3, next_major_release}).
--deprecated({send_event, 2, next_major_release}).
--deprecated({sync_send_event, 2, next_major_release}).
--deprecated({sync_send_event, 3, next_major_release}).
--deprecated({send_all_state_event, 2, next_major_release}).
--deprecated({sync_send_all_state_event, 2, next_major_release}).
--deprecated({sync_send_all_state_event, 3, next_major_release}).
--deprecated({reply, 2, next_major_release}).
--deprecated({start_timer, 2, next_major_release}).
--deprecated({send_event_after, 2, next_major_release}).
--deprecated({cancel_timer, 1, next_major_release}).
--deprecated({enter_loop, 4, next_major_release}).
--deprecated({enter_loop, 5, next_major_release}).
--deprecated({enter_loop, 6, next_major_release}).
+-deprecated({start, 3, eventually}).
+-deprecated({start, 4, eventually}).
+-deprecated({start_link, 3, eventually}).
+-deprecated({start_link, 4, eventually}).
+-deprecated({stop, 1, eventually}).
+-deprecated({stop, 3, eventually}).
+-deprecated({send_event, 2, eventually}).
+-deprecated({sync_send_event, 2, eventually}).
+-deprecated({sync_send_event, 3, eventually}).
+-deprecated({send_all_state_event, 2, eventually}).
+-deprecated({sync_send_all_state_event, 2, eventually}).
+-deprecated({sync_send_all_state_event, 3, eventually}).
+-deprecated({reply, 2, eventually}).
+-deprecated({start_timer, 2, eventually}).
+-deprecated({send_event_after, 2, eventually}).
+-deprecated({cancel_timer, 1, eventually}).
+-deprecated({enter_loop, 4, eventually}).
+-deprecated({enter_loop, 5, eventually}).
+-deprecated({enter_loop, 6, eventually}).
%%% ---------------------------------------------------
%%% Interface functions.
diff --git a/lib/stdlib/src/gen_server.erl b/lib/stdlib/src/gen_server.erl
index f65ef78636..09f77c0810 100644
--- a/lib/stdlib/src/gen_server.erl
+++ b/lib/stdlib/src/gen_server.erl
@@ -166,7 +166,7 @@
%%% start(Name, Mod, Args, Options)
%%% start_link(Mod, Args, Options)
%%% start_link(Name, Mod, Args, Options) where:
-%%% Name ::= {local, atom()} | {global, atom()} | {via, atom(), term()}
+%%% Name ::= {local, atom()} | {global, term()} | {via, atom(), term()}
%%% Mod ::= atom(), callback module implementing the 'real' server
%%% Args ::= term(), init arguments (to Mod:init/1)
%%% Options ::= [{timeout, Timeout} | {debug, [Flag]}]
@@ -934,14 +934,14 @@ format_log(#{label:={gen_server,terminate},
end
end;
_ ->
- logger:limit_term(Reason)
+ error_logger:limit_term(Reason)
end,
{ClientFmt,ClientArgs} = format_client_log(Client),
{"** Generic server ~tp terminating \n"
"** Last message in was ~tp~n"
"** When Server state == ~tp~n"
"** Reason for termination == ~n** ~tp~n" ++ ClientFmt,
- [Name, Msg, logger:limit_term(State), Reason1] ++ ClientArgs};
+ [Name, Msg, error_logger:limit_term(State), Reason1] ++ ClientArgs};
format_log(#{label:={gen_server,no_handle_info},
module:=Mod,
message:=Msg}) ->
diff --git a/lib/stdlib/src/gen_statem.erl b/lib/stdlib/src/gen_statem.erl
index f558f0d33e..b36b8cd5a5 100644
--- a/lib/stdlib/src/gen_statem.erl
+++ b/lib/stdlib/src/gen_statem.erl
@@ -1938,7 +1938,7 @@ format_log(#{label:={gen_statem,terminate},
_ -> {Reason,Stacktrace}
end,
[LimitedP, LimitedFmtData, LimitedFixedReason] =
- [logger:limit_term(D) || D <- [P, FmtData, FixedReason]],
+ [error_logger:limit_term(D) || D <- [P, FmtData, FixedReason]],
CBMode =
case StateEnter of
true ->
diff --git a/lib/stdlib/src/io.erl b/lib/stdlib/src/io.erl
index f510f61e9f..5d5773c80c 100644
--- a/lib/stdlib/src/io.erl
+++ b/lib/stdlib/src/io.erl
@@ -86,7 +86,16 @@ put_chars(Chars) ->
CharData :: unicode:chardata().
put_chars(Io, Chars) ->
- o_request(Io, {put_chars,unicode,Chars}, put_chars).
+ put_chars(Io, unicode, Chars).
+
+%% This function is here to make the erlang:raise in o_request actually raise to
+%% a valid function.
+-spec put_chars(IoDevice, Encoding, CharData) -> 'ok' when
+ IoDevice :: device(),
+ Encoding :: unicode,
+ CharData :: unicode:chardata().
+put_chars(Io, Encoding, Chars) ->
+ o_request(Io, {put_chars,Encoding,Chars}, put_chars).
-spec nl() -> 'ok'.
diff --git a/lib/stdlib/src/ms_transform.erl b/lib/stdlib/src/ms_transform.erl
index ec8cfd56c2..428c23524b 100644
--- a/lib/stdlib/src/ms_transform.erl
+++ b/lib/stdlib/src/ms_transform.erl
@@ -929,6 +929,7 @@ bool_test(is_port,1) -> true;
bool_test(is_reference,1) -> true;
bool_test(is_tuple,1) -> true;
bool_test(is_map,1) -> true;
+bool_test(is_map_key, 2) -> true;
bool_test(is_binary,1) -> true;
bool_test(is_function,1) -> true;
bool_test(is_record,2) -> true;
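With is_map_key/2 now recognized as a boolean test, ets:fun2ms/1 can translate guards that use it. A small illustrative sketch, assuming an ETS table whose objects are {Key, Map} tuples (module name and table layout are assumptions, not part of the patch):

    -module(ms_key_example).
    -include_lib("stdlib/include/ms_transform.hrl").
    -export([keys_with_flag/1]).

    %% Return all keys whose map value contains the key 'enabled'.
    keys_with_flag(Tab) ->
        MS = ets:fun2ms(fun({K, M}) when is_map_key(enabled, M) -> K end),
        ets:select(Tab, MS).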
diff --git a/lib/stdlib/src/otp_internal.erl b/lib/stdlib/src/otp_internal.erl
index a17addcc42..ceec3079a1 100644
--- a/lib/stdlib/src/otp_internal.erl
+++ b/lib/stdlib/src/otp_internal.erl
@@ -612,6 +612,15 @@ obsolete_1(erlang, get_stacktrace, 0) ->
obsolete_1(erlang, hash, 2) ->
{removed, {erlang, phash2, 2}, "20.0"};
+%% Add in OTP 21.
+
+obsolete_1(ssl, ssl_accept, 1) ->
+ {deprecated, "deprecated; use ssl:handshake/1 instead"};
+obsolete_1(ssl, ssl_accept, 2) ->
+ {deprecated, "deprecated; use ssl:handshake/2 instead"};
+obsolete_1(ssl, ssl_accept, 3) ->
+ {deprecated, "deprecated; use ssl:handshake/3 instead"};
+
%% not obsolete
obsolete_1(_, _, _) ->
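Code that still calls the newly deprecated ssl:ssl_accept/1,2,3 is expected to move to the ssl:handshake functions. A minimal sketch of that migration (the surrounding function name and socket handling are invented for illustration):

    accept_tls(ListenSocket, Timeout) ->
        {ok, TransportSocket} = ssl:transport_accept(ListenSocket),
        %% Was: ok = ssl:ssl_accept(TransportSocket, Timeout)
        case ssl:handshake(TransportSocket, Timeout) of
            {ok, TlsSocket} -> {ok, TlsSocket};
            {error, Reason} -> {error, Reason}
        end.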
diff --git a/lib/stdlib/src/proc_lib.erl b/lib/stdlib/src/proc_lib.erl
index 8d01840313..5f14e78f91 100644
--- a/lib/stdlib/src/proc_lib.erl
+++ b/lib/stdlib/src/proc_lib.erl
@@ -553,10 +553,10 @@ get_ancestors(Pid) ->
%% assumed that all report handlers call proc_lib:format().
get_messages(Pid) ->
Messages = get_process_messages(Pid),
- {messages, logger:limit_term(Messages)}.
+ {messages, error_logger:limit_term(Messages)}.
get_process_messages(Pid) ->
- Depth = logger:get_format_depth(),
+ Depth = error_logger:get_format_depth(),
case Pid =/= self() orelse Depth =:= unlimited of
true ->
{messages, Messages} = get_process_info(Pid, messages),
@@ -586,7 +586,7 @@ get_cleaned_dictionary(Pid) ->
cleaned_dict(Dict) ->
CleanDict = clean_dict(Dict),
- logger:limit_term(CleanDict).
+ error_logger:limit_term(CleanDict).
clean_dict([{'$ancestors',_}|Dict]) ->
clean_dict(Dict);
@@ -756,7 +756,7 @@ check(Res) -> Res.
Args :: [term()].
report_cb(#{label:={proc_lib,crash},
report:=CrashReport}) ->
- Depth = logger:get_format_depth(),
+ Depth = error_logger:get_format_depth(),
get_format_and_args(CrashReport, utf8, Depth).
-spec format(CrashReport) -> string() when
@@ -841,8 +841,8 @@ format_exception(Class, Reason, StackTrace, {Enc,_}=Extra) ->
StackFun = fun(M, _F, _A) -> (M =:= erl_eval) or (M =:= ?MODULE) end,
%% EI = " exception: ",
EI = " ",
- [EI, lib:format_exception(1+length(EI), Class, Reason,
- StackTrace, StackFun, PF, Enc), "\n"].
+ [EI, erl_error:format_exception(1+length(EI), Class, Reason,
+ StackTrace, StackFun, PF, Enc), "\n"].
to_string(A, latin1) ->
io_lib:write_atom_as_latin1(A);
diff --git a/lib/stdlib/src/qlc.erl b/lib/stdlib/src/qlc.erl
index 3a66f6930b..4a0e976ba4 100644
--- a/lib/stdlib/src/qlc.erl
+++ b/lib/stdlib/src/qlc.erl
@@ -638,7 +638,7 @@ string_to_handle(Str, Options, Bindings) when is_list(Str) ->
case erl_scan:string(Str, 1, [text]) of
{ok, Tokens, _} ->
ScanRes =
- case lib:extended_parse_exprs(Tokens) of
+ case erl_eval:extended_parse_exprs(Tokens) of
{ok, [Expr0], SBs} ->
{ok, Expr0, SBs};
{ok, _ExprList, _SBs} ->
@@ -1196,8 +1196,8 @@ abstract1({table, TableDesc}, _NElements, _Depth, _A) ->
{ok, Tokens, _} =
erl_scan:string(lists:flatten(TableDesc++"."), 1, [text]),
{ok, Es, Bs} =
- lib:extended_parse_exprs(Tokens),
- [Expr] = lib:subst_values_for_vars(Es, Bs),
+ erl_eval:extended_parse_exprs(Tokens),
+ [Expr] = erl_eval:subst_values_for_vars(Es, Bs),
special(Expr);
false -> % abstract expression
TableDesc
@@ -3749,7 +3749,7 @@ maybe_error_logger(Name, Why) ->
expand_stacktrace(),
Trimmer = fun(M, _F, _A) -> M =:= erl_eval end,
Formater = fun(Term, I) -> io_lib:print(Term, I, 80, -1) end,
- X = lib:format_stacktrace(1, Stacktrace, Trimmer, Formater),
+ X = erl_error:format_stacktrace(1, Stacktrace, Trimmer, Formater),
error_logger:Name("qlc: temporary file was needed for ~w\n~ts\n",
[Why, lists:flatten(X)]).
diff --git a/lib/stdlib/src/shell.erl b/lib/stdlib/src/shell.erl
index 1be37672e7..c73cf22943 100644
--- a/lib/stdlib/src/shell.erl
+++ b/lib/stdlib/src/shell.erl
@@ -230,7 +230,7 @@ server_loop(N0, Eval_0, Bs00, RT, Ds00, History0, Results0) ->
{Res,Eval0} = get_command(Prompt, Eval_1, Bs0, RT, Ds0),
case Res of
{ok,Es0,XBs} ->
- Es1 = lib:subst_values_for_vars(Es0, XBs),
+ Es1 = erl_eval:subst_values_for_vars(Es0, XBs),
case expand_hist(Es1, N) of
{ok,Es} ->
{V,Eval,Bs,Ds} = shell_cmd(Es, Eval0, Bs0, RT, Ds0, cmd),
@@ -280,7 +280,7 @@ get_command(Prompt, Eval, Bs, RT, Ds) ->
io:scan_erl_exprs(group_leader(), Prompt, 1, [text])
of
{ok,Toks,_EndPos} ->
- lib:extended_parse_exprs(Toks);
+ erl_eval:extended_parse_exprs(Toks);
{eof,_EndPos} ->
eof;
{error,ErrorInfo,_EndPos} ->
@@ -589,7 +589,7 @@ report_exception(Class, Severity, {Reason,Stacktrace}, RT) ->
PF = fun(Term, I1) -> pp(Term, I1, RT) end,
SF = fun(M, _F, _A) -> (M =:= erl_eval) or (M =:= ?MODULE) end,
Enc = encoding(),
- Str = lib:format_exception(I, Class, Reason, Stacktrace, SF, PF, Enc),
+ Str = erl_error:format_exception(I, Class, Reason, Stacktrace, SF, PF, Enc),
io:requests([{put_chars, latin1, Tag},
{put_chars, unicode, Str},
nl]).
diff --git a/lib/stdlib/src/slave.erl b/lib/stdlib/src/slave.erl
index b3f3206d67..37c1f6bfd9 100644
--- a/lib/stdlib/src/slave.erl
+++ b/lib/stdlib/src/slave.erl
@@ -187,7 +187,7 @@ start_link(Host, Name, Args) ->
start(Host, Name, Args, self()).
start(Host0, Name, Args, LinkTo) ->
- Prog = lib:progname(),
+ Prog = progname(),
start(Host0, Name, Args, LinkTo, Prog).
start(Host0, Name, Args, LinkTo, Prog) ->
@@ -296,7 +296,6 @@ mk_cmd(Host, Name, Args, Waiter, Prog0) ->
" -s slave slave_start ", node(),
" ", Waiter,
" ", Args]),
-
case after_char($@, atom_to_list(node())) of
Host ->
{ok, BasicCmd};
@@ -309,6 +308,15 @@ mk_cmd(Host, Name, Args, Waiter, Prog0) ->
end
end.
+%% Return the name of the script that starts (this) erlang
+progname() ->
+ case init:get_argument(progname) of
+ {ok, [[Prog]]} ->
+ Prog;
+ _Other ->
+ "no_prog_name"
+ end.
+
%% This is an attempt to distinguish between spaces in the program
%% path and spaces that separate arguments. The program is quoted to
%% allow spaces in the path.
@@ -317,7 +325,7 @@ mk_cmd(Host, Name, Args, Waiter, Prog0) ->
%% (through start/5) or if the -program switch to beam is used and
%% includes arguments (typically done by cerl in OTP test environment
%% in order to ensure that slave/peer nodes are started with the same
-%% emulator and flags as the test node. The return from lib:progname()
+%% emulator and flags as the test node. The result from progname()
%% could then typically be '/<full_path_to>/cerl -gcov').
quote_progname(Progname) ->
do_quote_progname(string:lexemes(to_list(Progname)," ")).
diff --git a/lib/stdlib/src/stdlib.app.src b/lib/stdlib/src/stdlib.app.src
index 5fb48acfab..cd09872b87 100644
--- a/lib/stdlib/src/stdlib.app.src
+++ b/lib/stdlib/src/stdlib.app.src
@@ -2,7 +2,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -43,6 +43,7 @@
erl_anno,
erl_bits,
erl_compile,
+ erl_error,
erl_eval,
erl_expand_records,
erl_internal,
@@ -71,7 +72,6 @@
io_lib_format,
io_lib_fread,
io_lib_pretty,
- lib,
lists,
log_mf_h,
maps,
diff --git a/lib/stdlib/src/string.erl b/lib/stdlib/src/string.erl
index 0736374f21..cf48b882e4 100644
--- a/lib/stdlib/src/string.erl
+++ b/lib/stdlib/src/string.erl
@@ -323,16 +323,30 @@ take(Str, Sep0, true, trailing) ->
%% Uppercase all chars in Str
-spec uppercase(String::unicode:chardata()) -> unicode:chardata().
uppercase(CD) when is_list(CD) ->
- uppercase_list(CD);
-uppercase(CD) when is_binary(CD) ->
- uppercase_bin(CD,<<>>).
+ try uppercase_list(CD, false)
+ catch unchanged -> CD
+ end;
+uppercase(<<CP1/utf8, Rest/binary>>=Orig) ->
+ try uppercase_bin(CP1, Rest, false) of
+ List -> unicode:characters_to_binary(List)
+ catch unchanged -> Orig
+ end;
+uppercase(<<>>) ->
+ <<>>.
%% Lowercase all chars in Str
-spec lowercase(String::unicode:chardata()) -> unicode:chardata().
lowercase(CD) when is_list(CD) ->
- lowercase_list(CD);
-lowercase(CD) when is_binary(CD) ->
- lowercase_bin(CD,<<>>).
+ try lowercase_list(CD, false)
+ catch unchanged -> CD
+ end;
+lowercase(<<CP1/utf8, Rest/binary>>=Orig) ->
+ try lowercase_bin(CP1, Rest, false) of
+ List -> unicode:characters_to_binary(List)
+ catch unchanged -> Orig
+ end;
+lowercase(<<>>) ->
+ <<>>.
%% Make a titlecase of the first char in Str
-spec titlecase(String::unicode:chardata()) -> unicode:chardata().
@@ -352,9 +366,16 @@ titlecase(CD) when is_binary(CD) ->
%% Make a comparable string of the Str should be used for equality tests only
-spec casefold(String::unicode:chardata()) -> unicode:chardata().
casefold(CD) when is_list(CD) ->
- casefold_list(CD);
-casefold(CD) when is_binary(CD) ->
- casefold_bin(CD,<<>>).
+ try casefold_list(CD, false)
+ catch unchanged -> CD
+ end;
+casefold(<<CP1/utf8, Rest/binary>>=Orig) ->
+ try casefold_bin(CP1, Rest, false) of
+ List -> unicode:characters_to_binary(List)
+ catch unchanged -> Orig
+ end;
+casefold(<<>>) ->
+ <<>>.
-spec to_integer(String) -> {Int, Rest} | {'error', Reason} when
String :: unicode:chardata(),
@@ -652,52 +673,127 @@ slice_bin(CD, CP1, N) when N > 0 ->
slice_bin(CD, CP1, 0) ->
byte_size(CD)+byte_size(<<CP1/utf8>>).
-uppercase_list(CPs0) ->
+uppercase_list([CP1|[CP2|_]=Cont], _Changed) when $a =< CP1, CP1 =< $z, CP2 < 256 ->
+ [CP1-32|uppercase_list(Cont, true)];
+uppercase_list([CP1|[CP2|_]=Cont], Changed) when CP1 < 128, CP2 < 256 ->
+ [CP1|uppercase_list(Cont, Changed)];
+uppercase_list([], true) ->
+ [];
+uppercase_list([], false) ->
+ throw(unchanged);
+uppercase_list(CPs0, Changed) ->
case unicode_util:uppercase(CPs0) of
- [Char|CPs] -> append(Char,uppercase_list(CPs));
- [] -> []
+ [Char|CPs] when Char =:= hd(CPs0) -> [Char|uppercase_list(CPs, Changed)];
+ [Char|CPs] -> append(Char,uppercase_list(CPs, true));
+ [] -> uppercase_list([], Changed)
end.
-uppercase_bin(CPs0, Acc) ->
- case unicode_util:uppercase(CPs0) of
- [Char|CPs] when is_integer(Char) ->
- uppercase_bin(CPs, <<Acc/binary, Char/utf8>>);
- [Chars|CPs] ->
- uppercase_bin(CPs, <<Acc/binary,
- << <<CP/utf8>> || CP <- Chars>>/binary >>);
- [] -> Acc
+uppercase_bin(CP1, <<CP2/utf8, Bin/binary>>, _Changed)
+ when $a =< CP1, CP1 =< $z, CP2 < 256 ->
+ [CP1-32|uppercase_bin(CP2, Bin, true)];
+uppercase_bin(CP1, <<CP2/utf8, Bin/binary>>, Changed)
+ when CP1 < 128, CP2 < 256 ->
+ [CP1|uppercase_bin(CP2, Bin, Changed)];
+uppercase_bin(CP1, Bin, Changed) ->
+ case unicode_util:uppercase([CP1|Bin]) of
+ [CP1|CPs] ->
+ case unicode_util:cp(CPs) of
+ [Next|Rest] ->
+ [CP1|uppercase_bin(Next, Rest, Changed)];
+ [] when Changed ->
+ [CP1];
+ [] ->
+ throw(unchanged)
+ end;
+ [Char|CPs] ->
+ case unicode_util:cp(CPs) of
+ [Next|Rest] ->
+ [Char|uppercase_bin(Next, Rest, true)];
+ [] ->
+ [Char]
+ end
end.
-lowercase_list(CPs0) ->
+lowercase_list([CP1|[CP2|_]=Cont], _Changed) when $A =< CP1, CP1 =< $Z, CP2 < 256 ->
+ [CP1+32|lowercase_list(Cont, true)];
+lowercase_list([CP1|[CP2|_]=Cont], Changed) when CP1 < 128, CP2 < 256 ->
+ [CP1|lowercase_list(Cont, Changed)];
+lowercase_list([], true) ->
+ [];
+lowercase_list([], false) ->
+ throw(unchanged);
+lowercase_list(CPs0, Changed) ->
case unicode_util:lowercase(CPs0) of
- [Char|CPs] -> append(Char,lowercase_list(CPs));
- [] -> []
+ [Char|CPs] when Char =:= hd(CPs0) -> [Char|lowercase_list(CPs, Changed)];
+ [Char|CPs] -> append(Char,lowercase_list(CPs, true));
+ [] -> lowercase_list([], Changed)
end.
-lowercase_bin(CPs0, Acc) ->
- case unicode_util:lowercase(CPs0) of
- [Char|CPs] when is_integer(Char) ->
- lowercase_bin(CPs, <<Acc/binary, Char/utf8>>);
- [Chars|CPs] ->
- lowercase_bin(CPs, <<Acc/binary,
- << <<CP/utf8>> || CP <- Chars>>/binary >>);
- [] -> Acc
+lowercase_bin(CP1, <<CP2/utf8, Bin/binary>>, _Changed)
+ when $A =< CP1, CP1 =< $Z, CP2 < 256 ->
+ [CP1+32|lowercase_bin(CP2, Bin, true)];
+lowercase_bin(CP1, <<CP2/utf8, Bin/binary>>, Changed)
+ when CP1 < 128, CP2 < 256 ->
+ [CP1|lowercase_bin(CP2, Bin, Changed)];
+lowercase_bin(CP1, Bin, Changed) ->
+ case unicode_util:lowercase([CP1|Bin]) of
+ [CP1|CPs] ->
+ case unicode_util:cp(CPs) of
+ [Next|Rest] ->
+ [CP1|lowercase_bin(Next, Rest, Changed)];
+ [] when Changed ->
+ [CP1];
+ [] ->
+ throw(unchanged)
+ end;
+ [Char|CPs] ->
+ case unicode_util:cp(CPs) of
+ [Next|Rest] ->
+ [Char|lowercase_bin(Next, Rest, true)];
+ [] ->
+ [Char]
+ end
end.
-casefold_list(CPs0) ->
+casefold_list([CP1|[CP2|_]=Cont], _Changed) when $A =< CP1, CP1 =< $Z, CP2 < 256 ->
+ [CP1+32|casefold_list(Cont, true)];
+casefold_list([CP1|[CP2|_]=Cont], Changed) when CP1 < 128, CP2 < 256 ->
+ [CP1|casefold_list(Cont, Changed)];
+casefold_list([], true) ->
+ [];
+casefold_list([], false) ->
+ throw(unchanged);
+casefold_list(CPs0, Changed) ->
case unicode_util:casefold(CPs0) of
- [Char|CPs] -> append(Char, casefold_list(CPs));
- [] -> []
+ [Char|CPs] when Char =:= hd(CPs0) -> [Char|casefold_list(CPs, Changed)];
+ [Char|CPs] -> append(Char,casefold_list(CPs, true));
+ [] -> casefold_list([], Changed)
end.
-casefold_bin(CPs0, Acc) ->
- case unicode_util:casefold(CPs0) of
- [Char|CPs] when is_integer(Char) ->
- casefold_bin(CPs, <<Acc/binary, Char/utf8>>);
- [Chars|CPs] ->
- casefold_bin(CPs, <<Acc/binary,
- << <<CP/utf8>> || CP <- Chars>>/binary >>);
- [] -> Acc
+casefold_bin(CP1, <<CP2/utf8, Bin/binary>>, _Changed)
+ when $A =< CP1, CP1 =< $Z, CP2 < 256 ->
+ [CP1+32|casefold_bin(CP2, Bin, true)];
+casefold_bin(CP1, <<CP2/utf8, Bin/binary>>, Changed)
+ when CP1 < 128, CP2 < 256 ->
+ [CP1|casefold_bin(CP2, Bin, Changed)];
+casefold_bin(CP1, Bin, Changed) ->
+ case unicode_util:casefold([CP1|Bin]) of
+ [CP1|CPs] ->
+ case unicode_util:cp(CPs) of
+ [Next|Rest] ->
+ [CP1|casefold_bin(Next, Rest, Changed)];
+ [] when Changed ->
+ [CP1];
+ [] ->
+ throw(unchanged)
+ end;
+ [Char|CPs] ->
+ case unicode_util:cp(CPs) of
+ [Next|Rest] ->
+ [Char|casefold_bin(Next, Rest, true)];
+ [] ->
+ [Char]
+ end
end.
%% Fast path for ascii searching for one character in lists
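The new uppercase/lowercase/casefold clauses above share one idea: walk the input optimistically, and throw(unchanged) if no character needed conversion, so the caller can hand back the original term instead of a rebuilt copy. A minimal sketch of the same pattern in isolation (illustrative only, not part of the patch):

    %% Drop zeros from a list, but return the original list unchanged
    %% (preserving sharing) when it contains no zeros.
    strip_zeros(List) ->
        try strip(List, false)
        catch unchanged -> List
        end.

    strip([0|T], _Changed) -> strip(T, true);
    strip([H|T], Changed)  -> [H|strip(T, Changed)];
    strip([], true)        -> [];
    strip([], false)       -> throw(unchanged).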
diff --git a/lib/stdlib/test/epp_SUITE.erl b/lib/stdlib/test/epp_SUITE.erl
index 9123bf2f28..a3e294ffea 100644
--- a/lib/stdlib/test/epp_SUITE.erl
+++ b/lib/stdlib/test/epp_SUITE.erl
@@ -28,7 +28,8 @@
otp_8130/1, overload_mac/1, otp_8388/1, otp_8470/1,
otp_8562/1, otp_8665/1, otp_8911/1, otp_10302/1, otp_10820/1,
otp_11728/1, encoding/1, extends/1, function_macro/1,
- test_error/1, test_warning/1, otp_14285/1]).
+ test_error/1, test_warning/1, otp_14285/1,
+ test_if/1]).
-export([epp_parse_erl_form/2]).
@@ -69,7 +70,7 @@ all() ->
overload_mac, otp_8388, otp_8470, otp_8562,
otp_8665, otp_8911, otp_10302, otp_10820, otp_11728,
encoding, extends, function_macro, test_error, test_warning,
- otp_14285].
+ otp_14285, test_if].
groups() ->
[{upcase_mac, [], [upcase_mac_1, upcase_mac_2]},
@@ -799,7 +800,8 @@ otp_8130(Config) when is_list(Config) ->
PreDefMacs = macs(Epp),
['BASE_MODULE','BASE_MODULE_STRING','BEAM','FILE',
'FUNCTION_ARITY','FUNCTION_NAME',
- 'LINE','MACHINE','MODULE','MODULE_STRING'] = PreDefMacs,
+ 'LINE','MACHINE','MODULE','MODULE_STRING',
+ 'OTP_RELEASE'] = PreDefMacs,
{ok,[{'-',_},{atom,_,file}|_]} = epp:scan_erl_form(Epp),
{ok,[{'-',_},{atom,_,module}|_]} = epp:scan_erl_form(Epp),
{ok,[{atom,_,t}|_]} = epp:scan_erl_form(Epp),
@@ -952,27 +954,7 @@ ifdef(Config) ->
{define_c5,
<<"-\ndefine a.\n">>,
- {errors,[{{2,1},epp,{bad,define}}],[]}},
-
- {define_c6,
- <<"\n-if.\n"
- "-endif.\n">>,
- {errors,[{{2,2},epp,{'NYI','if'}}],[]}},
-
- {define_c7,
- <<"-ifndef(a).\n"
- "-elif.\n"
- "-endif.\n">>,
- {errors,[{{2,2},epp,{'NYI',elif}}],[]}},
-
- {define_c7,
- <<"-ifndef(a).\n"
- "-if.\n"
- "-elif.\n"
- "-endif.\n"
- "-endif.\n"
- "t() -> a.\n">>,
- {errors,[{{2,2},epp,{'NYI','if'}}],[]}}
+ {errors,[{{2,1},epp,{bad,define}}],[]}}
],
[] = compile(Config, Cs),
@@ -1117,6 +1099,147 @@ test_warning(Config) ->
[] = compile(Config, Cs),
ok.
+%% OTP-12847: Test the -if and -elif directives and the built-in
+%% function defined(Symbol).
+test_if(Config) ->
+ Cs = [{if_1c,
+ <<"-if.\n"
+ "-endif.\n"
+ "-if no_parentheses.\n"
+ "-endif.\n"
+ "-if(syntax error.\n"
+ "-endif.\n"
+ "-if(true).\n"
+ "-if(a+3).\n"
+ "syntax error not triggered here.\n"
+ "-endif.\n">>,
+ {errors,[{1,epp,{bad,'if'}},
+ {3,epp,{bad,'if'}},
+ {5,erl_parse,["syntax error before: ","error"]},
+ {11,epp,{illegal,"unterminated",'if'}}],
+ []}},
+
+ {if_2c, %Bad guard expressions.
+ <<"-if(is_list(integer_to_list(42))).\n" %Not guard BIF.
+ "-endif.\n"
+ "-if(begin true end).\n"
+ "-endif.\n">>,
+ {errors,[{1,epp,{bad,'if'}},
+ {3,epp,{bad,'if'}}],
+ []}},
+
+ {if_3c, %Invalid use of defined/1.
+ <<"-if defined(42).\n"
+ "-endif.\n">>,
+ {errors,[{1,epp,{bad,'if'}}],[]}},
+
+ {if_4c,
+ <<"-elif OTP_RELEASE > 18.\n">>,
+ {errors,[{1,epp,{illegal,"unbalanced",'elif'}}],[]}},
+
+ {if_5c,
+ <<"-ifdef(not_defined_today).\n"
+ "-else.\n"
+ "-elif OTP_RELEASE > 18.\n"
+ "-endif.\n">>,
+ {errors,[{3,epp,{illegal,"unbalanced",'elif'}}],[]}},
+
+ {if_6c,
+ <<"-if(defined(OTP_RELEASE)).\n"
+ "-else.\n"
+ "-elif(true).\n"
+ "-endif.\n">>,
+ {errors,[{3,epp,elif_after_else}],[]}},
+
+ {if_7c,
+ <<"-if(begin true end).\n" %Not a guard expression.
+ "-endif.\n">>,
+ {errors,[{1,epp,{bad,'if'}}],[]}}
+
+ ],
+ [] = compile(Config, Cs),
+
+ Ts = [{if_1,
+ <<"-if(?OTP_RELEASE > 18).\n"
+ "t() -> ok.\n"
+ "-else.\n"
+ "a bug.\n"
+ "-endif.\n">>,
+ ok},
+
+ {if_2,
+ <<"-if(false).\n"
+ "a bug.\n"
+ "-elif(?OTP_RELEASE > 18).\n"
+ "t() -> ok.\n"
+ "-else.\n"
+ "a bug.\n"
+ "-endif.\n">>,
+ ok},
+
+ {if_3,
+ <<"-if(true).\n"
+ "t() -> ok.\n"
+ "-elif(?OTP_RELEASE > 18).\n"
+ "a bug.\n"
+ "-else.\n"
+ "a bug.\n"
+ "-endif.\n">>,
+ ok},
+
+ {if_4,
+ <<"-define(a, 1).\n"
+ "-if(defined(a) andalso defined(OTP_RELEASE)).\n"
+ "t() -> ok.\n"
+ "-else.\n"
+ "a bug.\n"
+ "-endif.\n">>,
+ ok},
+
+ {if_5,
+ <<"-if(defined(a)).\n"
+ "a bug.\n"
+ "-else.\n"
+ "t() -> ok.\n"
+ "-endif.\n">>,
+ ok},
+
+ {if_6,
+ <<"-if(defined(not_defined_today)).\n"
+ " -if(true).\n"
+ " bug1.\n"
+ " -elif(true).\n"
+ " bug2.\n"
+ " -elif(true).\n"
+ " bug3.\n"
+ " -else.\n"
+ " bug4.\n"
+ " -endif.\n"
+ "-else.\n"
+ "t() -> ok.\n"
+ "-endif.\n">>,
+ ok},
+
+ {if_7,
+ <<"-if(not_builtin()).\n"
+ "a bug.\n"
+ "-else.\n"
+ "t() -> ok.\n"
+ "-endif.\n">>,
+ ok},
+
+ {if_8,
+ <<"-if(42).\n" %Not boolean.
+ "a bug.\n"
+ "-else.\n"
+ "t() -> ok.\n"
+ "-endif.\n">>,
+ ok}
+ ],
+ [] = run(Config, Ts),
+
+ ok.
+
%% Advanced test on overloading macros.
overload_mac(Config) when is_list(Config) ->
Cs = [
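The new test_if/1 cases exercise the -if and -elif directives together with the predefined OTP_RELEASE macro. A minimal sketch of how user code is expected to use them, assuming an OTP 21 or later compiler (earlier epp versions reject -if):

    -module(release_dependent).
    -export([modern_otp/0]).

    %% Evaluated by epp at compile time; no runtime cost.
    -if(?OTP_RELEASE >= 21).
    modern_otp() -> true.
    -else.
    modern_otp() -> false.
    -endif.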
diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl
index 02211fa8df..a97fe4a5d9 100644
--- a/lib/stdlib/test/ets_SUITE.erl
+++ b/lib/stdlib/test/ets_SUITE.erl
@@ -87,6 +87,7 @@
-export([t_select_reverse/1]).
+-include_lib("stdlib/include/ms_transform.hrl"). % ets:fun2ms
-include_lib("common_test/include/ct.hrl").
-define(m(A,B), assert_eq(A,B)).
@@ -173,10 +174,12 @@ groups() ->
init_per_suite(Config) ->
erts_debug:set_internal_state(available_internal_state, true),
+ erts_debug:set_internal_state(ets_force_trap, true),
Config.
end_per_suite(_Config) ->
stop_spawn_logger(),
+ erts_debug:set_internal_state(ets_force_trap, false),
catch erts_debug:set_internal_state(available_internal_state, false),
ok.
@@ -812,7 +815,60 @@ t_delete_all_objects_do(Opts) ->
4000 = ets:info(T,size),
true = ets:delete_all_objects(T),
0 = ets:info(T,size),
- ets:delete(T).
+ ets:delete(T),
+
+ %% Test delete_all_objects is atomic
+ T2 = ets:new(t_delete_all_objects, [public | Opts]),
+ Self = self(),
+ Inserters = [spawn_link(fun() -> inserter(T2, 100*1000, 1, Self) end) || _ <- [1,2,3,4]],
+ [receive {Ipid, running} -> ok end || Ipid <- Inserters],
+
+ ets:delete_all_objects(T2),
+ erlang:yield(),
+ [Ipid ! stop || Ipid <- Inserters],
+ Result = [receive {Ipid, stopped, Highest} -> {Ipid,Highest} end || Ipid <- Inserters],
+
+ %% Verify unbroken sequences of objects inserted _after_ ets:delete_all_objects.
+ Sum = lists:foldl(fun({Ipid, Highest}, AccSum) ->
+ %% ets:fun2ms(fun({{K,Ipid}}) when K =< Highest -> true end),
+ AliveMS = [{{{'$1',Ipid}},[{'=<','$1',{const,Highest}}],[true]}],
+ Alive = ets:select_count(T2, AliveMS),
+ Lowest = Highest - (Alive-1),
+
+ %% ets:fun2ms(fun({{K,Ipid}}) when K < Lowest -> true end)
+ DeletedMS = [{{{'$1',Ipid}},[{'<','$1',{const,Lowest}}],[true]}],
+ 0 = ets:select_count(T2, DeletedMS),
+ AccSum + Alive
+ end,
+ 0,
+ Result),
+ ok = case ets:info(T2, size) of
+ Sum -> ok;
+ Size ->
+ io:format("Sum = ~p\nSize = ~p\n", [Sum, Size]),
+ {Sum,Size}
+ end,
+
+ ets:delete(T2).
+
+inserter(_, 0, _, _) ->
+ ok;
+inserter(T, N, Next, Papa) ->
+ case Next of
+ 10*1000 ->
+ Papa ! {self(), running};
+ _ ->
+ ok
+ end,
+
+ ets:insert(T, {{Next, self()}}),
+ receive
+ stop ->
+ Papa ! {self(), stopped, Next},
+ ok
+ after 0 ->
+ inserter(T, N-1, Next+1, Papa)
+ end.
%% Test ets:delete_object/2.
@@ -6153,20 +6209,23 @@ spawn_logger(Procs) ->
ok;
(Proc) ->
Mon = erlang:monitor(process, Proc),
- receive
+ ok = receive
{'DOWN', Mon, _, _, _} ->
ok
after 0 ->
case Kill of
true -> exit(Proc, kill);
- _ ->
- erlang:display({"Waiting for 'DOWN' from", Proc,
- process_info(Proc),
- pid_status(Proc)})
+ _ -> ok
end,
receive
{'DOWN', Mon, _, _, _} ->
ok
+ after 5000 ->
+ io:format("Waiting for 'DOWN' from ~w, status=~w\n"
+ "info = ~p\n", [Proc,
+ pid_status(Proc),
+ process_info(Proc)]),
+ timeout
end
end
end, Procs),
diff --git a/lib/stdlib/test/io_SUITE.erl b/lib/stdlib/test/io_SUITE.erl
index 9f48fbf5e3..13f2cbd27b 100644
--- a/lib/stdlib/test/io_SUITE.erl
+++ b/lib/stdlib/test/io_SUITE.erl
@@ -1808,7 +1808,7 @@ rpc_call_max(Node, M, F, Args) ->
%% Make sure that a bad specification for a printable range is rejected.
bad_printable_range(Config) when is_list(Config) ->
- Cmd = lists:concat([lib:progname()," +pcunnnnnicode -run erlang halt"]),
+ Cmd = ct:get_progname() ++ " +pcunnnnnicode -run erlang halt",
P = open_port({spawn, Cmd}, [stderr_to_stdout, {line, 200}]),
ok = receive
{P, {data, {eol , "bad range of printable characters" ++ _}}} ->
diff --git a/lib/stdlib/test/qlc_SUITE.erl b/lib/stdlib/test/qlc_SUITE.erl
index 8f8a0f6e73..5c189a6c73 100644
--- a/lib/stdlib/test/qlc_SUITE.erl
+++ b/lib/stdlib/test/qlc_SUITE.erl
@@ -7468,7 +7468,7 @@ strip_qlc_call(H) ->
strip_qlc_call2(H) ->
S = qlc:info(H, {flat, false}),
{ok, Tokens, _EndLine} = erl_scan:string(S++".", 1, [text]),
- {ok, [Expr], Bs} = lib:extended_parse_exprs(Tokens),
+ {ok, [Expr], Bs} = erl_eval:extended_parse_exprs(Tokens),
{case Expr of
{call,_,{remote,_,{atom,_,qlc},{atom,_,q}},[LC]} ->
{qlc, lists:flatten([erl_pp:expr(LC), "."]), []};
@@ -7489,7 +7489,7 @@ strip_qlc_call2(H) ->
join_info_count(H) ->
S = qlc:info(H, {flat, false}),
{ok, Tokens, _EndLine} = erl_scan:string(S++".", 1, [text]),
- {ok, [Expr], _Bs} = lib:extended_parse_exprs(Tokens),
+ {ok, [Expr], _Bs} = erl_eval:extended_parse_exprs(Tokens),
#ji{nmerge = Nmerge, nlookup = Nlookup,
nkeysort = NKeysort, nnested_loop = Nnested_loop} =
ji(Expr, #ji{}),
@@ -7533,7 +7533,7 @@ lookup_keys({generate,_,Q}, L) ->
lookup_keys(Q, L);
lookup_keys({table,Chars}, L) when is_list(Chars) ->
{ok, Tokens, _} = erl_scan:string(lists:flatten(Chars++"."), 1, [text]),
- {ok, [Expr], _Bs} = lib:extended_parse_exprs(Tokens),
+ {ok, [Expr], _Bs} = erl_eval:extended_parse_exprs(Tokens),
case Expr of
{call,_,_,[_fun,AKs]} ->
case erl_parse:normalise(AKs) of
diff --git a/lib/stdlib/test/shell_SUITE.erl b/lib/stdlib/test/shell_SUITE.erl
index ca85314775..22136d687c 100644
--- a/lib/stdlib/test/shell_SUITE.erl
+++ b/lib/stdlib/test/shell_SUITE.erl
@@ -2780,7 +2780,7 @@ otp_10302(Config) when is_list(Config) ->
rpc:call(Node,shell, prompt_func, [default]),
_ = shell:prompt_func(default),
- %% Test lib:format_exception() (cf. OTP-6554)
+ %% Test erl_error:format_exception() (cf. OTP-6554)
Test6 =
<<"begin
A = <<\"\\xaa\">>,
@@ -2967,10 +2967,10 @@ otp_14296(Config) when is_list(Config) ->
R = t(S)
end(),
- %% Test lib:extended_parse_term/1
+ %% Test erl_eval:extended_parse_term/1
TF = fun(S) ->
{ok, Ts, _} = erl_scan:string(S++".", 1, [text]),
- case lib:extended_parse_term(Ts) of
+ case erl_eval:extended_parse_term(Ts) of
{ok, Term} -> Term;
{error, _}=Error -> Error
end
diff --git a/lib/stdlib/test/string_SUITE.erl b/lib/stdlib/test/string_SUITE.erl
index fdff2d24b8..ab412dcc70 100644
--- a/lib/stdlib/test/string_SUITE.erl
+++ b/lib/stdlib/test/string_SUITE.erl
@@ -409,8 +409,8 @@ uppercase(_) ->
?TEST("abc", [], "ABC"),
?TEST("ABC", [], "ABC"),
?TEST("abcdefghiljklmnopqrstvxyzåäö",[], "ABCDEFGHILJKLMNOPQRSTVXYZÅÄÖ"),
- ?TEST("åäö", [], "ÅÄÖ"),
- ?TEST("ÅÄÖ", [], "ÅÄÖ"),
+ ?TEST("åäö ", [], "ÅÄÖ "),
+ ?TEST("ÅÄÖ ", [], "ÅÄÖ "),
?TEST("Michał", [], "MICHAŁ"),
?TEST(["Mic",<<"hał"/utf8>>], [], "MICHAŁ"),
?TEST("ljLJ", [], "LJLJ"),
@@ -423,8 +423,8 @@ lowercase(_) ->
?TEST("123", [], "123"),
?TEST("abc", [], "abc"),
?TEST("ABC", [], "abc"),
- ?TEST("åäö", [], "åäö"),
- ?TEST("ÅÄÖ", [], "åäö"),
+ ?TEST("åäö ", [], "åäö "),
+ ?TEST("ÅÄÖ ", [], "åäö "),
?TEST("MICHAŁ", [], "michał"),
?TEST(["Mic",<<"HAŁ"/utf8>>], [], "michał"),
?TEST("ß SHARP S", [], "ß sharp s"),
@@ -449,8 +449,8 @@ casefold(_) ->
?TEST("123", [], "123"),
?TEST("abc", [], "abc"),
?TEST("ABC", [], "abc"),
- ?TEST("åäö", [], "åäö"),
- ?TEST("ÅÄÖ", [], "åäö"),
+ ?TEST("åäö ", [], "åäö "),
+ ?TEST("ÅÄÖ ", [], "åäö "),
?TEST("MICHAŁ", [], "michał"),
?TEST(["Mic",<<"HAŁ"/utf8>>], [], "michał"),
?TEST("ß SHARP S", [], "ss sharp s"),
@@ -810,6 +810,18 @@ do_measure(DataDir) ->
Do2(slice, repeat(fun() -> string:slice(S0, 20, 15) end), list),
Do2(slice, repeat(fun() -> string:slice(S0B, 20, 15) end), binary),
+ LCase = "areaa reare rerar earea reare reare",
+ LCaseB = unicode:characters_to_binary(LCase),
+ UCase = string:uppercase(LCase),
+ UCaseB = unicode:characters_to_binary(UCase),
+
+ Do2(to_upper_0, repeat(fun() -> string:to_upper(UCase) end), list),
+ Do2(uppercase_0, repeat(fun() -> string:uppercase(UCase) end), list),
+ Do2(uppercase_0, repeat(fun() -> string:uppercase(UCaseB) end), binary),
+ Do2(to_upper_a, repeat(fun() -> string:to_upper(LCase) end), list),
+ Do2(uppercase_a, repeat(fun() -> string:uppercase(LCase) end), list),
+ Do2(uppercase_a, repeat(fun() -> string:uppercase(LCaseB) end), binary),
+
io:format("--~n",[]),
NthTokens = {nth_lexemes, fun(Str) -> string:nth_lexeme(Str, 18000, [$\n,$\r]) end},
[Do(Name,Fun,Mode) || {Name,Fun} <- [NthTokens], Mode <- [list, binary]],
diff --git a/lib/syntax_tools/src/epp_dodger.erl b/lib/syntax_tools/src/epp_dodger.erl
index 0a12e8fd8b..7e741cc649 100644
--- a/lib/syntax_tools/src/epp_dodger.erl
+++ b/lib/syntax_tools/src/epp_dodger.erl
@@ -502,6 +502,10 @@ quickscan_form([{'-', _L}, {atom, La, ifdef} | _Ts]) ->
kill_form(La);
quickscan_form([{'-', _L}, {atom, La, ifndef} | _Ts]) ->
kill_form(La);
+quickscan_form([{'-', _L}, {'if', La} | _Ts]) ->
+ kill_form(La);
+quickscan_form([{'-', _L}, {atom, La, elif} | _Ts]) ->
+ kill_form(La);
quickscan_form([{'-', _L}, {atom, La, else} | _Ts]) ->
kill_form(La);
quickscan_form([{'-', _L}, {atom, La, endif} | _Ts]) ->
@@ -615,8 +619,13 @@ filter_form(T) ->
%% ---------------------------------------------------------------------
%% Normal parsing - try to preserve all information
-normal_parser(Ts, Opt) ->
- rewrite_form(parse_tokens(scan_form(Ts, Opt))).
+normal_parser(Ts0, Opt) ->
+ case scan_form(Ts0, Opt) of
+ Ts when is_list(Ts) ->
+ rewrite_form(parse_tokens(Ts));
+ Node ->
+ Node
+ end.
scan_form([{'-', _L}, {atom, La, define} | Ts], Opt) ->
[{atom, La, ?pp_form}, {'(', La}, {')', La}, {'->', La},
@@ -636,12 +645,26 @@ scan_form([{'-', _L}, {atom, La, ifdef} | Ts], Opt) ->
scan_form([{'-', _L}, {atom, La, ifndef} | Ts], Opt) ->
[{atom, La, ?pp_form}, {'(', La}, {')', La}, {'->', La},
{atom, La, ifndef} | scan_macros(Ts, Opt)];
+scan_form([{'-', _L}, {'if', La} | Ts], Opt) ->
+ [{atom, La, ?pp_form}, {'(', La}, {')', La}, {'->', La},
+ {atom, La, 'if'} | scan_macros(Ts, Opt)];
+scan_form([{'-', _L}, {atom, La, elif} | Ts], Opt) ->
+ [{atom, La, ?pp_form}, {'(', La}, {')', La}, {'->', La},
+ {atom, La, 'elif'} | scan_macros(Ts, Opt)];
scan_form([{'-', _L}, {atom, La, else} | Ts], Opt) ->
[{atom, La, ?pp_form}, {'(', La}, {')', La}, {'->', La},
{atom, La, else} | scan_macros(Ts, Opt)];
scan_form([{'-', _L}, {atom, La, endif} | Ts], Opt) ->
[{atom, La, ?pp_form}, {'(', La}, {')', La}, {'->', La},
{atom, La, endif} | scan_macros(Ts, Opt)];
+scan_form([{'-', _L}, {atom, La, error} | Ts], _Opt) ->
+ Desc = build_info_string("-error", Ts),
+ ErrorInfo = {La, ?MODULE, {error, Desc}},
+ erl_syntax:error_marker(ErrorInfo);
+scan_form([{'-', _L}, {atom, La, warning} | Ts], _Opt) ->
+ Desc = build_info_string("-warning", Ts),
+ ErrorInfo = {La, ?MODULE, {warning, Desc}},
+ erl_syntax:error_marker(ErrorInfo);
scan_form([{'-', L}, {'?', L1}, {Type, _, _}=N | [{'(', _} | _]=Ts], Opt)
when Type =:= atom; Type =:= var ->
%% minus, macro and open parenthesis at start of form - assume that
@@ -657,6 +680,11 @@ scan_form([{'?', L}, {Type, _, _}=N | [{'(', _} | _]=Ts], Opt)
scan_form(Ts, Opt) ->
scan_macros(Ts, Opt).
+build_info_string(Prefix, Ts0) ->
+ Ts = lists:droplast(Ts0),
+ String = lists:droplast(tokens_to_string(Ts)),
+ Prefix ++ " " ++ String ++ ".".
+
scan_macros(Ts, Opt) ->
scan_macros(Ts, [], Opt).
@@ -865,6 +893,10 @@ tokens_to_string([]) ->
format_error(macro_args) ->
errormsg("macro call missing end parenthesis");
+format_error({error, Error}) ->
+ Error;
+format_error({warning, Error}) ->
+ Error;
format_error({unknown, Reason}) ->
errormsg(io_lib:format("unknown error: ~tP", [Reason, 15])).
diff --git a/lib/syntax_tools/src/erl_prettypr.erl b/lib/syntax_tools/src/erl_prettypr.erl
index 60a15c8e3f..6906ef1553 100644
--- a/lib/syntax_tools/src/erl_prettypr.erl
+++ b/lib/syntax_tools/src/erl_prettypr.erl
@@ -675,7 +675,12 @@ lay_2(Node, Ctxt) ->
%% attribute name, without following parentheses.
Ctxt1 = reset_prec(Ctxt),
Args = erl_syntax:attribute_arguments(Node),
- N = erl_syntax:attribute_name(Node),
+ N = case erl_syntax:attribute_name(Node) of
+ {atom, _, 'if'} ->
+ erl_syntax:variable('if');
+ N0 ->
+ N0
+ end,
D = case attribute_type(Node) of
spec ->
[SpecTuple] = Args,
diff --git a/lib/syntax_tools/src/erl_syntax.erl b/lib/syntax_tools/src/erl_syntax.erl
index b816c0699c..029f1e88ac 100644
--- a/lib/syntax_tools/src/erl_syntax.erl
+++ b/lib/syntax_tools/src/erl_syntax.erl
@@ -5328,7 +5328,7 @@ revert_map_type_assoc(Node) ->
Pos = get_pos(Node),
Name = map_type_assoc_name(Node),
Value = map_type_assoc_value(Node),
- {type, Pos, map_type_assoc, [Name, Value]}.
+ {type, Pos, map_field_assoc, [Name, Value]}.
%% =====================================================================
@@ -5386,7 +5386,7 @@ revert_map_type_exact(Node) ->
Pos = get_pos(Node),
Name = map_type_exact_name(Node),
Value = map_type_exact_value(Node),
- {type, Pos, map_type_exact, [Name, Value]}.
+ {type, Pos, map_field_exact, [Name, Value]}.
%% =====================================================================
diff --git a/lib/syntax_tools/src/erl_syntax_lib.erl b/lib/syntax_tools/src/erl_syntax_lib.erl
index c7f477c4d2..ced0dba3e2 100644
--- a/lib/syntax_tools/src/erl_syntax_lib.erl
+++ b/lib/syntax_tools/src/erl_syntax_lib.erl
@@ -1317,6 +1317,8 @@ analyze_attribute(Node) ->
include_lib -> preprocessor;
ifdef -> preprocessor;
ifndef -> preprocessor;
+ 'if' -> preprocessor;
+ elif -> preprocessor;
else -> preprocessor;
endif -> preprocessor;
A ->
diff --git a/lib/syntax_tools/test/syntax_tools_SUITE.erl b/lib/syntax_tools/test/syntax_tools_SUITE.erl
index ae2c67c03e..c8e6448d37 100644
--- a/lib/syntax_tools/test/syntax_tools_SUITE.erl
+++ b/lib/syntax_tools/test/syntax_tools_SUITE.erl
@@ -24,13 +24,14 @@
%% Test cases
-export([app_test/1,appup_test/1,smoke_test/1,revert/1,revert_map/1,
+ revert_map_type/1,
t_abstract_type/1,t_erl_parse_type/1,t_epp_dodger/1,
t_comment_scan/1,t_igor/1,t_erl_tidy/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
- [app_test,appup_test,smoke_test,revert,revert_map,
+ [app_test,appup_test,smoke_test,revert,revert_map,revert_map_type,
t_abstract_type,t_erl_parse_type,t_epp_dodger,
t_comment_scan,t_igor,t_erl_tidy].
@@ -121,7 +122,26 @@ revert_map(Config) when is_list(Config) ->
{map_field_assoc,{atom,17,name},{var,18,'Value'}}}]),
?t:timetrap_cancel(Dog).
-
+%% Testing bug fix for reverting map_field_assoc in types
+revert_map_type(Config) when is_list(Config) ->
+ Dog = ?t:timetrap(?t:minutes(1)),
+ Form1 = {attribute,4,record,
+ {state,
+ [{typed_record_field,
+ {record_field,5,{atom,5,x}},
+ {type,5,map,
+ [{type,5,map_field_exact,[{atom,5,y},{atom,5,z}]}]}}]}},
+ Mapped1 = erl_syntax_lib:map(fun(X) -> X end, Form1),
+ Form1 = erl_syntax:revert(Mapped1),
+ Form2 = {attribute,4,record,
+ {state,
+ [{typed_record_field,
+ {record_field,5,{atom,5,x}},
+ {type,5,map,
+ [{type,5,map_field_assoc,[{atom,5,y},{atom,5,z}]}]}}]}},
+ Mapped2 = erl_syntax_lib:map(fun(X) -> X end, Form2),
+ Form2 = erl_syntax:revert(Mapped2),
+ ?t:timetrap_cancel(Dog).
%% api tests
diff --git a/lib/tools/doc/src/fprof.xml b/lib/tools/doc/src/fprof.xml
index 4c9e48045e..72624bd33b 100644
--- a/lib/tools/doc/src/fprof.xml
+++ b/lib/tools/doc/src/fprof.xml
@@ -328,10 +328,16 @@
purposes. This option is only
allowed with the <c>start</c> option.</item>
<tag><c>cpu_time</c>| <c>{cpu_time, bool()}</c></tag>
- <item>The options <c>cpu_time</c> or <c>{cpu_time, true></c>
+ <item>The options <c>cpu_time</c> or <c>{cpu_time, true}</c>
makes the timestamps in the trace be in CPU time instead
of wallclock time which is the default. This option is
- only allowed with the <c>start</c> option.</item>
+ only allowed with the <c>start</c> option.
+      <warning><p>Getting correct values out of <c>cpu_time</c> can be
+      difficult. The best way is to run with a single scheduler and
+      bind that scheduler to a specific CPU, for example
+      <c>erl +S 1 +sbt db</c>.</p>
+ </warning>
+ </item>
<tag><c>{procs, PidSpec}</c>| <c>{procs, [PidSpec]}</c></tag>
<item>Specifies which processes shall be traced. If
this option is not given, the calling process is
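To make the cpu_time advice above concrete, here is a minimal sketch of such a profiling run; the wrapper function is invented, and it assumes the node was started as erl +S 1 +sbt db so that the single scheduler is bound to one CPU:

    profile_with_cpu_time(Fun) ->
        %% cpu_time is only allowed together with the start option.
        ok = fprof:trace([start, cpu_time, {procs, self()}]),
        Result = Fun(),
        ok = fprof:trace(stop),
        ok = fprof:profile(),   % reads the default fprof.trace file
        ok = fprof:analyse(),   % writes to the default destination
        Result.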
diff --git a/lib/tools/emacs/erlang.el b/lib/tools/emacs/erlang.el
index fd51aca861..e08db0ea79 100644
--- a/lib/tools/emacs/erlang.el
+++ b/lib/tools/emacs/erlang.el
@@ -804,6 +804,7 @@ resulting regexp is surrounded by \\_< and \\_>."
"is_integer"
"is_list"
"is_map"
+ "is_map_key"
"is_number"
"is_pid"
"is_port"
diff --git a/lib/tools/src/lcnt.erl b/lib/tools/src/lcnt.erl
index d0152a4915..1db90c1d86 100644
--- a/lib/tools/src/lcnt.erl
+++ b/lib/tools/src/lcnt.erl
@@ -125,7 +125,7 @@
%% -------------------------------------------------------------------- %%
start() -> gen_server:start({local, ?MODULE}, ?MODULE, [], []).
-stop() -> gen_server:call(?MODULE, stop, infinity).
+stop() -> gen_server:stop(?MODULE, normal, infinity).
init([]) -> {ok, #state{ locks = [], duration = 0 } }.
start_internal() ->
@@ -442,9 +442,6 @@ handle_call({save, Filename}, _From, State) ->
{reply, {error, Error}, State}
end;
-handle_call(stop, _From, State) ->
- {stop, normal, ok, State};
-
handle_call(Command, _From, State) ->
{reply, {error, {undefined, Command}}, State}.
diff --git a/lib/tools/src/xref.erl b/lib/tools/src/xref.erl
index 32efa36fa2..466ec7d331 100644
--- a/lib/tools/src/xref.erl
+++ b/lib/tools/src/xref.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -182,7 +182,9 @@ split_args(Opts) ->
end.
stop(Name) ->
- gen_server:call(Name, stop, infinity).
+ try gen_server:call(Name, stop, infinity)
+ after catch unregister(Name) % ensure the name is gone
+ end.
add_release(Name, Dir) ->
gen_server:call(Name, {add_release, Dir}, infinity).
diff --git a/lib/tools/test/eprof_SUITE_data/eed.erl b/lib/tools/test/eprof_SUITE_data/eed.erl
index 5f2a21aa60..9fe49c6f5c 100644
--- a/lib/tools/test/eprof_SUITE_data/eed.erl
+++ b/lib/tools/test/eprof_SUITE_data/eed.erl
@@ -54,7 +54,7 @@ edit(Name) ->
loop(St0) ->
{ok, St1, Cmd} = get_line(St0),
- case catch command(lib:nonl(Cmd), St1) of
+ case catch command(nonl(Cmd), St1) of
{'EXIT', Reason} ->
%% XXX Should clear outstanding global command here.
loop(print_error({'EXIT', Reason}, St1));
@@ -66,6 +66,10 @@ loop(St0) ->
loop(St2)
end.
+nonl([$\n]) -> [];
+nonl([]) -> [];
+nonl([H|T]) -> [H|nonl(T)].
+
command(Cmd, St) ->
case parse_command(Cmd, St) of
quit ->
diff --git a/make/otp_release_targets.mk b/make/otp_release_targets.mk
index 779aaa1a1e..f57116569c 100644
--- a/make/otp_release_targets.mk
+++ b/make/otp_release_targets.mk
@@ -36,7 +36,9 @@ endif
# -------------------------------------------------------
# Take the XML files and add the github link info to them
# -------------------------------------------------------
+ifneq ($(strip $(XMLDIR)),)
_create_xml_dirs := $(shell mkdir -p $(XMLDIR))
+endif
XML_GEN_FILES+=$(patsubst %.xml,$(XMLDIR)/%.xml,$(XML_FILES))
$(XMLDIR)/%.xml: %.xml
diff --git a/otp_versions.table b/otp_versions.table
index f83d9e4f8b..22762f0691 100644
--- a/otp_versions.table
+++ b/otp_versions.table
@@ -1,3 +1,6 @@
+OTP-20.3.7 : erl_docgen-0.7.3 erts-9.3.2 inets-6.5.2 # asn1-5.0.5 common_test-1.15.4 compiler-7.1.5 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.2 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_interface-3.10.2 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4 jinterface-1.8.1 kernel-5.4.3 megaco-3.18.3 mnesia-4.15.3 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.10 ssh-4.6.9 ssl-8.2.6 stdlib-3.4.5 syntax_tools-2.1.4 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
+OTP-20.3.6 : crypto-4.2.2 ssh-4.6.9 # asn1-5.0.5 common_test-1.15.4 compiler-7.1.5 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_docgen-0.7.2 erl_interface-3.10.2 erts-9.3.1 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4 inets-6.5.1 jinterface-1.8.1 kernel-5.4.3 megaco-3.18.3 mnesia-4.15.3 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.10 ssl-8.2.6 stdlib-3.4.5 syntax_tools-2.1.4 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
+OTP-20.3.5 : erts-9.3.1 ssl-8.2.6 # asn1-5.0.5 common_test-1.15.4 compiler-7.1.5 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.1 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_docgen-0.7.2 erl_interface-3.10.2 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4 inets-6.5.1 jinterface-1.8.1 kernel-5.4.3 megaco-3.18.3 mnesia-4.15.3 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.10 ssh-4.6.8 stdlib-3.4.5 syntax_tools-2.1.4 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
OTP-20.3.4 : erl_interface-3.10.2 ic-4.4.4 inets-6.5.1 ssh-4.6.8 # asn1-5.0.5 common_test-1.15.4 compiler-7.1.5 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.1 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_docgen-0.7.2 erts-9.3 et-1.6.1 eunit-2.3.5 hipe-3.17.1 jinterface-1.8.1 kernel-5.4.3 megaco-3.18.3 mnesia-4.15.3 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.10 ssl-8.2.5 stdlib-3.4.5 syntax_tools-2.1.4 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
OTP-20.3.3 : sasl-3.1.2 # asn1-5.0.5 common_test-1.15.4 compiler-7.1.5 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.1 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_docgen-0.7.2 erl_interface-3.10.1 erts-9.3 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.3 inets-6.5 jinterface-1.8.1 kernel-5.4.3 megaco-3.18.3 mnesia-4.15.3 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 snmp-5.2.10 ssh-4.6.7 ssl-8.2.5 stdlib-3.4.5 syntax_tools-2.1.4 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
OTP-20.3.2 : ssh-4.6.7 stdlib-3.4.5 # asn1-5.0.5 common_test-1.15.4 compiler-7.1.5 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.1 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_docgen-0.7.2 erl_interface-3.10.1 erts-9.3 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.3 inets-6.5 jinterface-1.8.1 kernel-5.4.3 megaco-3.18.3 mnesia-4.15.3 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.1 snmp-5.2.10 ssl-8.2.5 syntax_tools-2.1.4 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 :
@@ -23,6 +26,7 @@ OTP-20.0.3 : asn1-5.0.2 compiler-7.1.1 erts-9.0.3 ssh-4.5.1 # common_test-1.15.1
OTP-20.0.2 : asn1-5.0.1 erts-9.0.2 kernel-5.3.1 # common_test-1.15.1 compiler-7.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.2 cosTime-1.2.2 cosTransactions-1.3.2 crypto-4.0 debugger-4.2.2 dialyzer-3.2 diameter-2.0 edoc-0.9 eldap-1.2.2 erl_docgen-0.7 erl_interface-3.10 et-1.6 eunit-2.3.3 hipe-3.16 ic-4.4.2 inets-6.4 jinterface-1.8 megaco-3.18.2 mnesia-4.15 observer-2.4 odbc-2.12 orber-3.8.3 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.5 public_key-1.4.1 reltool-0.7.4 runtime_tools-1.12.1 sasl-3.0.4 snmp-5.2.6 ssh-4.5 ssl-8.2 stdlib-3.4.1 syntax_tools-2.1.2 tools-2.10.1 wx-1.8.1 xmerl-1.3.15 :
OTP-20.0.1 : common_test-1.15.1 erts-9.0.1 runtime_tools-1.12.1 stdlib-3.4.1 tools-2.10.1 # asn1-5.0 compiler-7.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.2 cosTime-1.2.2 cosTransactions-1.3.2 crypto-4.0 debugger-4.2.2 dialyzer-3.2 diameter-2.0 edoc-0.9 eldap-1.2.2 erl_docgen-0.7 erl_interface-3.10 et-1.6 eunit-2.3.3 hipe-3.16 ic-4.4.2 inets-6.4 jinterface-1.8 kernel-5.3 megaco-3.18.2 mnesia-4.15 observer-2.4 odbc-2.12 orber-3.8.3 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.5 public_key-1.4.1 reltool-0.7.4 sasl-3.0.4 snmp-5.2.6 ssh-4.5 ssl-8.2 syntax_tools-2.1.2 wx-1.8.1 xmerl-1.3.15 :
OTP-20.0 : asn1-5.0 common_test-1.15 compiler-7.1 cosProperty-1.2.2 crypto-4.0 debugger-4.2.2 dialyzer-3.2 diameter-2.0 edoc-0.9 erl_docgen-0.7 erl_interface-3.10 erts-9.0 eunit-2.3.3 hipe-3.16 inets-6.4 jinterface-1.8 kernel-5.3 megaco-3.18.2 mnesia-4.15 observer-2.4 orber-3.8.3 parsetools-2.1.5 public_key-1.4.1 reltool-0.7.4 runtime_tools-1.12 sasl-3.0.4 snmp-5.2.6 ssh-4.5 ssl-8.2 stdlib-3.4 syntax_tools-2.1.2 tools-2.10 wx-1.8.1 xmerl-1.3.15 # cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosTime-1.2.2 cosTransactions-1.3.2 eldap-1.2.2 et-1.6 ic-4.4.2 odbc-2.12 os_mon-2.4.2 otp_mibs-1.1.1 :
+OTP-19.3.6.9 : ssh-4.4.2.4 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 eldap-1.2.2 erl_docgen-0.6.1 erl_interface-3.9.3 erts-8.3.5.4 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 kernel-5.2.0.1 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 ssl-8.1.3.1.1 stdlib-3.3 syntax_tools-2.1.1 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 :
OTP-19.3.6.8 : ssh-4.4.2.3 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 eldap-1.2.2 erl_docgen-0.6.1 erl_interface-3.9.3 erts-8.3.5.4 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 kernel-5.2.0.1 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 ssl-8.1.3.1.1 stdlib-3.3 syntax_tools-2.1.1 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 :
OTP-19.3.6.7 : kernel-5.2.0.1 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 eldap-1.2.2 erl_docgen-0.6.1 erl_interface-3.9.3 erts-8.3.5.4 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 ssh-4.4.2.2 ssl-8.1.3.1.1 stdlib-3.3 syntax_tools-2.1.1 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 :
OTP-19.3.6.6 : ssh-4.4.2.2 ssl-8.1.3.1.1 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 eldap-1.2.2 erl_docgen-0.6.1 erl_interface-3.9.3 erts-8.3.5.4 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 kernel-5.2 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 stdlib-3.3 syntax_tools-2.1.1 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 :
diff --git a/scripts/build-otp b/scripts/build-otp
index 92031c79c8..ad0eb07359 100755
--- a/scripts/build-otp
+++ b/scripts/build-otp
@@ -40,4 +40,8 @@ do_and_log "Autoconfing" autoconf
do_and_log "Configuring" configure
do_and_log "Building OTP" boot -a
+if [ "$1" = "release" ]; then
+ do_and_log "Releasing OTP" release -a
+fi
+
exit 0
diff --git a/scripts/diffable b/scripts/diffable
index f22194e99f..08d2d5cb35 100755
--- a/scripts/diffable
+++ b/scripts/diffable
@@ -60,6 +60,7 @@ do_compile(OutDir, Opts0) ->
"asn1",
"stdlib",
"kernel",
+ "hipe",
"reltool",
"runtime_tools",
"xmerl",
@@ -129,6 +130,10 @@ add_opts([], _Opts) ->
get_src(["preloaded"|Apps]) ->
WC = filename:join(code:root_dir(), "erts/preloaded/src/*.erl"),
filelib:wildcard(WC) ++ get_src(Apps);
+get_src(["hipe"|Apps]) ->
+ LibDir = code:lib_dir(hipe),
+ WC = filename:join(LibDir, "*/*.erl"),
+ filelib:wildcard(WC) ++ get_src(Apps);
get_src(["inets"|Apps]) ->
LibDir = code:lib_dir(inets),
WC = filename:join(LibDir, "src/*/*.erl"),
diff --git a/system/doc/design_principles/gen_server_concepts.xml b/system/doc/design_principles/gen_server_concepts.xml
index 3a1f876646..06413a3d93 100644
--- a/system/doc/design_principles/gen_server_concepts.xml
+++ b/system/doc/design_principles/gen_server_concepts.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>1997</year><year>2016</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
diff --git a/system/doc/design_principles/release_structure.xml b/system/doc/design_principles/release_structure.xml
index 8e62ba845f..e8dfcad805 100644
--- a/system/doc/design_principles/release_structure.xml
+++ b/system/doc/design_principles/release_structure.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2003</year><year>2016</year>
+ <year>2003</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
diff --git a/system/doc/design_principles/spec_proc.xml b/system/doc/design_principles/spec_proc.xml
index f910c3dba3..65f5492bdd 100644
--- a/system/doc/design_principles/spec_proc.xml
+++ b/system/doc/design_principles/spec_proc.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>1997</year><year>2017</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
diff --git a/system/doc/efficiency_guide/Makefile b/system/doc/efficiency_guide/Makefile
index b1630a36e1..f6ad638853 100644
--- a/system/doc/efficiency_guide/Makefile
+++ b/system/doc/efficiency_guide/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2001-2016. All Rights Reserved.
+# Copyright Ericsson AB 2001-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/efficiency_guide/advanced.xml b/system/doc/efficiency_guide/advanced.xml
index 21d4a66d77..fe77ce8ea4 100644
--- a/system/doc/efficiency_guide/advanced.xml
+++ b/system/doc/efficiency_guide/advanced.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2001</year><year>2016</year>
+ <year>2001</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -188,11 +188,6 @@
limit can be raised or lowered using the <c>+t</c> option.</cell>
</row>
<row>
- <cell>Ets tables</cell>
- <cell>Default is 1400. It can be changed with the environment
- variable <c>ERL_MAX_ETS_TABLES</c>.</cell>
- </row>
- <row>
<cell>Elements in a tuple</cell>
<cell>The maximum number of elements in a tuple is 16,777,215
(24-bit unsigned integer).</cell>
diff --git a/system/doc/efficiency_guide/binaryhandling.xml b/system/doc/efficiency_guide/binaryhandling.xml
index 19f40c9abe..b500329ef9 100644
--- a/system/doc/efficiency_guide/binaryhandling.xml
+++ b/system/doc/efficiency_guide/binaryhandling.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>2007</year>
- <year>2017</year>
+ <year>2018</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -357,25 +357,8 @@ all_but_zeroes_to_list(<<Byte,T/binary>>, Acc, Remaining) ->
<c>Buffer</c> from a match context to a sub binary (or do nothing if
<c>Buffer</c> is a binary already).</p>
- <p>Before you begin to think that the compiler can optimize any binary
- patterns, the following function cannot be optimized by the compiler
- (currently, at least):</p>
-
- <code type="erl"><![CDATA[
-non_opt_eq([H|T1], <<H,T2/binary>>) ->
- non_opt_eq(T1, T2);
-non_opt_eq([_|_], <<_,_/binary>>) ->
- false;
-non_opt_eq([], <<>>) ->
- true.]]></code>
-
- <p>It was mentioned earlier that the compiler can only delay creation of
- sub binaries if it knows that the binary will not be shared. In this case,
- the compiler cannot know.</p>
-
- <p>Soon it is shown how to rewrite <c>non_opt_eq/2</c> so that the delayed
- sub binary optimization can be applied, and more importantly, it is shown
- how you can find out whether your code can be optimized.</p>
+ <p>But in more complicated code, how can one know whether the
+ optimization is applied or not?</p>
<section>
<marker id="bin_opt_info"></marker>
@@ -422,67 +405,6 @@ after_zero(<<>>) ->
binary cannot be delayed, because it will be returned.
The warning for the second clause says that a sub binary will not be
created (yet).</p>
-
- <p>Let us revisit the earlier example of the code that could not
- be optimized and find out why:</p>
-
- <code type="erl"><![CDATA[
-non_opt_eq([H|T1], <<H,T2/binary>>) ->
- %% INFO: matching anything else but a plain variable to
- %% the left of binary pattern will prevent delayed
- %% sub binary optimization;
- %% SUGGEST changing argument order
- %% NOT OPTIMIZED: called function non_opt_eq/2 does not
- %% begin with a suitable binary matching instruction
- non_opt_eq(T1, T2);
-non_opt_eq([_|_], <<_,_/binary>>) ->
- false;
-non_opt_eq([], <<>>) ->
- true.]]></code>
-
- <p>The compiler emitted two warnings. The <c>INFO</c> warning refers
- to the function <c>non_opt_eq/2</c> as a callee, indicating that any
- function that call <c>non_opt_eq/2</c> cannot make delayed sub binary
- optimization. There is also a suggestion to change argument order.
- The second warning (that happens to refer to the same line) refers to
- the construction of the sub binary itself.</p>
-
- <p>Soon another example will show the difference between the
- <c>INFO</c> and <c>NOT OPTIMIZED</c> warnings somewhat clearer, but
- let us first follow the suggestion to change argument order:</p>
-
- <code type="erl"><![CDATA[
-opt_eq(<<H,T1/binary>>, [H|T2]) ->
- %% OPTIMIZED: creation of sub binary delayed
- opt_eq(T1, T2);
-opt_eq(<<_,_/binary>>, [_|_]) ->
- false;
-opt_eq(<<>>, []) ->
- true.]]></code>
-
- <p>The compiler gives a warning for the following code fragment:</p>
-
- <code type="erl"><![CDATA[
-match_body([0|_], <<H,_/binary>>) ->
- %% INFO: matching anything else but a plain variable to
- %% the left of binary pattern will prevent delayed
- %% sub binary optimization;
- %% SUGGEST changing argument order
- done;
-...]]></code>
-
- <p>The warning means that <em>if</em> there is a call to <c>match_body/2</c>
- (from another clause in <c>match_body/2</c> or another function), the
- delayed sub binary optimization will not be possible. More warnings will
- occur for any place where a sub binary is matched out at the end of and
- passed as the second argument to <c>match_body/2</c>, for example:</p>
-
- <code type="erl"><![CDATA[
-match_head(List, <<_:10,Data/binary>>) ->
- %% NOT OPTIMIZED: called function match_body/2 does not
- %% begin with a suitable binary matching instruction
- match_body(List, Data).]]></code>
-
</section>
<section>
diff --git a/system/doc/efficiency_guide/efficiency_guide.erl b/system/doc/efficiency_guide/efficiency_guide.erl
index e982bdae65..c57785aaa3 100644
--- a/system/doc/efficiency_guide/efficiency_guide.erl
+++ b/system/doc/efficiency_guide/efficiency_guide.erl
@@ -1,5 +1,5 @@
-module(efficiency_guide).
--compile(export_all).
+-compile([export_all,nowarn_export_all]).
%% DO NOT
naive_reverse([H|T]) ->
@@ -71,28 +71,6 @@ all_but_zeroes_to_list(<<0,T/binary>>, Acc, Remaining) ->
all_but_zeroes_to_list(<<Byte,T/binary>>, Acc, Remaining) ->
all_but_zeroes_to_list(T, [Byte|Acc], Remaining-1).
-non_opt_eq([H|T1], <<H,T2/binary>>) ->
- non_opt_eq(T1, T2);
-non_opt_eq([_|_], <<_,_/binary>>) ->
- false;
-non_opt_eq([], <<>>) ->
- true.
-
-opt_eq(<<H,T1/binary>>, [H|T2]) ->
- opt_eq(T1, T2);
-opt_eq(<<_,_/binary>>, [_|_]) ->
- false;
-opt_eq(<<>>, []) ->
- true.
-
-match_head(List, <<_:10,Data/binary>>) ->
- match_body(List, Data).
-
-match_body([0|_], <<H,_/binary>>) ->
- done;
-match_body([H|T1], <<H,T2/binary>>) ->
- {T1,T2}.
-
count1(<<_,T/binary>>, Count) -> count1(T, Count+1);
count1(<<>>, Count) -> Count.
diff --git a/system/doc/efficiency_guide/profiling.xml b/system/doc/efficiency_guide/profiling.xml
index f185456158..cdc80289cf 100644
--- a/system/doc/efficiency_guide/profiling.xml
+++ b/system/doc/efficiency_guide/profiling.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2001</year><year>2016</year>
+ <year>2001</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
diff --git a/system/doc/efficiency_guide/xmlfiles.mk b/system/doc/efficiency_guide/xmlfiles.mk
index 23c0d991b4..e275823dd1 100644
--- a/system/doc/efficiency_guide/xmlfiles.mk
+++ b/system/doc/efficiency_guide/xmlfiles.mk
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2009-2016. All Rights Reserved.
+# Copyright Ericsson AB 2009-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/embedded/Makefile b/system/doc/embedded/Makefile
index 23d3168e34..2b09c5b852 100644
--- a/system/doc/embedded/Makefile
+++ b/system/doc/embedded/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/getting_started/Makefile b/system/doc/getting_started/Makefile
index 13d767daf5..7b90fe1337 100644
--- a/system/doc/getting_started/Makefile
+++ b/system/doc/getting_started/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2016. All Rights Reserved.
+# Copyright Ericsson AB 1996-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/getting_started/conc_prog.xml b/system/doc/getting_started/conc_prog.xml
index dc378dd582..4374a59e04 100644
--- a/system/doc/getting_started/conc_prog.xml
+++ b/system/doc/getting_started/conc_prog.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2003</year><year>2017</year>
+ <year>2003</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
diff --git a/system/doc/installation_guide/Makefile b/system/doc/installation_guide/Makefile
index 002c2a536a..91e7cb2772 100644
--- a/system/doc/installation_guide/Makefile
+++ b/system/doc/installation_guide/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2016. All Rights Reserved.
+# Copyright Ericsson AB 1996-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/installation_guide/xmlfiles.mk b/system/doc/installation_guide/xmlfiles.mk
index 37fbeca96b..f005c8404b 100644
--- a/system/doc/installation_guide/xmlfiles.mk
+++ b/system/doc/installation_guide/xmlfiles.mk
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2009-2016. All Rights Reserved.
+# Copyright Ericsson AB 2009-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/oam/Makefile b/system/doc/oam/Makefile
index dfebc6aca0..b09ae1aed2 100644
--- a/system/doc/oam/Makefile
+++ b/system/doc/oam/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/programming_examples/Makefile b/system/doc/programming_examples/Makefile
index af731f85b4..2d04e8b5e2 100644
--- a/system/doc/programming_examples/Makefile
+++ b/system/doc/programming_examples/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2003-2016. All Rights Reserved.
+# Copyright Ericsson AB 2003-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/programming_examples/xmlfiles.mk b/system/doc/programming_examples/xmlfiles.mk
index 20b08d8cd3..e457ca0cce 100644
--- a/system/doc/programming_examples/xmlfiles.mk
+++ b/system/doc/programming_examples/xmlfiles.mk
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2009-2016. All Rights Reserved.
+# Copyright Ericsson AB 2009-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/reference_manual/Makefile b/system/doc/reference_manual/Makefile
index 75c15e4b5f..02a7f002ed 100644
--- a/system/doc/reference_manual/Makefile
+++ b/system/doc/reference_manual/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2003-2016. All Rights Reserved.
+# Copyright Ericsson AB 2003-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/reference_manual/introduction.xml b/system/doc/reference_manual/introduction.xml
index afbdfa7434..c9f5fcb2db 100644
--- a/system/doc/reference_manual/introduction.xml
+++ b/system/doc/reference_manual/introduction.xml
@@ -48,7 +48,7 @@
System Principles</seealso></p>
<p>Starting and stopping, boot scripts, code loading,
<seealso marker="doc/system_principles:error_logging">
- error logging</seealso>,
+ logging</seealso>,
<seealso marker="doc/system_principles:create_target">
creating target systems</seealso></p>
</item>
diff --git a/system/doc/reference_manual/macros.xml b/system/doc/reference_manual/macros.xml
index a341307ab7..8a8d5f3a4c 100644
--- a/system/doc/reference_manual/macros.xml
+++ b/system/doc/reference_manual/macros.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2003</year><year>2017</year>
+ <year>2003</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -150,6 +150,11 @@ bar(X) ->
<item>The name of the current function.</item>
<tag><c>?FUNCTION_ARITY</c></tag>
<item>The arity (number of arguments) for the current function.</item>
+ <tag><c>?OTP_RELEASE</c></tag>
+ <item>The OTP release that the currently executing ERTS
+ application is part of, as an integer. For details, see
+ <seealso marker="erts:erlang#system_info/1"><c>erlang:system_info(otp_release)</c></seealso>.
+ This macro was introduced in OTP release 21; a small usage sketch follows this list.</item>
</taglist>
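 <p>A minimal sketch of how these predefined macros expand, assuming a
 hypothetical module <c>macro_demo</c> compiled on OTP 21 or later
 (where <c>?OTP_RELEASE</c> is defined):</p>
 <code type="erl"><![CDATA[
-module(macro_demo).
-export([info/0]).

info() ->
    %% Expands at compile time to, for example, {info,0,21} on OTP 21.
    {?FUNCTION_NAME, ?FUNCTION_ARITY, ?OTP_RELEASE}.]]></code>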
</section>
@@ -202,8 +207,16 @@ f() ->
directive. If that condition is false, the lines following
<c>else</c> are evaluated instead.</item>
<tag><c>-endif.</c></tag>
- <item>Specifies the end of an <c>ifdef</c> or <c>ifndef</c>
- directive.</item>
+ <item>Specifies the end of an <c>ifdef</c>, <c>ifndef</c>,
+ <c>if</c>, or <c>elif</c> directive.</item>
+ <tag><c>-if(Condition).</c></tag>
+ <item>Evaluates the following lines only if <c>Condition</c>
+ evaluates to true.</item>
+ <tag><c>-elif(Condition).</c></tag>
+ <item>Only allowed after an <c>if</c> or another <c>elif</c> directive.
+ If the preceding <c>if</c> or <c>elif</c> directives do not
+ evaluate to true, and the <c>Condition</c> evaluates to true,
+ the lines following the <c>elif</c> are evaluated instead.</item>
</taglist>
<note>
<p>The macro directives cannot be used inside functions.</p>
@@ -231,6 +244,24 @@ or
{ok,m}</pre>
<p><c>?LOG(Arg)</c> is then expanded to a call to <c>io:format/2</c>
and provides the user with some simple trace output.</p>
+
+ <p><em>Example:</em></p>
+ <code type="none">
+-module(m).
+...
+-ifdef(OTP_RELEASE).
+ %% OTP 21 or higher
+ -if(?OTP_RELEASE >= 22).
+ %% Code that will work in OTP 22 or higher
+ -elif(?OTP_RELEASE >= 21).
+ %% Code that will work in OTP 21 or higher
+ -endif.
+-else.
+ %% OTP 20 or lower.
+-endif.
+...</code>
+ <p>The code uses the <c>OTP_RELEASE</c> macro to conditionally
+ select code depending on the OTP release.</p>
</section>
<section>
diff --git a/system/doc/reference_manual/modules.xml b/system/doc/reference_manual/modules.xml
index 7dc71eb307..6f93198ec1 100644
--- a/system/doc/reference_manual/modules.xml
+++ b/system/doc/reference_manual/modules.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2003</year><year>2017</year>
+ <year>2003</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
diff --git a/system/doc/reference_manual/xmlfiles.mk b/system/doc/reference_manual/xmlfiles.mk
index fffcbdd911..92d232b628 100644
--- a/system/doc/reference_manual/xmlfiles.mk
+++ b/system/doc/reference_manual/xmlfiles.mk
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2009-2016. All Rights Reserved.
+# Copyright Ericsson AB 2009-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/system_architecture_intro/Makefile b/system/doc/system_architecture_intro/Makefile
index 7a10f305ba..ebfcc3a1c8 100644
--- a/system/doc/system_architecture_intro/Makefile
+++ b/system/doc/system_architecture_intro/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/system_architecture_intro/sys_arch_intro.xml b/system/doc/system_architecture_intro/sys_arch_intro.xml
index bf7659765f..e8ada6427b 100644
--- a/system/doc/system_architecture_intro/sys_arch_intro.xml
+++ b/system/doc/system_architecture_intro/sys_arch_intro.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2000</year><year>2016</year>
+ <year>2000</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
diff --git a/system/doc/system_principles/Makefile b/system/doc/system_principles/Makefile
index ec6591ec6b..bb74125f3a 100644
--- a/system/doc/system_principles/Makefile
+++ b/system/doc/system_principles/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2016. All Rights Reserved.
+# Copyright Ericsson AB 1996-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/system_principles/create_target.xmlsrc b/system/doc/system_principles/create_target.xmlsrc
index dc6cbbe980..47b84e5760 100644
--- a/system/doc/system_principles/create_target.xmlsrc
+++ b/system/doc/system_principles/create_target.xmlsrc
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2002</year><year>2016</year>
+ <year>2002</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
diff --git a/system/doc/system_principles/error_logging.xml b/system/doc/system_principles/error_logging.xml
index c99d59ddb7..9d95ce8f3b 100644
--- a/system/doc/system_principles/error_logging.xml
+++ b/system/doc/system_principles/error_logging.xml
@@ -39,80 +39,69 @@
<code type="none"><![CDATA[
=ERROR REPORT==== 9-Dec-2003::13:25:02 ===
Error in process <0.27.0> with exit value: {{badmatch,[1,2,3]},[{m,f,1},{shell,eval_loop,2}]}]]></code>
- <p>The error information is handled by the <em>error logger</em>, a
- system process registered as <c>error_logger</c>. This process
- receives all error messages from the Erlang runtime system as
- well as from the standard behaviours and different Erlang/OTP
- applications.</p>
+ <p>The error information is handled by Logger, which is part of
+ the Kernel application.</p>
<p>The exit reasons (such as <c>badarg</c>) used by
the runtime system are described in
<seealso marker="doc/reference_manual:errors#exit_reasons">
Errors and Error Handling</seealso>.</p>
- <p>For information about the process <c>error_logger</c> and its user
- interface (with the same name), see the
- <seealso marker="kernel:error_logger">error_logger(3)</seealso>
- manual page in Kernel. The system can be configured so that
- error information
- is written to file or to tty, or both. In addition, user-defined
- applications can send and format error information using
- <c>error_logger</c>.</p>
+ <p>For information about Logger and its user
+ interface, see the
+ <seealso marker="kernel:logger">logger(3)</seealso>
+ manual page and
+ the <seealso marker="kernel:logger_chapter">Logging</seealso>
+ section in the Kernel User's Guide. The system can be configured so that
+ log events are
+ written to file or to tty, or both. In addition, user-defined
+ applications can send and format log events using
+ Logger.</p>
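 <p>A minimal sketch of sending a log event from a user-defined
 application, assuming a hypothetical module <c>log_demo</c>:</p>
 <code type="erl"><![CDATA[
-module(log_demo).
-export([report_timeout/1]).

%% Provides the ?LOG_* macros.
-include_lib("kernel/include/logger.hrl").

report_timeout(Node) ->
    %% The event is formatted and written by the default log handler
    %% that is set up when the Kernel application starts.
    ?LOG_ERROR("no reply from node ~p", [Node]),
    %% The plain function API can be used as well:
    logger:error("no reply from node ~p", [Node]).]]></code>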
</section>
<section>
- <title>SASL Error Logging</title>
+ <title>Log Events from OTP Behaviours</title>
<p>The standard behaviours (<c>supervisor</c>, <c>gen_server</c>,
- and so on) send progress and error information to <c>error_logger</c>.
- If the SASL application is started, this information is
- written to tty as well. For more information, see
+ and so on) send progress and error information to
+ Logger. Progress reports are by default not logged, but can be
+ enabled by setting the Kernel configuration
+ parameter <c>logger_progress_reports</c> to <c>log</c>. Supervisor
+ reports, crash reports and other error and information reports
+ are by default logged through the log handler which is
+ set up when the Kernel application is started.</p>
+ <p>Prior to Erlang/OTP 21.0, supervisor, crash, and progress
+ reports were only logged when the SASL application was
+ running. This behaviour can, for backwards compatibility, be
+ enabled by setting the Kernel configuration
+ parameter <c>logger_sasl_compatible</c> to <c>true</c>. For more
+ information, see
<seealso marker="sasl:error_logging">SASL Error Logging</seealso>
in the SASL User's Guide.</p>
<pre>
-% <input>erl -boot start_sasl</input>
-Erlang (BEAM) emulator version 5.4.13 [hipe] [threads:0] [kernel-poll]
+% <input>erl -kernel logger_progress_reports log</input>
+Erlang/OTP 21 [erts-10.0] [source-76388a1] [64-bit] [smp:4:4] [ds:4:4:10] [async-threads:1] [hipe]
-
-=PROGRESS REPORT==== 31-Mar-2006::12:45:58 ===
- supervisor: {local,sasl_safe_sup}
- started: [{pid,&lt;0.33.0>},
- {name,alarm_handler},
- {mfa,{alarm_handler,start_link,[]}},
- {restart_type,permanent},
- {shutdown,2000},
- {child_type,worker}]
-
-=PROGRESS REPORT==== 31-Mar-2006::12:45:58 ===
- supervisor: {local,sasl_safe_sup}
- started: [{pid,&lt;0.34.0>},
- {name,overload},
- {mfa,{overload,start_link,[]}},
- {restart_type,permanent},
- {shutdown,2000},
- {child_type,worker}]
-
-=PROGRESS REPORT==== 31-Mar-2006::12:45:58 ===
- supervisor: {local,sasl_sup}
- started: [{pid,&lt;0.32.0>},
- {name,sasl_safe_sup},
- {mfa,{supervisor,
- start_link,
- [{local,sasl_safe_sup},sasl,safe]}},
- {restart_type,permanent},
- {shutdown,infinity},
- {child_type,supervisor}]
-
-=PROGRESS REPORT==== 31-Mar-2006::12:45:58 ===
- supervisor: {local,sasl_sup}
- started: [{pid,&lt;0.35.0>},
- {name,release_handler},
- {mfa,{release_handler,start_link,[]}},
- {restart_type,permanent},
- {shutdown,2000},
- {child_type,worker}]
-
-=PROGRESS REPORT==== 31-Mar-2006::12:45:58 ===
- application: sasl
- started_at: nonode@nohost
-Eshell V5.4.13 (abort with ^G)
+=PROGRESS REPORT==== 18-May-2018::21:33:41.705292 ===
+ application: kernel
+ started_at: nonode@nohost
+=PROGRESS REPORT==== 18-May-2018::21:33:41.708900 ===
+ application: stdlib
+ started_at: nonode@nohost
+=PROGRESS REPORT==== 18-May-2018::21:33:41.726003 ===
+ supervisor: {local,kernel_safe_sup}
+ started: [{pid,&lt;0.75.0>},
+ {id,disk_log_sup},
+ {mfargs,{disk_log_sup,start_link,[]}},
+ {restart_type,permanent},
+ {shutdown,1000},
+ {child_type,supervisor}]
+=PROGRESS REPORT==== 18-May-2018::21:33:41.726348 ===
+ supervisor: {local,kernel_safe_sup}
+ started: [{pid,&lt;0.76.0>},
+ {id,disk_log_server},
+ {mfargs,{disk_log_server,start_link,[]}},
+ {restart_type,permanent},
+ {shutdown,2000},
+ {child_type,worker}]
+Eshell V9.3.1 (abort with ^G)
1> </pre>
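 <p>A sketch of setting the same Kernel parameters in a
 <c>sys.config</c> file instead of on the command line (the values
 shown are examples):</p>
 <code type="none"><![CDATA[
%% sys.config (example values)
[{kernel,
  [{logger_progress_reports, log},     %% also log progress reports
   {logger_sasl_compatible, false}]}]. %% keep the default report handling]]></code>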
</section>
</chapter>
diff --git a/system/doc/system_principles/system_principles.xml b/system/doc/system_principles/system_principles.xml
index 3605df3ade..b3981f0735 100644
--- a/system/doc/system_principles/system_principles.xml
+++ b/system/doc/system_principles/system_principles.xml
@@ -93,7 +93,7 @@ init:stop()</pre>
{progress,kernel_load_completed}
{progress,modules_loaded}
{start,heart}
-{start,error_logger}
+{start,logger}
...</pre>
<p>For a detailed description of the syntax and contents of the
boot script, see the <c>script(4)</c> manual page in SASL.</p>
diff --git a/system/doc/system_principles/xmlfiles.mk b/system/doc/system_principles/xmlfiles.mk
index f8972b24a7..77d6747414 100644
--- a/system/doc/system_principles/xmlfiles.mk
+++ b/system/doc/system_principles/xmlfiles.mk
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2009-2016. All Rights Reserved.
+# Copyright Ericsson AB 2009-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/top/Makefile b/system/doc/top/Makefile
index 73c943caa1..0703b821f1 100644
--- a/system/doc/top/Makefile
+++ b/system/doc/top/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1999-2016. All Rights Reserved.
+# Copyright Ericsson AB 1999-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/top/book.xml b/system/doc/top/book.xml
index 540b6bfd24..61e75591ef 100644
--- a/system/doc/top/book.xml
+++ b/system/doc/top/book.xml
@@ -4,7 +4,7 @@
<book xmlns:xi="http://www.w3.org/2001/XInclude">
<header titlestyle="normal">
<copyright>
- <year>1997</year><year>2016</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
diff --git a/system/doc/tutorial/Makefile b/system/doc/tutorial/Makefile
index 606064da72..70aba663b5 100644
--- a/system/doc/tutorial/Makefile
+++ b/system/doc/tutorial/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2000-2016. All Rights Reserved.
+# Copyright Ericsson AB 2000-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/system/doc/tutorial/xmlfiles.mk b/system/doc/tutorial/xmlfiles.mk
index 53f82c6475..74e174f6d4 100644
--- a/system/doc/tutorial/xmlfiles.mk
+++ b/system/doc/tutorial/xmlfiles.mk
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2009-2016. All Rights Reserved.
+# Copyright Ericsson AB 2009-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.